// fal-client.ts
import * as fal from "@fal-ai/serverless-client";
import { getTranslations } from 'next-intl/server';

// Type definitions
interface FalImageResult {
  url: string;
  width?: number;
  height?: number;
  content_type?: string;
}

// Supported aspect ratios
type AspectRatio = "21:9" | "16:9" | "4:3" | "3:2" | "1:1" | "2:3" | "3:4" | "9:16" | "9:21";

interface FalResponse {
  images?: FalImageResult[];
  image?: FalImageResult;
  data?: {
    images?: FalImageResult[];
    image?: FalImageResult;
  };
  timings?: any;
  seed?: number;
  has_nsfw_concepts?: boolean[];
  prompt?: string;
  requestId?: string;
  logs?: any[];
}
  26. // 翻译函数
  27. async function getErrorMessage(key: string, locale?: string): Promise<string> {
  28. try {
  29. const t = await getTranslations({ locale: locale || 'en', namespace: 'errors.imageProcessing' });
  30. return t(key);
  31. } catch (error) {
  32. // 如果翻译失败,返回英文默认值
  33. const fallbackMessages: Record<string, string> = {
  34. 'invalidImageData': 'API returned invalid image data',
  35. 'noImagesReturned': 'API did not return any images',
  36. 'noProcessedImages': 'API did not return any processed images',
  37. 'unknownError': 'Unknown error occurred',
  38. 'multiImageProcessingError': 'Unknown error occurred during multi-image processing',
  39. 'batchProcessingError': 'Unknown error occurred during batch processing',
  40. 'multiImageNoData': 'Multi-image processing found no image data',
  41. 'batchProcessingFailed': 'Some batches failed to process',
  42. 'batchError': 'processing failed',
  43. 'batchException': 'processing error'
  44. };
  45. return fallbackMessages[key] || 'Unknown error occurred';
  46. }
  47. }
// Configure the fal client
fal.config({
  credentials: process.env.FAL_KEY,
});
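
// Note: the credentials above come from process.env.FAL_KEY. In a Next.js
// project (assumed here from the next-intl/server import) the key would
// typically live in an untracked env file, e.g.:
//
//   # .env.local (hypothetical placeholder value)
//   FAL_KEY=your-fal-api-key
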
// Smart image editing - uses the Kontext Dev model
export async function smartImageEdit(imageUrl: string, prompt: string, options?: {
  guidance_scale?: number;
  num_images?: number;
  sync_mode?: boolean;
  aspect_ratio?: AspectRatio;
  output_format?: "jpeg" | "png";
  seed?: number;
  safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6";
  locale?: string;
}) {
  try {
    console.log('Calling Kontext Dev model for smart editing...');
    const result = await fal.subscribe("fal-ai/flux-kontext/dev", {
      input: {
        prompt: prompt,
        image_url: imageUrl,
        guidance_scale: options?.guidance_scale ?? 3.5,
        num_images: options?.num_images ?? 1,
        sync_mode: options?.sync_mode ?? true,
        safety_tolerance: options?.safety_tolerance ?? "2",
        output_format: options?.output_format ?? "jpeg",
        ...(options?.aspect_ratio && { aspect_ratio: options.aspect_ratio }),
        ...(options?.seed !== undefined && { seed: options.seed }), // !== undefined so a seed of 0 is not dropped
      },
      logs: true,
      onQueueUpdate: (update) => {
        if (update.status === "IN_PROGRESS") {
          console.log("Processing...", update.logs);
        }
      },
    }) as FalResponse;
    console.log('Fal AI response:', result);
    // More detailed debug output of the response shape
    console.log('========== Fal AI detailed response analysis ==========');
    console.log('Complete response object:', JSON.stringify(result, null, 2));
    console.log('result.images:', result.images);
    console.log('result.data:', result.data);
    console.log('result.data type:', typeof result.data);
    if (result.data) {
      console.log('result.data.images:', result.data.images);
      console.log('result.data.image:', result.data.image);
    }
    console.log('=======================================');
    // Normalize the different response formats
    let images: FalImageResult[] = [];
    // First check images on the root object
    if (result.images && Array.isArray(result.images)) {
      images = result.images;
    }
    // Then check image on the root object
    else if (result.image) {
      images = [result.image];
    }
    // Finally check the nested data object
    else if (result.data?.images && Array.isArray(result.data.images)) {
      images = result.data.images;
    } else if (result.data?.image) {
      images = [result.data.image];
    } else {
      console.error('Image data not found:', result);
      const errorMessage = await getErrorMessage('invalidImageData', options?.locale);
      throw new Error(errorMessage);
    }
    if (images.length === 0) {
      const errorMessage = await getErrorMessage('noImagesReturned', options?.locale);
      throw new Error(errorMessage);
    }
    return {
      success: true,
      data: {
        images: images,
        model_used: 'flux-kontext-dev',
        prompt_used: prompt,
        parameters: {
          guidance_scale: options?.guidance_scale ?? 3.5,
          aspect_ratio: options?.aspect_ratio,
          output_format: options?.output_format ?? "jpeg",
        }
      }
    };
  } catch (error) {
    console.error('Smart image edit error:', error);
    return {
      success: false,
      error: error instanceof Error ? error.message : await getErrorMessage('unknownError', options?.locale)
    };
  }
}
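
// Illustrative usage sketch for smartImageEdit (not part of the module's
// exports): the URL and prompt are hypothetical placeholders, and the call
// assumes FAL_KEY is configured as above.
//
//   const result = await smartImageEdit(
//     "https://example.com/photo.jpg",
//     "make the sky look like a golden-hour sunset",
//     { aspect_ratio: "16:9", output_format: "png" }
//   );
//   if (result.success && result.data) {
//     console.log('Edited image URL:', result.data.images[0].url);
//   } else {
//     console.error('Edit failed:', result.error);
//   }
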
// Precise image editing (for fine-grained detail adjustments)
export async function preciseImageEdit(imageUrl: string, prompt: string, options?: {
  guidance_scale?: number;
  num_images?: number;
  aspect_ratio?: AspectRatio;
  output_format?: "jpeg" | "png";
  seed?: number;
  safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6";
  locale?: string;
}) {
  try {
    console.log('Calling Kontext Dev model for precise editing...');
    const result = await fal.subscribe("fal-ai/flux-kontext/dev", {
      input: {
        prompt: prompt,
        image_url: imageUrl,
        guidance_scale: options?.guidance_scale ?? 4.5,
        num_images: options?.num_images ?? 1,
        sync_mode: true,
        safety_tolerance: options?.safety_tolerance ?? "1",
        output_format: options?.output_format ?? "jpeg",
        ...(options?.aspect_ratio && { aspect_ratio: options.aspect_ratio }),
        ...(options?.seed !== undefined && { seed: options.seed }),
      },
      logs: true,
    }) as FalResponse;
    console.log('Fal AI response:', result);
    // Normalize the different response formats
    let images: FalImageResult[] = [];
    if (result.images && Array.isArray(result.images)) {
      images = result.images;
    } else if (result.image) {
      images = [result.image];
    } else if (result.data?.images && Array.isArray(result.data.images)) {
      images = result.data.images;
    } else if (result.data?.image) {
      images = [result.data.image];
    } else {
      const errorMessage = await getErrorMessage('invalidImageData', options?.locale);
      throw new Error(errorMessage);
    }
    return {
      success: true,
      data: {
        images: images,
        model_used: 'flux-kontext-dev-precise',
        prompt_used: prompt,
        parameters: {
          guidance_scale: options?.guidance_scale ?? 4.5,
          aspect_ratio: options?.aspect_ratio,
          output_format: options?.output_format ?? "jpeg",
        }
      }
    };
  } catch (error) {
    console.error('Precise image edit error:', error);
    return {
      success: false,
      error: error instanceof Error ? error.message : await getErrorMessage('unknownError', options?.locale)
    };
  }
}
// Creative image editing (for larger-scale transformations)
export async function creativeImageEdit(imageUrl: string, prompt: string, options?: {
  guidance_scale?: number;
  num_images?: number;
  aspect_ratio?: AspectRatio;
  output_format?: "jpeg" | "png";
  seed?: number;
  safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6";
  locale?: string;
}) {
  try {
    console.log('Calling Kontext Dev model for creative editing...');
    const result = await fal.subscribe("fal-ai/flux-kontext/dev", {
      input: {
        prompt: prompt,
        image_url: imageUrl,
        guidance_scale: options?.guidance_scale ?? 2.5,
        num_images: options?.num_images ?? 1,
        sync_mode: true,
        safety_tolerance: options?.safety_tolerance ?? "3",
        output_format: options?.output_format ?? "jpeg",
        ...(options?.aspect_ratio && { aspect_ratio: options.aspect_ratio }),
        ...(options?.seed !== undefined && { seed: options.seed }),
      },
      logs: true,
    }) as FalResponse;
    console.log('Fal AI response:', result);
    // Normalize the different response formats
    let images: FalImageResult[] = [];
    if (result.images && Array.isArray(result.images)) {
      images = result.images;
    } else if (result.image) {
      images = [result.image];
    } else if (result.data?.images && Array.isArray(result.data.images)) {
      images = result.data.images;
    } else if (result.data?.image) {
      images = [result.data.image];
    } else {
      const errorMessage = await getErrorMessage('invalidImageData', options?.locale);
      throw new Error(errorMessage);
    }
    return {
      success: true,
      data: {
        images: images,
        model_used: 'flux-kontext-dev-creative',
        prompt_used: prompt,
        parameters: {
          guidance_scale: options?.guidance_scale ?? 2.5,
          aspect_ratio: options?.aspect_ratio,
          output_format: options?.output_format ?? "jpeg",
        }
      }
    };
  } catch (error) {
    console.error('Creative image edit error:', error);
    return {
      success: false,
      error: error instanceof Error ? error.message : await getErrorMessage('unknownError', options?.locale)
    };
  }
}
// Generic edit function (kept for compatibility with interactive-demo)
export async function editImage(imageUrl: string, prompt: string, locale?: string) {
  return await smartImageEdit(imageUrl, prompt, { locale });
}
// Background removal (implemented with Kontext Dev)
export async function removeBackground(imageUrl: string, options?: {
  aspect_ratio?: AspectRatio;
  output_format?: "jpeg" | "png";
  seed?: number;
  safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6";
  locale?: string;
}) {
  try {
    console.log('Calling Kontext Dev model for background removal...');
    const result = await fal.subscribe("fal-ai/flux-kontext/dev", {
      input: {
        prompt: "remove background, transparent background, clean cutout",
        image_url: imageUrl,
        guidance_scale: 4.0,
        num_images: 1,
        sync_mode: true,
        safety_tolerance: options?.safety_tolerance ?? "1",
        output_format: options?.output_format ?? "png",
        ...(options?.aspect_ratio && { aspect_ratio: options.aspect_ratio }),
        ...(options?.seed !== undefined && { seed: options.seed }),
      },
      logs: true,
    }) as FalResponse;
    console.log('Fal AI response:', result);
    // Normalize the different response formats
    let images: FalImageResult[] = [];
    if (result.images && Array.isArray(result.images)) {
      images = result.images;
    } else if (result.image) {
      images = [result.image];
    } else if (result.data?.images && Array.isArray(result.data.images)) {
      images = result.data.images;
    } else if (result.data?.image) {
      images = [result.data.image];
    } else {
      const errorMessage = await getErrorMessage('invalidImageData', options?.locale);
      throw new Error(errorMessage);
    }
    return {
      success: true,
      data: {
        images: images,
        model_used: 'flux-kontext-dev-background-removal',
        parameters: {
          aspect_ratio: options?.aspect_ratio,
          output_format: options?.output_format ?? "png",
        }
      }
    };
  } catch (error) {
    console.error('Remove background error:', error);
    return {
      success: false,
      error: error instanceof Error ? error.message : await getErrorMessage('unknownError', options?.locale)
    };
  }
}
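
// Illustrative usage sketch for removeBackground: the URL is a hypothetical
// placeholder. output_format defaults to "png" here because the prompt asks
// for a transparent background.
//
//   const cutout = await removeBackground("https://example.com/product.jpg");
//   if (cutout.success && cutout.data) {
//     console.log('Cutout URL:', cutout.data.images[0].url);
//   }
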
// Multi-image processing - uses the Kontext Max Multi model
export async function multiImageEdit(imageUrls: string[], prompt: string, options?: {
  guidance_scale?: number;
  num_images?: number;
  sync_mode?: boolean;
  aspect_ratio?: AspectRatio;
  output_format?: "jpeg" | "png";
  seed?: number;
  safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6";
  batch_size?: number;
  locale?: string;
}) {
  try {
    console.log('Calling Kontext Max Multi model for multi-image editing...');
    console.log('Input image count:', imageUrls.length);
    const result = await fal.subscribe("fal-ai/flux-pro/kontext/max/multi", {
      input: {
        prompt: prompt,
        image_urls: imageUrls,
        guidance_scale: options?.guidance_scale ?? 3.5,
        num_images: options?.num_images ?? imageUrls.length,
        sync_mode: options?.sync_mode ?? true,
        safety_tolerance: options?.safety_tolerance ?? "2",
        output_format: options?.output_format ?? "jpeg",
        batch_size: options?.batch_size ?? Math.min(imageUrls.length, 4), // Limit batch size
        ...(options?.aspect_ratio && { aspect_ratio: options.aspect_ratio }),
        ...(options?.seed !== undefined && { seed: options.seed }),
      },
      logs: true,
      onQueueUpdate: (update) => {
        if (update.status === "IN_PROGRESS") {
          console.log("Processing multi-images...", update.logs);
        }
      },
    }) as FalResponse;
    console.log('Fal AI Multi response:', result);
    // Handle multi-image response format
    let images: FalImageResult[] = [];
    if (result.images && Array.isArray(result.images)) {
      images = result.images;
    } else if (result.data?.images && Array.isArray(result.data.images)) {
      images = result.data.images;
    } else if (result.image) {
      images = [result.image];
    } else if (result.data?.image) {
      images = [result.data.image];
    } else {
      console.error('Multi-image processing found no image data:', result);
      const errorMessage = await getErrorMessage('multiImageNoData', options?.locale);
      throw new Error(errorMessage);
    }
    if (images.length === 0) {
      const errorMessage = await getErrorMessage('noProcessedImages', options?.locale);
      throw new Error(errorMessage);
    }
    return {
      success: true,
      data: {
        images: images,
        model_used: 'flux-pro-kontext-max-multi',
        prompt_used: prompt,
        input_count: imageUrls.length,
        output_count: images.length,
        parameters: {
          guidance_scale: options?.guidance_scale ?? 3.5,
          aspect_ratio: options?.aspect_ratio,
          output_format: options?.output_format ?? "jpeg",
          batch_size: options?.batch_size ?? Math.min(imageUrls.length, 4),
        }
      }
    };
  } catch (error) {
    console.error('Multi-image edit error:', error);
    return {
      success: false,
      error: error instanceof Error ? error.message : await getErrorMessage('multiImageProcessingError', options?.locale)
    };
  }
}
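
// Illustrative usage sketch for multiImageEdit: the URLs are hypothetical
// placeholders. By default num_images matches the number of input URLs, so
// one edited image is requested per input.
//
//   const multi = await multiImageEdit(
//     ["https://example.com/a.jpg", "https://example.com/b.jpg"],
//     "apply a consistent watercolor style",
//     { output_format: "jpeg" }
//   );
//   if (multi.success && multi.data) {
//     console.log(`Edited ${multi.data.output_count} of ${multi.data.input_count} images`);
//   }
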
// Batch image processing (for handling large numbers of images in batches)
export async function batchImageEdit(imageUrls: string[], prompt: string, options?: {
  guidance_scale?: number;
  aspect_ratio?: AspectRatio;
  output_format?: "jpeg" | "png";
  seed?: number;
  safety_tolerance?: "1" | "2" | "3" | "4" | "5" | "6";
  batch_size?: number;
  max_concurrent?: number;
  locale?: string;
}) {
  try {
    const batchSize = options?.batch_size ?? 4;
    const maxConcurrent = options?.max_concurrent ?? 2;
    const batches: string[][] = [];
    // Split images into batches
    for (let i = 0; i < imageUrls.length; i += batchSize) {
      batches.push(imageUrls.slice(i, i + batchSize));
    }
    console.log(`Batch processing: ${imageUrls.length} images split into ${batches.length} batches`);
    const allResults: FalImageResult[] = [];
    const errors: string[] = [];
    // Process batches concurrently
    for (let i = 0; i < batches.length; i += maxConcurrent) {
      const currentBatches = batches.slice(i, i + maxConcurrent);
      const batchPromises = currentBatches.map(async (batch, batchIndex) => {
        try {
          const result = await multiImageEdit(batch, prompt, {
            ...options,
            batch_size: batch.length,
          });
          if (result.success && result.data?.images) {
            return result.data.images;
          } else {
            const errorMessage = await getErrorMessage('batchError', options?.locale);
            errors.push(`Batch ${i + batchIndex + 1} ${errorMessage}: ${result.error}`);
            return [];
          }
        } catch (error) {
          const errorMessage = await getErrorMessage('batchException', options?.locale);
          const unknownError = await getErrorMessage('unknownError', options?.locale);
          errors.push(`Batch ${i + batchIndex + 1} ${errorMessage}: ${error instanceof Error ? error.message : unknownError}`);
          return [];
        }
      });
      const batchResults = await Promise.all(batchPromises);
      batchResults.forEach(images => allResults.push(...images));
    }
    const batchFailedMessage = await getErrorMessage('batchProcessingFailed', options?.locale);
    return {
      success: allResults.length > 0,
      data: {
        images: allResults,
        model_used: 'flux-pro-kontext-max-multi-batch',
        prompt_used: prompt,
        input_count: imageUrls.length,
        output_count: allResults.length,
        batch_count: batches.length,
        errors: errors.length > 0 ? errors : undefined,
        parameters: {
          guidance_scale: options?.guidance_scale ?? 3.5,
          aspect_ratio: options?.aspect_ratio,
          output_format: options?.output_format ?? "jpeg",
          batch_size: batchSize,
          max_concurrent: maxConcurrent,
        }
      },
      error: errors.length > 0 ? `${batchFailedMessage}: ${errors.join('; ')}` : undefined
    };
  } catch (error) {
    console.error('Batch image edit error:', error);
    return {
      success: false,
      error: error instanceof Error ? error.message : await getErrorMessage('batchProcessingError', options?.locale)
    };
  }
}
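
// Illustrative usage sketch for batchImageEdit: with the default batch_size
// of 4 and max_concurrent of 2, ten hypothetical URLs are split into three
// batches (4 + 4 + 2) and processed at most two batches at a time.
//
//   const urls = Array.from({ length: 10 }, (_, i) => `https://example.com/${i}.jpg`);
//   const batch = await batchImageEdit(urls, "enhance lighting and contrast");
//   if (batch.success && batch.data) {
//     console.log(`Processed ${batch.data.output_count} of ${urls.length} images`);
//     if (batch.data.errors) console.warn(batch.data.errors);
//   }
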
// Export the aspect ratio type
export type { AspectRatio };

// Export the fal client for other uses
export { fal };