ysharma (HF Staff) committed
Commit 5af5718 · verified · Parent: 190b2c3

Create app.py

Files changed (1)
  1. app.py +259 -0
app.py ADDED
@@ -0,0 +1,259 @@
import io
import os
import random
from functools import cache, lru_cache
from typing import Any

import gradio as gr
from PIL import Image, ImageDraw, ImageFont

# Dummy initialization - no actual model loading
DEVICE_CPU = "cpu"
DTYPE = "dummy_dtype"
FG_API_KEY = os.getenv("FG_API_KEY", "dummy_api_key")


# Dummy model and prompt objects
class DummyModel:
    def __init__(self):
        pass

    def to(self, device, dtype):
        return self


class DummyPrompt:
    def to(self, device, dtype):
        return self


model = DummyModel()
prompt = DummyPrompt()

@cache
def _ctx():
    # Dummy context standing in for the real inference context
    class DummyContext:
        def reset(self):
            pass

        def run_one_sync(self, func, *args):
            # Return a dummy cutout image
            img = Image.new("RGBA", (200, 200), (255, 0, 0, 128))
            return img

    return DummyContext()

def on_change(scene: dict[str, Any] | None, reference: Image.Image | None) -> tuple[dict[str, Any], str]:
    bbox_str = ""
    if scene is not None and isinstance(scene.get("boxes"), list) and len(scene["boxes"]) == 1:
        box = scene["boxes"][0]
        bbox_str = f"({box['xmin']}, {box['ymin']}, {box['xmax']}, {box['ymax']})"
    return (gr.update(interactive=reference is not None and bbox_str != ""), bbox_str)

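# NOTE: on_change expects `scene` as the payload of a box-drawing component; the
# shape assumed here (inferred from the accesses above) is:
#   {"image": <PIL.Image.Image>, "boxes": [{"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 220}]}
# The demo below wires the simpler dummy_on_change instead, keeping this handler
# for a real box-drawing component.
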
def create_dummy_image(width: int = 512, height: int = 512, color: tuple = (100, 150, 200), text: str = "Dummy Output") -> Image.Image:
    """Create a dummy RGB image with centered text"""
    img = Image.new("RGB", (width, height), color)
    draw = ImageDraw.Draw(img)

    # Try to use a TrueType font, fall back to the default bitmap font
    try:
        font = ImageFont.truetype("arial.ttf", 24)
    except OSError:
        font = ImageFont.load_default()

    # Get the text bounding box for centering
    bbox = draw.textbbox((0, 0), text, font=font)
    text_width = bbox[2] - bbox[0]
    text_height = bbox[3] - bbox[1]

    x = (width - text_width) // 2
    y = (height - text_height) // 2

    draw.text((x, y), text, fill=(255, 255, 255), font=font)
    return img

def _process(
    scene: dict[str, Any],
    reference: Image.Image,
    seed: int = 1234,
) -> tuple[tuple[Image.Image, Image.Image], Image.Image, Image.Image]:
    """Dummy processing function that returns placeholder images"""

    # Use the scene image if present, otherwise create a dummy one
    if isinstance(scene.get("image"), Image.Image):
        scene_image = scene["image"]
    else:
        scene_image = create_dummy_image(512, 512, (150, 100, 200), "Dummy Scene")

    # Create a dummy output image with a random fill color
    output = create_dummy_image(
        scene_image.width,
        scene_image.height,
        (random.randint(50, 200), random.randint(50, 200), random.randint(50, 200)),
        f"Processed (seed: {seed})",
    )

    # Create a dummy reference output
    reference_output = reference.copy() if reference else create_dummy_image(200, 200, (255, 100, 100), "Ref")

    # Create a dummy scene output
    scene_output = scene_image.copy()

    before_after = (scene_image.resize(output.size), output)
    return (before_after, reference_output, scene_output)

@lru_cache(maxsize=32)
def _cutout_reference(image_bytes: bytes) -> Image.Image:
    """Dummy cutout function"""
    # Return a simple red placeholder (an RGB image, so the color is a 3-tuple)
    return create_dummy_image(200, 200, (255, 0, 0), "Cutout")

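# NOTE: functools.lru_cache needs hashable arguments and PIL images are not
# hashable, which is why cutout_reference (below) PNG-encodes the image and
# caches on the resulting bytes.
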
def cutout_reference(reference: Image.Image) -> Image.Image:
    """Dummy cutout wrapper"""
    if reference:
        # PNG-encode the image so the cached helper receives a hashable key
        buf = io.BytesIO()
        reference.save(buf, format="PNG")
        return _cutout_reference(buf.getvalue())
    else:
        return create_dummy_image(200, 200, (255, 0, 0), "No Reference")

def process(
    scene: dict[str, Any],
    reference: Image.Image,
    seed: int = 1234,
    cut_out_reference: bool = False,
) -> tuple[tuple[Image.Image, Image.Image], Image.Image, Image.Image]:
    """Main dummy processing function"""
    if cut_out_reference and reference:
        reference = cutout_reference(reference)

    return _process(scene, reference, seed)

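# Minimal usage sketch for process(), with hypothetical values (the app itself
# calls it through dummy_process_wrapper below):
#   scene = {"image": Image.new("RGB", (512, 512)),
#            "boxes": [{"xmin": 100, "ymin": 100, "xmax": 200, "ymax": 200}]}
#   (before, after), ref_out, scene_out = process(scene, Image.new("RGBA", (64, 64)), seed=42)
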
TITLE = """
<h1>Finegrain Product Placement LoRA</h1>
<p>
🧪 An experiment to extend Flux Kontext with product placement capabilities.
The LoRA was trained using EditNet, our before / after image editing dataset.
</p>
<p>
Just draw a box to set where the subject should be blended, and at what size.
</p>
<p>
<a href="https://huggingface.co/finegrain/finegrain-product-placement-lora">Model Card</a> |
<a href="https://blog.finegrain.ai/posts/product-placement-flux-lora-experiment/">Blog Post</a> |
<a href="https://finegrain.ai/editnet">EditNet</a>
</p>
"""

with gr.Blocks() as demo:
    gr.HTML(TITLE)
    with gr.Row():
        with gr.Column():
            scene = gr.Image(
                label="Scene",
                type="pil",
                image_mode="RGB",
            )
            reference = gr.Image(
                label="Product Reference",
                visible=True,
                interactive=True,
                type="pil",
                image_mode="RGBA",
            )
            with gr.Accordion("Options", open=False):
                seed = gr.Slider(
                    minimum=0,
                    maximum=10_000,
                    value=1234,
                    step=1,
                    label="Seed",
                )
                cut_out_reference = gr.Checkbox(
                    label="Cut out reference",
                    value=bool(FG_API_KEY),
                    interactive=bool(FG_API_KEY),
                )
            with gr.Row():
                run_btn = gr.Button(value="Blend", interactive=True)
        with gr.Column():
            output_image = gr.Image(label="Output Image")
            with gr.Accordion("Debug", open=False):
                output_textbox = gr.Textbox(label="Bounding Box", interactive=False)
                output_reference = gr.Image(
                    label="Reference",
                    visible=True,
                    interactive=False,
                    type="pil",
                    image_mode="RGB",
                )
                output_scene = gr.Image(
                    label="Scene",
                    visible=True,
                    interactive=False,
                    type="pil",
                    image_mode="RGB",
                )

    # Dummy change handler for scene and reference
    def dummy_on_change(scene, reference):
        return gr.update(interactive=scene is not None and reference is not None), "Dummy bbox (100, 100, 200, 200)"

    # Watch for changes on scene and reference
    scene.change(fn=dummy_on_change, inputs=[scene, reference], outputs=[run_btn, output_textbox])
    reference.change(fn=dummy_on_change, inputs=[scene, reference], outputs=[run_btn, output_textbox])

    def dummy_process_wrapper(scene, reference, seed, cut_out_reference):
        """Wrapper for the dummy process function"""
        if scene is None or reference is None:
            # Return dummy images if inputs are missing
            dummy_img = create_dummy_image(512, 512, (100, 100, 100), "No Input")
            return dummy_img, dummy_img, dummy_img

        # Convert the scene image to the dict format expected by process()
        scene_dict = {"image": scene, "boxes": [{"xmin": 100, "ymin": 100, "xmax": 200, "ymax": 200}]}
        result = process(scene_dict, reference, seed, cut_out_reference)

        # Unpack the results
        before_after, ref_out, scene_out = result
        output_combined = before_after[1]  # the "after" image

        return output_combined, ref_out, scene_out

    run_btn.click(
        fn=dummy_process_wrapper,
        inputs=[scene, reference, seed, cut_out_reference],
        outputs=[output_image, output_reference, output_scene],
    )

    # Create dummy examples with placeholder images
    def create_dummy_examples():
        examples = []
        colors = [(255, 100, 100), (100, 255, 100), (100, 100, 255), (255, 255, 100), (255, 100, 255)]
        names = ["Sunglasses", "Kitchen", "Glass", "Chair", "Lantern"]

        for color, name in zip(colors, names):
            scene_img = create_dummy_image(400, 400, color, f"Scene {name}")
            ref_img = create_dummy_image(200, 200, tuple(c // 2 for c in color), f"Ref {name}")
            examples.append([scene_img, ref_img])

        return examples

    ex = gr.Examples(
        examples=create_dummy_examples(),
        inputs=[scene, reference],
        outputs=[output_image, output_reference, output_scene],
        fn=dummy_process_wrapper,
    )

if __name__ == "__main__":
    demo.launch(debug=True)