import gradio as gr
from huggingface_hub import InferenceClient
import pandas as pd
import json
import os
import time
from datetime import datetime

# Custom system instructions for business category descriptions
SYSTEM_INSTRUCTIONS = """You are an expert at writing clear and visual descriptions for a business category keyword for a yellow pages or business listing website. Given a category keyword, generate a single, detailed description that defines its key visual elements, location, and context. Do not add artistic or stylistic flair. Ensure that the description is CLIP model ready and not too verbose.

Here are some examples of the correct format:

Category: "Car Rental For Self Driven"

Description: "a car available for self-drive rental, parked at a pickup spot without a chauffeur; looks travel-ready, clean, well-maintained, keys handed over to customer"

Category: "Mehandi"

Description: "Temporary henna artwork applied on hands and feet using cones; fine brown or maroon floral and paisley patterns, mandalas, and lace-like detailing, commonly seen at weddings and festivals."

Category: "Photographer"

Description: "a person actively shooting photos or posing with a camera; holding a camera to eye, adjusting lens, or directing a subject during a shoot"

Category: "Equipment"

Description: "lighting stands, softboxes, strobes, tripods, reflectors, gimbals, battery packs, memory cards arranged as gear kits"

---

IMPORTANT: You must respond with ONLY a valid JSON object in this exact format:
{"Category": "category name", "Description": "description text"}

Do not include any other text, explanations, or markdown formatting. Only output the JSON object."""
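
# Example of the raw reply the prompt above is meant to elicit (illustrative, not a captured model output):
#   {"Category": "Bakery", "Description": "a bakery counter with breads, pastries, and cakes in a lit glass display case"}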


def extract_json_from_response(response_text):
    """Extract and validate JSON from model response."""
    # Try to find JSON in the response
    response_text = response_text.strip()
    
    # Remove markdown code blocks if present
    if "```json" in response_text:
        response_text = response_text.split("```json")[1].split("```")[0].strip()
    elif "```" in response_text:
        response_text = response_text.split("```")[1].split("```")[0].strip()
    
    # Try to find JSON object in the text
    if "{" in response_text and "}" in response_text:
        start = response_text.find("{")
        end = response_text.rfind("}") + 1
        response_text = response_text[start:end]
    
    # Parse JSON
    parsed = json.loads(response_text)
    
    # Validate structure
    if not isinstance(parsed, dict):
        raise ValueError("Response is not a JSON object")
    
    # Get description with various possible keys
    description = (
        parsed.get("Description") or 
        parsed.get("description") or 
        parsed.get("desc") or 
        ""
    )
    
    if not description or len(description.strip()) < 10:
        raise ValueError("Description is missing or too short")
    
    return description.strip()
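
# Illustrative behaviour of the helper above (hypothetical input, not executed by the app):
#   extract_json_from_response('```json\n{"Category": "Bakery", "Description": "a bakery counter with breads on display"}\n```')
#   -> 'a bakery counter with breads on display'
# Input without a parseable JSON object raises json.JSONDecodeError; a missing or very short
# description raises ValueError. Both are caught by the retry logic in process_single_category below.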


def process_single_category(category, client, max_tokens, temperature, top_p, retry_count=3):
    """Process a single category keyword and return the description with retry logic."""
    messages = [
        {"role": "system", "content": SYSTEM_INSTRUCTIONS},
        {"role": "user", "content": f"Category: \"{category}\""}
    ]
    
    last_error = None
    
    for attempt in range(retry_count):
        try:
            # Add small delay between retries
            if attempt > 0:
                time.sleep(1)
            
            # Try streaming approach (more reliable for this model)
            response_text = ""
            for message in client.chat_completion(
                messages,
                max_tokens=max_tokens,
                stream=True,
                temperature=temperature,
                top_p=top_p,
            ):
                if hasattr(message, 'choices') and len(message.choices) > 0:
                    if hasattr(message.choices[0], 'delta') and hasattr(message.choices[0].delta, 'content'):
                        token = message.choices[0].delta.content
                        if token:
                            response_text += token
                elif isinstance(message, str):
                    response_text += message
            
            # Validate we got a response
            if not response_text or len(response_text.strip()) < 5:
                raise ValueError("Empty or too short response from model")
            
            # Extract and validate JSON
            description = extract_json_from_response(response_text)
            
            # Return both the description and raw response
            return response_text.strip(), description
            
        except json.JSONDecodeError as e:
            last_error = f"JSON parsing failed (attempt {attempt + 1}/{retry_count}): {str(e)}"
            # If JSON parsing fails, try to extract description from raw text
            if attempt == retry_count - 1 and response_text:
                # Last attempt - try to use raw response if it looks like a description
                if len(response_text.strip()) > 20 and not response_text.startswith("{"):
                    return response_text.strip(), response_text.strip()
        except Exception as e:
            last_error = f"Processing failed (attempt {attempt + 1}/{retry_count}): {str(e)}"
    
    # All retries failed
    raise Exception(f"Failed after {retry_count} attempts. Last error: {last_error}")
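
# Standalone usage sketch (assumes HF_TOKEN is set in the environment; the values mirror the UI defaults):
#   client = InferenceClient(token=os.environ["HF_TOKEN"], model="openai/gpt-oss-20b")
#   raw, desc = process_single_category("Bakery", client, max_tokens=256, temperature=0.3, top_p=0.9)
# raw is the full model reply, desc the validated description; after 3 failed attempts the
# function raises instead, and process_csv_files records that row with Status "Failed".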


def process_csv_files(
    files,
    category_column,
    max_tokens,
    temperature,
    top_p,
    progress=gr.Progress()
):
    """
    Process multiple CSV files and generate descriptions for category keywords.
    """
    if not files or len(files) == 0:
        return "Please upload at least one CSV file.", None
    
    # Get HF token from environment variables (os is already imported at module level)
    hf_token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_TOKEN")
    
    if not hf_token:
        return "❌ Error: HF_TOKEN not found. Please add your Hugging Face token as a Space Secret.\n\nGo to Space Settings → Secrets → Add 'HF_TOKEN'", None
    
    client = InferenceClient(token=hf_token, model="openai/gpt-oss-20b")
    
    output_files = []
    status_messages = []
    
    for file_idx, file in enumerate(files):
        try:
            # Read CSV file
            df = pd.read_csv(file.name)
            status_messages.append(f"📄 Processing file {file_idx + 1}/{len(files)}: {os.path.basename(file.name)}")
            
            # Check if category column exists
            if category_column not in df.columns:
                status_messages.append(f"⚠️ Warning: Column '{category_column}' not found in {os.path.basename(file.name)}. Available columns: {', '.join(df.columns)}")
                continue
            
            # Process each category
            descriptions = []
            raw_responses = []
            
            categories = df[category_column].dropna().unique()
            total_categories = len(categories)
            
            for idx, category in enumerate(categories):
                progress((file_idx * total_categories + idx) / (len(files) * total_categories), 
                        desc=f"Processing category {idx + 1}/{total_categories} in file {file_idx + 1}")
                
                try:
                    # Process with retry logic
                    raw_response, description = process_single_category(
                        category, client, max_tokens, temperature, top_p, retry_count=3
                    )
                    
                    # Validate description
                    if not description or len(description.strip()) < 10:
                        raise ValueError("Description is too short or empty")
                    
                    descriptions.append({
                        "Category": category,
                        "Description": description,
                        "Raw_Response": raw_response,
                        "Status": "Success"
                    })
                    
                    status_messages.append(f"✅ Processed: {category}")
                    
                except Exception as e:
                    error_msg = str(e)
                    status_messages.append(f"⚠️ Error processing '{category}': {error_msg}")
                    
                    descriptions.append({
                        "Category": category,
                        "Description": f"[FAILED - {error_msg[:100]}]",
                        "Raw_Response": "",
                        "Status": "Failed"
                    })
                
                # Small delay to avoid rate limiting
                time.sleep(0.5)
            
            # Create output dataframe
            output_df = pd.DataFrame(descriptions)
            
            # Save to file
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            base_name = os.path.splitext(os.path.basename(file.name))[0]
            output_filename = f"output_{base_name}_{timestamp}.csv"
            output_df.to_csv(output_filename, index=False)
            output_files.append(output_filename)
            
            # Count successes and failures
            success_count = len([d for d in descriptions if d.get("Status") == "Success"])
            failed_count = len([d for d in descriptions if d.get("Status") == "Failed"])
            
            status_messages.append(f"✅ Completed: {success_count} succeeded, {failed_count} failed out of {len(descriptions)} categories from {os.path.basename(file.name)}")
            
        except Exception as e:
            status_messages.append(f"❌ Error processing {os.path.basename(file.name)}: {str(e)}")
    
    status_text = "\n".join(status_messages)
    
    if output_files:
        return status_text, output_files
    else:
        return status_text + "\n\n❌ No output files generated.", None
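
# process_csv_files returns (status_text, list_of_output_paths), or (status_text, None) when no
# file produced output; the tuple maps onto the two Gradio outputs wired up below
# (status_output, files_output).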


# Create Gradio interface
with gr.Blocks(title="Business Category Description Generator") as demo:
    gr.Markdown("""
    # 🏢 Business Category Description Generator
    
    Upload CSV files containing business category keywords, and this app will generate 
    CLIP-ready visual descriptions for each category using AI.
    
    **Instructions:**
    1. Upload one or more CSV files
    2. Specify the column name that contains the category keywords
    3. Adjust model settings (lower temperature = more consistent output)
    4. Click "Process Files" to generate descriptions
    5. Download the output CSV files with Status column
    
    **Features:**
    - ✅ Automatic retry logic (3 attempts per category)
    - ✅ JSON validation and error recovery
    - ✅ Progress tracking with detailed status
    - ✅ Success/failure reporting
    
    *Note: For faster processing, use Zero GPU (see Space Settings). Authentication via HF_TOKEN secret.*
    """)
    
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Model Settings")
            max_tokens = gr.Slider(
                minimum=64, 
                maximum=512, 
                value=256, 
                step=16, 
                label="Max Tokens"
            )
            temperature = gr.Slider(
                minimum=0.1, 
                maximum=1.0, 
                value=0.3, 
                step=0.1, 
                label="Temperature",
                info="Lower = more consistent output"
            )
            top_p = gr.Slider(
                minimum=0.1, 
                maximum=1.0, 
                value=0.9, 
                step=0.05, 
                label="Top-p"
            )
        
        with gr.Column(scale=2):
            files_input = gr.File(
                label="📤 Upload CSV Files",
                file_count="multiple",
                file_types=[".csv"]
            )
            category_column = gr.Textbox(
                label="📝 Category Column Name",
                value="category",
                placeholder="Enter the name of the column containing categories"
            )
            process_btn = gr.Button("🚀 Process Files", variant="primary", size="lg")
            
            status_output = gr.Textbox(
                label="📊 Status",
                lines=10,
                interactive=False
            )
            files_output = gr.File(
                label="💾 Download Output Files",
                file_count="multiple"
            )
    
    process_btn.click(
        fn=process_csv_files,
        inputs=[
            files_input,
            category_column,
            max_tokens,
            temperature,
            top_p
        ],
        outputs=[status_output, files_output]
    )
    
    gr.Markdown("""
    ---
    ### πŸ“ Output Format
    Each output CSV file will contain:
    - **Category**: The original category keyword
    - **Description**: The generated visual description (validated and cleaned)
    - **Raw_Response**: The complete model response (for debugging)
    - **Status**: Success or Failed (with error details)
    
    💡 **Tips for Best Results:**
    - Use Temperature 0.2-0.4 for consistent, focused descriptions
    - Use Temperature 0.6-0.8 for more creative variations
    - Failed categories are marked clearly - you can reprocess them separately
    - Zero GPU acceleration: Add @spaces.GPU decorator or enable in Space Settings
    """)

if __name__ == "__main__":
    demo.launch()
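    # For local testing you could instead call demo.launch(server_name="0.0.0.0", server_port=7860)
    # (illustrative values) to reach the app from other machines; on Hugging Face Spaces the
    # bare launch() above is sufficient.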