import gradio as gr
from huggingface_hub import InferenceClient
import pandas as pd
import json
import os
import time
from datetime import datetime

# Working model configurations - verified to work with the HF Inference API
MODEL_CONFIGS = {
    "GPT-OSS 20B (Reliable)": {
        "model_id": "openai/gpt-oss-20b",
        "description": "Your current model - reliable for structured output",
        "default_temp": 0.3,
        "max_tokens": 256
    },
    "Mistral 7B Instruct (Fast)": {
        "model_id": "mistralai/Mistral-7B-Instruct-v0.2",
        "description": "Fast and efficient, good for large batches",
        "default_temp": 0.4,
        "max_tokens": 300
    },
    "Zephyr 7B Beta (Quality)": {
        "model_id": "HuggingFaceH4/zephyr-7b-beta",
        "description": "Good balance of quality and speed",
        "default_temp": 0.35,
        "max_tokens": 300
    },
    "OpenChat 3.5 (Creative)": {
        "model_id": "openchat/openchat-3.5-0106",
        "description": "More creative descriptions",
        "default_temp": 0.5,
        "max_tokens": 300
    }
}
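
# Optional sanity check (a minimal sketch, not wired into the UI): before a
# large batch run, you can verify that a configured model actually answers on
# the Inference API. The "ping" probe and max_tokens=1 are illustrative
# assumptions, not requirements.
def check_model_availability(model_key, hf_token):
    """Return True if the configured model answers a trivial chat request."""
    try:
        client = InferenceClient(
            token=hf_token,
            model=MODEL_CONFIGS[model_key]["model_id"],
        )
        client.chat_completion(
            [{"role": "user", "content": "ping"}],
            max_tokens=1,
        )
        return True
    except Exception:
        return False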
# Enhanced prompt templates for better clip-ready descriptions
PROMPT_TEMPLATES = {
    "Clip-Ready Visual (15-30 words)": """You are an expert at writing ultra-concise, visual descriptions for CLIP models and image search.

For each business category, create a description that:
1. Is 15-30 words maximum
2. Focuses on VISUAL elements only (what you would SEE in an image)
3. Uses concrete, observable details
4. Avoids abstract concepts or services
5. Describes physical appearance, setting, or visual activity

Examples:
Category: "Car Rental For Self Driven"
Description: "rental car with keys, parked at pickup location, clean interior visible, rental company signage"

Category: "Mehandi"
Description: "henna artwork on hands, intricate patterns being applied, cones and design templates visible"

Category: "Photographer"
Description: "person with camera shooting, tripods and lighting equipment, studio setup with backdrops"

IMPORTANT: Respond with ONLY a JSON object in this exact format:
{"Category": "category name", "Description": "visual description"}
Do not include any other text, explanations, or markdown formatting.""",

    "Standard Business (40-60 words)": """You are creating professional business descriptions for directory listings.

Generate descriptions that:
1. Are 40-60 words
2. Define the service clearly
3. Include key visual and contextual elements
4. Are suitable for yellow pages or business directories

Example format:
Category: "Photography Studio"
Description: "Professional photography space with lighting equipment, backdrops, and cameras. Photographer capturing portraits, events, or products. Studio setup with tripods, reflectors, softboxes. Clients posing for shots, reviewing images on screens."

IMPORTANT: Respond with ONLY a JSON object:
{"Category": "category name", "Description": "description text"}""",

    "Your Original Prompt": """You are an expert at writing clear and visual descriptions for a business category keyword for a yellow pages or business listing website.

Given a category keyword, generate a single, detailed description that defines its key visual elements, location, and context. Do not add artistic or stylistic flair. Ensure that the description is CLIP model ready and not too verbose.

IMPORTANT: You must respond with ONLY a valid JSON object in this exact format:
{"Category": "category name", "Description": "description text"}
Do not include any other text, explanations, or markdown formatting.
Only output the JSON object."""
}


def extract_json_from_response(response_text):
    """Enhanced JSON extraction with better error handling."""
    if not response_text:
        raise ValueError("Empty response")
    response_text = response_text.strip()

    # Strip markdown code fences if present
    if "```json" in response_text:
        response_text = response_text.split("```json")[1].split("```")[0].strip()
    elif "```" in response_text:
        response_text = response_text.split("```")[1].split("```")[0].strip()

    # Isolate the JSON object
    if "{" in response_text and "}" in response_text:
        start = response_text.find("{")
        end = response_text.rfind("}") + 1
        json_str = response_text[start:end]
    else:
        json_str = response_text

    try:
        parsed = json.loads(json_str)
    except json.JSONDecodeError as e:
        # Fix common issues: single quotes, stray newlines/tabs
        json_str = json_str.replace("'", '"').replace("\n", " ").replace("\t", " ")
        try:
            parsed = json.loads(json_str)
        except json.JSONDecodeError:
            # Last resort: try to pull a description line out of the raw text
            if "description" in response_text.lower():
                for line in response_text.split("\n"):
                    if "description" in line.lower() and ":" in line:
                        desc = line.split(":", 1)[1].strip().strip('"').strip("'")
                        if len(desc) > 10:
                            return desc
            raise ValueError(f"Cannot parse JSON: {str(e)}")

    # Extract the description under any of the common key spellings
    description = (
        parsed.get("Description")
        or parsed.get("description")
        or parsed.get("Desc")
        or parsed.get("desc")
        or ""
    )
    if not description or len(description.strip()) < 10:
        raise ValueError("Description is missing or too short")
    return description.strip()


def _stream_chat(client, messages, max_tokens, temperature, top_p):
    """Accumulate a streamed chat completion into a single string."""
    response_text = ""
    for message in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        if hasattr(message, "choices") and len(message.choices) > 0:
            choice = message.choices[0]
            if hasattr(choice, "delta") and hasattr(choice.delta, "content"):
                token = choice.delta.content
                if token:
                    response_text += token
        elif isinstance(message, str):
            response_text += message
    return response_text


def process_single_category_with_fallback(
    category, model_name, prompt_template, max_tokens,
    temperature, top_p, hf_token, retry_count=3
):
    """Process with fallback to a working model if the primary fails."""
    system_prompt = PROMPT_TEMPLATES[prompt_template]
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": f'Category: "{category}"'},
    ]

    # Try the primary model with retries
    try:
        client = InferenceClient(
            token=hf_token,
            model=MODEL_CONFIGS[model_name]["model_id"],
        )
        for attempt in range(retry_count):
            try:
                if attempt > 0:
                    time.sleep(1)
                response_text = _stream_chat(client, messages, max_tokens, temperature, top_p)
                if not response_text or len(response_text.strip()) < 5:
                    raise ValueError("Empty response")
                description = extract_json_from_response(response_text)
                return response_text.strip(), description, model_name
            except Exception:
                if attempt == retry_count - 1:
                    raise
    except Exception as primary_error:
        # Fall back to GPT-OSS-20B, which is known to work
        if model_name == "GPT-OSS 20B (Reliable)":
            raise
        try:
            print(f"Primary model failed, falling back to GPT-OSS-20B: {str(primary_error)[:100]}")
            client = InferenceClient(token=hf_token, model="openai/gpt-oss-20b")
            response_text = _stream_chat(client, messages, max_tokens, temperature, top_p)
            if not response_text:
                raise ValueError("Empty response from fallback model")
            description = extract_json_from_response(response_text)
            return response_text.strip(), description, "GPT-OSS-20B (Fallback)"
        except Exception as fallback_error:
            raise Exception(
                f"Both primary and fallback failed. "
                f"Primary: {str(primary_error)[:100]}, Fallback: {str(fallback_error)[:100]}"
            )


def process_csv_enhanced(
    files, category_column, model_name, prompt_template,
    max_tokens, temperature, top_p, output_format,
    progress=gr.Progress()
):
    """Enhanced processing with better error messages and fallbacks."""
    if not files or len(files) == 0:
        return "Please upload at least one CSV file.", None, None

    # Get HF token
    hf_token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_TOKEN")
    if not hf_token:
        return (
            "⚠️ Error: HF_TOKEN not found. Please add your Hugging Face token as a Space Secret.\n"
            "Go to: Space Settings → Secrets → Add 'HF_TOKEN'"
        ), None, None

    all_results = []
    status_messages = []
    output_files = []

    for file_idx, file in enumerate(files):
        # Resolve the name first so the error handler below can always use it
        file_name = os.path.basename(file.name)
        try:
            df = pd.read_csv(file.name)
            status_messages.append(f"📄 Processing file {file_idx + 1}/{len(files)}: {file_name}")

            # Check that the category column exists
            if category_column not in df.columns:
                available_cols = ", ".join(df.columns[:5])
                status_messages.append(
                    f"⚠️ Column '{category_column}' not found. Available: {available_cols}"
                )
                continue

            # Get unique categories
            categories = df[category_column].dropna().unique()
            total_categories = len(categories)
            file_results = []

            for idx, category in enumerate(categories):
                # Approximate overall progress (assumes similar category counts per file)
                progress(
                    (file_idx * total_categories + idx) / (len(files) * total_categories),
                    desc=f"Processing: {category[:30]}..."
                )
                try:
                    raw_response, description, used_model = process_single_category_with_fallback(
                        category, model_name, prompt_template,
                        max_tokens, temperature, top_p, hf_token
                    )
                    result = {
                        "Category": category,
                        "Description": description,
                        "Word_Count": len(description.split()),
                        "Model_Used": used_model,
                        "Raw_Response": raw_response,
                        "Status": "Success"
                    }
                    file_results.append(result)
                    all_results.append(result)
                    status_messages.append(
                        f"✅ {category[:30]}... ({len(description.split())} words)"
                    )
                except Exception as e:
                    error_msg = str(e)
                    if "Request ID" in error_msg:
                        error_msg = "API Error - Try lowering temperature or using GPT-OSS model"
                    result = {
                        "Category": category,
                        "Description": f"[FAILED: {error_msg[:100]}]",
                        "Word_Count": 0,
                        "Model_Used": model_name,
                        "Raw_Response": "",
                        "Status": "Failed"
                    }
                    file_results.append(result)
                    all_results.append(result)
                    status_messages.append(f"❌ {category[:30]}... - {error_msg[:50]}")

                # Rate limiting between API calls
                time.sleep(0.5)

            # Save output files
            if file_results:
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                base_name = os.path.splitext(file_name)[0]
                output_df = pd.DataFrame(file_results)

                if output_format in ["CSV", "Both"]:
                    csv_filename = f"output_{base_name}_{timestamp}.csv"
                    output_df.to_csv(csv_filename, index=False)
                    output_files.append(csv_filename)

                if output_format in ["JSON", "Both"]:
                    json_filename = f"output_{base_name}_{timestamp}.json"
                    with open(json_filename, "w") as f:
                        json.dump(file_results, f, indent=2)
                    output_files.append(json_filename)

                # Per-file summary
                success_count = sum(1 for r in file_results if r["Status"] == "Success")
                failed_count = len(file_results) - success_count
                status_messages.append(f"""
📊 {file_name} Summary:
- Total: {len(file_results)} categories
- Success: {success_count} ({success_count / max(len(file_results), 1) * 100:.1f}%)
- Failed: {failed_count}
""")
        except Exception as e:
            status_messages.append(f"❌ Error processing {file_name}: {str(e)}")

    # Overall summary
    if all_results:
        total_success = sum(1 for r in all_results if r["Status"] == "Success")
        total_failed = len(all_results) - total_success
        summary = f"""## 🎯 Processing Complete!

### Statistics:
- **Total Processed:** {len(all_results)} categories
- **Successful:** {total_success} ({total_success / len(all_results) * 100:.1f}%)
- **Failed:** {total_failed}

### Details:
"""
        status_text = summary + "\n".join(status_messages)

        # Preview the first 20 results
        preview_df = pd.DataFrame(all_results)[
            ["Category", "Description", "Word_Count", "Status"]
        ].head(20)
        return status_text, output_files, preview_df

    return "\n".join(status_messages), None, None
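
# Headless usage sketch (assumptions: HF_TOKEN is set in the environment and
# "Photographer" is an illustrative category, not app data):
#
#   raw, description, used_model = process_single_category_with_fallback(
#       category="Photographer",
#       model_name="GPT-OSS 20B (Reliable)",
#       prompt_template="Clip-Ready Visual (15-30 words)",
#       max_tokens=256,
#       temperature=0.3,
#       top_p=0.9,
#       hf_token=os.environ["HF_TOKEN"],
#   )
#
# The parser tolerates fenced or single-quoted model output, e.g.:
#
#   extract_json_from_response(
#       '```json\n{"Category": "Mehandi", "Description": "henna artwork on hands"}\n```'
#   )  # -> "henna artwork on hands"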
# Create Gradio interface
with gr.Blocks(title="Multi-Model Business Description Generator", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🚀 Multi-Model Business Description Generator

    Generate CLIP-ready visual descriptions using multiple AI models.

    ### Features:
    - 🤖 **4 Different Models** - Choose the best for your needs
    - 📝 **3 Prompt Templates** - Optimized for different use cases
    - 🔄 **Automatic Fallback** - Falls back to GPT-OSS if the primary model fails
    - 💾 **CSV & JSON Export** - Multiple output formats
    """)

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### 📤 Input")
            files_input = gr.File(
                label="Upload CSV Files",
                file_count="multiple",
                file_types=[".csv"]
            )
            category_column = gr.Textbox(
                label="Category Column Name",
                value="category",
                placeholder="Column name containing categories"
            )

            gr.Markdown("### 🤖 Model Configuration")
            model_selector = gr.Dropdown(
                label="Select Model",
                choices=list(MODEL_CONFIGS.keys()),
                value="GPT-OSS 20B (Reliable)",
                info="GPT-OSS is most reliable; others may require fallback"
            )
            prompt_template = gr.Dropdown(
                label="Prompt Template",
                choices=list(PROMPT_TEMPLATES.keys()),
                value="Your Original Prompt",
                info="Choose based on desired output style"
            )

            gr.Markdown("### ⚙️ Settings")
            with gr.Row():
                temperature = gr.Slider(
                    minimum=0.1, maximum=1.0, value=0.3, step=0.05,
                    label="Temperature", info="Lower = more consistent"
                )
                top_p = gr.Slider(
                    minimum=0.1, maximum=1.0, value=0.9, step=0.05,
                    label="Top-p"
                )
            max_tokens = gr.Slider(
                minimum=64, maximum=512, value=256, step=16,
                label="Max Tokens"
            )
            output_format = gr.Radio(
                label="Output Format",
                choices=["CSV", "JSON", "Both"],
                value="CSV"
            )
            process_btn = gr.Button("🚀 Generate Descriptions", variant="primary", size="lg")

        with gr.Column(scale=2):
            gr.Markdown("### 📊 Results")
            status_output = gr.Markdown(
                value="Results will appear here...",
                label="Status"
            )
            results_preview = gr.Dataframe(
                label="Preview (First 20 Results)",
                headers=["Category", "Description", "Word_Count", "Status"],
                wrap=True
            )
            files_output = gr.File(
                label="📥 Download Output Files",
                file_count="multiple"
            )

    with gr.Row():
        gr.Markdown("""
        ### 💡 Tips:
        - **GPT-OSS 20B** is the most reliable model
        - Use **Temperature 0.2-0.4** for consistent results
        - The **Clip-Ready** template gives 15-30 word descriptions
        - If a model fails, the app automatically falls back to GPT-OSS

        ### ⚠️ Troubleshooting:
        - **API Errors**: Try the GPT-OSS 20B model
        - **Failed Categories**: Lower the temperature to 0.2
        - **Empty Responses**: Check that your HF_TOKEN is valid
        """)

    # Wire the process button to the batch pipeline
    process_btn.click(
        fn=process_csv_enhanced,
        inputs=[
            files_input, category_column, model_selector, prompt_template,
            max_tokens, temperature, top_p, output_format
        ],
        outputs=[status_output, files_output, results_preview]
    )

if __name__ == "__main__":
    demo.launch()
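
# For long batch runs on Spaces, enabling Gradio's request queue helps avoid
# HTTP timeouts (a sketch; max_size=16 is an arbitrary assumption):
#
#   demo.queue(max_size=16).launch()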