#!/usr/bin/env python3
"""
Tunnel Crack Detection - Hugging Face Spaces App

A Streamlit-based web interface for tunnel crack detection using YOLOv12-DINO.
Deployed on Hugging Face Spaces for easy access and sharing.
"""
import os
import sys
import tempfile
import time
from io import BytesIO
from pathlib import Path
from typing import Dict, List, Tuple

import cv2
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import requests
import streamlit as st
from PIL import Image

# Add the current directory to the Python path so the local inference module resolves
current_dir = Path(__file__).parent
sys.path.insert(0, str(current_dir))

try:
    from inference import YOLOInference
except ImportError as e:
    st.error(f"Error importing inference module: {e}")
    st.stop()
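
# Expected interface of the local inference module, as used throughout this file:
# YOLOInference takes weights / conf / iou / imgsz / device / verbose keyword
# arguments and exposes a predict_single() method returning Ultralytics-style
# result objects (result.plot(), result.boxes with .xyxy/.conf/.cls). The actual
# definition lives in inference.py alongside this app.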
# Page configuration
st.set_page_config(
    page_title="Tunnel Crack Detection",
    page_icon="🔍",
    layout="wide",
    initial_sidebar_state="expanded"
)
# Custom CSS for better styling
st.markdown("""
<style>
    .main-header {
        font-size: 3rem;
        color: #1e88e5;
        text-align: center;
        margin-bottom: 2rem;
        text-shadow: 2px 2px 4px rgba(0,0,0,0.1);
    }
    .sub-header {
        text-align: center;
        color: #666;
        margin-bottom: 3rem;
        font-size: 1.2rem;
    }
    .metric-card {
        background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
        padding: 1.5rem;
        border-radius: 1rem;
        margin: 0.5rem 0;
        box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
    }
    .detection-box {
        border: 2px solid #1e88e5;
        border-radius: 1rem;
        padding: 1.5rem;
        margin: 1rem 0;
        background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%);
        box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
    }
    .success-box {
        border: 2px solid #4caf50;
        border-radius: 1rem;
        padding: 1.5rem;
        margin: 1rem 0;
        background: linear-gradient(135deg, #f1f8e9 0%, #dcedc8 100%);
        box-shadow: 0 4px 8px rgba(76, 175, 80, 0.2);
    }
    .error-box {
        border: 2px solid #f44336;
        border-radius: 1rem;
        padding: 1.5rem;
        margin: 1rem 0;
        background: linear-gradient(135deg, #ffebee 0%, #ffcdd2 100%);
        box-shadow: 0 4px 8px rgba(244, 67, 54, 0.2);
    }
    .warning-box {
        border: 2px solid #ff9800;
        border-radius: 1rem;
        padding: 1.5rem;
        margin: 1rem 0;
        background: linear-gradient(135deg, #fff3e0 0%, #ffe0b2 100%);
        box-shadow: 0 4px 8px rgba(255, 152, 0, 0.2);
    }
    .info-box {
        border: 2px solid #2196f3;
        border-radius: 1rem;
        padding: 1.5rem;
        margin: 1rem 0;
        background: linear-gradient(135deg, #e3f2fd 0%, #bbdefb 100%);
        box-shadow: 0 4px 8px rgba(33, 150, 243, 0.2);
    }
    .stButton > button {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        color: white;
        border: none;
        border-radius: 0.5rem;
        padding: 0.5rem 2rem;
        font-weight: bold;
        transition: all 0.3s ease;
    }
    .stButton > button:hover {
        transform: translateY(-2px);
        box-shadow: 0 6px 12px rgba(0, 0, 0, 0.2);
    }
    .sidebar .stSelectbox > div > div {
        background-color: #f8f9fa;
    }
</style>
""", unsafe_allow_html=True)
# Initialize session state
if 'model_loaded' not in st.session_state:
    st.session_state.model_loaded = False
if 'model_instance' not in st.session_state:
    st.session_state.model_instance = None
if 'detection_history' not in st.session_state:
    st.session_state.detection_history = []

# Model download URL (replace with the actual model URL, or use the Hugging Face Hub)
MODEL_URL = "https://huggingface.co/spaces/YOUR_USERNAME/tunnel-crack-detection/resolve/main/segment_defect.pt"
def download_model():
    """Download the model weights from a direct URL."""
    model_path = "segment_defect.pt"
    if not os.path.exists(model_path):
        st.info("Downloading model weights... This may take a few minutes.")
        try:
            # Try to download from the URL (you may need to adjust this).
            # For HF Spaces, you might want to use the huggingface_hub library
            # instead; a Hub-based alternative is sketched below.
            response = requests.get(MODEL_URL, stream=True)
            response.raise_for_status()
            with open(model_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
            st.success("Model downloaded successfully!")
        except Exception as e:
            st.error(f"Failed to download model: {e}")
            return None
    return model_path
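
# Optional alternative: pull the weights from the Hugging Face Hub rather than a raw
# URL, as hinted at in download_model() above. This is only a sketch under assumptions:
# the default repo_id is a placeholder, and repo_type="space" assumes the file lives in
# the Space repo (as MODEL_URL suggests); drop repo_type for a regular model repo.
def download_model_from_hub(repo_id: str = "YOUR_USERNAME/tunnel-crack-detection",
                            filename: str = "segment_defect.pt") -> str:
    """Download (and cache) the weights from the Hub and return the local file path."""
    # Imported lazily so the app still runs without huggingface_hub installed,
    # as long as this helper is never called.
    from huggingface_hub import hf_hub_download
    return hf_hub_download(repo_id=repo_id, filename=filename, repo_type="space")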
def load_model(weights_path: str, device: str = "cpu") -> Tuple[bool, str]:
    """Load the YOLO model with the specified weights."""
    try:
        if not Path(weights_path).exists():
            return False, f"Model file not found: {weights_path}"

        with st.spinner("Loading tunnel crack detection model..."):
            st.session_state.model_instance = YOLOInference(
                weights=weights_path,
                conf=0.25,
                iou=0.7,
                imgsz=640,
                device=device,
                verbose=True
            )
            st.session_state.model_loaded = True

        # Gather model information for the status message
        model_info = "Model loaded successfully\n"
        model_info += f"Task: {st.session_state.model_instance.model.task}\n"
        if hasattr(st.session_state.model_instance.model.model, 'names'):
            class_names = list(st.session_state.model_instance.model.model.names.values())
            model_info += f"Classes ({len(class_names)}): {', '.join(class_names)}"
        return True, model_info
    except Exception as e:
        return False, f"Error loading model: {str(e)}"
def perform_detection(
    image: np.ndarray,
    conf_threshold: float,
    iou_threshold: float,
    image_size: int
) -> Tuple[np.ndarray, Dict, str]:
    """Perform crack detection using the loaded model."""
    if st.session_state.model_instance is None:
        return None, {}, "No model loaded"

    try:
        # Update model parameters
        st.session_state.model_instance.conf = conf_threshold
        st.session_state.model_instance.iou = iou_threshold
        st.session_state.model_instance.imgsz = image_size

        # Save the image to a temporary file so it can be passed as a source path
        with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as tmp_file:
            # Convert RGB to BGR for OpenCV
            image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.imwrite(tmp_file.name, image_bgr)
            tmp_path = tmp_file.name

        start_time = time.time()
        try:
            # Use the exact same method as inference.py
            results = st.session_state.model_instance.predict_single(
                source=tmp_path,
                save=False,
                show=False,
                save_txt=False,
                save_conf=False,
                save_crop=False,
                output_dir=None
            )
            inference_time = time.time() - start_time

            if not results:
                return image, {}, "No results returned from model"
            result = results[0]

            # Get the annotated image and convert back to RGB for display
            annotated_img = result.plot()
            annotated_img = cv2.cvtColor(annotated_img, cv2.COLOR_BGR2RGB)

            # Process detection results and generate the summary text
            detection_data = process_detection_results(result, inference_time)
            summary_text = generate_detection_summary(result, detection_data, inference_time)
            return annotated_img, detection_data, summary_text
        finally:
            # Clean up the temporary file
            if os.path.exists(tmp_path):
                os.unlink(tmp_path)
    except Exception as e:
        return None, {}, f"Error during detection: {str(e)}"
def process_detection_results(result, inference_time: float) -> Dict:
    """Process detection results into structured data."""
    if result.boxes is None or len(result.boxes) == 0:
        return {
            'total_detections': 0,
            'class_counts': {},
            'detections': [],
            'inference_time': inference_time
        }

    detections = result.boxes

    # Get class names (fall back to generic labels if unavailable)
    if hasattr(st.session_state.model_instance.model.model, 'names'):
        class_names = st.session_state.model_instance.model.model.names
    else:
        class_names = getattr(result, 'names', {i: f"Class_{i}" for i in range(100)})

    # Process each detection
    detection_list = []
    class_counts = {}
    for i, (box, conf, cls) in enumerate(zip(detections.xyxy, detections.conf, detections.cls)):
        cls_id = int(cls)
        cls_name = class_names.get(cls_id, f"Class_{cls_id}")
        confidence = float(conf)
        x1, y1, x2, y2 = box.tolist()
        detection_list.append({
            'id': i + 1,
            'class': cls_name,
            'confidence': confidence,
            'x1': int(x1),
            'y1': int(y1),
            'x2': int(x2),
            'y2': int(y2),
            'width': int(x2 - x1),
            'height': int(y2 - y1),
            'area': int((x2 - x1) * (y2 - y1))
        })
        class_counts[cls_name] = class_counts.get(cls_name, 0) + 1

    return {
        'total_detections': len(detection_list),
        'class_counts': class_counts,
        'detections': detection_list,
        'inference_time': inference_time
    }
def generate_detection_summary(result, detection_data: Dict, inference_time: float) -> str:
    """Generate the detection summary text."""
    total_detections = detection_data['total_detections']
    if total_detections == 0:
        return "No cracks or defects detected in the image."

    summary = "**Detection Results:**\n\n"
    summary += "**Images processed:** 1\n"
    summary += f"**Total detections:** {total_detections}\n"
    summary += f"**Inference time:** {inference_time:.3f}s\n\n"
    summary += "**Detections by class:**\n"
    for cls_name, count in sorted(detection_data['class_counts'].items()):
        summary += f"  • {cls_name}: {count}\n"
    return summary
def create_detection_chart(detection_data: Dict):
    """Create interactive charts for detection results."""
    if detection_data['total_detections'] == 0:
        st.info("No detections to visualize")
        return

    col1, col2 = st.columns(2)

    with col1:
        # Class distribution pie chart
        class_counts = detection_data['class_counts']
        fig_pie = px.pie(
            values=list(class_counts.values()),
            names=list(class_counts.keys()),
            title="Detection Distribution by Class",
            color_discrete_sequence=px.colors.qualitative.Set3
        )
        fig_pie.update_layout(height=400)
        st.plotly_chart(fig_pie, use_container_width=True)

    with col2:
        # Confidence distribution box plot
        confidences = [det['confidence'] for det in detection_data['detections']]
        classes = [det['class'] for det in detection_data['detections']]
        fig_conf = px.box(
            x=classes,
            y=confidences,
            title="Confidence Distribution by Class",
            color=classes
        )
        fig_conf.update_layout(height=400)
        fig_conf.update_xaxes(title="Class")
        fig_conf.update_yaxes(title="Confidence Score")
        st.plotly_chart(fig_conf, use_container_width=True)
def create_detection_table(detection_data: Dict):
    """Create a detailed detection table."""
    if detection_data['total_detections'] == 0:
        st.info("No detections to display")
        return

    # Convert to a DataFrame and format confidence as a percentage
    df = pd.DataFrame(detection_data['detections'])
    df['confidence_pct'] = df['confidence'].apply(lambda x: f"{x:.1%}")

    # Reorder and rename columns for better display
    display_columns = ['id', 'class', 'confidence_pct', 'x1', 'y1', 'x2', 'y2', 'width', 'height', 'area']
    df_display = df[display_columns].copy()
    df_display.columns = ['ID', 'Class', 'Confidence', 'X1', 'Y1', 'X2', 'Y2', 'Width', 'Height', 'Area']
    st.dataframe(df_display, use_container_width=True, height=300)

    # Download button for results
    csv = df_display.to_csv(index=False)
    st.download_button(
        label="Download Detection Results (CSV)",
        data=csv,
        file_name=f"crack_detection_results_{int(time.time())}.csv",
        mime="text/csv",
        use_container_width=True
    )
def main():
    """Main Streamlit application."""
    # Header
    st.markdown('<h1 class="main-header">Tunnel Crack Detection</h1>', unsafe_allow_html=True)
    st.markdown('<p class="sub-header">AI-powered crack and defect detection for tunnel infrastructure using YOLOv12-DINO</p>', unsafe_allow_html=True)

    # Info about the model
    st.markdown("""
    <div class="info-box">
        <h4>About This Model</h4>
        <p>This application uses YOLOv12-DINO, a deep learning model that combines:</p>
        <ul>
            <li><strong>YOLOv12</strong>: the latest version of the YOLO object detection architecture</li>
            <li><strong>DINOv3</strong>: a Vision Transformer backbone for enhanced feature extraction</li>
            <li><strong>Specialized training</strong>: fine-tuned specifically for tunnel crack and defect detection</li>
        </ul>
        <p><em>Designed for infrastructure monitoring and maintenance applications.</em></p>
    </div>
    """, unsafe_allow_html=True)

    # Auto-load model on startup (currently a no-op; a possible implementation is sketched below)
    if not st.session_state.model_loaded:
        with st.spinner("Initializing model..."):
            # For HF Spaces, you might want to have the model pre-loaded or use a
            # different approach, e.g.:
            # model_path = download_model()
            # if model_path:
            #     success, message = load_model(model_path, "cpu")
            #     if success:
            #         st.success("Model ready for inference!")
            pass
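
    # A possible auto-load path (an assumption about deployment, not confirmed by this
    # project): if the weights file is committed next to app.py in the Space repo, it
    # could be loaded directly without an upload step, e.g.:
    #
    # if not st.session_state.model_loaded:
    #     bundled_weights = current_dir / "segment_defect.pt"
    #     if bundled_weights.exists():
    #         success, message = load_model(str(bundled_weights), "cpu")
    #         if success:
    #             st.success("Model ready for inference!")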
    # Sidebar for configuration
    with st.sidebar:
        st.header("Configuration")

        # Model loading section
        st.subheader("Model")
        if not st.session_state.model_loaded:
            st.markdown('<div class="warning-box">Upload a model to get started</div>', unsafe_allow_html=True)
            uploaded_file = st.file_uploader(
                "Upload Model Weights (.pt file)",
                type=['pt'],
                help="Upload your trained YOLOv12-DINO model weights",
                label_visibility="visible"
            )
            if uploaded_file is not None:
                # Show file info
                file_size_mb = uploaded_file.size / (1024 * 1024)
                st.success(f"File uploaded: {uploaded_file.name} ({file_size_mb:.1f} MB)")

                if st.button("Load Model", type="primary"):
                    # Show progress while the uploaded file is processed
                    progress_bar = st.progress(0)
                    status_text = st.empty()
                    try:
                        # Save the uploaded file temporarily
                        status_text.text("Processing uploaded file...")
                        progress_bar.progress(25)
                        with tempfile.NamedTemporaryFile(delete=False, suffix='.pt') as tmp_file:
                            tmp_file.write(uploaded_file.read())
                            tmp_path = tmp_file.name

                        progress_bar.progress(50)
                        status_text.text("Loading model...")
                        success, message = load_model(tmp_path, "cpu")
                        progress_bar.progress(100)
                        status_text.text("Model loading complete!")

                        if success:
                            st.success(message)
                            st.rerun()
                        else:
                            st.error(message)

                        # Clean up progress indicators
                        time.sleep(1)
                        progress_bar.empty()
                        status_text.empty()
                    except Exception as e:
                        st.error(f"Error processing file: {str(e)}")
                        progress_bar.empty()
                        status_text.empty()
        else:
            st.markdown('<div class="success-box">Model loaded and ready!</div>', unsafe_allow_html=True)

        # Detection parameters
        st.subheader("Detection Parameters")
        conf_threshold = st.slider(
            "Confidence Threshold",
            min_value=0.01,
            max_value=1.0,
            value=0.25,
            step=0.01,
            help="Minimum confidence for a detection to be kept"
        )
        iou_threshold = st.slider(
            "IoU Threshold",
            min_value=0.01,
            max_value=1.0,
            value=0.7,
            step=0.01,
            help="IoU threshold for Non-Maximum Suppression"
        )
        image_size = st.selectbox(
            "Image Size",
            options=[320, 416, 512, 640, 832, 1024],
            index=3,
            help="Input image size for the model"
        )

        # Session statistics
        if st.session_state.detection_history:
            st.subheader("Session Stats")
            total_detections = sum(h['detections'] for h in st.session_state.detection_history)
            st.metric("Images Processed", len(st.session_state.detection_history))
            st.metric("Total Detections", total_detections)
    # Main content area
    col1, col2 = st.columns([1, 1])

    with col1:
        st.header("Input Image")

        # Image upload
        image_file = st.file_uploader(
            "Upload Image for Analysis",
            type=['jpg', 'jpeg', 'png', 'bmp'],
            help="Upload a tunnel image for crack detection"
        )

        if image_file is not None:
            # Load and display the image
            try:
                image = Image.open(image_file)
                image_np = np.array(image)
                st.image(image, caption="Input Image", use_column_width=True)
                st.info(f"Image size: {image.width} × {image.height} pixels")
            except Exception as e:
                st.error(f"Error loading image: {str(e)}")
                return

            # Detection button
            detect_button = st.button(
                "Analyze for Cracks",
                type="primary",
                disabled=not st.session_state.model_loaded,
                use_container_width=True
            )

            if detect_button:
                if st.session_state.model_loaded:
                    with st.spinner("Analyzing image for cracks and defects..."):
                        annotated_img, detection_data, summary_text = perform_detection(
                            image_np, conf_threshold, iou_threshold, image_size
                        )

                    # Store results in session state
                    st.session_state.last_detection = {
                        'annotated_img': annotated_img,
                        'detection_data': detection_data,
                        'summary_text': summary_text,
                        'timestamp': time.time()
                    }

                    # Add to history (detection_data may be empty if detection failed)
                    st.session_state.detection_history.append({
                        'filename': image_file.name,
                        'detections': detection_data.get('total_detections', 0),
                        'timestamp': time.time()
                    })
                    st.rerun()
                else:
                    st.error("Please upload and load a model first")
        else:
            st.markdown("""
            <div class="info-box">
                <h4>Get Started</h4>
                <ol>
                    <li>Upload your YOLOv12-DINO model weights in the sidebar</li>
                    <li>Upload a tunnel image above</li>
                    <li>Click "Analyze for Cracks" to detect defects</li>
                </ol>
            </div>
            """, unsafe_allow_html=True)
    with col2:
        st.header("Detection Results")
        if 'last_detection' in st.session_state:
            detection_result = st.session_state.last_detection

            # Display the annotated image
            if detection_result['annotated_img'] is not None:
                try:
                    st.image(
                        detection_result['annotated_img'],
                        caption="Detection Results",
                        use_column_width=True
                    )
                except Exception as e:
                    st.error(f"Error displaying result image: {str(e)}")

            # Display the summary
            st.markdown(f'<div class="detection-box">{detection_result["summary_text"]}</div>',
                        unsafe_allow_html=True)
        else:
            st.markdown("""
            <div class="info-box">
                <h4>Results will appear here</h4>
                <p>Upload an image and click 'Analyze for Cracks' to see detection results with:</p>
                <ul>
                    <li>An annotated image with detected cracks</li>
                    <li>Detection statistics and confidence scores</li>
                    <li>Detailed analysis tables</li>
                    <li>Interactive charts and visualizations</li>
                </ul>
            </div>
            """, unsafe_allow_html=True)
    # Additional tabs for detailed analysis
    if ('last_detection' in st.session_state
            and st.session_state.last_detection['detection_data'].get('total_detections', 0) > 0):
        st.markdown("---")
        st.header("Detailed Analysis")

        tab1, tab2, tab3 = st.tabs(["Visualizations", "Detection Table", "Session History"])
        with tab1:
            create_detection_chart(st.session_state.last_detection['detection_data'])
        with tab2:
            create_detection_table(st.session_state.last_detection['detection_data'])
        with tab3:
            if st.session_state.detection_history:
                st.subheader("Analysis History")
                history_df = pd.DataFrame(st.session_state.detection_history)
                history_df['timestamp'] = pd.to_datetime(history_df['timestamp'], unit='s')
                history_df.columns = ['Filename', 'Detections', 'Timestamp']
                st.dataframe(history_df, use_container_width=True, height=300)

                col1, col2 = st.columns(2)
                with col1:
                    if st.button("Clear History", use_container_width=True):
                        st.session_state.detection_history = []
                        st.rerun()
                with col2:
                    if len(st.session_state.detection_history) > 0:
                        csv = history_df.to_csv(index=False)
                        st.download_button(
                            label="Download History",
                            data=csv,
                            file_name=f"detection_history_{int(time.time())}.csv",
                            mime="text/csv",
                            use_container_width=True
                        )
            else:
                st.info("No analysis history yet. Start by uploading and analyzing images!")
    # Footer
    st.markdown("---")
    st.markdown("""
    <div style='text-align: center; color: #666; padding: 2rem;'>
        <h4>Tunnel Crack Detection System</h4>
        <p>
            Powered by <strong>YOLOv12-DINO</strong> • Built with <strong>Streamlit</strong> •
            Deployed on <strong>Hugging Face Spaces</strong>
        </p>
        <p><em>Advanced AI for Infrastructure Monitoring and Maintenance</em></p>
    </div>
    """, unsafe_allow_html=True)

if __name__ == "__main__":
    main()