Merge pull request #2 from 23f3003322/dev

Files changed:
- .gitignore +1 -1
- app/__pycache__/__init__.cpython-313.pyc +0 -0
- app/__pycache__/main.cpython-313.pyc +0 -0
- app/api/routes/__pycache__/task.cpython-313.pyc +0 -0
- app/core/__pycache__/config.cpython-313.pyc +0 -0
- app/core/__pycache__/exceptions.cpython-313.pyc +0 -0
- app/core/__pycache__/logging.cpython-313.pyc +0 -0
- app/core/__pycache__/security.cpython-313.pyc +0 -0
- app/models/__pycache__/request.cpython-313.pyc +0 -0
- app/models/__pycache__/response.cpython-313.pyc +0 -0
- app/services/__pycache__/task_processor.cpython-313.pyc +0 -0
- app/services/answer_generator.py +31 -0
- app/services/audio_processor.py +155 -0
- app/services/task_fetcher.py +97 -47
.gitignore
CHANGED

@@ -50,4 +50,4 @@ orchestrator.md
 questions.md
 task_processor.md
 unified.md
-
+audio_processor.md
app/__pycache__/__init__.cpython-313.pyc
DELETED
Binary file (300 Bytes)

app/__pycache__/main.cpython-313.pyc
DELETED
Binary file (3.6 kB)

app/api/routes/__pycache__/task.cpython-313.pyc
CHANGED
Binary files a/app/api/routes/__pycache__/task.cpython-313.pyc and b/app/api/routes/__pycache__/task.cpython-313.pyc differ

app/core/__pycache__/config.cpython-313.pyc
DELETED
Binary file (7.11 kB)

app/core/__pycache__/exceptions.cpython-313.pyc
DELETED
Binary file (6.43 kB)

app/core/__pycache__/logging.cpython-313.pyc
DELETED
Binary file (5.49 kB)

app/core/__pycache__/security.cpython-313.pyc
DELETED
Binary file (1.66 kB)

app/models/__pycache__/request.cpython-313.pyc
DELETED
Binary file (2.62 kB)

app/models/__pycache__/response.cpython-313.pyc
DELETED
Binary file (4.4 kB)

app/services/__pycache__/task_processor.cpython-313.pyc
DELETED
Binary file (10.4 kB)
app/services/answer_generator.py
CHANGED

@@ -5,6 +5,7 @@ from app.core.logging import get_logger
 from app.core.exceptions import AnswerGenerationError
 from app.models.answer import AnswerResult
 from app.models.analysis import QuestionAnalysis
+from app.services.audio_processor import AudioProcessor
 logger = get_logger(__name__)
 
 
@@ -21,6 +22,7 @@ class AnswerGenerator:
         """
         self.llm_client = llm_client
         self._generator_agent = None
+        self.audio_processor = AudioProcessor()
 
     async def initialize(self):
         """Initialize LLM agent for answer generation"""
@@ -63,6 +65,35 @@ class AnswerGenerator:
         logger.info(f"💡 Generating answer for {analysis.question_type}...")
 
         try:
+            if analysis.question_type == 'audio_transcription':
+                logger.info("🎤 Audio transcription task detected")
+
+                # Find audio file
+                audio_file = next(
+                    (f for f in downloaded_files
+                     if f['type'] in ['.opus', '.mp3', '.wav', '.m4a', '.ogg']),
+                    None
+                )
+
+                if not audio_file:
+                    raise AnswerGenerationError(
+                        "Audio file not found. Expected .opus, .mp3, or .wav file."
+                    )
+
+                logger.info(f"   Found audio file: {audio_file['filename']}")
+
+                # Transcribe audio
+                answer = await self.audio_processor.transcribe_audio(
+                    audio_file_path=audio_file['local_path'],
+                    language='en',  # English for Q5
+                    lowercase=True  # Q5 requires lowercase
+                )
+
+                logger.info(f"✅ Audio transcribed successfully")
+                logger.info(f"   Answer: {answer}")
+
+                return answer
+
             # Step 1: Build comprehensive context for LLM
             context = self._build_generation_context(
                 analysis=analysis,
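The new branch consumes entries of downloaded_files shaped like the file_info dicts built by _download_files in task_fetcher.py below. A minimal illustrative entry, where only the key names come from the diff and every value is a placeholder:

# Illustrative downloaded_files entry for the audio_transcription branch.
# Only the key names are taken from the diff; the values are made up.
audio_entry = {
    'url': 'https://example.com/project2/q5.opus',
    'local_path': '/tmp/quiz_files/q5.opus',
    'filename': 'q5.opus',
    'size': 48213,
    'type': '.opus',
}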
app/services/audio_processor.py
ADDED

@@ -0,0 +1,155 @@
+from __future__ import annotations
+
+from typing import Optional, List
+from pathlib import Path
+import base64
+import httpx
+
+from app.core.logging import get_logger
+from app.core.exceptions import AnswerGenerationError
+
+logger = get_logger(__name__)
+
+
+class AudioProcessor:
+    """
+    Transcribe audio using OpenRouter (via aipipe) by sending audio to an
+    audio-capable model and asking it to transcribe.
+
+    This is designed for Project2 Q5: return lowercase transcription including 3-digit code.
+    """
+
+    def __init__(
+        self,
+        aipipe_token: Optional[str] = None,
+        base_url: str = "https://aipipe.org/openrouter/v1",
+        # Best default from your available list for transcription:
+        primary_model: str = "mistralai/voxtral-small-24b-2507",
+        # Fallbacks (all appear in your /models list):
+        fallback_models: Optional[List[str]] = None,
+    ):
+        self.base_url = base_url.rstrip("/")
+        self.primary_model = primary_model
+        self.fallback_models = fallback_models or [
+            "google/gemini-2.5-pro",
+            "google/gemini-2.5-flash",
+            "google/gemini-2.5-flash-lite",
+        ]
+
+        if aipipe_token is None:
+            import os
+            aipipe_token = os.getenv("AIPIPE_TOKEN")
+
+        if not aipipe_token:
+            raise ValueError("AIPIPE_TOKEN not found in environment or constructor")
+
+        self.aipipe_token = aipipe_token
+        logger.info(
+            "✅ AudioProcessor(OpenRouter) initialized "
+            f"primary_model={self.primary_model}"
+        )
+
+    def _models_to_try(self) -> List[str]:
+        # Keep order: primary first, then fallbacks
+        models = [self.primary_model]
+        for m in self.fallback_models:
+            if m not in models:
+                models.append(m)
+        return models
+
+    async def transcribe_audio(
+        self,
+        audio_file_path: str,
+        language: Optional[str] = "en",
+        lowercase: bool = True,
+    ) -> str:
+        audio_path = Path(audio_file_path)
+        if not audio_path.exists():
+            raise AnswerGenerationError(f"Audio file not found: {audio_file_path}")
+
+        audio_bytes = audio_path.read_bytes()
+        audio_b64 = base64.b64encode(audio_bytes).decode("utf-8")
+
+        # Most of your quiz files are .opus
+        fmt = "opus" if audio_path.suffix.lower() == ".opus" else "wav"
+
+        url = f"{self.base_url}/chat/completions"
+        headers = {
+            "Authorization": f"Bearer {self.aipipe_token}",
+            "Content-Type": "application/json",
+        }
+
+        prompt = (
+            "Task: Transcribe the provided audio exactly.\n"
+            "Output rules:\n"
+            "- Return ONLY the transcription text.\n"
+            "- Lowercase only.\n"
+            "- Include the 3-digit number exactly.\n"
+            "- Do not add explanations.\n"
+            "- Do not refuse.\n"
+        )
+        if language:
+            prompt += f"Language hint: {language}.\n"
+
+        last_err = None
+
+        for model in self._models_to_try():
+            payload = {
+                "model": model,
+                "temperature": 0,
+                # Strongly encourage plain text output
+                "response_format": {"type": "text"},
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": [
+                            {"type": "text", "text": prompt},
+                            {
+                                "type": "input_audio",
+                                "input_audio": {
+                                    "data": audio_b64,
+                                    "format": fmt,
+                                },
+                            },
+                        ],
+                    }
+                ],
+            }
+
+            logger.info(f"🎤 Transcribing via OpenRouter: model={model}")
+            async with httpx.AsyncClient(timeout=180.0) as client:
+                resp = await client.post(url, headers=headers, json=payload)
+
+            if resp.status_code != 200:
+                last_err = f"{resp.status_code} - {resp.text}"
+                logger.warning(f"Model failed: {model} -> {last_err}")
+                continue
+
+            data = resp.json()
+            try:
+                text = data["choices"][0]["message"]["content"]
+            except Exception:
+                last_err = f"Unexpected response shape: {data}"
+                logger.warning(f"Model returned unexpected shape: {model}")
+                continue
+
+            transcription = (text or "").strip()
+            transcription = " ".join(transcription.split())
+            if lowercase:
+                transcription = transcription.lower()
+
+            # Guard against refusals
+            low = transcription.lower()
+            if "can't process audio" in low or "cannot process audio" in low or "i can't" in low:
+                last_err = f"Model refused audio: {model} -> {transcription}"
+                logger.warning(last_err)
+                continue
+
+            # Success
+            logger.info(f"✅ Transcription success with {model}: '{transcription}'")
+            return transcription
+
+        raise AnswerGenerationError(
+            "All OpenRouter audio-capable models failed for transcription. "
+            f"Last error: {last_err}"
+        )
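A minimal usage sketch for the new AudioProcessor, assuming the AIPIPE_TOKEN environment variable is set and a local sample.opus file exists (both are illustration-only assumptions, not part of the diff):

# Usage sketch only: "sample.opus" and the __main__ entry point are
# illustrative; AudioProcessor and transcribe_audio come from the diff above.
import asyncio

from app.services.audio_processor import AudioProcessor

async def main() -> None:
    processor = AudioProcessor()  # falls back to the AIPIPE_TOKEN env var
    text = await processor.transcribe_audio(
        audio_file_path="sample.opus",
        language="en",
        lowercase=True,
    )
    print(text)  # lowercase transcription, including the spoken 3-digit code

if __name__ == "__main__":
    asyncio.run(main())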
app/services/task_fetcher.py
CHANGED

@@ -4,6 +4,9 @@ Fetches and extracts task descriptions from URLs
 """
 
 import httpx
+from pathlib import Path
+import tempfile
+from urllib.parse import urljoin
 import json
 import re
 from typing import Optional, Dict, Any, List
@@ -117,53 +120,100 @@ class TaskFetcher:
         }
 
         return result
-[... 47 lines removed: a run of blank filler lines between return result and _fetch_content, one containing only "#" ...]
+
+    async def _download_files(
+        self,
+        file_links: List[Dict[str, str]],
+        base_url: str,
+        user_email: Optional[str] = None
+    ) -> List[Dict[str, Any]]:
+        """
+        Download files referenced in question.
+
+        Args:
+            file_links: List of dicts with 'href' and 'text'
+                Example: [{'href': '/project2/messy.csv', 'text': 'messy.csv'}]
+            base_url: Base URL to construct absolute URLs
+            user_email: User email for personalized URLs
+
+        Returns:
+            List of dicts with file info:
+            - url: Original URL
+            - local_path: Path to downloaded file
+            - filename: Extracted filename
+            - size: File size in bytes
+            - type: File extension (.csv, .json, etc.)
+        """
+        if not file_links:
+            return []
+
+        downloaded_files = []
+
+        # Create download directory
+        download_dir = Path(tempfile.gettempdir()) / "quiz_files"
+        download_dir.mkdir(exist_ok=True)
+
+        logger.info(f"📥 Downloading {len(file_links)} files to {download_dir}")
+
+        for link in file_links:
+            href = link['href']
+
+            try:
+                # Handle personalized URLs
+                # Example: "/project2/uv.json?email=<your email>"
+                if '<your email>' in href and user_email:
+                    href = href.replace('<your email>', user_email)
+                    logger.debug(f"Personalized URL: {href}")
+
+                # Construct absolute URL
+                full_url = urljoin(base_url, href)
+
+                # Extract filename
+                # "/project2/messy.csv" -> "messy.csv"
+                # "/project2/data.json?email=..." -> "data.json"
+                filename = Path(href.split('?')[0]).name
+                local_path = download_dir / filename
+
+                # Download file
+                logger.info(f"   Downloading: {filename} from {full_url}")
+
+                # Use existing httpx client if available, or create new one
+                if hasattr(self, 'client') and self.client:
+                    response = await self.client.get(full_url, timeout=60.0)
+                else:
+                    async with httpx.AsyncClient() as client:
+                        response = await client.get(full_url, timeout=60.0)
+
+                response.raise_for_status()
+
+                # Save to disk
+                with open(local_path, 'wb') as f:
+                    f.write(response.content)
+
+                # Get file info
+                file_info = {
+                    'url': full_url,
+                    'local_path': str(local_path),
+                    'filename': filename,
+                    'size': len(response.content),
+                    'type': local_path.suffix  # .csv, .json, .png, etc.
+                }
+
+                downloaded_files.append(file_info)
+                logger.info(f"   ✅ Downloaded: {filename} ({file_info['size']} bytes)")
+
+            except httpx.HTTPStatusError as e:
+                logger.error(f"   ❌ HTTP error downloading {href}: {e.response.status_code}")
+                # Continue with other files
+
+            except Exception as e:
+                logger.error(f"   ❌ Failed to download {href}: {e}")
+                # Continue with other files
+
+        logger.info(f"✅ Downloaded {len(downloaded_files)}/{len(file_links)} files")
+
+        return downloaded_files
+
     async def _fetch_content(self, url: str) -> Dict[str, Any]:
         """
         Fetch content from URL.