jobbler committed on
Commit
dbffc3c
·
1 Parent(s): be6166b

feat: Add percentage progress to downloading status in UI and extension

Browse files
firefox-extension/content.js CHANGED
@@ -56,8 +56,10 @@ browser.runtime.onMessage.addListener(async (request, sender, sendResponse) => {
56
  const statusRes = await fetch(`${apiUrl}/status`);
57
  if (statusRes.ok) {
58
  const statusData = await statusRes.json();
59
- if (statusData[modelVersion] === "downloading") {
60
- showToast(`Downloading Gemma 4 (${modelVersion})... this may take a few minutes.`, 0);
 
 
61
  }
62
  }
63
  } catch (e) {
@@ -188,8 +190,10 @@ async function processEntirePage() {
188
  const statusRes = await fetch(`${apiUrl}/status`);
189
  if (statusRes.ok) {
190
  const statusData = await statusRes.json();
191
- if (statusData[modelVersion] === "downloading") {
192
- showToast(`Downloading Gemma 4 (${modelVersion})... this may take a few minutes.`, 0);
 
 
193
  }
194
  }
195
  } catch (e) {}
@@ -203,8 +207,13 @@ async function processEntirePage() {
203
 
204
  // Only show analyzing text if not downloading
205
  const statusResTemp = await fetch(`${apiUrl}/status`).catch(() => null);
206
- if (!statusResTemp || !statusResTemp.ok || (await statusResTemp.json())[modelVersion] !== "downloading") {
207
  showToast(`FlowRead analyzing page (${processedCount}/${nodesToProcess.length} blocks)...`, 0);
 
 
 
 
 
208
  }
209
 
210
  await Promise.all(batch.map(async (node) => {
@@ -287,8 +296,10 @@ async function updateExisting(newSettings) {
287
  const statusRes = await fetch(`${apiUrl}/status`);
288
  if (statusRes.ok) {
289
  const statusData = await statusRes.json();
290
- if (statusData[modelVersion] === "downloading") {
291
- showToast(`Downloading Gemma 4 (${modelVersion})... this may take a few minutes.`, 0);
 
 
292
  }
293
  }
294
  } catch (e) {}
 
56
  const statusRes = await fetch(`${apiUrl}/status`);
57
  if (statusRes.ok) {
58
  const statusData = await statusRes.json();
59
+ if (statusData[modelVersion] && statusData[modelVersion].startsWith("downloading")) {
60
+ const parts = statusData[modelVersion].split(": ");
61
+ const progress = parts.length > 1 ? parts[1] : "...";
62
+ showToast(`Downloading Gemma 4 (${modelVersion}) ${progress}... this may take a few minutes.`, 0);
63
  }
64
  }
65
  } catch (e) {
 
190
  const statusRes = await fetch(`${apiUrl}/status`);
191
  if (statusRes.ok) {
192
  const statusData = await statusRes.json();
193
+ if (statusData[modelVersion] && statusData[modelVersion].startsWith("downloading")) {
194
+ const parts = statusData[modelVersion].split(": ");
195
+ const progress = parts.length > 1 ? parts[1] : "...";
196
+ showToast(`Downloading Gemma 4 (${modelVersion}) ${progress}... this may take a few minutes.`, 0);
197
  }
198
  }
199
  } catch (e) {}
 
207
 
208
  // Only show analyzing text if not downloading
209
  const statusResTemp = await fetch(`${apiUrl}/status`).catch(() => null);
210
+ if (!statusResTemp || !statusResTemp.ok) {
211
  showToast(`FlowRead analyzing page (${processedCount}/${nodesToProcess.length} blocks)...`, 0);
212
+ } else {
213
+ const statusJson = await statusResTemp.json();
214
+ if (!statusJson[modelVersion] || !statusJson[modelVersion].startsWith("downloading")) {
215
+ showToast(`FlowRead analyzing page (${processedCount}/${nodesToProcess.length} blocks)...`, 0);
216
+ }
217
  }
218
 
219
  await Promise.all(batch.map(async (node) => {
 
296
  const statusRes = await fetch(`${apiUrl}/status`);
297
  if (statusRes.ok) {
298
  const statusData = await statusRes.json();
299
+ if (statusData[modelVersion] && statusData[modelVersion].startsWith("downloading")) {
300
+ const parts = statusData[modelVersion].split(": ");
301
+ const progress = parts.length > 1 ? parts[1] : "...";
302
+ showToast(`Downloading Gemma 4 (${modelVersion}) ${progress}... this may take a few minutes.`, 0);
303
  }
304
  }
305
  } catch (e) {}
flowread-extension.zip CHANGED
Binary files a/flowread-extension.zip and b/flowread-extension.zip differ
 
main.py CHANGED
@@ -233,6 +233,30 @@ def get_study_stats():
233
  "preferences": preferences
234
  }
235
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
236
  # --- Saliency API (Existing) ---
237
  models = {}
238
  tokenizers = {}
@@ -249,7 +273,8 @@ def load_model(model_name: str):
249
  return models[model_name], tokenizers[model_name]
250
 
251
  print(f"Loading {model_name} on {device}...")
252
- model_status[model_name] = "downloading"
 
253
  try:
254
  if model_name == "27b-4a":
255
  # Use Gemma 4 27B in 4-bit (requires CUDA)
@@ -285,10 +310,12 @@ def load_model(model_name: str):
285
  models[model_name] = model
286
  tokenizers[model_name] = tokenizer
287
  model_status[model_name] = "loaded"
 
288
  return model, tokenizer
289
  except Exception as e:
290
  print(f"Error loading model {model_name}: {e}")
291
  model_status[model_name] = "error"
 
292
  raise e
293
 
294
  # Pre-load default 2b
 
233
  "preferences": preferences
234
  }
235
 
236
import sys
import re

# tqdm-style progress lines on stderr look like " 42%|####      | ...".
# Compile once instead of on every write.
_PROGRESS_RE = re.compile(r'(\d+)%\|')

class StderrProgressInterceptor:
    """Proxy around the real stderr that scrapes download progress.

    Hugging Face / tqdm render download progress bars to stderr. This wrapper
    tees every write to the original stream and, while ``active_model`` is
    set, mirrors any percentage it sees into the module-level ``model_status``
    dict so the /status endpoint can report "downloading: NN%" immediately.
    """

    def __init__(self, original):
        self.original = original      # the real stderr stream being wrapped
        self.current_progress = ""    # last percentage seen, e.g. "42%"
        self.active_model = None      # model key currently downloading, or None

    def write(self, s):
        # Preserve the file-object contract: return the underlying stream's
        # result (number of characters written) instead of dropping it.
        result = self.original.write(s)
        match = _PROGRESS_RE.search(s)
        if match and self.active_model:
            pct = match.group(1)
            self.current_progress = f"{pct}%"
            # Update the global status explicitly so the API returns it immediately
            model_status[self.active_model] = f"downloading: {self.current_progress}"
        return result

    def flush(self):
        self.original.flush()

    def __getattr__(self, name):
        # Delegate everything else (isatty, fileno, encoding, buffer, ...) to
        # the wrapped stream — tqdm and the logging module probe these on
        # sys.stderr and would otherwise hit AttributeError on this proxy.
        return getattr(self.original, name)

stderr_interceptor = StderrProgressInterceptor(sys.stderr)
sys.stderr = stderr_interceptor
260
  # --- Saliency API (Existing) ---
261
  models = {}
262
  tokenizers = {}
 
273
  return models[model_name], tokenizers[model_name]
274
 
275
  print(f"Loading {model_name} on {device}...")
276
+ model_status[model_name] = "downloading: 0%"
277
+ stderr_interceptor.active_model = model_name
278
  try:
279
  if model_name == "27b-4a":
280
  # Use Gemma 4 27B in 4-bit (requires CUDA)
 
310
  models[model_name] = model
311
  tokenizers[model_name] = tokenizer
312
  model_status[model_name] = "loaded"
313
+ stderr_interceptor.active_model = None
314
  return model, tokenizer
315
  except Exception as e:
316
  print(f"Error loading model {model_name}: {e}")
317
  model_status[model_name] = "error"
318
+ stderr_interceptor.active_model = None
319
  raise e
320
 
321
  # Pre-load default 2b
static/index.html CHANGED
@@ -485,8 +485,10 @@
485
  const statusRes = await fetch('/status');
486
  if (statusRes.ok) {
487
  const statusData = await statusRes.json();
488
- if (statusData[modelVersion] === "downloading") {
489
- loading.textContent = `Downloading Gemma 4 (${modelVersion}) Model... this takes a few minutes.`;
 
 
490
  }
491
  }
492
  } catch(e) {}
 
485
  const statusRes = await fetch('/status');
486
  if (statusRes.ok) {
487
  const statusData = await statusRes.json();
488
+ if (statusData[modelVersion] && statusData[modelVersion].startsWith("downloading")) {
489
+ const parts = statusData[modelVersion].split(": ");
490
+ const progress = parts.length > 1 ? parts[1] : "...";
491
+ loading.textContent = `Downloading Gemma 4 (${modelVersion}) Model ${progress}... this takes a few minutes.`;
492
  }
493
  }
494
  } catch(e) {}
test_stderr.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import sys
import re

# Matches tqdm-style progress output such as " 42%|####      | ...".
_PCT = re.compile(r'(\d+)%\|')


class StderrInterceptor:
    """Tee for stderr that announces any tqdm-style percentage it sees.

    Experiment script: verifies that wrapping sys.stderr is enough to observe
    Hugging Face download progress, by echoing each intercepted percentage
    back to the real stream during an actual model-config download.
    """

    def __init__(self, original):
        self.original = original

    def write(self, s):
        out = self.original
        out.write(s)
        hit = _PCT.search(s)
        if hit is None:
            return
        out.write(f"\nINTERCEPTED: {hit.group(1)}%\n")
        out.flush()

    def flush(self):
        self.original.flush()


sys.stderr = StderrInterceptor(sys.stderr)

# Force a real download so tqdm actually emits progress to stderr.
from transformers import AutoConfig

AutoConfig.from_pretrained("gpt2", force_download=True)
test_tqdm.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Experiment script: hook Hugging Face Hub download progress by subclassing
# the tqdm class it uses, so each update() prints a machine-parseable line.
import huggingface_hub.utils as hf_utils

# Keep a reference to the real class so the subclass inherits its behavior.
original_tqdm = hf_utils.tqdm

class MyTqdm(original_tqdm):
    def update(self, n=1):
        super().update(n)
        # total may be absent/None/0 for unsized downloads; report only when known.
        if hasattr(self, 'total') and self.total:
            print(f"DL_PROGRESS: {self.n / self.total:.2%}", flush=True)

# NOTE(review): patch order matters — huggingface_hub may have already bound
# these attributes, so overwrite every known alias before triggering a download.
hf_utils.tqdm = MyTqdm
hf_utils.tqdm.tqdm = MyTqdm # Also overwrite the module attribute just in case

import huggingface_hub.file_download
huggingface_hub.file_download.tqdm = MyTqdm

try:
    # force_download ensures tqdm progress actually fires even if gpt2 is cached.
    from transformers import AutoConfig
    AutoConfig.from_pretrained("gpt2", force_download=True)
except Exception as e:
    print(e)