RFTSystems committed
Commit 9d2da30 · verified · 1 Parent(s): 3aa03ea

Update app.py

Files changed (1)
  1. app.py +101 -122
app.py CHANGED
@@ -49,13 +49,6 @@ def tau_eff_adaptive(
     gain: float = 1.2,
     cap: float = 4.0
 ):
-    """
-    τ_eff is implemented here as a timing/decision delay modifier.
-    - base: baseline τ_eff
-    - slow_by: explicit slow-down term
-    - gain: reaction strength to uncertainty
-    - cap: prevents absurd values
-    """
     u = clamp(float(uncertainty), 0.0, 1.0)
     tau = base + slow_by + gain * u
     return clamp(tau, base, cap)
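The deleted docstring was the only description of the τ_eff parameters; the arithmetic itself is unchanged. A minimal sketch of that arithmetic, assuming the usual min/max `clamp` helper and `base=1.0`, `slow_by=0.0` defaults that are not visible in this hunk:

```python
# Sketch of the tau_eff_adaptive arithmetic kept by this hunk.
# `clamp` and the base/slow_by defaults are assumptions, not part of the diff.
def clamp(x, lo, hi):
    return max(lo, min(hi, x))

def tau_eff_adaptive(uncertainty, base=1.0, slow_by=0.0, gain=1.2, cap=4.0):
    u = clamp(float(uncertainty), 0.0, 1.0)   # uncertainty is bounded to [0, 1]
    tau = base + slow_by + gain * u           # more uncertainty -> larger delay
    return clamp(tau, base, cap)              # never below base, never above cap

print(tau_eff_adaptive(0.0))   # 1.0  (baseline)
print(tau_eff_adaptive(1.0))   # 2.2  (base + gain)
print(tau_eff_adaptive(5.0))   # 2.2  (uncertainty clamped to 1.0)
```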
@@ -64,11 +57,6 @@ def rft_confidence(uncertainty: float):
     return clamp(1.0 - float(uncertainty), 0.0, 1.0)
 
 def rft_gate(conf: float, tau_eff: float, threshold: float):
-    """
-    Collapse gate:
-    - higher τ_eff makes the gate stricter
-    - threshold is the minimum confidence needed
-    """
     conf = float(conf)
     tau_eff = float(tau_eff)
     effective = threshold + 0.08 * (tau_eff - 1.0)
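Only the effective-threshold line of `rft_gate` is visible in this hunk; the final comparison against `conf` is assumed below. The sketch shows how a larger `tau_eff` tightens the gate:

```python
# Sketch of the gate behaviour described by the removed docstring.
# The pass/fail comparison is an assumption; only `effective = ...` is in the diff.
def rft_gate(conf, tau_eff, threshold):
    effective = threshold + 0.08 * (tau_eff - 1.0)  # higher tau_eff -> stricter gate
    return conf >= effective                        # assumed decision rule

print(rft_gate(conf=0.75, tau_eff=1.0, threshold=0.7))  # True  (effective = 0.70)
print(rft_gate(conf=0.75, tau_eff=2.0, threshold=0.7))  # False (effective = 0.78)
```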
@@ -474,8 +462,9 @@ def simulate_landing(
     return summary, [p_alt, p_x, p_w, p_a], csv_path
 
 # ===============================================================
-# Predator Avoidance (Reflex vs QuantumConscious "RFT-style")
+# Predator Avoidance (Reflex vs RFT-style Adaptive Agents)
 # ===============================================================
+
 def numpy_convolve2d_toroidal(array: np.ndarray, kernel: np.ndarray) -> np.ndarray:
     out = np.zeros_like(array, dtype=float)
     kcx = kernel.shape[0] // 2
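Only the first lines of `numpy_convolve2d_toroidal` appear in this hunk. As an illustration of the wrap-around behaviour the name implies, here is an independent `np.roll`-based sketch (not the loop implementation in app.py), using an arbitrary 3×3 kernel:

```python
import numpy as np

# Illustration of "toroidal" convolution: the kernel wraps around the grid edges.
# This is a standalone sketch, not the function in app.py.
def convolve2d_toroidal(array: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    out = np.zeros_like(array, dtype=float)
    kcx, kcy = kernel.shape[0] // 2, kernel.shape[1] // 2
    for i in range(kernel.shape[0]):
        for j in range(kernel.shape[1]):
            # shift the whole grid so edges wrap, then accumulate
            out += kernel[i, j] * np.roll(array, shift=(i - kcx, j - kcy), axis=(0, 1))
    return out

field = np.zeros((5, 5))
field[0, 0] = 1.0
kernel = np.array([[0, 0.2, 0], [0.2, 0.2, 0.2], [0, 0.2, 0]])
print(convolve2d_toroidal(field, kernel)[4, 0])  # 0.2, mass wraps to the opposite edge
```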
@@ -515,21 +504,21 @@ class ReflexAgent:
         self.x = (self.x + dx) % self.grid_size
         self.y = (self.y + dy) % self.grid_size
 
-class QuantumConsciousAgent:
+class RFTAdaptiveAgent:
     def __init__(
         self,
         grid_size: int,
         move_kernel: np.ndarray,
         energy_max: float,
         energy_regen: float,
-        base_override_cost: float,
-        quantum_boost_prob: float,
-        quantum_boost_amount: float,
+        base_collapse_cost: float,
+        boost_prob: float,
+        boost_amount: float,
         sense_noise_prob: float,
         alpha: float,
         beta: float,
         dt_internal: float,
-        override_threshold: float
+        collapse_threshold: float
     ):
         self.grid_size = grid_size
         self.move_kernel = move_kernel.astype(float)
@@ -542,27 +531,24 @@ class QuantumConsciousAgent:
         self.energy_max = float(energy_max)
         self.energy = float(energy_max)
         self.energy_regen = float(energy_regen)
-        self.base_override_cost = float(base_override_cost)
-        self.quantum_boost_prob = float(quantum_boost_prob)
-        self.quantum_boost_amount = float(quantum_boost_amount)
+        self.base_collapse_cost = float(base_collapse_cost)
+        self.boost_prob = float(boost_prob)
+        self.boost_amount = float(boost_amount)
         self.sense_noise_prob = float(sense_noise_prob)
 
         self.alpha = float(alpha)
         self.beta = float(beta)
         self.dt_internal = float(dt_internal)
-        self.override_threshold = float(override_threshold)
+        self.collapse_threshold = float(collapse_threshold)
 
-        # Start low so P_override is not pinned at the threshold.
-        self.psi_override = (0.08 + 0j)  # |psi|^2 = 0.0064
-        self.overrides = 0
+        self.psi_action = (0.08 + 0j)  # |psi|^2 = 0.0064 baseline
+        self.collapse_actions = 0
         self.collisions = 0
 
     def move(self):
         dx, dy = random.choice([(0,1), (0,-1), (1,0), (-1,0)])
         self.x = (self.x + dx) % self.grid_size
         self.y = (self.y + dy) % self.grid_size
-
-        # Keep pos_prob consistent with actual state (otherwise threat stays meaningless)
         self.pos_prob.fill(0.0)
         self.pos_prob[self.x, self.y] = 1.0
 
@@ -584,34 +570,27 @@ class QuantumConsciousAgent:
             threat += float(sub.sum())
         return threat
 
-    def update_override_state(self, perceived):
-        """
-        Make P_override responsive (amplitude changes), not phase-only.
-        Threat pushes amplitude up; energy pushes it down.
-        """
+    def update_action_state(self, perceived):
         T = self.compute_threat(perceived)
         E = self.energy / max(self.energy_max, 1e-9)
 
         drive = (self.alpha * T) - (self.beta * E)
 
-        # amplitude pump/decay (bounded)
         exp_term = clamp(drive, -6.0, 6.0) * 0.22 * self.dt_internal
         amp = math.exp(exp_term)
         amp = clamp(amp, 0.75, 1.35)
 
-        # keep a "quantum-style" phase evolution too
-        H = drive + 0.01 * (abs(self.psi_override) ** 2)
-        self.psi_override *= amp * np.exp(-1j * H * self.dt_internal)
+        H = drive + 0.01 * (abs(self.psi_action) ** 2)
+        self.psi_action *= amp * np.exp(-1j * H * self.dt_internal)
 
-        # cap magnitude so probability stays within [0,1]
-        mag = abs(self.psi_override)
+        mag = abs(self.psi_action)
         if mag > 1.0:
-            self.psi_override /= mag
+            self.psi_action /= mag
 
-    def get_override_probability(self):
-        return float(min(abs(self.psi_override) ** 2, 1.0))
+    def get_action_probability(self):
+        return float(min(abs(self.psi_action) ** 2, 1.0))
 
-    def apply_override(self, perceived):
+    def apply_collapse_action(self, perceived):
         field = numpy_convolve2d_toroidal(self.pos_prob, self.move_kernel)
         field = np.maximum(field, 0.0)
 
@@ -644,62 +623,62 @@ class QuantumConsciousAgent:
         idx = np.random.choice(self.grid_size * self.grid_size, p=flat)
         self.x, self.y = divmod(int(idx), self.grid_size)
 
-    def quantum_energy_boost(self):
-        if random.random() < self.quantum_boost_prob:
-            return float(self.quantum_boost_amount)
+    def energy_boost(self):
+        if random.random() < self.boost_prob:
+            return float(self.boost_amount)
         return 0.0
 
     def regen_energy(self):
-        boost = self.quantum_energy_boost()
+        boost = self.energy_boost()
         self.energy = clamp(self.energy + self.energy_regen + boost, 0.0, self.energy_max)
         if self.energy < self.energy_max and random.random() < 0.05:
            self.energy = self.energy_max
 
-    def move_consciously(self, predators, group_coherence: float):
+    def step_rft(self, predators, group_coherence: float):
         if self.energy <= 0:
             self.move()
             return 0, 0.0, 0.0
 
         perceived = self.sense_predators(predators)
-        self.update_override_state(perceived)
+        self.update_action_state(perceived)
 
-        P_ov = self.get_override_probability()
+        P_act = self.get_action_probability()
         threat = self.compute_threat(perceived)
 
         acted = 0
-        if (P_ov >= self.override_threshold) and (self.energy > 0):
-            effective_cost = self.base_override_cost * (1.0 - float(group_coherence))
+        if (P_act >= self.collapse_threshold) and (self.energy > 0):
+            effective_cost = self.base_collapse_cost * (1.0 - float(group_coherence))
             if self.energy >= effective_cost:
-                self.overrides += 1
+                self.collapse_actions += 1
                 self.energy -= effective_cost
-                self.apply_override(perceived)
-                self.psi_override = (0.08 + 0j)  # reset after a collapse action
+                self.apply_collapse_action(perceived)
+                self.psi_action = (0.08 + 0j)
                 acted = 1
             else:
                 self.move()
         else:
             self.move()
 
-        return acted, P_ov, threat
+        return acted, P_act, threat
 
 def simulate_predator(
     seed: int,
     grid_size: int,
     steps: int,
     num_reflex: int,
-    num_conscious: int,
+    num_rft: int,
     num_predators: int,
     group_coherence: float,
     sense_noise_prob: float,
-    override_threshold: float,
+    collapse_threshold: float,
     alpha: float,
     beta: float,
     dt_internal: float,
     energy_max: float,
-    base_override_cost: float,
+    base_collapse_cost: float,
     energy_regen: float,
-    quantum_boost_prob: float,
-    quantum_boost_amount: float,
+    boost_prob: float,
+    boost_amount: float,
     show_heatmap: bool
 ):
     set_seed(seed)
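Taken together, the renamed methods keep the same pipeline as before: pump the action amplitude with threat, derive P_act, gate it against `collapse_threshold`, and pay a coherence-discounted energy cost. A standalone sketch of that path, using only constants visible in the diff, arbitrary `threat`/`energy_frac` inputs, and an assumed `clamp` helper:

```python
import math
import numpy as np

# Standalone walk-through of update_action_state -> get_action_probability ->
# threshold gate -> coherence-discounted cost. `clamp`, `threat`, and
# `energy_frac` are assumptions; the numeric constants appear in the diff.
def clamp(x, lo, hi):
    return max(lo, min(hi, x))

alpha, beta, dt_internal = 15.0, 0.5, 0.2
psi_action = 0.08 + 0j                       # baseline amplitude, |psi|^2 = 0.0064

def update_action_state(psi, threat, energy_frac):
    drive = alpha * threat - beta * energy_frac
    amp = clamp(math.exp(clamp(drive, -6.0, 6.0) * 0.22 * dt_internal), 0.75, 1.35)
    H = drive + 0.01 * abs(psi) ** 2
    psi = psi * amp * np.exp(-1j * H * dt_internal)   # amplitude pump + phase rotation
    return psi / abs(psi) if abs(psi) > 1.0 else psi  # keep |psi|^2 <= 1

threat, energy_frac = 0.6, 0.9               # arbitrary example inputs
for _ in range(20):                          # sustained threat pumps P_act toward 1
    psi_action = update_action_state(psi_action, threat, energy_frac)

P_act = min(abs(psi_action) ** 2, 1.0)
collapse_threshold, base_collapse_cost, group_coherence = 0.02, 1.0, 0.30
if P_act >= collapse_threshold:
    effective_cost = base_collapse_cost * (1.0 - group_coherence)   # 0.70 energy units
    print(f"collapse action fires: P_act={P_act:.3f}, cost={effective_cost:.2f}")
    psi_action = 0.08 + 0j                   # reset after acting, as in the hunk above
```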
@@ -709,22 +688,22 @@ def simulate_predator(
                                 [0, 0.2, 0]], dtype=float)
 
     reflex_agents = [ReflexAgent(grid_size) for _ in range(int(num_reflex))]
-    conscious_agents = [
-        QuantumConsciousAgent(
+    rft_agents = [
+        RFTAdaptiveAgent(
             grid_size=grid_size,
             move_kernel=move_kernel,
             energy_max=energy_max,
             energy_regen=energy_regen,
-            base_override_cost=base_override_cost,
-            quantum_boost_prob=quantum_boost_prob,
-            quantum_boost_amount=quantum_boost_amount,
+            base_collapse_cost=base_collapse_cost,
+            boost_prob=boost_prob,
+            boost_amount=boost_amount,
             sense_noise_prob=sense_noise_prob,
             alpha=alpha,
             beta=beta,
             dt_internal=dt_internal,
-            override_threshold=override_threshold
+            collapse_threshold=collapse_threshold
         )
-        for _ in range(int(num_conscious))
+        for _ in range(int(num_rft))
     ]
     predators = [Predator(grid_size) for _ in range(int(num_predators))]
 
@@ -742,13 +721,13 @@ def simulate_predator(
                     a.collisions += 1
 
         actions = []
-        povs = []
+        probs = []
         threats = []
-        for a in conscious_agents:
-            acted, P_ov, threat = a.move_consciously(predators, group_coherence)
+        for a in rft_agents:
+            acted, P_act, threat = a.step_rft(predators, group_coherence)
             a.regen_energy()
             actions.append(acted)
-            povs.append(P_ov)
+            probs.append(P_act)
             threats.append(threat)
             for p in predators:
                 if a.x == p.x and a.y == p.y:
757
  ops_proxy += 18
758
 
759
  reflex_collisions = int(sum(a.collisions for a in reflex_agents))
760
- conscious_collisions = int(sum(a.collisions for a in conscious_agents))
761
- avg_overrides = float(np.mean([a.overrides for a in conscious_agents])) if conscious_agents else 0.0
762
- avg_energy = float(np.mean([a.energy for a in conscious_agents])) if conscious_agents else 0.0
763
  avg_threat = float(np.mean(threats)) if threats else 0.0
764
- avg_pov = float(np.mean(povs)) if povs else 0.0
765
- avg_act = float(np.mean(actions)) if actions else 0.0
766
 
767
  rows.append({
768
  "t": t,
769
  "reflex_collisions_cum": reflex_collisions,
770
- "conscious_collisions_cum": conscious_collisions,
771
- "avg_conscious_overrides": avg_overrides,
772
- "avg_conscious_energy": avg_energy,
773
- "avg_conscious_threat": avg_threat,
774
- "avg_conscious_P_override": avg_pov,
775
- "avg_conscious_action": avg_act,
776
  "predators_positions": "|".join([f"{p.x},{p.y}" for p in predators]),
777
  })
778
 
@@ -782,7 +761,7 @@ def simulate_predator(
     fig1 = plt.figure(figsize=(10, 4))
     ax = fig1.add_subplot(111)
     ax.plot(df["t"], df["reflex_collisions_cum"], label="Reflex collisions (cum)")
-    ax.plot(df["t"], df["conscious_collisions_cum"], label="Conscious collisions (cum)")
+    ax.plot(df["t"], df["rft_collisions_cum"], label="RFT collisions (cum)")
     ax.set_title("Predator Avoidance: Collisions (Reflex vs RFT)")
     ax.set_xlabel("t (step)")
     ax.set_ylabel("collisions (cum)")
@@ -791,32 +770,32 @@ def simulate_predator(
 
     fig2 = plt.figure(figsize=(10, 4))
    ax = fig2.add_subplot(111)
-    ax.plot(df["t"], df["avg_conscious_overrides"], label="Avg overrides (conscious)")
-    ax.plot(df["t"], df["avg_conscious_energy"], label="Avg energy (conscious)")
-    ax.set_title("Predator Avoidance: Overrides + Energy (Conscious)")
+    ax.plot(df["t"], df["avg_rft_actions"], label="Avg actions (RFT)")
+    ax.plot(df["t"], df["avg_rft_energy"], label="Avg energy (RFT)")
+    ax.set_title("Predator Avoidance: Actions + Energy (RFT)")
     ax.set_xlabel("t (step)")
     ax.set_ylabel("value")
     ax.legend()
-    p_ov = save_plot(fig2, f"predator_overrides_energy_seed{seed}.png")
+    p_act = save_plot(fig2, f"predator_actions_energy_seed{seed}.png")
 
     fig3 = plt.figure(figsize=(10, 4))
     ax = fig3.add_subplot(111)
-    ax.plot(df["t"], df["avg_conscious_threat"], label="Avg threat")
-    ax.plot(df["t"], df["avg_conscious_P_override"], label="Avg P_override")
-    ax.plot(df["t"], df["avg_conscious_action"], label="Avg action rate")
-    ax.set_title("Predator Avoidance: Threat vs Override Probability vs Action Rate")
+    ax.plot(df["t"], df["avg_rft_threat"], label="Avg threat")
+    ax.plot(df["t"], df["avg_rft_P_action"], label="Avg P_action")
+    ax.plot(df["t"], df["avg_rft_action_rate"], label="Avg action rate")
+    ax.set_title("Predator Avoidance: Threat vs P_action vs Action Rate")
     ax.set_xlabel("t (step)")
     ax.set_ylabel("value")
     ax.legend()
     p_thr = save_plot(fig3, f"predator_threat_seed{seed}.png")
 
     heatmap_path = None
-    if show_heatmap and len(conscious_agents) > 0:
-        field = conscious_agents[0].pos_prob
+    if show_heatmap and len(rft_agents) > 0:
+        field = rft_agents[0].pos_prob
         fig4 = plt.figure(figsize=(6, 5))
         ax = fig4.add_subplot(111)
         im = ax.imshow(field, aspect="auto")
-        ax.set_title("Conscious Agent[0]: Final probability field (pos_prob)")
+        ax.set_title("RFT Agent[0]: Final probability field (pos_prob)")
         ax.set_xlabel("y")
         ax.set_ylabel("x")
         fig4.colorbar(im, ax=ax, fraction=0.046, pad=0.04)
@@ -827,16 +806,16 @@ def simulate_predator(
         "grid_size": int(grid_size),
         "steps": int(steps),
         "num_reflex": int(num_reflex),
-        "num_conscious": int(num_conscious),
+        "num_rft": int(num_rft),
         "num_predators": int(num_predators),
         "final_reflex_collisions": int(df["reflex_collisions_cum"].iloc[-1]) if len(df) else 0,
-        "final_conscious_collisions": int(df["conscious_collisions_cum"].iloc[-1]) if len(df) else 0,
-        "final_avg_conscious_overrides": float(df["avg_conscious_overrides"].iloc[-1]) if len(df) else 0.0,
-        "final_avg_conscious_energy": float(df["avg_conscious_energy"].iloc[-1]) if len(df) else 0.0,
+        "final_rft_collisions": int(df["rft_collisions_cum"].iloc[-1]) if len(df) else 0,
+        "final_avg_rft_actions": float(df["avg_rft_actions"].iloc[-1]) if len(df) else 0.0,
+        "final_avg_rft_energy": float(df["avg_rft_energy"].iloc[-1]) if len(df) else 0.0,
         "ops_proxy": int(ops_proxy),
     }
 
-    imgs = [p_col, p_ov, p_thr]
+    imgs = [p_col, p_act, p_thr]
     if heatmap_path is not None:
         imgs.append(heatmap_path)
 
@@ -951,7 +930,7 @@ This Space contains:
 - **NEO alerting**
 - **Satellite jitter reduction**
 - **Starship-style landing harness**
-- **Predator avoidance** (Reflex vs RFT-style "QuantumConscious" agents)
+- **Predator avoidance** (Reflex vs RFT-style adaptive agents)
 
 No SciPy. No hidden dependencies. No model weights.
 """
@@ -1091,30 +1070,30 @@ def ui_run_landing(seed, steps, dt, wind_max, thrust_noise, kp_base, kp_rft, gat
     summary_txt = json.dumps(summary, indent=2)
     return summary_txt, imgs[0], imgs[1], imgs[2], imgs[3], csv_path
 
-def ui_run_predator(seed, grid_size, steps, num_reflex, num_conscious, num_predators,
-                    group_coherence, sense_noise_prob, override_threshold,
+def ui_run_predator(seed, grid_size, steps, num_reflex, num_rft, num_predators,
+                    group_coherence, sense_noise_prob, collapse_threshold,
                     alpha, beta, dt_internal,
-                    energy_max, base_override_cost, energy_regen,
-                    quantum_boost_prob, quantum_boost_amount,
+                    energy_max, base_collapse_cost, energy_regen,
+                    boost_prob, boost_amount,
                     show_heatmap):
     summary, imgs, csv_path = simulate_predator(
         seed=int(seed),
         grid_size=int(grid_size),
         steps=int(steps),
         num_reflex=int(num_reflex),
-        num_conscious=int(num_conscious),
+        num_rft=int(num_rft),
        num_predators=int(num_predators),
        group_coherence=float(group_coherence),
        sense_noise_prob=float(sense_noise_prob),
-        override_threshold=float(override_threshold),
+        collapse_threshold=float(collapse_threshold),
        alpha=float(alpha),
        beta=float(beta),
        dt_internal=float(dt_internal),
        energy_max=float(energy_max),
-        base_override_cost=float(base_override_cost),
+        base_collapse_cost=float(base_collapse_cost),
        energy_regen=float(energy_regen),
-        quantum_boost_prob=float(quantum_boost_prob),
-        quantum_boost_amount=float(quantum_boost_amount),
+        boost_prob=float(boost_prob),
+        boost_amount=float(boost_amount),
        show_heatmap=bool(show_heatmap)
     )
     summary_txt = json.dumps(summary, indent=2)
@@ -1306,7 +1285,7 @@ with gr.Blocks(title="RFT — Agent Console (NEO / Jitter / Landing / Predator)"
         "# Predator Avoidance (Reflex vs RFT)\n"
         "Grid world with roaming predators.\n"
         "Reflex agents: random walk.\n"
-        "Conscious agents: probability field + threat-weighted override.\n"
+        "RFT agents: probability field + threat-weighted collapse action.\n"
     )
 
     with gr.Row():
@@ -1316,28 +1295,28 @@ with gr.Blocks(title="RFT — Agent Console (NEO / Jitter / Landing / Predator)"
 
         with gr.Row():
             num_reflex = gr.Slider(0, 50, value=10, step=1, label="Reflex agents")
-            num_conscious = gr.Slider(0, 20, value=3, step=1, label="Conscious agents")
+            num_rft = gr.Slider(0, 20, value=3, step=1, label="RFT agents")
             num_predators = gr.Slider(1, 20, value=3, step=1, label="Predators")
 
         with gr.Accordion("RFT / Agent parameters", open=True):
             with gr.Row():
                 group_coherence = gr.Slider(0.0, 0.95, value=0.30, step=0.01, label="Group coherence")
                 sense_noise_prob = gr.Slider(0.0, 0.9, value=0.10, step=0.01, label="Sense noise probability")
-                override_threshold = gr.Slider(0.0, 1.0, value=0.02, step=0.005, label="Override threshold (P_ov)")
+                collapse_threshold = gr.Slider(0.0, 1.0, value=0.02, step=0.005, label="Collapse threshold (P_action)")
 
             with gr.Row():
                 alpha = gr.Slider(0.0, 50.0, value=15.0, step=0.5, label="alpha (threat gain)")
                 beta = gr.Slider(0.0, 10.0, value=0.5, step=0.05, label="beta (energy term)")
-                dt_internal = gr.Slider(0.01, 1.0, value=0.2, step=0.01, label="override dt")
+                dt_internal = gr.Slider(0.01, 1.0, value=0.2, step=0.01, label="internal dt")
 
             with gr.Row():
                 energy_max = gr.Slider(1.0, 300.0, value=100.0, step=1.0, label="Energy max")
-                base_override_cost = gr.Slider(0.0, 10.0, value=1.0, step=0.1, label="Base override cost")
+                base_collapse_cost = gr.Slider(0.0, 10.0, value=1.0, step=0.1, label="Base action cost")
                 energy_regen = gr.Slider(0.0, 1.0, value=0.05, step=0.01, label="Energy regen")
 
             with gr.Row():
-                quantum_boost_prob = gr.Slider(0.0, 1.0, value=0.10, step=0.01, label="Quantum boost probability")
-                quantum_boost_amount = gr.Slider(0.0, 50.0, value=5.0, step=0.5, label="Quantum boost amount")
+                boost_prob = gr.Slider(0.0, 1.0, value=0.10, step=0.01, label="Boost probability")
+                boost_amount = gr.Slider(0.0, 50.0, value=5.0, step=0.5, label="Boost amount")
                 show_heatmap = gr.Checkbox(value=True, label="Show probability field heatmap (agent[0])")
 
         run_p = gr.Button("Run Predator Simulation")
@@ -1345,19 +1324,19 @@ with gr.Blocks(title="RFT — Agent Console (NEO / Jitter / Landing / Predator)"
         out_p_summary = gr.Textbox(label="Summary JSON", lines=12)
         with gr.Row():
             out_p_img1 = gr.Image(label="Collisions (cumulative)")
-            out_p_img2 = gr.Image(label="Overrides + Energy")
+            out_p_img2 = gr.Image(label="Actions + Energy")
         with gr.Row():
-            out_p_img3 = gr.Image(label="Threat / P_override / Action rate")
+            out_p_img3 = gr.Image(label="Threat / P_action / Action rate")
             out_p_img4 = gr.Image(label="Final probability field (optional)")
         out_p_csv = gr.File(label="Download Predator CSV log")
 
         run_p.click(
             ui_run_predator,
-            inputs=[seed_p, grid_size, steps_p, num_reflex, num_conscious, num_predators,
-                    group_coherence, sense_noise_prob, override_threshold,
+            inputs=[seed_p, grid_size, steps_p, num_reflex, num_rft, num_predators,
+                    group_coherence, sense_noise_prob, collapse_threshold,
                     alpha, beta, dt_internal,
-                    energy_max, base_override_cost, energy_regen,
-                    quantum_boost_prob, quantum_boost_amount,
+                    energy_max, base_collapse_cost, energy_regen,
+                    boost_prob, boost_amount,
                     show_heatmap],
             outputs=[out_p_summary, out_p_img1, out_p_img2, out_p_img3, out_p_img4, out_p_csv]
         )
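The renamed sliders keep their positions in `inputs`, which matters because `gr.Button.click` passes component values to the handler positionally, in list order. A minimal, hypothetical two-slider sketch of that wiring (not the Space's actual layout):

```python
import gradio as gr

# Positional wiring: the values of `inputs` arrive at the handler in list
# order, so the order here must match the handler's parameter order.
def handler(num_reflex, num_rft):
    return f"reflex={num_reflex}, rft={num_rft}"

with gr.Blocks() as demo:
    a = gr.Slider(0, 50, value=10, step=1, label="Reflex agents")
    b = gr.Slider(0, 20, value=3, step=1, label="RFT agents")
    out = gr.Textbox()
    btn = gr.Button("Run")
    btn.click(handler, inputs=[a, b], outputs=out)

# demo.launch()  # uncomment to serve locally
```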
 