diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000..271d860 Binary files /dev/null and b/.DS_Store differ diff --git a/.gitignore b/.gitignore index b13ecaa..8a871d4 100644 --- a/.gitignore +++ b/.gitignore @@ -12,12 +12,4 @@ results/ data/ .pytest_cache/ -.DS_Store - -hpc_data/phase4_18735304 - -hpc_data/phase6_18780164 - - -hpc_data/**/*.jsonl -hpc_data/**/*.jsonl +.DS_Store/ diff --git a/README.md b/README.md index 97ba3fb..437c284 100644 --- a/README.md +++ b/README.md @@ -290,7 +290,3 @@ Metadata files (`phase{N}_metadata.json`) accompany each results file with confi **Optional:** - matplotlib (visualization) - scipy (additional analysis) - ---- - -## References diff --git a/docs/GEN_AI.md b/docs/GEN_AI.md deleted file mode 100644 index e69de29..0000000 diff --git a/docs/HPC_GUIDE.md b/docs/HPC_GUIDE.md deleted file mode 100644 index 248e642..0000000 --- a/docs/HPC_GUIDE.md +++ /dev/null @@ -1,57 +0,0 @@ -### Snellius Usage Breakdown - -``` -ssh kanagnostopoul@snellius.surf.nl - -# On a separate terminal run the following - -# Upload the entire project directory (including your models/ folder) - -rsync -avz --progress --exclude-from='.rsync-exclude' \ - ~/CSS_Project/ kanagnostopoul@snellius.surf.nl:~/CSS_Project/ - -# On the Snellius terminal - -module load 2023 Python/3.11.3-GCCcore-12.3.0 -python3 -m venv ~/css_env -source ~/css_env/bin/activate -pip install numpy scipy matplotlib joblib - -# To do a dry run for testing the entire environment - -python3 pp_analysis.py --mode full --dry-run - -# For async run - -python3 pp_analysis.py --mode full --output results_${SLURM_JOB_ID} --cores $SLURM_CPUS_PER_TASK --async - -# To submit a job - -sbatch run_analysis.sh - -# Check Queue Status - -squeue -u $USER - -# Cancel a job - -scancel - -# Monitoring live progress - -tail -f pp_phase4_18735304.err - -# Watch task completetion - -watch -n 5 squeue -u $USER -watch -n 10 "ls -1 results_JOBID | wc -l" - -# Fetching the results once the job is 
done - -scp -r kanagnostopoul@snellius.surf.nl:~/CSS_Project/results/phase6_18832958/ ./results/ -``` - -The jobscript template can be found in ```run_analysis.sh``` (default rome paritition). - - -Snellius Partitions Page: https://servicedesk.surf.nl/wiki/spaces/WIKI/pages/30660209/Snellius+partitions+and+accounting \ No newline at end of file diff --git a/docs/experiments.html b/docs/experiments.html new file mode 100644 index 0000000..1f081d6 --- /dev/null +++ b/docs/experiments.html @@ -0,0 +1,3410 @@ + + + + + + + experiments API documentation + + + + + + + + + +
+
+

+experiments

+ +

Predator-Prey Hydra Effect Experiments

+ +

HPC-ready experiment runner for investigating the Hydra effect in +predator-prey cellular automata.

+ +
Experimental Phases
+ +
    +
  • Phase 1: Parameter sweep to find critical point (bifurcation + cluster analysis)
  • +
  • Phase 2: Self-organization analysis (evolution toward criticality)
  • +
  • Phase 3: Finite-size scaling at critical point
  • +
  • Phase 4: Sensitivity analysis across parameter regimes
  • +
  • Phase 5: Model extensions (directed hunting comparison)
  • +
+ +
Functions
+ +
+
run_single_simulation # Execute one simulation run and collect metrics.
+run_phase1, run_phase2, run_phase3, run_phase4, run_phase5  # Phase-specific experiment runners.
+
+
+ +
Utilities
+ +
+
generate_unique_seed # Deterministic seed generation from parameters.
+count_populations # Count species populations on grid.
+get_evolved_stats # Statistics for evolved parameters.
+average_pcfs # Average multiple PCF measurements.
+save_results_jsonl, load_results_jsonl, save_results_npz # I/O functions for experiment results.
+
+
+ +
Command Line Usage
+ +
+
python experiments.py --phase 1                    # Run phase 1
+python experiments.py --phase 1 --dry-run          # Estimate runtime
+python experiments.py --phase all                  # Run all phases
+python experiments.py --phase 1 --output results/  # Custom output
+
+
+ +
Programmatic Usage
+ +
+
from experiments import run_single_simulation, run_phase1
+from models.config import PHASE1_CONFIG
+
+# Single simulation
+result = run_single_simulation(
+    prey_birth=0.2,
+    prey_death=0.05,
+    predator_birth=0.8,
+    predator_death=0.1,
+    grid_size=100,
+    seed=42,
+    cfg=PHASE1_CONFIG,
+)
+
+# Full phase (writes to output directory)
+import logging
+results = run_phase1(PHASE1_CONFIG, Path("results/"), logging.getLogger())
+
+
+
+ + + + + +
   1#!/usr/bin/env python3
+   2"""
+   3Predator-Prey Hydra Effect Experiments
+   4======================================
+   5
+   6HPC-ready experiment runner for investigating the Hydra effect in
+   7predator-prey cellular automata.
+   8
+   9Experimental Phases
+  10-------------------
+  11- **Phase 1**: Parameter sweep to find critical point (bifurcation + cluster analysis)
+  12- **Phase 2**: Self-organization analysis (evolution toward criticality)
+  13- **Phase 3**: Finite-size scaling at critical point
+  14- **Phase 4**: Sensitivity analysis across parameter regimes
+  15- **Phase 5**: Model extensions (directed hunting comparison)
+  16
+  17Functions
+  18---------
+  19```python
+  20run_single_simulation # Execute one simulation run and collect metrics.
+  21run_phase1, run_phase2, run_phase3, run_phase4, run_phase5  # Phase-specific experiment runners.
+  22```
+  23
+  24Utilities
+  25---------
+  26```python
+  27generate_unique_seed # Deterministic seed generation from parameters.
+  28count_populations # Count species populations on grid.
+  29get_evolved_stats # Statistics for evolved parameters.
+  30average_pcfs # Average multiple PCF measurements.
+  31save_results_jsonl, load_results_jsonl, save_results_npz # I/O functions for experiment results.
+  32```
+  33
+  34Command Line Usage
+  35------------------
+  36```bash
+  37python experiments.py --phase 1                    # Run phase 1
+  38python experiments.py --phase 1 --dry-run          # Estimate runtime
+  39python experiments.py --phase all                  # Run all phases
+  40python experiments.py --phase 1 --output results/  # Custom output
+  41```
+  42
+  43Programmatic Usage
+  44------------------
+  45```python
+  46from experiments import run_single_simulation, run_phase1
+  47from models.config import PHASE1_CONFIG
+  48
+  49# Single simulation
+  50result = run_single_simulation(
+  51    prey_birth=0.2,
+  52    prey_death=0.05,
+  53    predator_birth=0.8,
+  54    predator_death=0.1,
+  55    grid_size=100,
+  56    seed=42,
+  57    cfg=PHASE1_CONFIG,
+  58)
+  59
+  60# Full phase (writes to output directory)
+  61import logging
+  62results = run_phase1(PHASE1_CONFIG, Path("results/"), logging.getLogger())
+  63```
+  64"""
+  65
+  66import argparse
+  67import hashlib
+  68import json
+  69import logging
+  70import os
+  71import sys
+  72import time
+  73from dataclasses import asdict
+  74from pathlib import Path
+  75from typing import Dict, List, Tuple, Optional
+  76import warnings
+  77
+  78import numpy as np
+  79from tqdm import tqdm
+  80
+  81warnings.filterwarnings("ignore")
+  82
+  83# Project imports
+  84project_root = str(Path(__file__).parent.parent)
+  85if project_root not in sys.path:
+  86    sys.path.insert(0, project_root)
+  87
+  88from models.config import Config, get_phase_config, PHASE_CONFIGS
+  89
+  90# Numba imports
+  91try:
+  92    from models.numba_optimized import (
+  93        compute_all_pcfs_fast,
+  94        get_cluster_stats_fast,
+  95        warmup_numba_kernels,
+  96        set_numba_seed,
+  97        NUMBA_AVAILABLE,
+  98    )
+  99
+ 100    USE_NUMBA = NUMBA_AVAILABLE
+ 101except ImportError:
+ 102    USE_NUMBA = False
+ 103
+ 104    def warmup_numba_kernels(size, **kwargs):
+ 105        pass
+ 106
+ 107    def set_numba_seed(seed):
+ 108        pass
+ 109
+ 110
+ 111# =============================================================================
+ 112# Utility Functions
+ 113# =============================================================================
+ 114
+ 115
+ 116def generate_unique_seed(params: dict, rep: int) -> int:
+ 117    """
+ 118    Create a deterministic seed from a dictionary of parameters and a repetition index.
+ 119
+ 120    This function serializes the input dictionary into a sorted JSON string,
+ 121    appends the repetition count, and hashes the resulting string using SHA-256.
+ 122    The first 8 characters of the hex digest are then converted to an integer
+ 123    to provide a stable, unique seed for random number generators.
+ 124
+ 125    Parameters
+ 126    ----------
+ 127    params : dict
+ 128        A dictionary of configuration parameters. Keys are sorted to ensure
+ 129        determinism regardless of insertion order.
+ 130    rep : int
+ 131        The repetition or iteration index, used to ensure different seeds
+ 132        are generated for the same parameter set across multiple runs.
+ 133
+ 134    Returns
+ 135    -------
+ 136    int
+ 137        A unique integer seed derived from the input parameters.
+ 138
+ 139    Examples
+ 140    --------
+ 141    >>> params = {'learning_rate': 0.01, 'batch_size': 32}
+ 142    >>> generate_unique_seed(params, 1)
+ 143    3432571217
+ 144    >>> generate_unique_seed(params, 2)
+ 145    3960013583
+ 146    """
+ 147    identifier = json.dumps(params, sort_keys=True) + f"_{rep}"
+ 148    return int(hashlib.sha256(identifier.encode()).hexdigest()[:8], 16)
+ 149
+ 150
+ 151def count_populations(grid: np.ndarray) -> Tuple[int, int, int]:
+ 152    """
+ 153    Count the number of empty, prey, and predator cells in the simulation grid.
+ 154
+ 155    Parameters
+ 156    ----------
+ 157    grid : np.ndarray
+ 158        A 2D NumPy array representing the simulation environment, where:
+ 159        - 0: Empty cell
+ 160        - 1: Prey
+ 161        - 2: Predator
+ 162
+ 163    Returns
+ 164    -------
+ 165    empty_count : int
+ 166        Total number of cells with a value of 0.
+ 167    prey_count : int
+ 168        Total number of cells with a value of 1.
+ 169    predator_count : int
+ 170        Total number of cells with a value of 2.
+ 171
+ 172    Examples
+ 173    --------
+ 174    >>> grid = np.array([[0, 1], [2, 1]])
+ 175    >>> count_populations(grid)
+ 176    (1, 2, 1)
+ 177    """
+ 178    return int(np.sum(grid == 0)), int(np.sum(grid == 1)), int(np.sum(grid == 2))
+ 179
+ 180
+ 181def get_evolved_stats(model, param: str) -> Dict:
+ 182    """
+ 183    Get statistics of an evolved parameter from the model.
+ 184
+ 185    This function retrieves parameter values from the model's internal storage,
+ 186    filters out NaN values, and calculates basic descriptive statistics.
+ 187
+ 188    Parameters
+ 189    ----------
+ 190    model : object
+ 191        The simulation model instance containing a `cell_params` attribute
+ 192        with a `.get()` method.
+ 193    param : str
+ 194        The name of the parameter to calculate statistics for.
+ 195
+ 196    Returns
+ 197    -------
+ 198    stats : dict
+ 199        A dictionary containing the following keys:
+ 200        - 'mean': Arithmetic mean of valid values.
+ 201        - 'std': Standard deviation of valid values.
+ 202        - 'min': Minimum valid value.
+ 203        - 'max': Maximum valid value.
+ 204        - 'n': Count of non-NaN values.
+ 205        If no valid data is found, all stats return NaN and n returns 0.
+ 206
+ 207    Examples
+ 208    --------
+ 209    >>> stats = get_evolved_stats(my_model, "speed")
+ 210    >>> print(stats['mean'])
+ 211    1.25
+ 212    """
+ 213    arr = model.cell_params.get(param)
+ 214    if arr is None:
+ 215        return {"mean": np.nan, "std": np.nan, "min": np.nan, "max": np.nan, "n": 0}
+ 216    valid = arr[~np.isnan(arr)]
+ 217    if len(valid) == 0:
+ 218        return {"mean": np.nan, "std": np.nan, "min": np.nan, "max": np.nan, "n": 0}
+ 219    return {
+ 220        "mean": float(np.mean(valid)),
+ 221        "std": float(np.std(valid)),
+ 222        "min": float(np.min(valid)),
+ 223        "max": float(np.max(valid)),
+ 224        "n": len(valid),
+ 225    }
+ 226
+ 227
+ 228def average_pcfs(
+ 229    pcf_list: List[Tuple[np.ndarray, np.ndarray, int]],
+ 230) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+ 231    """
+ 232    Average multiple Pair Correlation Function (PCF) measurements and calculate standard error.
+ 233
+ 234    Parameters
+ 235    ----------
+ 236    pcf_list : list of tuple
+ 237        A list where each element is a tuple containing:
+ 238        - distances (np.ndarray): The radial distances (r).
+ 239        - pcf_values (np.ndarray): The correlation values g(r).
+ 240        - count (int): Metadata or weight (not used in current calculation).
+ 241
+ 242    Returns
+ 243    -------
+ 244    distances : np.ndarray
+ 245        The radial distances from the first entry in the list.
+ 246    pcf_mean : np.ndarray
+ 247        The element-wise mean of the PCF values across all measurements.
+ 248    pcf_se : np.ndarray
+ 249        The standard error of the mean for the PCF values.
+ 250
+ 251    Examples
+ 252    --------
+ 253    >>> data = [(np.array([0, 1]), np.array([1.0, 2.0]), 10),
+ 254    ...         (np.array([0, 1]), np.array([1.2, 1.8]), 12)]
+ 255    >>> dist, mean, se = average_pcfs(data)
+ 256    >>> mean
+ 257    array([1.1, 1.9])
+ 258    """
+ 259    if len(pcf_list) == 0:
+ 260        return np.array([]), np.array([]), np.array([])
+ 261
+ 262    distances = pcf_list[0][0]
+ 263    pcfs = np.array([p[1] for p in pcf_list])
+ 264
+ 265    pcf_mean = np.mean(pcfs, axis=0)
+ 266    pcf_se = np.std(pcfs, axis=0) / np.sqrt(len(pcfs))
+ 267
+ 268    return distances, pcf_mean, pcf_se
+ 269
+ 270
+ 271def save_results_jsonl(results: List[Dict], output_path: Path):
+ 272    """
+ 273    Save a list of dictionaries to a file in JSON Lines (JSONL) format.
+ 274
+ 275    Each dictionary in the list is serialized into a single JSON string and
+ 276    written as a new line. Non-serializable objects are converted to strings
+ 277    using the default string representation.
+ 278
+ 279    Parameters
+ 280    ----------
+ 281    results : list of dict
+ 282        The collection of result dictionaries to be saved.
+ 283    output_path : Path
+ 284        The file system path (pathlib.Path) where the JSONL file will be created.
+ 285
+ 286    Returns
+ 287    -------
+ 288    None
+ 289
+ 290    Notes
+ 291    -----
+ 292    The file is opened in 'w' (write) mode, which will overwrite any existing
+ 293    content at the specified path.
+ 294
+ 295    Examples
+ 296    --------
+ 297    >>> data = [{"id": 1, "score": 0.95}, {"id": 2, "score": 0.88}]
+ 298    >>> save_results_jsonl(data, Path("results.jsonl"))
+ 299    """
+ 300    with open(output_path, "w", encoding="utf-8") as f:
+ 301        for result in results:
+ 302            f.write(json.dumps(result, default=str) + "\n")
+ 303
+ 304
+ 305def save_results_npz(results: List[Dict], output_path: Path):
+ 306    """
+ 307    Save simulation results to a compressed NumPy (.npz) binary file.
+ 308
+ 309    This function flattens a list of result dictionaries into a single
+ 310    dictionary of NumPy arrays, prefixing keys with the run index to
+ 311    maintain data separation. The resulting file is compressed to
+ 312    reduce storage space.
+ 313
+ 314    Parameters
+ 315    ----------
+ 316    results : list of dict
+ 317        A list where each dictionary contains key-value pairs of
+ 318        simulation data (e.g., arrays, lists, or scalars).
+ 319    output_path : Path
+ 320        The file system path (pathlib.Path) where the compressed
+ 321        NPZ file will be saved.
+ 322
+ 323    Returns
+ 324    -------
+ 325    None
+ 326
+ 327    Notes
+ 328    -----
+ 329    The keys in the saved file follow the format 'run_{index}_{original_key}'.
+ 330    Values are automatically converted to NumPy arrays if they are not
+ 331    already.
+ 332
+ 333    Examples
+ 334    --------
+ 335    >>> results = [{"energy": [1, 2]}, {"energy": [3, 4]}]
+ 336    >>> save_results_npz(results, Path("output.npz"))
+ 337    """
+ 338    data = {}
+ 339    for i, res in enumerate(results):
+ 340        for key, val in res.items():
+ 341            data[f"run_{i}_{key}"] = np.array(val)
+ 342    np.savez_compressed(output_path, **data)
+ 343
+ 344
+ 345def load_results_jsonl(input_path: Path) -> List[Dict]:
+ 346    """
+ 347    Load simulation results from a JSON Lines (JSONL) formatted file.
+ 348
+ 349    This function reads a file line-by-line, parsing each line as an
+ 350    independent JSON object and aggregating them into a list of dictionaries.
+ 351
+ 352    Parameters
+ 353    ----------
+ 354    input_path : Path
+ 355        The file system path (pathlib.Path) to the JSONL file.
+ 356
+ 357    Returns
+ 358    -------
+ 359    results : list of dict
+ 360        A list of dictionaries reconstructed from the file content.
+ 361
+ 362    Raises
+ 363    ------
+ 364    FileNotFoundError
+ 365        If the specified input path does not exist.
+ 366    json.JSONDecodeError
+ 367        If a line in the file is not valid JSON.
+ 368
+ 369    Examples
+ 370    --------
+ 371    >>> data = load_results_jsonl(Path("results.jsonl"))
+ 372    >>> len(data)
+ 373    2
+ 374    """
+ 375    results = []
+ 376    with open(input_path, "r", encoding="utf-8") as f:
+ 377        for line in f:
+ 378            results.append(json.loads(line.strip()))
+ 379    return results
+ 380
+ 381
+ 382# =============================================================================
+ 383# Simulation Functionality
+ 384# =============================================================================
+ 385
+ 386
+ 387def run_single_simulation(
+ 388    prey_birth: float,
+ 389    prey_death: float,
+ 390    predator_birth: float,
+ 391    predator_death: float,
+ 392    grid_size: int,
+ 393    seed: int,
+ 394    cfg: Config,
+ 395    with_evolution: bool = False,
+ 396    compute_pcf: Optional[bool] = None,
+ 397) -> Dict:
+ 398    """
+ 399    Run a single Predator-Prey (PP) simulation and collect comprehensive metrics.
+ 400
+ 401    This function initializes a Cellular Automata model, executes a warmup phase
+ 402    to reach steady state, and then performs a measurement phase to track
+ 403    population dynamics, spatial clustering, and evolutionary changes.
+ 404
+ 405    Parameters
+ 406    ----------
+ 407    prey_birth : float
+ 408        The probability or rate of prey reproduction.
+ 409    prey_death : float
+ 410        The base probability or rate of prey mortality.
+ 411    predator_birth : float
+ 412        The probability or rate of predator reproduction upon consuming prey.
+ 413    predator_death : float
+ 414        The probability or rate of predator mortality.
+ 415    grid_size : int
+ 416        The side length of the square simulation grid.
+ 417    seed : int
+ 418        Random seed for ensuring reproducibility of the simulation run.
+ 419    cfg : Config
+ 420        A configuration object containing simulation hyperparameters (densities,
+ 421        sampling rates, timing, etc.).
+ 422    with_evolution : bool, optional
+ 423        If True, enables the evolution of the 'prey_death' parameter within
+ 424        the model (default is False).
+ 425    compute_pcf : bool, optional
+ 426        Explicit toggle for Pair Correlation Function calculation. If None,
+ 427        it is determined by `cfg.pcf_sample_rate` (default is None).
+ 428
+ 429    Returns
+ 430    -------
+ 431    result : dict
+ 432        A dictionary containing simulation results including:
+ 433        - Input parameters and survival flags.
+ 434        - Population mean and standard deviation for both species.
+ 435        - Cluster statistics (number of clusters, sizes, largest fractions).
+ 436        - Evolutionary statistics (mean, std, min, max, and final values).
+ 437        - PCF data and spatial indices (segregation and clustering).
+ 438        - Optional time series for populations and evolved parameters.
+ 439
+ 440    Notes
+ 441    -----
+ 442    The function relies on several external utilities: `count_populations`,
+ 443    `get_evolved_stats`, `get_cluster_stats_fast`, `compute_all_pcfs_fast`,
+ 444    and `average_pcfs`.
+ 445    """
+ 446
+ 447    from models.CA import PP
+ 448
+ 449    if USE_NUMBA:
+ 450        set_numba_seed(seed)
+ 451
+ 452    if compute_pcf is None:
+ 453        compute_pcf = cfg.collect_pcf and (np.random.random() < cfg.pcf_sample_rate)
+ 454
+ 455    # Initialize model
+ 456    model = PP(
+ 457        rows=grid_size,
+ 458        cols=grid_size,
+ 459        densities=cfg.densities,
+ 460        neighborhood="moore",  # NOTE: Default neighborhood
+ 461        params={
+ 462            "prey_birth": prey_birth,
+ 463            "prey_death": prey_death,
+ 464            "predator_death": predator_death,
+ 465            "predator_birth": predator_birth,
+ 466        },
+ 467        seed=seed,
+ 468        directed_hunting=cfg.directed_hunting,
+ 469    )
+ 470
+ 471    if with_evolution:
+ 472        model.evolve(
+ 473            "prey_death",
+ 474            sd=cfg.evolve_sd,
+ 475            min_val=cfg.evolve_min,
+ 476            max_val=cfg.evolve_max,
+ 477        )
+ 478
+ 479    # Scale timing with grid size
+ 480    warmup_steps = cfg.get_warmup_steps(grid_size)
+ 481    measurement_steps = cfg.get_measurement_steps(grid_size)
+ 482
+ 483    # Warmup phase
+ 484    for _ in range(warmup_steps):
+ 485        model.update()
+ 486
+  487    # Measurement phase: start collecting our metrics
+ 488    prey_pops, pred_pops = [], []  # Prey populations and predator populations
+ 489    evolved_means, evolved_stds = [], []  # Evolution stats over time
+ 490    cluster_sizes_prey, cluster_sizes_pred = [], []  # Cluster sizes
+ 491    largest_fractions_prey, largest_fractions_pred = (
+ 492        [],
+ 493        [],
+ 494    )  # Largest cluster fractions = size of largest cluster / total population
+ 495    pcf_samples = {"prey_prey": [], "pred_pred": [], "prey_pred": []}
+ 496
+ 497    # Determine minimum count for analysis
+ 498    min_count = int(cfg.min_density_for_analysis * (grid_size**2))
+ 499
+ 500    for step in range(measurement_steps):
+ 501        model.update()
+ 502
+ 503        _, prey, pred = count_populations(model.grid)
+ 504        prey_pops.append(prey)
+ 505        pred_pops.append(pred)
+ 506
+ 507        # Track evolution
+ 508        if with_evolution:
+ 509            stats = get_evolved_stats(model, "prey_death")
+ 510            evolved_means.append(stats["mean"])
+ 511            evolved_stds.append(stats["std"])
+ 512
+ 513        # Cluster analysis (at end of measurement)
+ 514        if step == measurement_steps - 1:
+ 515            prey_survived = prey_pops[-1] > min_count
+ 516            pred_survived = pred_pops[-1] > (min_count // 4)
+ 517
+ 518            if prey_survived:
+ 519                prey_stats = get_cluster_stats_fast(model.grid, 1)
+ 520                cluster_sizes_prey = prey_stats["sizes"].tolist()
+ 521                largest_fractions_prey.append(prey_stats["largest_fraction"])
+ 522
+ 523            if pred_survived:
+ 524                pred_stats = get_cluster_stats_fast(model.grid, 2)
+ 525                cluster_sizes_pred = pred_stats["sizes"].tolist()
+ 526                largest_fractions_pred.append(pred_stats["largest_fraction"])
+ 527
+ 528            # PCF requires both
+ 529            if compute_pcf and prey_survived and pred_survived:
+ 530                max_dist = min(grid_size / 2, cfg.pcf_max_distance)
+ 531                pcf_data = compute_all_pcfs_fast(model.grid, max_dist, cfg.pcf_n_bins)
+ 532                pcf_samples["prey_prey"].append(pcf_data["prey_prey"])
+ 533                pcf_samples["pred_pred"].append(pcf_data["pred_pred"])
+ 534                pcf_samples["prey_pred"].append(pcf_data["prey_pred"])
+ 535
+ 536    # Compile results
+ 537    result = {
+ 538        # Parameters
+ 539        "prey_birth": prey_birth,
+ 540        "prey_death": prey_death,
+ 541        "predator_birth": predator_birth,
+ 542        "predator_death": predator_death,
+ 543        "grid_size": grid_size,
+ 544        "with_evolution": with_evolution,
+ 545        "seed": seed,
+ 546        # Population dynamics
+ 547        "prey_mean": float(np.mean(prey_pops)),
+ 548        "prey_std": float(np.std(prey_pops)),
+ 549        "pred_mean": float(np.mean(pred_pops)),
+ 550        "pred_std": float(np.std(pred_pops)),
+ 551        "prey_survived": prey_pops[-1] > min_count,
+ 552        "pred_survived": pred_pops[-1] > (min_count // 4),
+ 553        # Cluster statistics
+ 554        "prey_n_clusters": len(cluster_sizes_prey),
+ 555        "pred_n_clusters": len(cluster_sizes_pred),
+ 556        "prey_cluster_sizes": cluster_sizes_prey,
+ 557        "pred_cluster_sizes": cluster_sizes_pred,
+ 558        # Order parameters
+ 559        "prey_largest_fraction": (
+ 560            float(np.mean(largest_fractions_prey)) if largest_fractions_prey else np.nan
+ 561        ),
+ 562        "pred_largest_fraction": (
+ 563            float(np.mean(largest_fractions_pred)) if largest_fractions_pred else np.nan
+ 564        ),
+ 565    }
+ 566
+ 567    # Time series (if requested)
+ 568    if cfg.save_timeseries:
+ 569        subsample = cfg.timeseries_subsample
+ 570        result["prey_timeseries"] = prey_pops[
+ 571            ::subsample
+ 572        ]  # NOTE: Sample temporal data every 'subsample' steps
+ 573        result["pred_timeseries"] = pred_pops[::subsample]
+ 574
+ 575    # Evolution statistics
+ 576    if with_evolution and evolved_means:
+ 577        valid_means = [v for v in evolved_means if not np.isnan(v)]
+ 578        result["evolved_prey_death_mean"] = (
+ 579            float(np.mean(valid_means)) if valid_means else np.nan
+ 580        )
+ 581        result["evolved_prey_death_std"] = (
+ 582            float(np.mean([v for v in evolved_stds if not np.isnan(v)]))
+ 583            if evolved_stds
+ 584            else np.nan
+ 585        )
+ 586        result["evolved_prey_death_final"] = valid_means[-1] if valid_means else np.nan
+ 587        result["evolved_prey_death_min"] = (
+ 588            float(np.min(valid_means)) if valid_means else np.nan
+ 589        )
+ 590        result["evolved_prey_death_max"] = (
+ 591            float(np.max(valid_means)) if valid_means else np.nan
+ 592        )
+ 593        result["evolve_sd"] = cfg.evolve_sd
+ 594
+ 595        if cfg.save_timeseries:
+ 596            result["evolved_prey_death_timeseries"] = evolved_means[
+ 597                :: cfg.timeseries_subsample
+ 598            ]
+ 599
+ 600    # PCF statistics
+ 601    if pcf_samples["prey_prey"]:
+ 602        dist, pcf_rr, _ = average_pcfs(pcf_samples["prey_prey"])
+ 603        _, pcf_cc, _ = average_pcfs(pcf_samples["pred_pred"])
+ 604        _, pcf_cr, _ = average_pcfs(pcf_samples["prey_pred"])
+ 605
+ 606        result["pcf_distances"] = dist.tolist()
+ 607        result["pcf_prey_prey"] = pcf_rr.tolist()
+ 608        result["pcf_pred_pred"] = pcf_cc.tolist()
+ 609        result["pcf_prey_pred"] = pcf_cr.tolist()
+ 610
+ 611        # Short-range indices
+ 612        short_mask = dist < 3.0
+ 613        if np.any(short_mask):
+ 614            result["segregation_index"] = float(np.mean(pcf_cr[short_mask]))
+ 615            result["prey_clustering_index"] = float(np.mean(pcf_rr[short_mask]))
+ 616            result["pred_clustering_index"] = float(np.mean(pcf_cc[short_mask]))
+ 617
+ 618    return result
+ 619
+ 620
+ 621# =============================================================================
+ 622# Experiment Phases
+ 623# =============================================================================
+ 624
+ 625
+ 626def run_phase1(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]:
+ 627    """
+ 628    Execute Phase 1 of the simulation: a parameter sweep to identify critical points.
+ 629
+ 630    This function performs a 1D sweep across varying prey mortality rates while
+ 631    keeping other parameters fixed. It utilizes parallel execution via joblib
+ 632    and saves results incrementally to a JSONL file to ensure data integrity
+ 633    during long-running batches.
+ 634
+ 635    Parameters
+ 636    ----------
+ 637    cfg : Config
+ 638        Configuration object containing simulation hyperparameters, sweep
+ 639        ranges, and execution settings (n_jobs, grid_size, etc.).
+ 640    output_dir : Path
+ 641        Directory where result files (JSONL) and metadata (JSON) will be stored.
+ 642    logger : logging.Logger
+ 643        Logger instance for tracking simulation progress and recording
+ 644        operational metadata.
+ 645
+ 646    Returns
+ 647    -------
+ 648    all_results : list of dict
+ 649        A list of dictionaries containing the metrics collected from every
+ 650        individual simulation run in the sweep.
+ 651
+ 652    Notes
+ 653    -----
+ 654    The function performs the following steps:
+ 655    1. Pre-warms Numba kernels for performance.
+ 656    2. Generates a deterministic set of simulation jobs using unique seeds.
+ 657    3. Executes simulations in parallel using a generator for memory efficiency.
+ 658    4. Records metadata including a timestamp and a serialized snapshot of
+ 659       the configuration.
+ 660    """
+ 661    from joblib import Parallel, delayed
+ 662
+ 663    warmup_numba_kernels(cfg.grid_size, directed_hunting=cfg.directed_hunting)
+ 664
+ 665    prey_deaths = cfg.get_prey_deaths()
+ 666
+ 667    # Build job list
+ 668    jobs = []
+ 669    # Sweep through prey_death only (prey_birth is fixed)
+ 670    for pd in prey_deaths:
+ 671        for rep in range(cfg.n_replicates):
+ 672            params = {"pd": pd}
+ 673
+ 674            seed = generate_unique_seed(params, rep)
+ 675            jobs.append(
+ 676                (
+ 677                    cfg.prey_birth,
+ 678                    pd,
+ 679                    cfg.predator_birth,
+ 680                    cfg.predator_death,
+ 681                    cfg.grid_size,
+ 682                    seed,
+ 683                    cfg,
+ 684                    False,
+ 685                )
+ 686            )
+ 687
+ 688    logger.info(f"Phase 1: {len(jobs):,} simulations")
+ 689    logger.info(
+ 690        f"  Grid: {cfg.n_prey_death} prey_death values × {cfg.n_replicates} reps (prey_birth={cfg.prey_birth})"
+ 691    )
+ 692    # Run with incremental saving
+ 693    output_jsonl = output_dir / "phase1_results.jsonl"
+ 694    all_results = []
+ 695
+ 696    with open(output_jsonl, "w", encoding="utf-8") as f:
+ 697        executor = Parallel(n_jobs=cfg.n_jobs, return_as="generator")
+ 698        tasks = (delayed(run_single_simulation)(*job) for job in jobs)
+ 699
+ 700        for result in tqdm(executor(tasks), total=len(jobs), desc="Phase 1"):
+ 701            f.write(json.dumps(result, default=str) + "\n")
+ 702            f.flush()
+ 703            all_results.append(result)
+ 704
+ 705    # Save metadata
+ 706    meta = {
+ 707        "phase": 1,
+ 708        "description": "Parameter sweep for critical point",
+ 709        "n_sims": len(all_results),
+ 710        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
+ 711        "config": asdict(cfg),
+ 712    }
+ 713    with open(output_dir / "phase1_metadata.json", "w") as f:
+ 714        json.dump(meta, f, indent=2, default=str)
+ 715
+ 716    logger.info(f"Phase 1 complete. Results: {output_jsonl}")
+ 717    return all_results
+ 718
+ 719
def run_phase2(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]:
    """
    Execute Phase 2 of the simulation: self-organization and criticality analysis.

    This phase tests the Self-Organized Criticality (SOC) hypothesis by
    initializing simulations at different points in the parameter space and
    observing whether evolutionary pressure drives the system toward a
    common critical point, regardless of initial prey mortality rates.

    Parameters
    ----------
    cfg : Config
        Configuration object containing simulation hyperparameters, evolution
        settings, and execution constraints.
    output_dir : Path
        Directory where result files (JSONL) and metadata (JSON) will be stored.
    logger : logging.Logger
        Logger instance for tracking progress and evolutionary convergence.

    Returns
    -------
    all_results : list of dict
        A list of dictionaries containing metrics from the evolutionary
        simulation runs.

    Notes
    -----
    The function captures:
    1. Convergence of 'prey_death' across multiple replicates.
    2. Final steady-state population distributions.
    3. Incremental saving of results to prevent data loss.
    """
    from joblib import Parallel, delayed

    warmup_numba_kernels(cfg.grid_size, directed_hunting=cfg.directed_hunting)

    # Fixed prey_birth for this phase; only the initial prey_death is swept.
    pb = 0.2
    # Vary initial prey_death across the configured range
    initial_prey_deaths = np.linspace(
        cfg.prey_death_range[0], cfg.prey_death_range[1], cfg.n_prey_death
    )

    jobs = []
    for initial_pd in initial_prey_deaths:
        for rep in range(cfg.n_replicates):
            params = {"pb": pb, "initial_pd": initial_pd, "phase": 2}
            seed = generate_unique_seed(params, rep)
            jobs.append(
                (
                    pb,
                    initial_pd,
                    cfg.predator_birth,
                    cfg.predator_death,
                    cfg.grid_size,
                    seed,
                    cfg,
                    True,  # evolution enabled for this phase (other phases pass False)
                )
            )

    logger.info(f"Phase 2: {len(jobs):,} simulations")
    logger.info(f"  prey_birth value: {pb}")
    logger.info(f"  initial prey_death values: {len(initial_prey_deaths)}")
    logger.info(f"  Replicates: {cfg.n_replicates}")

    output_jsonl = output_dir / "phase2_results.jsonl"
    all_results = []

    with open(output_jsonl, "w", encoding="utf-8") as f:
        executor = Parallel(n_jobs=cfg.n_jobs, return_as="generator")
        tasks = (delayed(run_single_simulation)(*job) for job in jobs)

        for result in tqdm(executor(tasks), total=len(jobs), desc="Phase 2"):
            f.write(json.dumps(result, default=str) + "\n")
            f.flush()  # persist each record immediately so partial runs survive
            all_results.append(result)

    meta = {
        "phase": 2,
        "description": "Self-organization toward criticality",
        "n_sims": len(all_results),
        "initial_prey_deaths": initial_prey_deaths.tolist(),
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
        # Persist the full configuration, matching the metadata written by
        # the other phases (1, 4, 5) for consistency.
        "config": asdict(cfg),
    }
    with open(output_dir / "phase2_metadata.json", "w") as f:
        json.dump(meta, f, indent=2, default=str)

    logger.info(f"Phase 2 complete. Results: {output_jsonl}")
    return all_results
+ 810
+ 811
def run_phase3(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]:
    """
    Phase 3: Finite-size scaling at critical point.

    Runs replicated simulations at the critical point found in Phase 1,
    across every grid size in ``cfg.grid_sizes``, so that cluster-size
    cutoffs can later be fitted against the linear system size L.

    Parameters
    ----------
    cfg : Config
        Configuration supplying the critical point (``critical_prey_birth``,
        ``critical_prey_death``), the grid sizes to sweep, the replicate
        count, and parallel-execution settings.
    output_dir : Path
        Directory receiving ``phase3_results.jsonl`` and
        ``phase3_metadata.json``.
    logger : logging.Logger
        Logger instance for progress reporting.

    Returns
    -------
    all_results : list of dict
        Metrics from every (grid size, replicate) simulation.

    Notes
    -----
    - Multiple grid sizes at (critical_prey_birth, critical_prey_death)
    - Analyze cluster size cutoffs vs L
    - Results are flushed line-by-line to JSONL so partial data survives
      an interrupted run.
    """
    from joblib import Parallel, delayed

    # NOTE: Tuned to critical points from phase 1
    pb = cfg.critical_prey_birth
    pd = cfg.critical_prey_death

    logger.info(f"Phase 3: FSS at critical point (pb={pb}, pd={pd})")

    # Pre-compile kernels for every grid size used below, so JIT time is
    # not charged to the first worker task of each size.
    for L in cfg.grid_sizes:
        warmup_numba_kernels(L, directed_hunting=cfg.directed_hunting)

    jobs = []
    for L in cfg.grid_sizes:  # Sweep through grid sizes
        for rep in range(cfg.n_replicates):
            params = {"L": L, "phase": 3}
            seed = generate_unique_seed(params, rep)
            # (prey_birth, prey_death, pred_birth, pred_death, L, seed, cfg, evolve)
            jobs.append(
                (pb, pd, cfg.predator_birth, cfg.predator_death, L, seed, cfg, False)
            )

    logger.info(f"  Grid sizes: {cfg.grid_sizes}")
    logger.info(f"  Total simulations: {len(jobs):,}")

    output_jsonl = output_dir / "phase3_results.jsonl"
    all_results = []

    with open(output_jsonl, "w", encoding="utf-8") as f:
        executor = Parallel(n_jobs=cfg.n_jobs, return_as="generator")
        tasks = (delayed(run_single_simulation)(*job) for job in jobs)

        for result in tqdm(executor(tasks), total=len(jobs), desc="Phase 3"):
            f.write(json.dumps(result, default=str) + "\n")
            f.flush()  # persist each record immediately
            all_results.append(result)

    # Post-run metadata: postprocessing will fit cluster cutoffs vs L
    meta = {
        "phase": 3,
        "description": "Finite-size scaling",
        "critical_point": {"prey_birth": pb, "prey_death": pd},
        "grid_sizes": cfg.grid_sizes,
        "n_sims": len(all_results),
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
    }
    with open(output_dir / "phase3_metadata.json", "w") as f:
        json.dump(meta, f, indent=2, default=str)

    logger.info(f"Phase 3 complete. Results: {output_jsonl}")
    return all_results
+ 868
+ 869
def run_phase4(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]:
    """
    Execute Phase 4 of the simulation: global 4D parameter sweep.

    Performs a full-factorial sensitivity analysis over the four core
    rates (prey birth, prey death, predator birth, predator death) at a
    fixed grid size, mapping the system's behavior across the whole
    parameter space.

    Parameters
    ----------
    cfg : Config
        Configuration object supplying the fixed grid size, replicate
        count, and parallel-execution settings.
    output_dir : Path
        Directory receiving ``phase4_results.jsonl`` and
        ``phase4_metadata.json``.
    logger : logging.Logger
        Logger instance for tracking the progress of the sweep.

    Returns
    -------
    all_results : list of dict
        Metrics for every simulation in the 4D parameter grid.

    Notes
    -----
    The Cartesian product covers 11 x 10 x 11 x 11 = 13,310 parameter
    combinations, each run ``cfg.n_replicates`` times. Results are
    written incrementally to JSONL so partial data survives interruption.
    """
    from joblib import Parallel, delayed
    import itertools

    warmup_numba_kernels(cfg.grid_size, directed_hunting=cfg.directed_hunting)

    # Define sweep values
    prey_death_values = np.linspace(0.05, 0.95, 10)  # 10 values for prey_death
    other_param_values = np.linspace(0.0, 1.0, 11)  # 11 values for the rest

    # Logging
    logger.info("Phase 4: Full 4D Parameter Sweep")
    logger.info("  prey_death: 10 values from 0.05 to 0.95")
    logger.info("  prey_birth, pred_birth, pred_death: 11 values each from 0 to 1")
    logger.info(f"  Grid Size: {cfg.grid_size}")
    logger.info(f"  Replicates: {cfg.n_replicates}")

    # Build parameter grid
    param_grid = itertools.product(
        other_param_values,  # prey_birth (11 values)
        prey_death_values,  # prey_death (10 values)
        other_param_values,  # predator_birth (11 values)
        other_param_values,  # predator_death (11 values)
    )

    jobs = []

    for pb, pd, pred_b, pred_d in param_grid:
        for rep in range(cfg.n_replicates):
            params_id = {
                "pb": pb,
                "pd": pd,
                "pred_b": pred_b,
                "pred_d": pred_d,
                "rep": rep,
            }
            seed = generate_unique_seed(params_id, rep)

            jobs.append(
                (
                    pb,  # prey_birth
                    pd,  # prey_death
                    pred_b,  # predator_birth
                    pred_d,  # predator_death
                    cfg.grid_size,
                    seed,
                    cfg,
                    False,
                )
            )

    logger.info(
        f"  Total simulations: {len(jobs):,}"
    )  # 11 * 10 * 11 * 11 * n_reps = 13,310 * n_reps

    output_jsonl = output_dir / "phase4_results.jsonl"
    all_results = []

    with open(output_jsonl, "w", encoding="utf-8") as f:
        executor = Parallel(n_jobs=cfg.n_jobs, return_as="generator")
        tasks = (delayed(run_single_simulation)(*job) for job in jobs)

        for result in tqdm(executor(tasks), total=len(jobs), desc="Phase 4 (4D Sweep)"):
            f.write(json.dumps(result, default=str) + "\n")
            f.flush()  # persist each record immediately
            all_results.append(result)

    # Save Metadata
    meta = {
        "phase": 4,
        "description": "Global 4D Sensitivity Analysis",
        "prey_death_values": prey_death_values.tolist(),
        "other_param_values": other_param_values.tolist(),
        "parameters_varied": [
            "prey_birth",
            "prey_death",
            "predator_birth",
            "predator_death",
        ],
        "n_sims": len(all_results),
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
        "config": asdict(cfg),
    }
    with open(output_dir / "phase4_metadata.json", "w") as f:
        json.dump(meta, f, indent=2, default=str)

    logger.info(f"Phase 4 complete. Results: {output_jsonl}")
    return all_results
+ 991
+ 992
def run_phase5(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]:
    """
    Execute Phase 5 of the simulation: global 4D parameter sweep with directed hunting.

    This phase performs a comprehensive sensitivity analysis by varying four
    key parameters (prey birth/death and predator birth/death) with the
    directed-hunting extension enabled via ``cfg.directed_hunting``. The
    results allow a direct comparison with Phase 4 to determine how predator
    search behavior shifts the system's critical thresholds and stability.

    Parameters
    ----------
    cfg : Config
        Configuration object containing simulation hyperparameters, parallel
        execution settings, and the fixed grid size for this phase.
    output_dir : Path
        Directory where the result JSONL file and execution metadata will
        be stored.
    logger : logging.Logger
        Logger instance for tracking the progress of the high-volume
        simulation batch.

    Returns
    -------
    all_results : list of dict
        A list of dictionaries containing metrics for every simulation in
        the 4D parameter grid.

    Notes
    -----
    The function utilizes a Cartesian product of parameter ranges to build a
    job list of over 13,000 unique parameter sets (multiplied by replicates).
    Seeds are uniquely generated to distinguish these runs from other phases
    even if parameter values overlap.
    """
    from joblib import Parallel, delayed
    import itertools

    warmup_numba_kernels(cfg.grid_size, directed_hunting=cfg.directed_hunting)

    # Define sweep values (same as Phase 4)
    prey_death_values = np.linspace(0.05, 0.95, 10)  # 10 values for prey_death
    other_param_values = np.linspace(0.0, 1.0, 11)  # 11 values for the rest

    # Logging
    logger.info("Phase 5: Full 4D Parameter Sweep (Directed Hunting)")
    logger.info("  prey_death: 10 values from 0.05 to 0.95")
    logger.info("  prey_birth, pred_birth, pred_death: 11 values each from 0 to 1")
    logger.info(f"  Grid Size: {cfg.grid_size}")
    logger.info(f"  Replicates: {cfg.n_replicates}")
    logger.info(f"  Directed Hunting: {cfg.directed_hunting}")

    # Build parameter grid
    param_grid = itertools.product(
        other_param_values,  # prey_birth (11 values)
        prey_death_values,  # prey_death (10 values)
        other_param_values,  # predator_birth (11 values)
        other_param_values,  # predator_death (11 values)
    )

    jobs = []

    for pb, pd, pred_b, pred_d in param_grid:
        for rep in range(cfg.n_replicates):
            # Include phase identifier to ensure different seeds from Phase 4.
            # NOTE: the value 6 is a legacy identifier (this phase was formerly
            # numbered 6); it is kept so seeds -- and therefore any archived
            # results -- remain reproducible.
            params_id = {
                "pb": pb,
                "pd": pd,
                "pred_b": pred_b,
                "pred_d": pred_d,
                "phase": 6,
                "rep": rep,
            }
            seed = generate_unique_seed(params_id, rep)

            jobs.append(
                (
                    pb,  # prey_birth
                    pd,  # prey_death
                    pred_b,  # predator_birth
                    pred_d,  # predator_death
                    cfg.grid_size,
                    seed,
                    cfg,
                    False,
                )
            )

    logger.info(
        f"  Total simulations: {len(jobs):,}"
    )  # 11 * 10 * 11 * 11 * n_reps = 13,310 * n_reps

    output_jsonl = output_dir / "phase5_results.jsonl"
    all_results = []

    with open(output_jsonl, "w", encoding="utf-8") as f:
        executor = Parallel(n_jobs=cfg.n_jobs, return_as="generator")
        tasks = (delayed(run_single_simulation)(*job) for job in jobs)

        # Fixed: progress label previously said "Phase 6".
        for result in tqdm(
            executor(tasks), total=len(jobs), desc="Phase 5 (4D Sweep + Directed)"
        ):
            f.write(json.dumps(result, default=str) + "\n")
            f.flush()  # persist each record immediately
            all_results.append(result)

    # Save Metadata
    meta = {
        "phase": 5,
        "description": "Global 4D Sensitivity Analysis with Directed Hunting",
        "prey_death_values": prey_death_values.tolist(),
        "other_param_values": other_param_values.tolist(),
        "parameters_varied": [
            "prey_birth",
            "prey_death",
            "predator_birth",
            "predator_death",
        ],
        "directed_hunting": cfg.directed_hunting,
        "n_sims": len(all_results),
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
        "config": asdict(cfg),
    }
    # Fixed: metadata was written to "phase6_metadata.json", desynced from
    # the "phase5_results.jsonl" results file.
    with open(output_dir / "phase5_metadata.json", "w") as f:
        json.dump(meta, f, indent=2, default=str)

    logger.info(f"Phase 5 complete. Results: {output_jsonl}")
    return all_results
+1121
+1122
# =============================================================================
# Main:
# =============================================================================

# Dispatch table mapping a phase number (as given via --phase) to its runner.
# Every runner shares the signature (cfg, output_dir, logger) -> List[Dict].
PHASE_RUNNERS = {
    1: run_phase1,
    2: run_phase2,
    3: run_phase3,
    4: run_phase4,
    5: run_phase5,
}
+1134
+1135
def main():
    """
    Organize the predator-prey experimental suite across multiple phases.

    This entry point handles command-line arguments, sets up logging and output
    directories, and executes the requested simulation phases (1-5). It
    supports parallel execution, dry runs for runtime estimation, and
    automated configuration persistence.

    Notes
    -----
    The script dynamically retrieves phase-specific configurations using
    `get_phase_config` and dispatches execution to the corresponding runner
    in the `PHASE_RUNNERS` mapping.
    """
    parser = argparse.ArgumentParser(
        description="Predator-Prey Hydra Effect Experiments",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Phases:
  1  Parameter sweep to find critical point
  2  Self-organization (evolution toward criticality)
  3  Finite-size scaling at critical point
  4  Sensitivity analysis across parameter regimes
  5  Model extensions (directed hunting comparison)
        """,
    )
    # Fixed: help/error text previously said "1-6", but only phases 1-5 exist.
    parser.add_argument(
        "--phase", type=str, required=True, help="Phase to run: 1-5 or 'all'"
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=Path("results"),
        help="Output directory (default: results)",
    )
    parser.add_argument(
        "--cores", type=int, default=-1, help="Number of cores (-1 for all)"
    )
    parser.add_argument(
        "--dry-run", action="store_true", help="Estimate runtime without running"
    )
    args = parser.parse_args()

    # Parse phase argument
    if args.phase.lower() == "all":
        phases = list(PHASE_RUNNERS.keys())
    else:
        try:
            phases = [int(args.phase)]
        except ValueError:
            print(f"Invalid phase: {args.phase}. Use 1-5 or 'all'")
            sys.exit(1)
        if phases[0] not in PHASE_RUNNERS:
            # Fail fast on out-of-range numbers (e.g. 7) instead of raising
            # KeyError after the output directory and logging are set up.
            print(f"Unknown phase: {phases[0]}. Valid phases: {sorted(PHASE_RUNNERS)}")
            sys.exit(1)

    # Setup output directory
    args.output.mkdir(parents=True, exist_ok=True)

    # Setup logging (file + console)
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(levelname)s] %(message)s",
        handlers=[
            logging.FileHandler(args.output / "experiments.log"),
            logging.StreamHandler(),
        ],
    )
    logger = logging.getLogger(__name__)

    # Header
    logger.info("=" * 60)
    logger.info("PREDATOR-PREY HYDRA EFFECT EXPERIMENTS")
    logger.info("=" * 60)
    logger.info(f"Phases: {phases}")
    logger.info(f"Output: {args.output}")
    logger.info(f"Cores: {args.cores}")
    logger.info(f"Numba: {'ENABLED' if USE_NUMBA else 'DISABLED'}")

    # Process each phase
    for phase in phases:
        cfg = get_phase_config(phase)
        # Prefer an explicit --cores; otherwise fall back to the SLURM
        # allocation (or -1 = all cores when not running under SLURM).
        cfg.n_jobs = (
            args.cores
            if args.cores > 0
            else int(os.environ.get("SLURM_CPUS_PER_TASK", -1))
        )

        logger.info("")
        logger.info(f"{'='*60}")
        logger.info(f"PHASE {phase}")
        logger.info(f"{'='*60}")

        n_cores = cfg.n_jobs if cfg.n_jobs > 0 else os.cpu_count()
        logger.info(f"Estimated: {cfg.estimate_runtime(n_cores)}")

        if args.dry_run:
            logger.info("Dry run - skipping execution")
            continue

        # Save config alongside results for reproducibility
        with open(args.output / f"phase{phase}_config.json", "w") as f:
            json.dump(asdict(cfg), f, indent=2, default=str)

        # Run phase
        start_time = time.time()
        runner = PHASE_RUNNERS[phase]
        runner(cfg, args.output, logger)
        elapsed = time.time() - start_time

        logger.info(f"Phase {phase} runtime: {elapsed/60:.1f} minutes")

    logger.info("")
    logger.info("=" * 60)
    logger.info("EXPERIMENTS COMPLETE")
    logger.info("=" * 60)
+1251
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
+
+ + +
+
+ +
+ + def + generate_unique_seed(params: dict, rep: int) -> int: + + + +
+ +
def generate_unique_seed(params: dict, rep: int) -> int:
    """
    Derive a deterministic RNG seed from a parameter dict and replicate index.

    The parameters are serialized to JSON with sorted keys (so dict insertion
    order cannot change the result), the replicate index is appended, and the
    SHA-256 digest of that string is truncated to its first 8 hex digits and
    interpreted as an integer.

    Parameters
    ----------
    params : dict
        Configuration parameters identifying the run. Keys are sorted during
        serialization to guarantee determinism.
    rep : int
        Replicate index, distinguishing repeated runs of the same parameters.

    Returns
    -------
    int
        Deterministic seed in the range [0, 2**32).

    Examples
    --------
    >>> params = {'learning_rate': 0.01, 'batch_size': 32}
    >>> generate_unique_seed(params, 1)
    3432571217
    >>> generate_unique_seed(params, 2)
    3960013583
    """
    key = f"{json.dumps(params, sort_keys=True)}_{rep}"
    digest = hashlib.sha256(key.encode()).hexdigest()
    # First 8 hex digits -> 32-bit integer seed.
    return int(digest[:8], 16)
+
+ + +

Create a deterministic seed from a dictionary of parameters and a repetition index.

+ +

This function serializes the input dictionary into a sorted JSON string, +appends the repetition count, and hashes the resulting string using SHA-256. +The first 8 characters of the hex digest are then converted to an integer +to provide a stable, unique seed for random number generators.

+ +
Parameters
+ +
    +
  • params (dict): +A dictionary of configuration parameters. Keys are sorted to ensure +determinism regardless of insertion order.
  • +
  • rep (int): +The repetition or iteration index, used to ensure different seeds +are generated for the same parameter set across multiple runs.
  • +
+ +
Returns
+ +
    +
  • int: A unique integer seed derived from the input parameters.
  • +
+ +
Examples
+ +
+
>>> params = {'learning_rate': 0.01, 'batch_size': 32}
+>>> generate_unique_seed(params, 1)
+3432571217
+>>> generate_unique_seed(params, 2)
+3960013583
+
+
+
+ + +
+
+ +
+ + def + count_populations(grid: numpy.ndarray) -> Tuple[int, int, int]: + + + +
+ +
def count_populations(grid: np.ndarray) -> Tuple[int, int, int]:
    """
    Tally empty, prey, and predator cells in the simulation grid.

    Parameters
    ----------
    grid : np.ndarray
        A 2D NumPy array representing the simulation environment, where
        0 marks an empty cell, 1 a prey, and 2 a predator.

    Returns
    -------
    empty_count : int
        Total number of cells with a value of 0.
    prey_count : int
        Total number of cells with a value of 1.
    predator_count : int
        Total number of cells with a value of 2.

    Examples
    --------
    >>> grid = np.array([[0, 1], [2, 1]])
    >>> count_populations(grid)
    (1, 2, 1)
    """
    # One boolean reduction per cell state, cast to plain Python ints.
    empty, prey, predators = (int((grid == state).sum()) for state in (0, 1, 2))
    return empty, prey, predators
+
+ + +

Count the number of empty, prey, and predator cells in the simulation grid.

+ +
Parameters
+ +
    +
  • grid (np.ndarray): +A 2D NumPy array representing the simulation environment, where: +
      +
    • 0: Empty cell
    • +
    • 1: Prey
    • +
    • 2: Predator
    • +
  • +
+ +
Returns
+ +
    +
  • empty_count (int): +Total number of cells with a value of 0.
  • +
  • prey_count (int): +Total number of cells with a value of 1.
  • +
  • predator_count (int): +Total number of cells with a value of 2.
  • +
+ +
Examples
+ +
+
>>> grid = np.array([[0, 1], [2, 1]])
+>>> count_populations(grid)
+(1, 2, 1)
+
+
+
+ + +
+
+ +
+ + def + get_evolved_stats(model, param: str) -> Dict: + + + +
+ +
def get_evolved_stats(model, param: str) -> Dict:
    """
    Summarize an evolved per-cell parameter with descriptive statistics.

    Looks up `param` in the model's `cell_params` storage, discards NaN
    entries, and reports mean/std/min/max along with the count of valid
    values.

    Parameters
    ----------
    model : object
        Simulation model exposing a `cell_params` attribute with a
        `.get()` method.
    param : str
        Name of the evolved parameter to summarize.

    Returns
    -------
    stats : dict
        Keys 'mean', 'std', 'min', 'max', 'n'. When the parameter is
        missing or contains no valid (non-NaN) values, the statistics are
        NaN and 'n' is 0.

    Examples
    --------
    >>> stats = get_evolved_stats(my_model, "speed")
    >>> print(stats['mean'])
    1.25
    """
    # Sentinel result used for both "parameter absent" and "all-NaN" cases.
    no_data = {"mean": np.nan, "std": np.nan, "min": np.nan, "max": np.nan, "n": 0}

    values = model.cell_params.get(param)
    if values is None:
        return no_data

    finite = values[~np.isnan(values)]
    if finite.size == 0:
        return no_data

    return {
        "mean": float(finite.mean()),
        "std": float(finite.std()),
        "min": float(finite.min()),
        "max": float(finite.max()),
        "n": len(finite),
    }
+
+ + +

Get statistics of an evolved parameter from the model.

+ +

This function retrieves parameter values from the model's internal storage, +filters out NaN values, and calculates basic descriptive statistics.

+ +
Parameters
+ +
    +
  • model (object): +The simulation model instance containing a cell_params attribute +with a .get() method.
  • +
  • param (str): +The name of the parameter to calculate statistics for.
  • +
+ +
Returns
+ +
    +
  • stats (dict): +A dictionary containing the following keys: +
      +
    • 'mean': Arithmetic mean of valid values.
    • +
    • 'std': Standard deviation of valid values.
    • +
    • 'min': Minimum valid value.
    • +
    • 'max': Maximum valid value.
    • +
    • 'n': Count of non-NaN values. +If no valid data is found, all stats return NaN and n returns 0.
    • +
  • +
+ +
Examples
+ +
+
>>> stats = get_evolved_stats(my_model, "speed")
+>>> print(stats['mean'])
+1.25
+
+
+
+ + +
+
+ +
+ + def + average_pcfs( pcf_list: List[Tuple[numpy.ndarray, numpy.ndarray, int]]) -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]: + + + +
+ +
def average_pcfs(
    pcf_list: List[Tuple[np.ndarray, np.ndarray, int]],
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Combine repeated pair-correlation measurements into a mean and standard error.

    Parameters
    ----------
    pcf_list : list of tuple
        Each entry is a tuple of:
        - distances (np.ndarray): the radial distances (r);
        - pcf_values (np.ndarray): the correlation values g(r);
        - count (int): metadata/weight, not used in the averaging.

    Returns
    -------
    distances : np.ndarray
        Radial distances, taken from the first measurement in the list.
    pcf_mean : np.ndarray
        Element-wise mean of g(r) across all measurements.
    pcf_se : np.ndarray
        Standard error of the mean of g(r).

    Examples
    --------
    >>> data = [(np.array([0, 1]), np.array([1.0, 2.0]), 10),
    ...         (np.array([0, 1]), np.array([1.2, 1.8]), 12)]
    >>> dist, mean, se = average_pcfs(data)
    >>> mean
    array([1.1, 1.9])
    """
    if not pcf_list:
        return np.array([]), np.array([]), np.array([])

    # All measurements are assumed to share the radial bins of the first one.
    distances = pcf_list[0][0]
    stacked = np.stack([values for _, values, _ in pcf_list])

    mean = stacked.mean(axis=0)
    std_err = stacked.std(axis=0) / np.sqrt(stacked.shape[0])

    return distances, mean, std_err
+
+ + +

Average multiple Pair Correlation Function (PCF) measurements and calculate standard error.

+ +
Parameters
+ +
    +
  • pcf_list (list of tuple): +A list where each element is a tuple containing: +
      +
    • distances (np.ndarray): The radial distances (r).
    • +
    • pcf_values (np.ndarray): The correlation values g(r).
    • +
    • count (int): Metadata or weight (not used in current calculation).
    • +
  • +
+ +
Returns
+ +
    +
  • distances (np.ndarray): +The radial distances from the first entry in the list.
  • +
  • pcf_mean (np.ndarray): +The element-wise mean of the PCF values across all measurements.
  • +
  • pcf_se (np.ndarray): +The standard error of the mean for the PCF values.
  • +
+ +
Examples
+ +
+
>>> data = [(np.array([0, 1]), np.array([1.0, 2.0]), 10),
+...         (np.array([0, 1]), np.array([1.2, 1.8]), 12)]
+>>> dist, mean, se = average_pcfs(data)
+>>> mean
+array([1.1, 1.9])
+
+
+
+ + +
+
+ +
+ + def + save_results_jsonl(results: List[Dict], output_path: pathlib.Path): + + + +
+ +
def save_results_jsonl(results: List[Dict], output_path: Path):
    """
    Write result dictionaries to disk in JSON Lines (JSONL) format.

    One JSON object is written per line. Values JSON cannot serialize
    natively are stringified via ``default=str``.

    Parameters
    ----------
    results : list of dict
        The result records to persist.
    output_path : Path
        Destination file path for the JSONL file.

    Returns
    -------
    None

    Notes
    -----
    The file is opened in 'w' (write) mode, so any existing content at
    `output_path` is overwritten.

    Examples
    --------
    >>> data = [{"id": 1, "score": 0.95}, {"id": 2, "score": 0.88}]
    >>> save_results_jsonl(data, Path("results.jsonl"))
    """
    with open(output_path, "w", encoding="utf-8") as fh:
        fh.writelines(json.dumps(record, default=str) + "\n" for record in results)
+
+ + +

Save a list of dictionaries to a file in JSON Lines (JSONL) format.

+ +

Each dictionary in the list is serialized into a single JSON string and +written as a new line. Non-serializable objects are converted to strings +using the default string representation.

+ +
Parameters
+ +
    +
  • results (list of dict): +The collection of result dictionaries to be saved.
  • +
  • output_path (Path): +The file system path (pathlib.Path) where the JSONL file will be created.
  • +
+ +
Returns
+ +
    +
  • None
  • +
+ +
Notes
+ +

The file is opened in 'w' (write) mode, which will overwrite any existing +content at the specified path.

+ +
Examples
+ +
+
>>> data = [{"id": 1, "score": 0.95}, {"id": 2, "score": 0.88}]
+>>> save_results_jsonl(data, Path("results.jsonl"))
+
+
+
+ + +
+
+ +
+ + def + save_results_npz(results: List[Dict], output_path: pathlib.Path): + + + +
+ +
306def save_results_npz(results: List[Dict], output_path: Path):
+307    """
+308    Save simulation results to a compressed NumPy (.npz) binary file.
+309
+310    This function flattens a list of result dictionaries into a single
+311    dictionary of NumPy arrays, prefixing keys with the run index to
+312    maintain data separation. The resulting file is compressed to
+313    reduce storage space.
+314
+315    Parameters
+316    ----------
+317    results : list of dict
+318        A list where each dictionary contains key-value pairs of
+319        simulation data (e.g., arrays, lists, or scalars).
+320    output_path : Path
+321        The file system path (pathlib.Path) where the compressed
+322        NPZ file will be saved.
+323
+324    Returns
+325    -------
+326    None
+327
+328    Notes
+329    -----
+330    The keys in the saved file follow the format 'run_{index}_{original_key}'.
+331    Values are automatically converted to NumPy arrays if they are not
+332    already.
+333
+334    Examples
+335    --------
+336    >>> results = [{"energy": [1, 2]}, {"energy": [3, 4]}]
+337    >>> save_results_npz(results, Path("output.npz"))
+338    """
+339    data = {}
+340    for i, res in enumerate(results):
+341        for key, val in res.items():
+342            data[f"run_{i}_{key}"] = np.array(val)
+343    np.savez_compressed(output_path, **data)
+
+ + +

Save simulation results to a compressed NumPy (.npz) binary file.

+ +

This function flattens a list of result dictionaries into a single +dictionary of NumPy arrays, prefixing keys with the run index to +maintain data separation. The resulting file is compressed to +reduce storage space.

+ +
Parameters
+ +
    +
  • results (list of dict): +A list where each dictionary contains key-value pairs of +simulation data (e.g., arrays, lists, or scalars).
  • +
  • output_path (Path): +The file system path (pathlib.Path) where the compressed +NPZ file will be saved.
  • +
+ +
Returns
+ +
    +
  • None
  • +
+ +
Notes
+ +

The keys in the saved file follow the format 'run_{index}_{original_key}'. +Values are automatically converted to NumPy arrays if they are not +already.

+ +
Examples
+ +
+
>>> results = [{"energy": [1, 2]}, {"energy": [3, 4]}]
+>>> save_results_npz(results, Path("output.npz"))
+
+
+
+ + +
+
+ +
+ + def + load_results_jsonl(input_path: pathlib.Path) -> List[Dict]: + + + +
+ +
346def load_results_jsonl(input_path: Path) -> List[Dict]:
+347    """
+348    Load simulation results from a JSON Lines (JSONL) formatted file.
+349
+350    This function reads a file line-by-line, parsing each line as an
+351    independent JSON object and aggregating them into a list of dictionaries.
+352
+353    Parameters
+354    ----------
+355    input_path : Path
+356        The file system path (pathlib.Path) to the JSONL file.
+357
+358    Returns
+359    -------
+360    results : list of dict
+361        A list of dictionaries reconstructed from the file content.
+362
+363    Raises
+364    ------
+365    FileNotFoundError
+366        If the specified input path does not exist.
+367    json.JSONDecodeError
+368        If a line in the file is not valid JSON.
+369
+370    Examples
+371    --------
+372    >>> data = load_results_jsonl(Path("results.jsonl"))
+373    >>> len(data)
+374    2
+375    """
+376    results = []
+377    with open(input_path, "r", encoding="utf-8") as f:
+378        for line in f:
+379            results.append(json.loads(line.strip()))
+380    return results
+
+ + +

Load simulation results from a JSON Lines (JSONL) formatted file.

+ +

This function reads a file line-by-line, parsing each line as an +independent JSON object and aggregating them into a list of dictionaries.

+ +
Parameters
+ +
    +
  • input_path (Path): +The file system path (pathlib.Path) to the JSONL file.
  • +
+ +
Returns
+ +
    +
  • results (list of dict): +A list of dictionaries reconstructed from the file content.
  • +
+ +
Raises
+ +
    +
  • FileNotFoundError: If the specified input path does not exist.
  • +
  • json.JSONDecodeError: If a line in the file is not valid JSON.
  • +
+ +
Examples
+ +
+
>>> data = load_results_jsonl(Path("results.jsonl"))
+>>> len(data)
+2
+
+
+
+ + +
+
+ +
+ + def + run_single_simulation( prey_birth: float, prey_death: float, predator_birth: float, predator_death: float, grid_size: int, seed: int, cfg: models.config.Config, with_evolution: bool = False, compute_pcf: Optional[bool] = None) -> Dict: + + + +
+ +
388def run_single_simulation(
+389    prey_birth: float,
+390    prey_death: float,
+391    predator_birth: float,
+392    predator_death: float,
+393    grid_size: int,
+394    seed: int,
+395    cfg: Config,
+396    with_evolution: bool = False,
+397    compute_pcf: Optional[bool] = None,
+398) -> Dict:
+399    """
+400    Run a single Predator-Prey (PP) simulation and collect comprehensive metrics.
+401
+402    This function initializes a Cellular Automata model, executes a warmup phase
+403    to reach steady state, and then performs a measurement phase to track
+404    population dynamics, spatial clustering, and evolutionary changes.
+405
+406    Parameters
+407    ----------
+408    prey_birth : float
+409        The probability or rate of prey reproduction.
+410    prey_death : float
+411        The base probability or rate of prey mortality.
+412    predator_birth : float
+413        The probability or rate of predator reproduction upon consuming prey.
+414    predator_death : float
+415        The probability or rate of predator mortality.
+416    grid_size : int
+417        The side length of the square simulation grid.
+418    seed : int
+419        Random seed for ensuring reproducibility of the simulation run.
+420    cfg : Config
+421        A configuration object containing simulation hyperparameters (densities,
+422        sampling rates, timing, etc.).
+423    with_evolution : bool, optional
+424        If True, enables the evolution of the 'prey_death' parameter within
+425        the model (default is False).
+426    compute_pcf : bool, optional
+427        Explicit toggle for Pair Correlation Function calculation. If None,
+428        it is determined by `cfg.pcf_sample_rate` (default is None).
+429
+430    Returns
+431    -------
+432    result : dict
+433        A dictionary containing simulation results including:
+434        - Input parameters and survival flags.
+435        - Population mean and standard deviation for both species.
+436        - Cluster statistics (number of clusters, sizes, largest fractions).
+437        - Evolutionary statistics (mean, std, min, max, and final values).
+438        - PCF data and spatial indices (segregation and clustering).
+439        - Optional time series for populations and evolved parameters.
+440
+441    Notes
+442    -----
+443    The function relies on several external utilities: `count_populations`,
+444    `get_evolved_stats`, `get_cluster_stats_fast`, `compute_all_pcfs_fast`,
+445    and `average_pcfs`.
+446    """
+447
+448    from models.CA import PP
+449
+450    if USE_NUMBA:
+451        set_numba_seed(seed)
+452
+453    if compute_pcf is None:
+454        compute_pcf = cfg.collect_pcf and (np.random.random() < cfg.pcf_sample_rate)
+455
+456    # Initialize model
+457    model = PP(
+458        rows=grid_size,
+459        cols=grid_size,
+460        densities=cfg.densities,
+461        neighborhood="moore",  # NOTE: Default neighborhood
+462        params={
+463            "prey_birth": prey_birth,
+464            "prey_death": prey_death,
+465            "predator_death": predator_death,
+466            "predator_birth": predator_birth,
+467        },
+468        seed=seed,
+469        directed_hunting=cfg.directed_hunting,
+470    )
+471
+472    if with_evolution:
+473        model.evolve(
+474            "prey_death",
+475            sd=cfg.evolve_sd,
+476            min_val=cfg.evolve_min,
+477            max_val=cfg.evolve_max,
+478        )
+479
+480    # Scale timing with grid size
+481    warmup_steps = cfg.get_warmup_steps(grid_size)
+482    measurement_steps = cfg.get_measurement_steps(grid_size)
+483
+484    # Warmup phase
+485    for _ in range(warmup_steps):
+486        model.update()
+487
+488    # Measurement phase: start collecting our mertics
+489    prey_pops, pred_pops = [], []  # Prey populations and predator populations
+490    evolved_means, evolved_stds = [], []  # Evolution stats over time
+491    cluster_sizes_prey, cluster_sizes_pred = [], []  # Cluster sizes
+492    largest_fractions_prey, largest_fractions_pred = (
+493        [],
+494        [],
+495    )  # Largest cluster fractions = size of largest cluster / total population
+496    pcf_samples = {"prey_prey": [], "pred_pred": [], "prey_pred": []}
+497
+498    # Determine minimum count for analysis
+499    min_count = int(cfg.min_density_for_analysis * (grid_size**2))
+500
+501    for step in range(measurement_steps):
+502        model.update()
+503
+504        _, prey, pred = count_populations(model.grid)
+505        prey_pops.append(prey)
+506        pred_pops.append(pred)
+507
+508        # Track evolution
+509        if with_evolution:
+510            stats = get_evolved_stats(model, "prey_death")
+511            evolved_means.append(stats["mean"])
+512            evolved_stds.append(stats["std"])
+513
+514        # Cluster analysis (at end of measurement)
+515        if step == measurement_steps - 1:
+516            prey_survived = prey_pops[-1] > min_count
+517            pred_survived = pred_pops[-1] > (min_count // 4)
+518
+519            if prey_survived:
+520                prey_stats = get_cluster_stats_fast(model.grid, 1)
+521                cluster_sizes_prey = prey_stats["sizes"].tolist()
+522                largest_fractions_prey.append(prey_stats["largest_fraction"])
+523
+524            if pred_survived:
+525                pred_stats = get_cluster_stats_fast(model.grid, 2)
+526                cluster_sizes_pred = pred_stats["sizes"].tolist()
+527                largest_fractions_pred.append(pred_stats["largest_fraction"])
+528
+529            # PCF requires both
+530            if compute_pcf and prey_survived and pred_survived:
+531                max_dist = min(grid_size / 2, cfg.pcf_max_distance)
+532                pcf_data = compute_all_pcfs_fast(model.grid, max_dist, cfg.pcf_n_bins)
+533                pcf_samples["prey_prey"].append(pcf_data["prey_prey"])
+534                pcf_samples["pred_pred"].append(pcf_data["pred_pred"])
+535                pcf_samples["prey_pred"].append(pcf_data["prey_pred"])
+536
+537    # Compile results
+538    result = {
+539        # Parameters
+540        "prey_birth": prey_birth,
+541        "prey_death": prey_death,
+542        "predator_birth": predator_birth,
+543        "predator_death": predator_death,
+544        "grid_size": grid_size,
+545        "with_evolution": with_evolution,
+546        "seed": seed,
+547        # Population dynamics
+548        "prey_mean": float(np.mean(prey_pops)),
+549        "prey_std": float(np.std(prey_pops)),
+550        "pred_mean": float(np.mean(pred_pops)),
+551        "pred_std": float(np.std(pred_pops)),
+552        "prey_survived": prey_pops[-1] > min_count,
+553        "pred_survived": pred_pops[-1] > (min_count // 4),
+554        # Cluster statistics
+555        "prey_n_clusters": len(cluster_sizes_prey),
+556        "pred_n_clusters": len(cluster_sizes_pred),
+557        "prey_cluster_sizes": cluster_sizes_prey,
+558        "pred_cluster_sizes": cluster_sizes_pred,
+559        # Order parameters
+560        "prey_largest_fraction": (
+561            float(np.mean(largest_fractions_prey)) if largest_fractions_prey else np.nan
+562        ),
+563        "pred_largest_fraction": (
+564            float(np.mean(largest_fractions_pred)) if largest_fractions_pred else np.nan
+565        ),
+566    }
+567
+568    # Time series (if requested)
+569    if cfg.save_timeseries:
+570        subsample = cfg.timeseries_subsample
+571        result["prey_timeseries"] = prey_pops[
+572            ::subsample
+573        ]  # NOTE: Sample temporal data every 'subsample' steps
+574        result["pred_timeseries"] = pred_pops[::subsample]
+575
+576    # Evolution statistics
+577    if with_evolution and evolved_means:
+578        valid_means = [v for v in evolved_means if not np.isnan(v)]
+579        result["evolved_prey_death_mean"] = (
+580            float(np.mean(valid_means)) if valid_means else np.nan
+581        )
+582        result["evolved_prey_death_std"] = (
+583            float(np.mean([v for v in evolved_stds if not np.isnan(v)]))
+584            if evolved_stds
+585            else np.nan
+586        )
+587        result["evolved_prey_death_final"] = valid_means[-1] if valid_means else np.nan
+588        result["evolved_prey_death_min"] = (
+589            float(np.min(valid_means)) if valid_means else np.nan
+590        )
+591        result["evolved_prey_death_max"] = (
+592            float(np.max(valid_means)) if valid_means else np.nan
+593        )
+594        result["evolve_sd"] = cfg.evolve_sd
+595
+596        if cfg.save_timeseries:
+597            result["evolved_prey_death_timeseries"] = evolved_means[
+598                :: cfg.timeseries_subsample
+599            ]
+600
+601    # PCF statistics
+602    if pcf_samples["prey_prey"]:
+603        dist, pcf_rr, _ = average_pcfs(pcf_samples["prey_prey"])
+604        _, pcf_cc, _ = average_pcfs(pcf_samples["pred_pred"])
+605        _, pcf_cr, _ = average_pcfs(pcf_samples["prey_pred"])
+606
+607        result["pcf_distances"] = dist.tolist()
+608        result["pcf_prey_prey"] = pcf_rr.tolist()
+609        result["pcf_pred_pred"] = pcf_cc.tolist()
+610        result["pcf_prey_pred"] = pcf_cr.tolist()
+611
+612        # Short-range indices
+613        short_mask = dist < 3.0
+614        if np.any(short_mask):
+615            result["segregation_index"] = float(np.mean(pcf_cr[short_mask]))
+616            result["prey_clustering_index"] = float(np.mean(pcf_rr[short_mask]))
+617            result["pred_clustering_index"] = float(np.mean(pcf_cc[short_mask]))
+618
+619    return result
+
+ + +

Run a single Predator-Prey (PP) simulation and collect comprehensive metrics.

+ +

This function initializes a Cellular Automata model, executes a warmup phase +to reach steady state, and then performs a measurement phase to track +population dynamics, spatial clustering, and evolutionary changes.

+ +
Parameters
+ +
    +
  • prey_birth (float): +The probability or rate of prey reproduction.
  • +
  • prey_death (float): +The base probability or rate of prey mortality.
  • +
  • predator_birth (float): +The probability or rate of predator reproduction upon consuming prey.
  • +
  • predator_death (float): +The probability or rate of predator mortality.
  • +
  • grid_size (int): +The side length of the square simulation grid.
  • +
  • seed (int): +Random seed for ensuring reproducibility of the simulation run.
  • +
  • cfg (Config): +A configuration object containing simulation hyperparameters (densities, +sampling rates, timing, etc.).
  • +
  • with_evolution (bool, optional): +If True, enables the evolution of the 'prey_death' parameter within +the model (default is False).
  • +
  • compute_pcf (bool, optional): +Explicit toggle for Pair Correlation Function calculation. If None, +it is determined by cfg.pcf_sample_rate (default is None).
  • +
+ +
Returns
+ +
    +
  • result (dict): +A dictionary containing simulation results including: +
      +
    • Input parameters and survival flags.
    • +
    • Population mean and standard deviation for both species.
    • +
    • Cluster statistics (number of clusters, sizes, largest fractions).
    • +
    • Evolutionary statistics (mean, std, min, max, and final values).
    • +
    • PCF data and spatial indices (segregation and clustering).
    • +
    • Optional time series for populations and evolved parameters.
    • +
  • +
+ +
Notes
+ +

The function relies on several external utilities: count_populations, +get_evolved_stats, get_cluster_stats_fast, compute_all_pcfs_fast, +and average_pcfs.

+
+ + +
+
+ +
+ + def + run_phase1( cfg: models.config.Config, output_dir: pathlib.Path, logger: logging.Logger) -> List[Dict]: + + + +
+ +
627def run_phase1(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]:
+628    """
+629    Execute Phase 1 of the simulation: a parameter sweep to identify critical points.
+630
+631    This function performs a 1D sweep across varying prey mortality rates while
+632    keeping other parameters fixed. It utilizes parallel execution via joblib
+633    and saves results incrementally to a JSONL file to ensure data integrity
+634    during long-running batches.
+635
+636    Parameters
+637    ----------
+638    cfg : Config
+639        Configuration object containing simulation hyperparameters, sweep
+640        ranges, and execution settings (n_jobs, grid_size, etc.).
+641    output_dir : Path
+642        Directory where result files (JSONL) and metadata (JSON) will be stored.
+643    logger : logging.Logger
+644        Logger instance for tracking simulation progress and recording
+645        operational metadata.
+646
+647    Returns
+648    -------
+649    all_results : list of dict
+650        A list of dictionaries containing the metrics collected from every
+651        individual simulation run in the sweep.
+652
+653    Notes
+654    -----
+655    The function performs the following steps:
+656    1. Pre-warms Numba kernels for performance.
+657    2. Generates a deterministic set of simulation jobs using unique seeds.
+658    3. Executes simulations in parallel using a generator for memory efficiency.
+659    4. Records metadata including a timestamp and a serialized snapshot of
+660       the configuration.
+661    """
+662    from joblib import Parallel, delayed
+663
+664    warmup_numba_kernels(cfg.grid_size, directed_hunting=cfg.directed_hunting)
+665
+666    prey_deaths = cfg.get_prey_deaths()
+667
+668    # Build job list
+669    jobs = []
+670    # Sweep through prey_death only (prey_birth is fixed)
+671    for pd in prey_deaths:
+672        for rep in range(cfg.n_replicates):
+673            params = {"pd": pd}
+674
+675            seed = generate_unique_seed(params, rep)
+676            jobs.append(
+677                (
+678                    cfg.prey_birth,
+679                    pd,
+680                    cfg.predator_birth,
+681                    cfg.predator_death,
+682                    cfg.grid_size,
+683                    seed,
+684                    cfg,
+685                    False,
+686                )
+687            )
+688
+689    logger.info(f"Phase 1: {len(jobs):,} simulations")
+690    logger.info(
+691        f"  Grid: {cfg.n_prey_death} prey_death values × {cfg.n_replicates} reps (prey_birth={cfg.prey_birth})"
+692    )
+693    # Run with incremental saving
+694    output_jsonl = output_dir / "phase1_results.jsonl"
+695    all_results = []
+696
+697    with open(output_jsonl, "w", encoding="utf-8") as f:
+698        executor = Parallel(n_jobs=cfg.n_jobs, return_as="generator")
+699        tasks = (delayed(run_single_simulation)(*job) for job in jobs)
+700
+701        for result in tqdm(executor(tasks), total=len(jobs), desc="Phase 1"):
+702            f.write(json.dumps(result, default=str) + "\n")
+703            f.flush()
+704            all_results.append(result)
+705
+706    # Save metadata
+707    meta = {
+708        "phase": 1,
+709        "description": "Parameter sweep for critical point",
+710        "n_sims": len(all_results),
+711        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
+712        "config": asdict(cfg),
+713    }
+714    with open(output_dir / "phase1_metadata.json", "w") as f:
+715        json.dump(meta, f, indent=2, default=str)
+716
+717    logger.info(f"Phase 1 complete. Results: {output_jsonl}")
+718    return all_results
+
+ + +

Execute Phase 1 of the simulation: a parameter sweep to identify critical points.

+ +

This function performs a 1D sweep across varying prey mortality rates while +keeping other parameters fixed. It utilizes parallel execution via joblib +and saves results incrementally to a JSONL file to ensure data integrity +during long-running batches.

+ +
Parameters
+ +
    +
  • cfg (Config): +Configuration object containing simulation hyperparameters, sweep +ranges, and execution settings (n_jobs, grid_size, etc.).
  • +
  • output_dir (Path): +Directory where result files (JSONL) and metadata (JSON) will be stored.
  • +
  • logger (logging.Logger): +Logger instance for tracking simulation progress and recording +operational metadata.
  • +
+ +
Returns
+ +
    +
  • all_results (list of dict): +A list of dictionaries containing the metrics collected from every +individual simulation run in the sweep.
  • +
+ +
Notes
+ +

The function performs the following steps:

+ +
    +
  1. Pre-warms Numba kernels for performance.
  2. +
  3. Generates a deterministic set of simulation jobs using unique seeds.
  4. +
  5. Executes simulations in parallel using a generator for memory efficiency.
  6. +
  7. Records metadata including a timestamp and a serialized snapshot of +the configuration.
  8. +
+
+ + +
+
+ +
+ + def + run_phase2( cfg: models.config.Config, output_dir: pathlib.Path, logger: logging.Logger) -> List[Dict]: + + + +
+ +
721def run_phase2(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]:
+722    """
+723    Execute Phase 2 of the simulation: self-organization and criticality analysis.
+724
+725    This phase tests the Self-Organized Criticality (SOC) hypothesis by
+726    initializing simulations at different points in the parameter space and
+727    observing whether evolutionary pressure drives the system toward a
+728    common critical point, regardless of initial prey mortality rates.
+729
+730    Parameters
+731    ----------
+732    cfg : Config
+733        Configuration object containing simulation hyperparameters, evolution
+734        settings, and execution constraints.
+735    output_dir : Path
+736        Directory where result files (JSONL) and metadata (JSON) will be stored.
+737    logger : logging.Logger
+738        Logger instance for tracking progress and evolutionary convergence.
+739
+740    Returns
+741    -------
+742    all_results : list of dict
+743        A list of dictionaries containing metrics from the evolutionary
+744        simulation runs.
+745
+746    Notes
+747    -----
+748    The function captures:
+749    1. Convergence of 'prey_death' across multiple replicates.
+750    2. Final steady-state population distributions.
+751    3. Incremental saving of results to prevent data loss.
+752    """
+753    from joblib import Parallel, delayed
+754
+755    warmup_numba_kernels(cfg.grid_size, directed_hunting=cfg.directed_hunting)
+756
+757    # Test at multiple prey_birth values
+758    pb = 0.2
+759    # Vary intial prey_death
+760    initial_prey_deaths = np.linspace(
+761        cfg.prey_death_range[0], cfg.prey_death_range[1], cfg.n_prey_death
+762    )
+763
+764    jobs = []
+765    for initial_pd in initial_prey_deaths:
+766        for rep in range(cfg.n_replicates):
+767            params = {"pb": pb, "initial_pd": initial_pd, "phase": 2}
+768            seed = generate_unique_seed(params, rep)
+769            jobs.append(
+770                (
+771                    pb,
+772                    initial_pd,
+773                    cfg.predator_birth,
+774                    cfg.predator_death,
+775                    cfg.grid_size,
+776                    seed,
+777                    cfg,
+778                    True,
+779                )
+780            )
+781
+782    logger.info(f"Phase 2: {len(jobs):,} simulations")
+783    logger.info(f"  prey_birth value: {pb}")
+784    logger.info(f"  initial prey_death values: {len(initial_prey_deaths)}")
+785    logger.info(f"  Replicates: {cfg.n_replicates}")
+786
+787    output_jsonl = output_dir / "phase2_results.jsonl"
+788    all_results = []
+789
+790    with open(output_jsonl, "w", encoding="utf-8") as f:
+791        executor = Parallel(n_jobs=cfg.n_jobs, return_as="generator")
+792        tasks = (delayed(run_single_simulation)(*job) for job in jobs)
+793
+794        for result in tqdm(executor(tasks), total=len(jobs), desc="Phase 2"):
+795            f.write(json.dumps(result, default=str) + "\n")
+796            f.flush()
+797            all_results.append(result)
+798
+799    meta = {
+800        "phase": 2,
+801        "description": "Self-organization toward criticality",
+802        "n_sims": len(all_results),
+803        "initial_prey_deaths": initial_prey_deaths.tolist(),
+804        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
+805    }
+806    with open(output_dir / "phase2_metadata.json", "w") as f:
+807        json.dump(meta, f, indent=2, default=str)
+808
+809    logger.info(f"Phase 2 complete. Results: {output_jsonl}")
+810    return all_results
+
+ + +

Execute Phase 2 of the simulation: self-organization and criticality analysis.

+ +

This phase tests the Self-Organized Criticality (SOC) hypothesis by +initializing simulations at different points in the parameter space and +observing whether evolutionary pressure drives the system toward a +common critical point, regardless of initial prey mortality rates.

+ +
Parameters
+ +
    +
  • cfg (Config): +Configuration object containing simulation hyperparameters, evolution +settings, and execution constraints.
  • +
  • output_dir (Path): +Directory where result files (JSONL) and metadata (JSON) will be stored.
  • +
  • logger (logging.Logger): +Logger instance for tracking progress and evolutionary convergence.
  • +
+ +
Returns
+ +
    +
  • all_results (list of dict): +A list of dictionaries containing metrics from the evolutionary +simulation runs.
  • +
+ +
Notes
+ +

The function captures:

+ +
    +
  1. Convergence of 'prey_death' across multiple replicates.
  2. +
  3. Final steady-state population distributions.
  4. +
  5. Incremental saving of results to prevent data loss.
  6. +
+
+ + +
+
+ +
+ + def + run_phase3( cfg: models.config.Config, output_dir: pathlib.Path, logger: logging.Logger) -> List[Dict]: + + + +
+ +
813def run_phase3(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]:
+814    """
+815    Phase 3: Finite-size scaling at critical point.
+816
+817    - Multiple grid sizes at (critical_prey_birth, critical_prey_death)
+818    - Analyze cluster size cutoffs vs L
+819    """
+820    from joblib import Parallel, delayed
+821
+822    # NOTE: Tuned to critical points from phase 1
+823    pb = cfg.critical_prey_birth
+824    pd = cfg.critical_prey_death
+825
+826    logger.info(f"Phase 3: FSS at critical point (pb={pb}, pd={pd})")
+827
+828    for L in cfg.grid_sizes:
+829        warmup_numba_kernels(L, directed_hunting=cfg.directed_hunting)
+830
+831    jobs = []
+832    for L in cfg.grid_sizes:  # Sweep through grid sizes
+833        for rep in range(cfg.n_replicates):
+834            params = {"L": L, "phase": 3}
+835            seed = generate_unique_seed(params, rep)
+836            jobs.append(
+837                (pb, pd, cfg.predator_birth, cfg.predator_death, L, seed, cfg, False)
+838            )
+839
+840    logger.info(f"  Grid sizes: {cfg.grid_sizes}")
+841    logger.info(f"  Total simulations: {len(jobs):,}")
+842
+843    output_jsonl = output_dir / "phase3_results.jsonl"
+844    all_results = []
+845
+846    with open(output_jsonl, "w", encoding="utf-8") as f:
+847        executor = Parallel(n_jobs=cfg.n_jobs, return_as="generator")
+848        tasks = (delayed(run_single_simulation)(*job) for job in jobs)
+849
+850        for result in tqdm(executor(tasks), total=len(jobs), desc="Phase 3"):
+851            f.write(json.dumps(result, default=str) + "\n")
+852            f.flush()
+853            all_results.append(result)
+854
+855    # Post-run metadata: postprocessing will fit cluster cutoffs vs L
+856    meta = {
+857        "phase": 3,
+858        "description": "Finite-size scaling",
+859        "critical_point": {"prey_birth": pb, "prey_death": pd},
+860        "grid_sizes": cfg.grid_sizes,
+861        "n_sims": len(all_results),
+862        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
+863    }
+864    with open(output_dir / "phase3_metadata.json", "w") as f:
+865        json.dump(meta, f, indent=2, default=str)
+866
+867    logger.info(f"Phase 3 complete. Results: {output_jsonl}")
+868    return all_results
+
+ + +

Phase 3: Finite-size scaling at critical point.

+ +
    +
  • Multiple grid sizes at (critical_prey_birth, critical_prey_death)
  • +
  • Analyze cluster size cutoffs vs L
  • +
+
+ + +
+
+ +
+ + def + run_phase4( cfg: models.config.Config, output_dir: pathlib.Path, logger: logging.Logger) -> List[Dict]: + + + +
+ +
871def run_phase4(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]:
+872    """
+873    Execute Phase 3 of the simulation: Finite-Size Scaling (FSS) analysis.
+874
+875    This phase investigates how spatial structures, specifically cluster size
+876    cutoffs, scale with the system size (L) at the critical point identified
+877    in Phase 1. This is essential for determining the universality class of
+878    the phase transition.
+879
+880    Parameters
+881    ----------
+882    cfg : Config
+883        Configuration object containing critical point parameters, the list of
+884        grid sizes to test, and execution settings.
+885    output_dir : Path
+886        Directory where result files (JSONL) and FSS metadata (JSON) will be
+887        stored.
+888    logger : logging.Logger
+889        Logger instance for tracking progress across different grid sizes.
+890
+891    Returns
+892    -------
+893    all_results : list of dict
+894        A list of dictionaries containing metrics and cluster statistics for
+895        each grid size and replicate.
+896
+897    Notes
+898    -----
+899    The function performs the following:
+900    1. Iterates through multiple grid sizes defined in `cfg.grid_sizes`.
+901    2. Generates parallel jobs for each size using critical birth/death rates.
+902    3. Saves results incrementally to allow for post-simulation analysis of
+903       power-law exponents.
+904    """
+905    from joblib import Parallel, delayed
+906    import itertools
+907
+908    warmup_numba_kernels(cfg.grid_size, directed_hunting=cfg.directed_hunting)
+909
+910    # Define sweep values
+911    prey_death_values = np.linspace(0.05, 0.95, 10)  # 10 values for prey_death
+912    other_param_values = np.linspace(0.0, 1.0, 11)  # 11 values for the rest
+913
+914    # Logging
+915    logger.info(f"Phase 4: Full 4D Parameter Sweep")
+916    logger.info(f"  prey_death: 10 values from 0.05 to 0.95")
+917    logger.info(f"  prey_birth, pred_birth, pred_death: 11 values each from 0 to 1")
+918    logger.info(f"  Grid Size: {cfg.grid_size}")
+919    logger.info(f"  Replicates: {cfg.n_replicates}")
+920
+921    # Build parameter grid
+922    param_grid = itertools.product(
+923        other_param_values,  # prey_birth (11 values)
+924        prey_death_values,  # prey_death (10 values)
+925        other_param_values,  # predator_birth (11 values)
+926        other_param_values,  # predator_death (11 values)
+927    )
+928
+929    jobs = []
+930
+931    for pb, pd, pred_b, pred_d in param_grid:
+932        for rep in range(cfg.n_replicates):
+933            params_id = {
+934                "pb": pb,
+935                "pd": pd,
+936                "pred_b": pred_b,
+937                "pred_d": pred_d,
+938                "rep": rep,
+939            }
+940            seed = generate_unique_seed(params_id, rep)
+941
+942            jobs.append(
+943                (
+944                    pb,  # prey_birth
+945                    pd,  # prey_death
+946                    pred_b,  # predator_birth
+947                    pred_d,  # predator_death
+948                    cfg.grid_size,
+949                    seed,
+950                    cfg,
+951                    False,
+952                )
+953            )
+954
+955    logger.info(
+956        f"  Total simulations: {len(jobs):,}"
+957    )  # 11 * 10 * 11 * 11 * n_reps = 13,310 * n_reps
+958
+959    output_jsonl = output_dir / "phase4_results.jsonl"
+960    all_results = []
+961
+962    with open(output_jsonl, "w", encoding="utf-8") as f:
+963        executor = Parallel(n_jobs=cfg.n_jobs, return_as="generator")
+964        tasks = (delayed(run_single_simulation)(*job) for job in jobs)
+965
+966        for result in tqdm(executor(tasks), total=len(jobs), desc="Phase 4 (4D Sweep)"):
+967            f.write(json.dumps(result, default=str) + "\n")
+968            f.flush()
+969            all_results.append(result)
+970
+971    # Save Metadata
+972    meta = {
+973        "phase": 4,
+974        "description": "Global 4D Sensitivity Analysis",
+975        "prey_death_values": prey_death_values.tolist(),
+976        "other_param_values": other_param_values.tolist(),
+977        "parameters_varied": [
+978            "prey_birth",
+979            "prey_death",
+980            "predator_birth",
+981            "predator_death",
+982        ],
+983        "n_sims": len(all_results),
+984        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
+985        "config": asdict(cfg),
+986    }
+987    with open(output_dir / "phase4_metadata.json", "w") as f:
+988        json.dump(meta, f, indent=2, default=str)
+989
+990    logger.info(f"Phase 4 complete. Results: {output_jsonl}")
+991    return all_results
+
+ + +

Execute Phase 4 of the simulation: global 4D parameter sweep (sensitivity analysis).

+ +

This phase performs a comprehensive sensitivity analysis by varying four key +parameters (prey birth/death and predator birth/death) on a fixed grid, +mapping where coexistence, hydra effects, and critical behavior occur +across the full parameter space.

+ +
Parameters
+ +
    +
  • cfg (Config): +Configuration object containing the sweep settings, the fixed grid size, +replicate counts, and execution settings.
  • +
  • output_dir (Path): +Directory where result files (JSONL) and FSS metadata (JSON) will be +stored.
  • +
  • logger (logging.Logger): +Logger instance for tracking progress across different grid sizes.
  • +
+ +
Returns
+ +
    +
  • all_results (list of dict): +A list of dictionaries containing metrics and cluster statistics for +each grid size and replicate.
  • +
+ +
Notes
+ +

The function performs the following:

+ +
    +
  1. Sweeps four parameters (prey birth/death, predator birth/death) on a fixed grid.
  2. +
  3. Generates parallel jobs for every parameter combination and replicate.
  4. +
  5. Saves results incrementally to allow for post-simulation analysis of +power-law exponents.
  6. +
+
+ + +
+
+ +
+ + def + run_phase5( cfg: models.config.Config, output_dir: pathlib.Path, logger: logging.Logger) -> List[Dict]: + + + +
+ +
 994def run_phase5(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]:
+ 995    """
+ 996    Execute Phase 5 of the simulation: Global 4D parameter sweep with directed hunting.
+ 997
+ 998    This phase performs a comprehensive sensitivity analysis by varying four key
+ 999    parameters (prey birth/death and predator birth/death) while directed
+1000    hunting is enabled. The results allow for a direct comparison with Phase 4
+1001    to determine how predator search behavior shifts the system's critical
+1002    thresholds and stability.
+1003
+1004    Parameters
+1005    ----------
+1006    cfg : Config
+1007        Configuration object containing simulation hyperparameters, parallel
+1008        execution settings, and the fixed grid size for this phase.
+1009    output_dir : Path
+1010        Directory where the result JSONL file and execution metadata will
+1011        be stored.
+1012    logger : logging.Logger
+1013        Logger instance for tracking the progress of the high-volume
+1014        simulation batch.
+1015
+1016    Returns
+1017    -------
+1018    all_results : list of dict
+1019        A list of dictionaries containing metrics for every simulation in
+1020        the 4D parameter grid.
+1021
+1022    Notes
+1023    -----
+1024    The function utilizes a Cartesian product of parameter ranges to build a
+1025    job list of over 13,000 unique parameter sets (multiplied by replicates).
+1026    Seeds are uniquely generated to distinguish these runs from other phases
+1027    even if parameter values overlap.
+1028    """
+1029    from joblib import Parallel, delayed
+1030    import itertools
+1031
+1032    warmup_numba_kernels(cfg.grid_size, directed_hunting=cfg.directed_hunting)
+1033
+1034    # Define sweep values (same as Phase 4)
+1035    prey_death_values = np.linspace(0.05, 0.95, 10)  # 10 values for prey_death
+1036    other_param_values = np.linspace(0.0, 1.0, 11)  # 11 values for the rest
+1037
+1038    # Logging
+1039    logger.info(f"Phase 5: Full 4D Parameter Sweep (Directed Hunting)")
+1040    logger.info(f"  prey_death: 10 values from 0.05 to 0.95")
+1041    logger.info(f"  prey_birth, pred_birth, pred_death: 11 values each from 0 to 1")
+1042    logger.info(f"  Grid Size: {cfg.grid_size}")
+1043    logger.info(f"  Replicates: {cfg.n_replicates}")
+1044    logger.info(f"  Directed Hunting: {cfg.directed_hunting}")
+1045
+1046    # Build parameter grid
+1047    param_grid = itertools.product(
+1048        other_param_values,  # prey_birth (11 values)
+1049        prey_death_values,  # prey_death (10 values)
+1050        other_param_values,  # predator_birth (11 values)
+1051        other_param_values,  # predator_death (11 values)
+1052    )
+1053
+1054    jobs = []
+1055
+1056    for pb, pd, pred_b, pred_d in param_grid:
+1057        for rep in range(cfg.n_replicates):
+1058            # Include phase identifier to ensure different seeds from Phase 4
+1059            params_id = {
+1060                "pb": pb,
+1061                "pd": pd,
+1062                "pred_b": pred_b,
+1063                "pred_d": pred_d,
+1064                "phase": 6,
+1065                "rep": rep,
+1066            }
+1067            seed = generate_unique_seed(params_id, rep)
+1068
+1069            jobs.append(
+1070                (
+1071                    pb,  # prey_birth
+1072                    pd,  # prey_death
+1073                    pred_b,  # predator_birth
+1074                    pred_d,  # predator_death
+1075                    cfg.grid_size,
+1076                    seed,
+1077                    cfg,
+1078                    False,
+1079                )
+1080            )
+1081
+1082    logger.info(
+1083        f"  Total simulations: {len(jobs):,}"
+1084    )  # 11 * 10 * 11 * 11 * n_reps = 13,310 * n_reps
+1085
+1086    output_jsonl = output_dir / "phase5_results.jsonl"
+1087    all_results = []
+1088
+1089    with open(output_jsonl, "w", encoding="utf-8") as f:
+1090        executor = Parallel(n_jobs=cfg.n_jobs, return_as="generator")
+1091        tasks = (delayed(run_single_simulation)(*job) for job in jobs)
+1092
+1093        for result in tqdm(
+1094            executor(tasks), total=len(jobs), desc="Phase 6 (4D Sweep + Directed)"
+1095        ):
+1096            f.write(json.dumps(result, default=str) + "\n")
+1097            f.flush()
+1098            all_results.append(result)
+1099
+1100    # Save Metadata
+1101    meta = {
+1102        "phase": 5,
+1103        "description": "Global 4D Sensitivity Analysis with Directed Hunting",
+1104        "prey_death_values": prey_death_values.tolist(),
+1105        "other_param_values": other_param_values.tolist(),
+1106        "parameters_varied": [
+1107            "prey_birth",
+1108            "prey_death",
+1109            "predator_birth",
+1110            "predator_death",
+1111        ],
+1112        "directed_hunting": cfg.directed_hunting,
+1113        "n_sims": len(all_results),
+1114        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
+1115        "config": asdict(cfg),
+1116    }
+1117    with open(output_dir / "phase6_metadata.json", "w") as f:
+1118        json.dump(meta, f, indent=2, default=str)
+1119
+1120    logger.info(f"Phase 5 complete. Results: {output_jsonl}")
+1121    return all_results
+
+ + +

Execute Phase 5 of the simulation: Global 4D parameter sweep with directed hunting.

+ +

This phase performs a comprehensive sensitivity analysis by varying four key +parameters (prey birth/death and predator birth/death) while directed +hunting is enabled. The results allow for a direct comparison with Phase 4 +to determine how predator search behavior shifts the system's critical +thresholds and stability.

+ +
Parameters
+ +
    +
  • cfg (Config): +Configuration object containing simulation hyperparameters, parallel +execution settings, and the fixed grid size for this phase.
  • +
  • output_dir (Path): +Directory where the result JSONL file and execution metadata will +be stored.
  • +
  • logger (logging.Logger): +Logger instance for tracking the progress of the high-volume +simulation batch.
  • +
+ +
Returns
+ +
    +
  • all_results (list of dict): +A list of dictionaries containing metrics for every simulation in +the 4D parameter grid.
  • +
+ +
Notes
+ +

The function utilizes a Cartesian product of parameter ranges to build a +job list of over 13,000 unique parameter sets (multiplied by replicates). +Seeds are uniquely generated to distinguish these runs from other phases +even if parameter values overlap.

+
+ + +
+
+ +
+ + def + main(): + + + +
+ +
def main():
    """
    Organize the predator-prey experimental suite across multiple phases.

    This entry point handles command-line arguments, sets up logging and output
    directories, and executes the requested simulation phases (1-5). It
    supports parallel execution, dry runs for runtime estimation, and
    automated configuration persistence.

    Notes
    -----
    The script dynamically retrieves phase-specific configurations using
    `get_phase_config` and dispatches execution to the corresponding runner
    in the `PHASE_RUNNERS` mapping.
    """
    parser = argparse.ArgumentParser(
        description="Predator-Prey Hydra Effect Experiments",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Phases:
  1  Parameter sweep to find critical point
  2  Self-organization (evolution toward criticality)
  3  Finite-size scaling at critical point
  4  Sensitivity analysis across parameter regimes
  5  Model extensions (directed hunting comparison)
        """,
    )
    # BUG FIX: help text previously said "1-6" although only phases 1-5 exist.
    parser.add_argument(
        "--phase", type=str, required=True, help="Phase to run: 1-5 or 'all'"
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=Path("results"),
        help="Output directory (default: results)",
    )
    parser.add_argument(
        "--cores", type=int, default=-1, help="Number of cores (-1 for all)"
    )
    parser.add_argument(
        "--dry-run", action="store_true", help="Estimate runtime without running"
    )
    args = parser.parse_args()

    # Parse phase argument
    if args.phase.lower() == "all":
        phases = list(PHASE_RUNNERS.keys())
    else:
        try:
            phases = [int(args.phase)]
        except ValueError:
            print(f"Invalid phase: {args.phase}. Use 1-5 or 'all'")
            sys.exit(1)
        # Reject numbers with no registered runner: previously an unknown
        # numeric phase slipped through and raised a bare KeyError later.
        if phases[0] not in PHASE_RUNNERS:
            print(
                f"Unknown phase: {phases[0]}. "
                f"Available: {sorted(PHASE_RUNNERS)} or 'all'"
            )
            sys.exit(1)

    # Setup output directory
    args.output.mkdir(parents=True, exist_ok=True)

    # Setup logging: mirror everything to a file and to the console.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(levelname)s] %(message)s",
        handlers=[
            logging.FileHandler(args.output / "experiments.log"),
            logging.StreamHandler(),
        ],
    )
    logger = logging.getLogger(__name__)

    # Header
    logger.info("=" * 60)
    logger.info("PREDATOR-PREY HYDRA EFFECT EXPERIMENTS")
    logger.info("=" * 60)
    logger.info(f"Phases: {phases}")
    logger.info(f"Output: {args.output}")
    logger.info(f"Cores: {args.cores}")
    logger.info(f"Numba: {'ENABLED' if USE_NUMBA else 'DISABLED'}")

    # Process each phase
    for phase in phases:
        cfg = get_phase_config(phase)
        # --cores wins; otherwise fall back to the SLURM allocation, and
        # finally -1 (joblib: use all available cores).
        cfg.n_jobs = (
            args.cores
            if args.cores > 0
            else int(os.environ.get("SLURM_CPUS_PER_TASK", -1))
        )

        logger.info("")
        logger.info(f"{'='*60}")
        logger.info(f"PHASE {phase}")
        logger.info(f"{'='*60}")

        n_cores = cfg.n_jobs if cfg.n_jobs > 0 else os.cpu_count()
        logger.info(f"Estimated: {cfg.estimate_runtime(n_cores)}")

        if args.dry_run:
            logger.info("Dry run - skipping execution")
            continue

        # Persist the exact config used so results are reproducible.
        with open(args.output / f"phase{phase}_config.json", "w") as f:
            json.dump(asdict(cfg), f, indent=2, default=str)

        # Run phase
        start_time = time.time()
        runner = PHASE_RUNNERS[phase]
        runner(cfg, args.output, logger)
        elapsed = time.time() - start_time

        logger.info(f"Phase {phase} runtime: {elapsed/60:.1f} minutes")

    logger.info("")
    logger.info("=" * 60)
    logger.info("EXPERIMENTS COMPLETE")
    logger.info("=" * 60)
+ + +

Organize the predator-prey experimental suite across multiple phases.

+ +

This entry point handles command-line arguments, sets up logging and output +directories, and executes the requested simulation phases (1-5). It +supports parallel execution, dry runs for runtime estimation, and +automated configuration persistence.

+ +
Notes
+ +

The script dynamically retrieves phase-specific configurations using +get_phase_config and dispatches execution to the corresponding runner +in the PHASE_RUNNERS mapping.

+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/experiments.md b/docs/experiments.md deleted file mode 100644 index 0202b42..0000000 --- a/docs/experiments.md +++ /dev/null @@ -1,233 +0,0 @@ -# Metrics and measures -This is what should be measured each run. These runs can then be further aggregated for final metrics. -### Fixed parameter runs -- Population count (mean and std after warmup) -- Cluster size distribution (means and stds after warmup) -### Evolution runs -It is important to scrutenize whether these should be time-series or steady state values. -- Population count (time series after warmup) -- Cluster size distribution (time series after warmup) -- Prey death rate (time series mean and std after warmup) - -# Experiments -These phases should be completed sequentially, deepening our understanding at each step. The different experiments in each phase should be completed with data from the same runs. -### Phase 1: finding the critical point -- Create bifurcation diagram of mean population count, varying prey death rate - - Look for critical transition -- Create log-log plot of cluster size distribution, varying prey death rate - - Look for power-law -### Phase 2: self-organization -- Measure final prey death rate after evolution - - Look for self-organized criticality: an SOC-system should move towards the critical point -### Phase 3: finite-size scaling -- Sweep of grid sizes at critical point - - Check for power-law cut-offs -### Phase 4: sensitivity analysis -- Show sensitivity of hydra effect varying other parameters - - Investigate the ubiquity of the critical point across parameter regimes -- Show correlation between critical prey death rate and post-evolution prey death rate, varying other parameters - - Again look for self-organized criticality: an SOC-system should move towards the critical point regardless of other parameters -### Phase 5: perturbation analysis -- Create autocorrelation plot of mean population count, following perturbations 
around the critical point - - Look for critical slowing down: perturbations to states closer to the critical point should more slowly return to the steady state - - This requires time series data -### Phase 6: model extensions -- Investigate whether hydra effect and SOC still occur with diffusion and directed reproduction - -# Todo -The main functionality is all complete. Thus, the models folder should be relatively untouched. -However, it is important to standardize experiments and analysis. The following files should be used for this. -These files should contain very little (if any) functionality outside of what is listed here. -### experiments.py -This is the file that will be run on the cluster and should generate all experiment data. -- General config class to setup experiments (grid size, parameters, sweep, evolution, repetitions, etc.) -- Config objects for each phase (see phases above) -- Function that runs the experiment based on a config object (calls run_single_simulation in parallel) - - Should save results to results folder (which can then be used by analysis.py) -- Function that runs a single simulation, saving all necessary results - - This needs functionality to run a predetermined amount of time with a warmup - - And needs functionality to dynamically run until it has found a steady state -- Should not contain any analysis (power-law fitting, bifurcation, etc.) - - Exception to this is the PCF data -- Function to estimate runtime (already exists) -- Should have argparse functionality to choose which phase to execute -- Nice-to-have: argparse functionality to create new config object for arbitrary experiments -### analysis.py -This is the file that will generate our plots and statistics for the analysis. 
-- Function to create bifurcation diagram to find critical point -- Function to create log-log plot to check for power-law - - Should also fit a power-function to the data (see scrips/experiments.fit_truncated_power_law) -- Function to calculate/ show similarity between post-evolution prey death rates and critical points -- Function for sensitivity analysis -- Function for perturbation analysis - ---- - - -## What we are currently collecting: - -### 2D Parameter Sweep - -We map the full phase space to find: -- Hydra regions -- Critical points -- Coexistence boundaries -- Evolutionary advantage zones - -For now at least we sweep: - -``` -prey_birth in [0.10, 0.35] -prey_death in [0.001, 0.10] -``` - -Metrics Collected (so far): - -1. Population Dynamics - -``` - -prey_mean: time-averaged prey pop -prey_std: variability in prey - -# same for predator as above - -prey_survived: did prey persist -pred_survived: did pred perist - -``` - -2. Cluster structure - -``` - -prey_n_clusters: total number of prey clusters -pred_n_clusters: total number of pred clusters -prey_tau: power law exp -prey_s_c: cutoff cluster sizes -pred_tau: pred cluster exp -pred_s_c: pred cutoff - -``` - -3. Order Parameters - -``` -prey_largest_fraction_mean -prey_largest_fraction_std -pred_largest_fraction_mean -prey_percolation_prob: fraction of samples with spanning cluster -pred_percolation_prob: predator percolation prob - -``` - - -4. Spatial Correlations - -``` - -pcf_distances: distance bins in lattice units -pcf_prey_prey_mean: prey-prey correlation function -pcf_pred_pred_mean -pcf_prey_pred_mean -segregation_index: measure spatial mixing -prey_clustering_index: short range prey clustering -pred_clustering_index - -``` - -5. 
Evolutionary dynamics - -``` -evolved_prey_death_mean: time avg evolved mortality rate -evolved_prey_death_std -evolved_prey_death_final -evolve_sd: mutation strength used - -``` - ---- - -### Finite-size scaling - -We choose a fixed point identified in the main simulation run ```(target_prey_birth, target_prey_death)``` ideally near hydra boundary. - - -For selected grid sizes (TBD) we run independent reps for each size. - - -Metrics: - -``` -grid_size -prey_mean, prey_std -prey_survived: bool -prey_largest_fraction: order parameter -prey_percolation_prob -prey_tau: grid size dependent exponent -prey_tau_se: SE on tau -prey_s_c: cutoff scales -``` - ---- - -### Evo Sensitivity - -How does mutation strength affect evolutionary advantage in Hydra regions, speed of adaptation and final evolved mortality rates. - -Again. choose fixed point identified from main analysis. - -Metrics Dict: - -``` -prey_mean: in cell units as the below metrics as well -prey_std -pred_mean -pred_std -prey_survived: bool - -+ same cluster metrics and spatial correlation metrics - - -evolved_prey_death_mean: avg mortality across all prey -evolved_prey_death_std -evolved_prey_death_final -evolve_sd -``` - - -## Additions Required: - -1. Temporal dynamics for time series analysis. Needed to add critical slowing down effect near phase transitions. - -``` -result["prey_timeseries"] = prey_pops[::10] # Subsample every 10 steps -result["pred_timeseries"] = pred_pops[::10] - -``` - -``` -def run_perturbation_experiment(...): - # Save full time series only for these special runs -``` - -2. Snapshots of spatial configurations. This is a costly operation so we need to figure out how and when to do it in the sim. - -3. Saving final grid states? 
- -``` -result["final_grid"] = model.grid.copy() -``` - - - - - - - - - - - - - diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 0000000..4c0c3b7 --- /dev/null +++ b/docs/index.html @@ -0,0 +1,219 @@ + + + + + + + Module List + + + + + + + + + + +
+ + pdoc + + +
+
+ + \ No newline at end of file diff --git a/docs/models/CA.html b/docs/models/CA.html new file mode 100644 index 0000000..220c354 --- /dev/null +++ b/docs/models/CA.html @@ -0,0 +1,2522 @@ + + + + + + + models.CA API documentation + + + + + + + + + +
+
+

+models.CA

+ +

Cellular Automaton Framework

+ +

This module provides the base cellular automaton class and the +Predator-Prey (PP) implementation with Numba-accelerated kernels.

+ +
Classes
+ +

CA: Abstract base class for spatial cellular automata.

+ +

PP: Predator-Prey model with configurable hunting behavior.

+ +
Example
+ +
+
from models.CA import PP
+
+# Basic usage
+model = PP(rows=100, cols=100, densities=(0.3, 0.15), seed=42)
+model.run(steps=1000)
+
+# With evolution enabled
+model = PP(rows=100, cols=100, seed=42)
+model.evolve("prey_death", sd=0.05, min_val=0.01, max_val=0.15)
+model.run(steps=500)
+
+# With directed hunting
+model = PP(rows=100, cols=100, directed_hunting=True, seed=42)
+
+
+
+ + + + + +
  1#!/usr/bin/env python3
+  2"""
+  3Cellular Automaton Framework
+  4============================
+  5
+  6This module provides the base cellular automaton class and the
+  7Predator-Prey (PP) implementation with Numba-accelerated kernels.
+  8
+  9Classes
+ 10-------
+ 11CA: Abstract base class for spatial cellular automata.
+ 12
+ 13PP: Predator-Prey model with configurable hunting behavior.
+ 14
+ 15Example
+ 16-------
+ 17```python
+ 18from models.CA import PP
+ 19
+ 20# Basic usage
+ 21model = PP(rows=100, cols=100, densities=(0.3, 0.15), seed=42)
+ 22model.run(steps=1000)
+ 23
+ 24# With evolution enabled
+ 25model = PP(rows=100, cols=100, seed=42)
+ 26model.evolve("prey_death", sd=0.05, min_val=0.01, max_val=0.15)
+ 27model.run(steps=500)
+ 28
+ 29# With directed hunting
+ 30model = PP(rows=100, cols=100, directed_hunting=True, seed=42)
+ 31```
+ 32"""
+ 33
+ 34from typing import Tuple, Dict, Optional
+ 35
+ 36import numpy as np
+ 37import logging
+ 38import sys
+ 39from pathlib import Path
+ 40
+ 41# Add parent directory to path for imports
+ 42sys.path.insert(0, str(Path(__file__).parent.parent))
+ 43
+ 44from models.numba_optimized import PPKernel, set_numba_seed
+ 45
+ 46# Module logger
+ 47logger = logging.getLogger(__name__)
+ 48
+ 49
+ 50class CA:
+ 51    """
+ 52    Base cellular automaton class for spatial simulations.
+ 53
+ 54    This class provides a framework for multi-species cellular automata with
+ 55    support for global parameters, per-cell evolving parameters, and
+ 56    grid initialization based on density.
+ 57
+ 58    Attributes
+ 59    ----------
+ 60    grid : np.ndarray
+ 61        2D numpy array containing integers in range [0, n_species].
+ 62    params : Dict[str, Any]
+ 63        Global parameters shared by all cells.
+ 64    cell_params : Dict[str, Any]
+ 65        Local per-cell parameters, typically stored as numpy arrays matching the grid shape.
+ 66    neighborhood : str
+ 67        The adjacency rule used ('neumann' or 'moore').
+ 68    generator : np.random.Generator
+ 69        The random number generator instance for reproducibility.
+ 70    species_names : Tuple[str, ...]
+ 71        Human-readable names for each species state.
+ 72    """
+ 73
+ 74    # Default colormap spec (string or sequence); resolved in `visualize` at runtime
+ 75    _default_cmap = "viridis"
+ 76
+ 77    # Read-only accessors for size/densities (protected attributes set in __init__)
+ 78    @property
+ 79    def rows(self) -> int:
+ 80        """int: Number of rows in the grid."""
+ 81        return getattr(self, "_rows")
+ 82
+ 83    @property
+ 84    def cols(self) -> int:
+ 85        """int: Number of columns in the grid."""
+ 86        return getattr(self, "_cols")
+ 87
+ 88    @property
+ 89    def densities(self) -> Tuple[float, ...]:
+ 90        """Tuple[float, ...]: Initial density fraction for each species."""
+ 91        return tuple(getattr(self, "_densities"))
+ 92
+ 93    # make n_species protected with read-only property
+ 94    @property
+ 95    def n_species(self) -> int:
+ 96        """int: Number of distinct species states (excluding empty state 0)."""
+ 97        return int(getattr(self, "_n_species"))
+ 98
+ 99    def __init__(
+100        self,
+101        rows: int,
+102        cols: int,
+103        densities: Tuple[float, ...],
+104        neighborhood: str,
+105        params: Dict[str, object],
+106        cell_params: Dict[str, object],
+107        seed: Optional[int] = None,
+108    ) -> None:
+109        """
+110        Initialize the cellular automaton grid and configurations.
+111
+112        Parameters
+113        ----------
+114        rows : int
+115            Number of rows in the grid (must be > 0).
+116        cols : int
+117            Number of columns in the grid (must be > 0).
+118        densities : Tuple[float, ...]
+119            Initial density for each species. Length defines `n_species`.
+120            Values must sum to <= 1.0.
+121        neighborhood : {'neumann', 'moore'}
+122            Type of neighborhood connectivity.
+123        params : Dict[str, Any]
+124            Initial global parameter values.
+125        cell_params : Dict[str, Any]
+126            Initial local per-cell parameters.
+127        seed : int, optional
+128            Seed for the random number generator.
+129        """
+130        assert isinstance(rows, int) and rows > 0, "rows must be positive int"
+131        assert isinstance(cols, int) and cols > 0, "cols must be positive int"
+132        assert (
+133            isinstance(densities, tuple) and len(densities) > 0
+134        ), "densities must be a non-empty tuple"
+135        for d in densities:
+136            assert (
+137                isinstance(d, (float, int)) and d >= 0
+138            ), "each density must be non-negative"
+139        total_density = float(sum(densities))
+140        assert total_density <= 1.0 + 1e-12, "sum of densities must not exceed 1"
+141        assert neighborhood in (
+142            "neumann",
+143            "moore",
+144        ), "neighborhood must be 'neumann' or 'moore'"
+145
+146        self._n_species: int = len(densities)
+147        # store protected size/density attributes (read-only properties exposed)
+148        self._rows: int = rows
+149        self._cols: int = cols
+150        self._densities: Tuple[float, ...] = tuple(densities)
+151        self.params: Dict[str, object] = dict(params) if params is not None else {}
+152        self.cell_params: Dict[str, object] = (
+153            dict(cell_params) if cell_params is not None else {}
+154        )
+155
+156        # per-parameter evolve metadata and evolution state
+157        # maps parameter name -> dict with keys 'sd','min','max','species'
+158        self._evolve_info: Dict[str, Dict[str, float]] = {}
+159        # when True, inheritance uses deterministic copy from parent (no mutation)
+160        self._evolution_stopped: bool = False
+161
+162        # human-readable species names (useful for visualization). Default
+163        # generates generic names based on n_species; subclasses may override.
+164        self.species_names: Tuple[str, ...] = tuple(
+165            f"species{i+1}" for i in range(self._n_species)
+166        )
+167        self.neighborhood: str = neighborhood
+168        self.generator: np.random.Generator = np.random.default_rng(seed)
+169
+170        self.grid: np.ndarray = np.zeros((rows, cols), dtype=int)
+171
+172        total_cells = rows * cols
+173        # Fill grid with species states 1..n_species according to densities.
+174        for i, dens in enumerate(densities):
+175            if dens <= 0:
+176                continue
+177            n_to_fill = int(round(total_cells * float(dens)))
+178            if n_to_fill <= 0:
+179                continue
+180            empty_flat = np.flatnonzero(self.grid.ravel() == 0)
+181            if len(empty_flat) == 0:
+182                break
+183            n_choice = min(n_to_fill, len(empty_flat))
+184            chosen = self.generator.choice(empty_flat, size=n_choice, replace=False)
+185            # assign chosen flattened indices to state i+1
+186            r = chosen // cols
+187            c = chosen % cols
+188            self.grid[r, c] = i + 1
+189
+190    def validate(self) -> None:
+191        """
+192        Validate core CA invariants and grid dimensions.
+193
+194        Checks that the neighborhood is valid, the grid matches initialized dimensions,
+195        and that local parameter arrays match the grid shape.
+196
+197        Raises
+198        ------
+199        ValueError
+200            If any structural invariant is violated.
+201        """
+202        if self.neighborhood not in ("neumann", "moore"):
+203            raise ValueError("neighborhood must be 'neumann' or 'moore'")
+204
+205        expected_shape = (int(getattr(self, "_rows")), int(getattr(self, "_cols")))
+206        if self.grid.shape != expected_shape:
+207            raise ValueError(
+208                f"grid shape {self.grid.shape} does not match expected {expected_shape}"
+209            )
+210
+211        # Ensure any array in cell_params matches grid shape
+212        for k, v in (self.cell_params or {}).items():
+213            if isinstance(v, np.ndarray) and v.shape != expected_shape:
+214                raise ValueError(f"cell_params['{k}'] must have shape equal to grid")
+215
+216    def _infer_species_from_param_name(self, param_name: str) -> Optional[int]:
+217        """
+218        Infer the 1-based species index from a parameter name using `species_names`.
+219
+220        This method checks if the given parameter name starts with any of the
+221        defined species names followed by an underscore (e.g., 'prey_birth').
+222        It is used to automatically route global parameters to the correct
+223        species' local parameter arrays.
+224
+225        Parameters
+226        ----------
+227        param_name : str
+228            The name of the parameter to check.
+229
+230        Returns
+231        -------
+232        Optional[int]
+233            The 1-based index of the species if a matching prefix is found;
+234            otherwise, None.
+235
+236        Notes
+237        -----
+238        The method expects `self.species_names` to be a collection of strings.
+239        If `param_name` is not a string or no match is found, it returns None.
+240        """
+241        if not isinstance(param_name, str):
+242            return None
+243        for idx, name in enumerate(self.species_names or ()):  # type: ignore
+244            if isinstance(name, str) and param_name.startswith(f"{name}_"):
+245                return idx + 1
+246        return None
+247
+248    def evolve(
+249        self,
+250        param: str,
+251        species: Optional[int] = None,
+252        sd: float = 0.05,
+253        min_val: Optional[float] = None,
+254        max_val: Optional[float] = None,
+255    ) -> None:
+256        """
+257        Enable per-cell evolution for a specific parameter on a given species.
+258
+259        This method initializes a spatial parameter array (local parameter map)
+260        for a global parameter. It allows individual cells to carry their own
+261        values for that parameter, which can then mutate and evolve during
+262        the simulation.
+263
+264        Parameters
+265        ----------
+266        param : str
+267            The name of the global parameter to enable for evolution.
+268            Must exist in `self.params`.
+269        species : int, optional
+270            The 1-based index of the species to which this parameter applies.
+271            If None, the method attempts to infer the species from the
+272            parameter name prefix.
+273        sd : float, default 0.05
+274            The standard deviation of the Gaussian mutation applied during
+275            inheritance/reproduction.
+276        min_val : float, optional
+277            The minimum allowable value for the parameter (clamping).
+278            Defaults to 0.01 if not provided.
+279        max_val : float, optional
+280            The maximum allowable value for the parameter (clamping).
+281            Defaults to 0.99 if not provided.
+282
+283        Raises
+284        ------
+285        ValueError
+286            If the parameter is not in `self.params`, the species cannot be
+287            inferred, or the species index is out of bounds.
+288
+289        Notes
+290        -----
+291        The local parameter is stored in `self.cell_params` as a 2D numpy
+292        array initialized with the current global value for all cells of
+293        the target species, and `NaN` elsewhere.
+294        """
+295        if min_val is None:
+296            min_val = 0.01
+297        if max_val is None:
+298            max_val = 0.99
+299        if param not in self.params:
+300            raise ValueError(f"Unknown parameter '{param}'")
+301        if species is None:
+302            species = self._infer_species_from_param_name(param)
+303            if species is None:
+304                raise ValueError(
+305                    "species must be provided or inferable from param name and species_names"
+306                )
+307        if not isinstance(species, int) or species <= 0 or species > self._n_species:
+308            raise ValueError("species must be an integer between 1 and n_species")
+309
+310        arr = np.full(self.grid.shape, np.nan, dtype=float)
+311        mask = self.grid == int(species)
+312        arr[mask] = float(self.params[param])
+313        self.cell_params[param] = arr
+314        self._evolve_info[param] = {
+315            "sd": float(sd),
+316            "min": float(min_val),
+317            "max": float(max_val),
+318            "species": int(species),
+319        }
+320
+321    def update(self) -> None:
+322        """
+323        Perform one update step of the cellular automaton.
+324
+325        This is an abstract method that defines the transition rules of the
+326        system. It must be implemented by concrete subclasses to specify
+327        how cell states and parameters change over time based on their
+328        current state and neighborhood.
+329
+330        Raises
+331        ------
+332        NotImplementedError
+333            If called directly on the base class instead of an implementation.
+334
+335        Returns
+336        -------
+337        None
+338
+339        Notes
+340        -----
+341        In a typical implementation, this method handles the logic for
+342        stochastic transitions, movement, or predator-prey interactions.
+343        """
+344        raise NotImplementedError(
+345            "Override update() in a subclass to define CA dynamics"
+346        )
+347
+348    def run(
+349        self,
+350        steps: int,
+351        stop_evolution_at: Optional[int] = None,
+352        snapshot_iters: Optional[list] = None,
+353    ) -> None:
+354        """
+355        Execute the cellular automaton simulation for a specified number of steps.
+356
+357        This method drives the simulation loop, calling `update()` at each
+358        iteration. It manages visualization updates, directory creation for
+359        data persistence, and handles the freezing of evolving parameters
+360        at a specific time step.
+361
+362        Parameters
+363        ----------
+364        steps : int
+365            The total number of iterations to run (must be non-negative).
+366        stop_evolution_at : int, optional
+367            The 1-based iteration index after which parameter mutation is
+368            disabled. Useful for observing system stability after a period
+369            of adaptation.
+370        snapshot_iters : List[int], optional
+371            A list of specific 1-based iteration indices at which to save
+372            the current grid state to the results directory.
+373
+374        Returns
+375        -------
+376        None
+377
+378        Notes
+379        -----
+380        If snapshots are requested, a results directory is automatically created
+381        using a timestamped subfolder (e.g., 'results/run-1700000000/').
+382        Visualization errors are logged but do not terminate the simulation.
+383        """
+384        assert (
+385            isinstance(steps, int) and steps >= 0
+386        ), "steps must be a non-negative integer"
+387
+388        # normalize snapshot iteration list
+389        snapshot_set = set(snapshot_iters) if snapshot_iters is not None else set()
+390
+391        for i in range(steps):
+392            self.update()
+393            # Update visualization if enabled every `interval` iterations
+394            if getattr(self, "_viz_on", False):
+395                # iteration number is 1-based for display
+396                try:
+397                    self._viz_update(i + 1)
+398                except Exception:
+399                    # Log visualization errors but don't stop the simulation
+400                    logger.exception(
+401                        "Visualization update failed at iteration %d", i + 1
+402                    )
+403
+404            # create snapshots if requested at this iteration
+405            if (i + 1) in snapshot_set:
+406                try:
+407                    # create snapshot folder if not present
+408                    if (
+409                        not hasattr(self, "_viz_snapshot_dir")
+410                        or self._viz_snapshot_dir is None
+411                    ):
+412                        import os, time
+413
+414                        base = "results"
+415                        ts = int(time.time())
+416                        run_folder = f"run-{ts}"
+417                        full = os.path.join(base, run_folder)
+418                        os.makedirs(full, exist_ok=True)
+419                        self._viz_snapshot_dir = full
+420                    self._viz_save_snapshot(i + 1)
+421                except (OSError, PermissionError):
+422                    logger.exception(
+423                        "Failed to create or write snapshot at iteration %d", i + 1
+424                    )
+425
+426            # stop evolution at specified time-step (disable further evolution)
+427            if stop_evolution_at is not None and (i + 1) == int(stop_evolution_at):
+428                # mark evolution as stopped; do not erase evolve metadata so
+429                # deterministic inheritance can still use parent values
+430                self._evolution_stopped = True
+431
+432
class PP(CA):
    """
    Predator-Prey Cellular Automaton model with Numba-accelerated kernels.

    This model simulates a stochastic predator-prey system where species
    interact on a 2D grid. It supports evolving per-cell death rates,
    periodic boundary conditions, and both random and directed hunting
    behaviors.

    Parameters
    ----------
    rows : int, default 10
        Number of rows in the simulation grid.
    cols : int, default 10
        Number of columns in the simulation grid.
    densities : Tuple[float, ...], default (0.2, 0.1)
        Initial population densities for (prey, predator).
    neighborhood : {'moore', 'neumann'}, default 'moore'
        The neighborhood type for cell interactions.
    params : Dict[str, object], optional
        Global parameters: "prey_death", "predator_death", "prey_birth",
        "predator_birth". Each value must be a number in [0, 1].
    cell_params : Dict[str, object], optional
        Initial local parameter maps (2D arrays).
    seed : int, optional
        Random seed for reproducibility (also seeds the Numba kernels).
    synchronous : bool, default True
        If True, updates the entire grid at once. If False, updates
        cells asynchronously.
    directed_hunting : bool, default False
        If True, predators selectively hunt prey rather than choosing
        neighbors at random.

    Attributes
    ----------
    species_names : Tuple[str, ...]
        Labels for the species ('prey', 'predator').
    synchronous : bool
        Current update mode.
    directed_hunting : bool
        Current hunting strategy logic.
    """

    # Default colors: 0=empty black, 1=prey green, 2=predator red
    _default_cmap = ("black", "green", "red")

    def __init__(
        self,
        rows: int = 10,
        cols: int = 10,
        densities: Tuple[float, ...] = (0.2, 0.1),
        neighborhood: str = "moore",
        params: Optional[Dict[str, object]] = None,
        cell_params: Optional[Dict[str, object]] = None,
        seed: Optional[int] = None,
        synchronous: bool = True,
        directed_hunting: bool = False,  # New directed hunting option
    ) -> None:
        """
        Initialize the Predator-Prey CA with validated parameters and kernels.

        Raises
        ------
        TypeError
            If `params` is neither a dict nor None, or a value is non-numeric.
        ValueError
            If `params` contains unknown keys or values outside [0, 1].
        """
        # Allowed params and defaults
        _defaults = {
            "prey_death": 0.05,
            "predator_death": 0.1,
            "prey_birth": 0.25,
            "predator_birth": 0.2,
        }

        # Validate user-supplied params: only allowed keys
        if params is None:
            merged_params = dict(_defaults)
        else:
            if not isinstance(params, dict):
                raise TypeError("params must be a dict or None")
            extra = set(params.keys()) - set(_defaults.keys())
            if extra:
                raise ValueError(f"Unexpected parameter keys: {sorted(list(extra))}")
            # Do not override user-set values: start from defaults then update with user values
            merged_params = dict(_defaults)
            merged_params.update(params)

        # Validate numerical ranges
        for k, v in merged_params.items():
            if not isinstance(v, (int, float)):
                raise TypeError(f"Parameter '{k}' must be a number between 0 and 1")
            if not (0.0 <= float(v) <= 1.0):
                raise ValueError(f"Parameter '{k}' must be between 0 and 1")

        # Call base initializer with merged params
        super().__init__(
            rows, cols, densities, neighborhood, merged_params, cell_params, seed
        )

        self.synchronous: bool = bool(synchronous)
        self.directed_hunting: bool = bool(directed_hunting)

        # set human-friendly species names for PP
        self.species_names = ("prey", "predator")

        if seed is not None:
            # This sets the seed for all @njit functions globally
            set_numba_seed(seed)

        self._kernel = PPKernel(
            rows, cols, neighborhood, directed_hunting=directed_hunting
        )

    # Remove PP-specific evolve wrapper; use CA.evolve with optional species

    def validate(self) -> None:
        """
        Validate Predator-Prey specific invariants and spatial parameter arrays.

        Extends the base CA validation to ensure that numerical parameters are
        within the [0, 1] probability range and that evolved parameter maps
        (e.g., prey_death) correctly align with the species locations.

        Raises
        ------
        ValueError
            If grid shapes, parameter ranges, or species masks are inconsistent.
        TypeError
            If parameters are non-numeric.
        """
        super().validate()

        # Validate global params
        for k, v in (self.params or {}).items():
            if not isinstance(v, (int, float)):
                raise TypeError(f"Parameter '{k}' must be numeric")
            if not (0.0 <= float(v) <= 1.0):
                raise ValueError(f"Parameter '{k}' must be between 0 and 1")

        # Validate per-cell evolve arrays
        for pname, meta in (self._evolve_info or {}).items():
            arr = self.cell_params.get(pname)
            if not isinstance(arr, np.ndarray):
                # absent or non-array per-cell params are allowed; skip
                continue
            # shape already checked in super().validate(), but be explicit
            if arr.shape != self.grid.shape:
                raise ValueError(f"cell_params['{pname}'] must match grid shape")
            # expected non-NaN positions correspond to species stored in metadata
            species = None
            if isinstance(meta, dict) and "species" in meta:
                species = int(meta.get("species"))
            else:
                # try to infer species from parameter name using species_names
                species = self._infer_species_from_param_name(pname)
                if species is None:
                    raise ValueError(
                        f"cell_params['{pname}'] missing species metadata and could not infer from name"
                    )
            nonnan = ~np.isnan(arr)
            expected = self.grid == species
            if not np.array_equal(nonnan, expected):
                raise ValueError(
                    f"cell_params['{pname}'] non-NaN entries must match positions of species {species}"
                )
            # values must be within configured range where not NaN
            mn = float(meta.get("min", 0.0))
            mx = float(meta.get("max", 1.0))
            vals = arr[~np.isnan(arr)]
            if vals.size > 0:
                if np.any(vals < mn) or np.any(vals > mx):
                    raise ValueError(
                        f"cell_params['{pname}'] contains values outside [{mn}, {mx}]"
                    )

    def update_async(self) -> None:
        """
        Execute an asynchronous update using the optimized Numba kernel.

        This method retrieves the evolved parameter maps and delegates the
        stochastic transitions to the `PPKernel`. Asynchronous updates
        typically handle cell-by-cell logic where changes can be
        immediately visible to neighbors.
        """
        # Get the evolved prey death map
        # Fallback to a full array of the global param if it doesn't exist yet
        p_death_arr = self.cell_params.get("prey_death")
        if p_death_arr is None:
            p_death_arr = np.full(
                self.grid.shape, self.params["prey_death"], dtype=np.float64
            )

        # NOTE(review): these fallback mutation bounds (0.001-0.1) differ from
        # the CA.evolve defaults (0.01-0.99) -- confirm this is intentional.
        meta = self._evolve_info.get(
            "prey_death", {"sd": 0.05, "min": 0.001, "max": 0.1}
        )

        # Call the optimized kernel (uses pre-allocated buffers)
        self._kernel.update(
            self.grid,
            p_death_arr,
            float(self.params["prey_birth"]),
            float(self.params["prey_death"]),
            float(self.params["predator_birth"]),
            float(self.params["predator_death"]),
            float(meta["sd"]),
            float(meta["min"]),
            float(meta["max"]),
            self._evolution_stopped,
        )

    def update(self) -> None:
        """
        Dispatch the simulation step based on the configured update mode.

        Notes
        -----
        NOTE(review): `self.synchronous` is not consulted here -- both modes
        currently delegate to the asynchronous kernel; confirm this is the
        intended behavior.
        """
        self.update_async()
+
+ + +
+
+ +
+ + class + CA: + + + +
+ +
 51class CA:
+ 52    """
+ 53    Base cellular automaton class for spatial simulations.
+ 54
+ 55    This class provides a framework for multi-species cellular automata with
+ 56    support for global parameters, per-cell evolving parameters, and
+ 57    grid initialization based on density.
+ 58
+ 59    Attributes
+ 60    ----------
+ 61    grid : np.ndarray
+ 62        2D numpy array containing integers in range [0, n_species].
+ 63    params : Dict[str, Any]
+ 64        Global parameters shared by all cells.
+ 65    cell_params : Dict[str, Any]
+ 66        Local per-cell parameters, typically stored as numpy arrays matching the grid shape.
+ 67    neighborhood : str
+ 68        The adjacency rule used ('neumann' or 'moore').
+ 69    generator : np.random.Generator
+ 70        The random number generator instance for reproducibility.
+ 71    species_names : Tuple[str, ...]
+ 72        Human-readable names for each species state.
+ 73    """
+ 74
+ 75    # Default colormap spec (string or sequence); resolved in `visualize` at runtime
+ 76    _default_cmap = "viridis"
+ 77
+ 78    # Read-only accessors for size/densities (protected attributes set in __init__)
+ 79    @property
+ 80    def rows(self) -> int:
+ 81        """int: Number of rows in the grid."""
+ 82        return getattr(self, "_rows")
+ 83
+ 84    @property
+ 85    def cols(self) -> int:
+ 86        """int: Number of columns in the grid."""
+ 87        return getattr(self, "_cols")
+ 88
+ 89    @property
+ 90    def densities(self) -> Tuple[float, ...]:
+ 91        """Tuple[float, ...]: Initial density fraction for each species."""
+ 92        return tuple(getattr(self, "_densities"))
+ 93
+ 94    # make n_species protected with read-only property
+ 95    @property
+ 96    def n_species(self) -> int:
+ 97        """int: Number of distinct species states (excluding empty state 0)."""
+ 98        return int(getattr(self, "_n_species"))
+ 99
+100    def __init__(
+101        self,
+102        rows: int,
+103        cols: int,
+104        densities: Tuple[float, ...],
+105        neighborhood: str,
+106        params: Dict[str, object],
+107        cell_params: Dict[str, object],
+108        seed: Optional[int] = None,
+109    ) -> None:
+110        """
+111        Initialize the cellular automaton grid and configurations.
+112
+113        Parameters
+114        ----------
+115        rows : int
+116            Number of rows in the grid (must be > 0).
+117        cols : int
+118            Number of columns in the grid (must be > 0).
+119        densities : Tuple[float, ...]
+120            Initial density for each species. Length defines `n_species`.
+121            Values must sum to <= 1.0.
+122        neighborhood : {'neumann', 'moore'}
+123            Type of neighborhood connectivity.
+124        params : Dict[str, Any]
+125            Initial global parameter values.
+126        cell_params : Dict[str, Any]
+127            Initial local per-cell parameters.
+128        seed : int, optional
+129            Seed for the random number generator.
+130        """
+131        assert isinstance(rows, int) and rows > 0, "rows must be positive int"
+132        assert isinstance(cols, int) and cols > 0, "cols must be positive int"
+133        assert (
+134            isinstance(densities, tuple) and len(densities) > 0
+135        ), "densities must be a non-empty tuple"
+136        for d in densities:
+137            assert (
+138                isinstance(d, (float, int)) and d >= 0
+139            ), "each density must be non-negative"
+140        total_density = float(sum(densities))
+141        assert total_density <= 1.0 + 1e-12, "sum of densities must not exceed 1"
+142        assert neighborhood in (
+143            "neumann",
+144            "moore",
+145        ), "neighborhood must be 'neumann' or 'moore'"
+146
+147        self._n_species: int = len(densities)
+148        # store protected size/density attributes (read-only properties exposed)
+149        self._rows: int = rows
+150        self._cols: int = cols
+151        self._densities: Tuple[float, ...] = tuple(densities)
+152        self.params: Dict[str, object] = dict(params) if params is not None else {}
+153        self.cell_params: Dict[str, object] = (
+154            dict(cell_params) if cell_params is not None else {}
+155        )
+156
+157        # per-parameter evolve metadata and evolution state
+158        # maps parameter name -> dict with keys 'sd','min','max','species'
+159        self._evolve_info: Dict[str, Dict[str, float]] = {}
+160        # when True, inheritance uses deterministic copy from parent (no mutation)
+161        self._evolution_stopped: bool = False
+162
+163        # human-readable species names (useful for visualization). Default
+164        # generates generic names based on n_species; subclasses may override.
+165        self.species_names: Tuple[str, ...] = tuple(
+166            f"species{i+1}" for i in range(self._n_species)
+167        )
+168        self.neighborhood: str = neighborhood
+169        self.generator: np.random.Generator = np.random.default_rng(seed)
+170
+171        self.grid: np.ndarray = np.zeros((rows, cols), dtype=int)
+172
+173        total_cells = rows * cols
+174        # Fill grid with species states 1..n_species according to densities.
+175        for i, dens in enumerate(densities):
+176            if dens <= 0:
+177                continue
+178            n_to_fill = int(round(total_cells * float(dens)))
+179            if n_to_fill <= 0:
+180                continue
+181            empty_flat = np.flatnonzero(self.grid.ravel() == 0)
+182            if len(empty_flat) == 0:
+183                break
+184            n_choice = min(n_to_fill, len(empty_flat))
+185            chosen = self.generator.choice(empty_flat, size=n_choice, replace=False)
+186            # assign chosen flattened indices to state i+1
+187            r = chosen // cols
+188            c = chosen % cols
+189            self.grid[r, c] = i + 1
+190
+191    def validate(self) -> None:
+192        """
+193        Validate core CA invariants and grid dimensions.
+194
+195        Checks that the neighborhood is valid, the grid matches initialized dimensions,
+196        and that local parameter arrays match the grid shape.
+197
+198        Raises
+199        ------
+200        ValueError
+201            If any structural invariant is violated.
+202        """
+203        if self.neighborhood not in ("neumann", "moore"):
+204            raise ValueError("neighborhood must be 'neumann' or 'moore'")
+205
+206        expected_shape = (int(getattr(self, "_rows")), int(getattr(self, "_cols")))
+207        if self.grid.shape != expected_shape:
+208            raise ValueError(
+209                f"grid shape {self.grid.shape} does not match expected {expected_shape}"
+210            )
+211
+212        # Ensure any array in cell_params matches grid shape
+213        for k, v in (self.cell_params or {}).items():
+214            if isinstance(v, np.ndarray) and v.shape != expected_shape:
+215                raise ValueError(f"cell_params['{k}'] must have shape equal to grid")
+216
+217    def _infer_species_from_param_name(self, param_name: str) -> Optional[int]:
+218        """
+219        Infer the 1-based species index from a parameter name using `species_names`.
+220
+221        This method checks if the given parameter name starts with any of the
+222        defined species names followed by an underscore (e.g., 'prey_birth').
+223        It is used to automatically route global parameters to the correct
+224        species' local parameter arrays.
+225
+226        Parameters
+227        ----------
+228        param_name : str
+229            The name of the parameter to check.
+230
+231        Returns
+232        -------
+233        Optional[int]
+234            The 1-based index of the species if a matching prefix is found;
+235            otherwise, None.
+236
+237        Notes
+238        -----
+239        The method expects `self.species_names` to be a collection of strings.
+240        If `param_name` is not a string or no match is found, it returns None.
+241        """
+242        if not isinstance(param_name, str):
+243            return None
+244        for idx, name in enumerate(self.species_names or ()):  # type: ignore
+245            if isinstance(name, str) and param_name.startswith(f"{name}_"):
+246                return idx + 1
+247        return None
+248
+249    def evolve(
+250        self,
+251        param: str,
+252        species: Optional[int] = None,
+253        sd: float = 0.05,
+254        min_val: Optional[float] = None,
+255        max_val: Optional[float] = None,
+256    ) -> None:
+257        """
+258        Enable per-cell evolution for a specific parameter on a given species.
+259
+260        This method initializes a spatial parameter array (local parameter map)
+261        for a global parameter. It allows individual cells to carry their own
+262        values for that parameter, which can then mutate and evolve during
+263        the simulation.
+264
+265        Parameters
+266        ----------
+267        param : str
+268            The name of the global parameter to enable for evolution.
+269            Must exist in `self.params`.
+270        species : int, optional
+271            The 1-based index of the species to which this parameter applies.
+272            If None, the method attempts to infer the species from the
+273            parameter name prefix.
+274        sd : float, default 0.05
+275            The standard deviation of the Gaussian mutation applied during
+276            inheritance/reproduction.
+277        min_val : float, optional
+278            The minimum allowable value for the parameter (clamping).
+279            Defaults to 0.01 if not provided.
+280        max_val : float, optional
+281            The maximum allowable value for the parameter (clamping).
+282            Defaults to 0.99 if not provided.
+283
+284        Raises
+285        ------
+286        ValueError
+287            If the parameter is not in `self.params`, the species cannot be
+288            inferred, or the species index is out of bounds.
+289
+290        Notes
+291        -----
+292        The local parameter is stored in `self.cell_params` as a 2D numpy
+293        array initialized with the current global value for all cells of
+294        the target species, and `NaN` elsewhere.
+295        """
+296        if min_val is None:
+297            min_val = 0.01
+298        if max_val is None:
+299            max_val = 0.99
+300        if param not in self.params:
+301            raise ValueError(f"Unknown parameter '{param}'")
+302        if species is None:
+303            species = self._infer_species_from_param_name(param)
+304            if species is None:
+305                raise ValueError(
+306                    "species must be provided or inferable from param name and species_names"
+307                )
+308        if not isinstance(species, int) or species <= 0 or species > self._n_species:
+309            raise ValueError("species must be an integer between 1 and n_species")
+310
+311        arr = np.full(self.grid.shape, np.nan, dtype=float)
+312        mask = self.grid == int(species)
+313        arr[mask] = float(self.params[param])
+314        self.cell_params[param] = arr
+315        self._evolve_info[param] = {
+316            "sd": float(sd),
+317            "min": float(min_val),
+318            "max": float(max_val),
+319            "species": int(species),
+320        }
+321
+322    def update(self) -> None:
+323        """
+324        Perform one update step of the cellular automaton.
+325
+326        This is an abstract method that defines the transition rules of the
+327        system. It must be implemented by concrete subclasses to specify
+328        how cell states and parameters change over time based on their
+329        current state and neighborhood.
+330
+331        Raises
+332        ------
+333        NotImplementedError
+334            If called directly on the base class instead of an implementation.
+335
+336        Returns
+337        -------
+338        None
+339
+340        Notes
+341        -----
+342        In a typical implementation, this method handles the logic for
+343        stochastic transitions, movement, or predator-prey interactions.
+344        """
+345        raise NotImplementedError(
+346            "Override update() in a subclass to define CA dynamics"
+347        )
+348
+349    def run(
+350        self,
+351        steps: int,
+352        stop_evolution_at: Optional[int] = None,
+353        snapshot_iters: Optional[list] = None,
+354    ) -> None:
+355        """
+356        Execute the cellular automaton simulation for a specified number of steps.
+357
+358        This method drives the simulation loop, calling `update()` at each
+359        iteration. It manages visualization updates, directory creation for
+360        data persistence, and handles the freezing of evolving parameters
+361        at a specific time step.
+362
+363        Parameters
+364        ----------
+365        steps : int
+366            The total number of iterations to run (must be non-negative).
+367        stop_evolution_at : int, optional
+368            The 1-based iteration index after which parameter mutation is
+369            disabled. Useful for observing system stability after a period
+370            of adaptation.
+371        snapshot_iters : List[int], optional
+372            A list of specific 1-based iteration indices at which to save
+373            the current grid state to the results directory.
+374
+375        Returns
+376        -------
+377        None
+378
+379        Notes
+380        -----
+381        If snapshots are requested, a results directory is automatically created
+382        using a timestamped subfolder (e.g., 'results/run-1700000000/').
+383        Visualization errors are logged but do not terminate the simulation.
+384        """
+385        assert (
+386            isinstance(steps, int) and steps >= 0
+387        ), "steps must be a non-negative integer"
+388
+389        # normalize snapshot iteration list
+390        snapshot_set = set(snapshot_iters) if snapshot_iters is not None else set()
+391
+392        for i in range(steps):
+393            self.update()
+394            # Update visualization if enabled every `interval` iterations
+395            if getattr(self, "_viz_on", False):
+396                # iteration number is 1-based for display
+397                try:
+398                    self._viz_update(i + 1)
+399                except Exception:
+400                    # Log visualization errors but don't stop the simulation
+401                    logger.exception(
+402                        "Visualization update failed at iteration %d", i + 1
+403                    )
+404
+405            # create snapshots if requested at this iteration
+406            if (i + 1) in snapshot_set:
+407                try:
+408                    # create snapshot folder if not present
+409                    if (
+410                        not hasattr(self, "_viz_snapshot_dir")
+411                        or self._viz_snapshot_dir is None
+412                    ):
+413                        import os, time
+414
+415                        base = "results"
+416                        ts = int(time.time())
+417                        run_folder = f"run-{ts}"
+418                        full = os.path.join(base, run_folder)
+419                        os.makedirs(full, exist_ok=True)
+420                        self._viz_snapshot_dir = full
+421                    self._viz_save_snapshot(i + 1)
+422                except (OSError, PermissionError):
+423                    logger.exception(
+424                        "Failed to create or write snapshot at iteration %d", i + 1
+425                    )
+426
+427            # stop evolution at specified time-step (disable further evolution)
+428            if stop_evolution_at is not None and (i + 1) == int(stop_evolution_at):
+429                # mark evolution as stopped; do not erase evolve metadata so
+430                # deterministic inheritance can still use parent values
+431                self._evolution_stopped = True
+
+ + +

Base cellular automaton class for spatial simulations.

+ +

This class provides a framework for multi-species cellular automata with +support for global parameters, per-cell evolving parameters, and +grid initialization based on density.

+ +
Attributes
+ +
    +
  • grid (np.ndarray): +2D numpy array containing integers in range [0, n_species].
  • +
  • params (Dict[str, Any]): +Global parameters shared by all cells.
  • +
  • cell_params (Dict[str, Any]): +Local per-cell parameters, typically stored as numpy arrays matching the grid shape.
  • +
  • neighborhood (str): +The adjacency rule used ('neumann' or 'moore').
  • +
  • generator (np.random.Generator): +The random number generator instance for reproducibility.
  • +
  • species_names (Tuple[str, ...]): +Human-readable names for each species state.
  • +
+
+ + +
+ +
+ + CA( rows: int, cols: int, densities: Tuple[float, ...], neighborhood: str, params: Dict[str, object], cell_params: Dict[str, object], seed: Optional[int] = None) + + + +
+ +
100    def __init__(
+101        self,
+102        rows: int,
+103        cols: int,
+104        densities: Tuple[float, ...],
+105        neighborhood: str,
+106        params: Dict[str, object],
+107        cell_params: Dict[str, object],
+108        seed: Optional[int] = None,
+109    ) -> None:
+110        """
+111        Initialize the cellular automaton grid and configurations.
+112
+113        Parameters
+114        ----------
+115        rows : int
+116            Number of rows in the grid (must be > 0).
+117        cols : int
+118            Number of columns in the grid (must be > 0).
+119        densities : Tuple[float, ...]
+120            Initial density for each species. Length defines `n_species`.
+121            Values must sum to <= 1.0.
+122        neighborhood : {'neumann', 'moore'}
+123            Type of neighborhood connectivity.
+124        params : Dict[str, Any]
+125            Initial global parameter values.
+126        cell_params : Dict[str, Any]
+127            Initial local per-cell parameters.
+128        seed : int, optional
+129            Seed for the random number generator.
+130        """
+131        assert isinstance(rows, int) and rows > 0, "rows must be positive int"
+132        assert isinstance(cols, int) and cols > 0, "cols must be positive int"
+133        assert (
+134            isinstance(densities, tuple) and len(densities) > 0
+135        ), "densities must be a non-empty tuple"
+136        for d in densities:
+137            assert (
+138                isinstance(d, (float, int)) and d >= 0
+139            ), "each density must be non-negative"
+140        total_density = float(sum(densities))
+141        assert total_density <= 1.0 + 1e-12, "sum of densities must not exceed 1"
+142        assert neighborhood in (
+143            "neumann",
+144            "moore",
+145        ), "neighborhood must be 'neumann' or 'moore'"
+146
+147        self._n_species: int = len(densities)
+148        # store protected size/density attributes (read-only properties exposed)
+149        self._rows: int = rows
+150        self._cols: int = cols
+151        self._densities: Tuple[float, ...] = tuple(densities)
+152        self.params: Dict[str, object] = dict(params) if params is not None else {}
+153        self.cell_params: Dict[str, object] = (
+154            dict(cell_params) if cell_params is not None else {}
+155        )
+156
+157        # per-parameter evolve metadata and evolution state
+158        # maps parameter name -> dict with keys 'sd','min','max','species'
+159        self._evolve_info: Dict[str, Dict[str, float]] = {}
+160        # when True, inheritance uses deterministic copy from parent (no mutation)
+161        self._evolution_stopped: bool = False
+162
+163        # human-readable species names (useful for visualization). Default
+164        # generates generic names based on n_species; subclasses may override.
+165        self.species_names: Tuple[str, ...] = tuple(
+166            f"species{i+1}" for i in range(self._n_species)
+167        )
+168        self.neighborhood: str = neighborhood
+169        self.generator: np.random.Generator = np.random.default_rng(seed)
+170
+171        self.grid: np.ndarray = np.zeros((rows, cols), dtype=int)
+172
+173        total_cells = rows * cols
+174        # Fill grid with species states 1..n_species according to densities.
+175        for i, dens in enumerate(densities):
+176            if dens <= 0:
+177                continue
+178            n_to_fill = int(round(total_cells * float(dens)))
+179            if n_to_fill <= 0:
+180                continue
+181            empty_flat = np.flatnonzero(self.grid.ravel() == 0)
+182            if len(empty_flat) == 0:
+183                break
+184            n_choice = min(n_to_fill, len(empty_flat))
+185            chosen = self.generator.choice(empty_flat, size=n_choice, replace=False)
+186            # assign chosen flattened indices to state i+1
+187            r = chosen // cols
+188            c = chosen % cols
+189            self.grid[r, c] = i + 1
+
+ + +

Initialize the cellular automaton grid and configurations.

+ +
Parameters
+ +
    +
  • rows (int): +Number of rows in the grid (must be > 0).
  • +
  • cols (int): +Number of columns in the grid (must be > 0).
  • +
  • densities (Tuple[float, ...]): +Initial density for each species. Length defines n_species. +Values must sum to <= 1.0.
  • +
  • neighborhood ({'neumann', 'moore'}): +Type of neighborhood connectivity.
  • +
  • params (Dict[str, Any]): +Initial global parameter values.
  • +
  • cell_params (Dict[str, Any]): +Initial local per-cell parameters.
  • +
  • seed (int, optional): +Seed for the random number generator.
  • +
+
+ + +
+
+ +
+ rows: int + + + +
+ +
79    @property
+80    def rows(self) -> int:
+81        """int: Number of rows in the grid."""
+82        return getattr(self, "_rows")
+
+ + +

int: Number of rows in the grid.

+
+ + +
+
+ +
+ cols: int + + + +
+ +
84    @property
+85    def cols(self) -> int:
+86        """int: Number of columns in the grid."""
+87        return getattr(self, "_cols")
+
+ + +

int: Number of columns in the grid.

+
+ + +
+
+ +
+ densities: Tuple[float, ...] + + + +
+ +
89    @property
+90    def densities(self) -> Tuple[float, ...]:
+91        """Tuple[float, ...]: Initial density fraction for each species."""
+92        return tuple(getattr(self, "_densities"))
+
+ + +

Tuple[float, ...]: Initial density fraction for each species.

+
+ + +
+
+ +
+ n_species: int + + + +
+ +
95    @property
+96    def n_species(self) -> int:
+97        """int: Number of distinct species states (excluding empty state 0)."""
+98        return int(getattr(self, "_n_species"))
+
+ + +

int: Number of distinct species states (excluding empty state 0).

+
+ + +
+
+ +
+ + def + validate(self) -> None: + + + +
+ +
191    def validate(self) -> None:
+192        """
+193        Validate core CA invariants and grid dimensions.
+194
+195        Checks that the neighborhood is valid, the grid matches initialized dimensions,
+196        and that local parameter arrays match the grid shape.
+197
+198        Raises
+199        ------
+200        ValueError
+201            If any structural invariant is violated.
+202        """
+203        if self.neighborhood not in ("neumann", "moore"):
+204            raise ValueError("neighborhood must be 'neumann' or 'moore'")
+205
+206        expected_shape = (int(getattr(self, "_rows")), int(getattr(self, "_cols")))
+207        if self.grid.shape != expected_shape:
+208            raise ValueError(
+209                f"grid shape {self.grid.shape} does not match expected {expected_shape}"
+210            )
+211
+212        # Ensure any array in cell_params matches grid shape
+213        for k, v in (self.cell_params or {}).items():
+214            if isinstance(v, np.ndarray) and v.shape != expected_shape:
+215                raise ValueError(f"cell_params['{k}'] must have shape equal to grid")
+
+ + +

Validate core CA invariants and grid dimensions.

+ +

Checks that the neighborhood is valid, the grid matches initialized dimensions, +and that local parameter arrays match the grid shape.

+ +
Raises
+ +
    +
  • ValueError: If any structural invariant is violated.
  • +
+
+ + +
+
+ +
+ + def + evolve( self, param: str, species: Optional[int] = None, sd: float = 0.05, min_val: Optional[float] = None, max_val: Optional[float] = None) -> None: + + + +
+ +
249    def evolve(
+250        self,
+251        param: str,
+252        species: Optional[int] = None,
+253        sd: float = 0.05,
+254        min_val: Optional[float] = None,
+255        max_val: Optional[float] = None,
+256    ) -> None:
+257        """
+258        Enable per-cell evolution for a specific parameter on a given species.
+259
+260        This method initializes a spatial parameter array (local parameter map)
+261        for a global parameter. It allows individual cells to carry their own
+262        values for that parameter, which can then mutate and evolve during
+263        the simulation.
+264
+265        Parameters
+266        ----------
+267        param : str
+268            The name of the global parameter to enable for evolution.
+269            Must exist in `self.params`.
+270        species : int, optional
+271            The 1-based index of the species to which this parameter applies.
+272            If None, the method attempts to infer the species from the
+273            parameter name prefix.
+274        sd : float, default 0.05
+275            The standard deviation of the Gaussian mutation applied during
+276            inheritance/reproduction.
+277        min_val : float, optional
+278            The minimum allowable value for the parameter (clamping).
+279            Defaults to 0.01 if not provided.
+280        max_val : float, optional
+281            The maximum allowable value for the parameter (clamping).
+282            Defaults to 0.99 if not provided.
+283
+284        Raises
+285        ------
+286        ValueError
+287            If the parameter is not in `self.params`, the species cannot be
+288            inferred, or the species index is out of bounds.
+289
+290        Notes
+291        -----
+292        The local parameter is stored in `self.cell_params` as a 2D numpy
+293        array initialized with the current global value for all cells of
+294        the target species, and `NaN` elsewhere.
+295        """
+296        if min_val is None:
+297            min_val = 0.01
+298        if max_val is None:
+299            max_val = 0.99
+300        if param not in self.params:
+301            raise ValueError(f"Unknown parameter '{param}'")
+302        if species is None:
+303            species = self._infer_species_from_param_name(param)
+304            if species is None:
+305                raise ValueError(
+306                    "species must be provided or inferable from param name and species_names"
+307                )
+308        if not isinstance(species, int) or species <= 0 or species > self._n_species:
+309            raise ValueError("species must be an integer between 1 and n_species")
+310
+311        arr = np.full(self.grid.shape, np.nan, dtype=float)
+312        mask = self.grid == int(species)
+313        arr[mask] = float(self.params[param])
+314        self.cell_params[param] = arr
+315        self._evolve_info[param] = {
+316            "sd": float(sd),
+317            "min": float(min_val),
+318            "max": float(max_val),
+319            "species": int(species),
+320        }
+
+ + +

Enable per-cell evolution for a specific parameter on a given species.

+ +

This method initializes a spatial parameter array (local parameter map) +for a global parameter. It allows individual cells to carry their own +values for that parameter, which can then mutate and evolve during +the simulation.

+ +
Parameters
+ +
    +
  • param (str): +The name of the global parameter to enable for evolution. +Must exist in self.params.
  • +
  • species (int, optional): +The 1-based index of the species to which this parameter applies. +If None, the method attempts to infer the species from the +parameter name prefix.
  • +
  • sd (float, default 0.05): +The standard deviation of the Gaussian mutation applied during +inheritance/reproduction.
  • +
  • min_val (float, optional): +The minimum allowable value for the parameter (clamping). +Defaults to 0.01 if not provided.
  • +
  • max_val (float, optional): +The maximum allowable value for the parameter (clamping). +Defaults to 0.99 if not provided.
  • +
+ +
Raises
+ +
    +
  • ValueError: If the parameter is not in self.params, the species cannot be +inferred, or the species index is out of bounds.
  • +
+ +
Notes
+ +

The local parameter is stored in self.cell_params as a 2D numpy +array initialized with the current global value for all cells of +the target species, and NaN elsewhere.

+
+ + +
+
+ +
+ + def + update(self) -> None: + + + +
+ +
322    def update(self) -> None:
+323        """
+324        Perform one update step of the cellular automaton.
+325
+326        This is an abstract method that defines the transition rules of the
+327        system. It must be implemented by concrete subclasses to specify
+328        how cell states and parameters change over time based on their
+329        current state and neighborhood.
+330
+331        Raises
+332        ------
+333        NotImplementedError
+334            If called directly on the base class instead of an implementation.
+335
+336        Returns
+337        -------
+338        None
+339
+340        Notes
+341        -----
+342        In a typical implementation, this method handles the logic for
+343        stochastic transitions, movement, or predator-prey interactions.
+344        """
+345        raise NotImplementedError(
+346            "Override update() in a subclass to define CA dynamics"
+347        )
+
+ + +

Perform one update step of the cellular automaton.

+ +

This is an abstract method that defines the transition rules of the +system. It must be implemented by concrete subclasses to specify +how cell states and parameters change over time based on their +current state and neighborhood.

+ +
Raises
+ +
    +
  • NotImplementedError: If called directly on the base class instead of an implementation.
  • +
+ +
Returns
+ +
    +
  • None
  • +
+ +
Notes
+ +

In a typical implementation, this method handles the logic for +stochastic transitions, movement, or predator-prey interactions.

+
+ + +
+
+ +
+ + def + run( self, steps: int, stop_evolution_at: Optional[int] = None, snapshot_iters: Optional[list] = None) -> None: + + + +
+ +
349    def run(
+350        self,
+351        steps: int,
+352        stop_evolution_at: Optional[int] = None,
+353        snapshot_iters: Optional[list] = None,
+354    ) -> None:
+355        """
+356        Execute the cellular automaton simulation for a specified number of steps.
+357
+358        This method drives the simulation loop, calling `update()` at each
+359        iteration. It manages visualization updates, directory creation for
+360        data persistence, and handles the freezing of evolving parameters
+361        at a specific time step.
+362
+363        Parameters
+364        ----------
+365        steps : int
+366            The total number of iterations to run (must be non-negative).
+367        stop_evolution_at : int, optional
+368            The 1-based iteration index after which parameter mutation is
+369            disabled. Useful for observing system stability after a period
+370            of adaptation.
+371        snapshot_iters : List[int], optional
+372            A list of specific 1-based iteration indices at which to save
+373            the current grid state to the results directory.
+374
+375        Returns
+376        -------
+377        None
+378
+379        Notes
+380        -----
+381        If snapshots are requested, a results directory is automatically created
+382        using a timestamped subfolder (e.g., 'results/run-1700000000/').
+383        Visualization errors are logged but do not terminate the simulation.
+384        """
+385        assert (
+386            isinstance(steps, int) and steps >= 0
+387        ), "steps must be a non-negative integer"
+388
+389        # normalize snapshot iteration list
+390        snapshot_set = set(snapshot_iters) if snapshot_iters is not None else set()
+391
+392        for i in range(steps):
+393            self.update()
+394            # Update visualization if enabled every `interval` iterations
+395            if getattr(self, "_viz_on", False):
+396                # iteration number is 1-based for display
+397                try:
+398                    self._viz_update(i + 1)
+399                except Exception:
+400                    # Log visualization errors but don't stop the simulation
+401                    logger.exception(
+402                        "Visualization update failed at iteration %d", i + 1
+403                    )
+404
+405            # create snapshots if requested at this iteration
+406            if (i + 1) in snapshot_set:
+407                try:
+408                    # create snapshot folder if not present
+409                    if (
+410                        not hasattr(self, "_viz_snapshot_dir")
+411                        or self._viz_snapshot_dir is None
+412                    ):
+413                        import os, time
+414
+415                        base = "results"
+416                        ts = int(time.time())
+417                        run_folder = f"run-{ts}"
+418                        full = os.path.join(base, run_folder)
+419                        os.makedirs(full, exist_ok=True)
+420                        self._viz_snapshot_dir = full
+421                    self._viz_save_snapshot(i + 1)
+422                except (OSError, PermissionError):
+423                    logger.exception(
+424                        "Failed to create or write snapshot at iteration %d", i + 1
+425                    )
+426
+427            # stop evolution at specified time-step (disable further evolution)
+428            if stop_evolution_at is not None and (i + 1) == int(stop_evolution_at):
+429                # mark evolution as stopped; do not erase evolve metadata so
+430                # deterministic inheritance can still use parent values
+431                self._evolution_stopped = True
+
+ + +

Execute the cellular automaton simulation for a specified number of steps.

+ +

This method drives the simulation loop, calling update() at each +iteration. It manages visualization updates, directory creation for +data persistence, and handles the freezing of evolving parameters +at a specific time step.

+ +
Parameters
+ +
    +
  • steps (int): +The total number of iterations to run (must be non-negative).
  • +
  • stop_evolution_at (int, optional): +The 1-based iteration index after which parameter mutation is +disabled. Useful for observing system stability after a period +of adaptation.
  • +
  • snapshot_iters (List[int], optional): +A list of specific 1-based iteration indices at which to save +the current grid state to the results directory.
  • +
+ +
Returns
+ +
    +
  • None
  • +
+ +
Notes
+ +

If snapshots are requested, a results directory is automatically created +using a timestamped subfolder (e.g., 'results/run-1700000000/'). +Visualization errors are logged but do not terminate the simulation.

+
+ + +
+
+
+ +
+ + class + PP(CA): + + + +
+ +
434class PP(CA):
+435    """
+436    Predator-Prey Cellular Automaton model with Numba-accelerated kernels.
+437
+438    This model simulates a stochastic predator-prey system where species
+439    interact on a 2D grid. It supports evolving per-cell death rates,
+440    periodic boundary conditions, and both random and directed hunting
+441    behaviors.
+442
+443    Parameters
+444    ----------
+445    rows : int, default 10
+446        Number of rows in the simulation grid.
+447    cols : int, default 10
+448        Number of columns in the simulation grid.
+449    densities : Tuple[float, ...], default (0.2, 0.1)
+450        Initial population densities for (prey, predator).
+451    neighborhood : {'moore', 'neumann'}, default 'moore'
+452        The neighborhood type for cell interactions.
+453    params : Dict[str, object], optional
+454        Global parameters: "prey_death", "predator_death", "prey_birth",
+455        "predator_birth".
+456    cell_params : Dict[str, object], optional
+457        Initial local parameter maps (2D arrays).
+458    seed : int, optional
+459        Random seed for reproducibility.
+460    synchronous : bool, default True
+461        If True, updates the entire grid at once. If False, updates
+462        cells asynchronously.
+463    directed_hunting : bool, default False
+464        If True, predators selectively hunt prey rather than choosing
+465        neighbors at random.
+466
+467    Attributes
+468    ----------
+469    species_names : Tuple[str, ...]
+470        Labels for the species ('prey', 'predator').
+471    synchronous : bool
+472        Current update mode.
+473    directed_hunting : bool
+474        Current hunting strategy logic.
+475    """
+476
+477    # Default colors: 0=empty black, 1=prey green, 2=predator red
+478    _default_cmap = ("black", "green", "red")
+479
+480    def __init__(
+481        self,
+482        rows: int = 10,
+483        cols: int = 10,
+484        densities: Tuple[float, ...] = (0.2, 0.1),
+485        neighborhood: str = "moore",
+486        params: Dict[str, object] = None,
+487        cell_params: Dict[str, object] = None,
+488        seed: Optional[int] = None,
+489        synchronous: bool = True,
+490        directed_hunting: bool = False,  # New directed hunting option
+491    ) -> None:
+492        """
+493        Initialize the Predator-Prey CA with validated parameters and kernels.
+494        """
+495        # Allowed params and defaults
+496        _defaults = {
+497            "prey_death": 0.05,
+498            "predator_death": 0.1,
+499            "prey_birth": 0.25,
+500            "predator_birth": 0.2,
+501        }
+502
+503        # Validate user-supplied params: only allowed keys
+504        if params is None:
+505            merged_params = dict(_defaults)
+506        else:
+507            if not isinstance(params, dict):
+508                raise TypeError("params must be a dict or None")
+509            extra = set(params.keys()) - set(_defaults.keys())
+510            if extra:
+511                raise ValueError(f"Unexpected parameter keys: {sorted(list(extra))}")
+512            # Do not override user-set values: start from defaults then update with user values
+513            merged_params = dict(_defaults)
+514            merged_params.update(params)
+515
+516        # Validate numerical ranges
+517        for k, v in merged_params.items():
+518            if not isinstance(v, (int, float)):
+519                raise TypeError(f"Parameter '{k}' must be a number between 0 and 1")
+520            if not (0.0 <= float(v) <= 1.0):
+521                raise ValueError(f"Parameter '{k}' must be between 0 and 1")
+522
+523        # Call base initializer with merged params
+524        super().__init__(
+525            rows, cols, densities, neighborhood, merged_params, cell_params, seed
+526        )
+527
+528        self.synchronous: bool = bool(synchronous)
+529        self.directed_hunting: bool = bool(directed_hunting)
+530
+531        # set human-friendly species names for PP
+532        self.species_names = ("prey", "predator")
+533
+534        if seed is not None:
+535            # This sets the seed for all @njit functions globally
+536            set_numba_seed(seed)
+537
+538        self._kernel = PPKernel(
+539            rows, cols, neighborhood, directed_hunting=directed_hunting
+540        )
+541
+542    # Remove PP-specific evolve wrapper; use CA.evolve with optional species
+543
+544    def validate(self) -> None:
+545        """
+546        Validate Predator-Prey specific invariants and spatial parameter arrays.
+547
+548        Extends the base CA validation to ensure that numerical parameters are
+549        within the [0, 1] probability range and that evolved parameter maps
+550        (e.g., prey_death) correctly align with the species locations.
+551
+552        Raises
+553        ------
+554        ValueError
+555            If grid shapes, parameter ranges, or species masks are inconsistent.
+556        TypeError
+557            If parameters are non-numeric.
+558        """
+559        super().validate()
+560
+561        # Validate global params
+562        for k, v in (self.params or {}).items():
+563            if not isinstance(v, (int, float)):
+564                raise TypeError(f"Parameter '{k}' must be numeric")
+565            if not (0.0 <= float(v) <= 1.0):
+566                raise ValueError(f"Parameter '{k}' must be between 0 and 1")
+567
+568        # Validate per-cell evolve arrays
+569        for pname, meta in (self._evolve_info or {}).items():
+570            arr = self.cell_params.get(pname)
+571            if not isinstance(arr, np.ndarray):
+572                # absent or non-array per-cell params are allowed; skip
+573                continue
+574            # shape already checked in super().validate(), but be explicit
+575            if arr.shape != self.grid.shape:
+576                raise ValueError(f"cell_params['{pname}'] must match grid shape")
+577            # expected non-NaN positions correspond to species stored in metadata
+578            species = None
+579            if isinstance(meta, dict) and "species" in meta:
+580                species = int(meta.get("species"))
+581            else:
+582                # try to infer species from parameter name using species_names
+583                species = self._infer_species_from_param_name(pname)
+584                if species is None:
+585                    raise ValueError(
+586                        f"cell_params['{pname}'] missing species metadata and could not infer from name"
+587                    )
+588            nonnan = ~np.isnan(arr)
+589            expected = self.grid == species
+590            if not np.array_equal(nonnan, expected):
+591                raise ValueError(
+592                    f"cell_params['{pname}'] non-NaN entries must match positions of species {species}"
+593                )
+594            # values must be within configured range where not NaN
+595            mn = float(meta.get("min", 0.0))
+596            mx = float(meta.get("max", 1.0))
+597            vals = arr[~np.isnan(arr)]
+598            if vals.size > 0:
+599                if np.any(vals < mn) or np.any(vals > mx):
+600                    raise ValueError(
+601                        f"cell_params['{pname}'] contains values outside [{mn}, {mx}]"
+602                    )
+603
+604    def update_async(self) -> None:
+605        """
+606        Execute an asynchronous update using the optimized Numba kernel.
+607
+608        This method retrieves the evolved parameter maps and delegates the
+609        stochastic transitions to the `PPKernel`. Asynchronous updates
+610        typically handle cell-by-cell logic where changes can be
+611        immediately visible to neighbors.
+612        """
+613        # Get the evolved prey death map
+614        # Fallback to a full array of the global param if it doesn't exist yet
+615        p_death_arr = self.cell_params.get("prey_death")
+616        if p_death_arr is None:
+617            p_death_arr = np.full(
+618                self.grid.shape, self.params["prey_death"], dtype=np.float64
+619            )
+620
+621        meta = self._evolve_info.get(
+622            "prey_death", {"sd": 0.05, "min": 0.001, "max": 0.1}
+623        )
+624
+625        # Call the optimized kernel (uses pre-allocated buffers)
+626        self._kernel.update(
+627            self.grid,
+628            p_death_arr,
+629            float(self.params["prey_birth"]),
+630            float(self.params["prey_death"]),
+631            float(self.params["predator_birth"]),
+632            float(self.params["predator_death"]),
+633            float(meta["sd"]),
+634            float(meta["min"]),
+635            float(meta["max"]),
+636            self._evolution_stopped,
+637        )
+638
+639    def update(self) -> None:
+640        """
+641        Dispatch the simulation step based on the configured update mode.
+642        """
+643        self.update_async()
+
+ + +

Predator-Prey Cellular Automaton model with Numba-accelerated kernels.

+ +

This model simulates a stochastic predator-prey system where species +interact on a 2D grid. It supports evolving per-cell death rates, +periodic boundary conditions, and both random and directed hunting +behaviors.

+ +
Parameters
+ +
    +
  • rows (int, default 10): +Number of rows in the simulation grid.
  • +
  • cols (int, default 10): +Number of columns in the simulation grid.
  • +
  • densities (Tuple[float, ...], default (0.2, 0.1)): +Initial population densities for (prey, predator).
  • +
  • neighborhood ({'moore', 'neumann'}, default 'moore'): +The neighborhood type for cell interactions.
  • +
  • params (Dict[str, object], optional): +Global parameters: "prey_death", "predator_death", "prey_birth", +"predator_birth".
  • +
  • cell_params (Dict[str, object], optional): +Initial local parameter maps (2D arrays).
  • +
  • seed (int, optional): +Random seed for reproducibility.
  • +
  • synchronous (bool, default True): +If True, updates the entire grid at once. If False, updates +cells asynchronously.
  • +
  • directed_hunting (bool, default False): +If True, predators selectively hunt prey rather than choosing +neighbors at random.
  • +
+ +
Attributes
+ +
    +
  • species_names (Tuple[str, ...]): +Labels for the species ('prey', 'predator').
  • +
  • synchronous (bool): +Current update mode.
  • +
  • directed_hunting (bool): +Current hunting strategy logic.
  • +
+
+ + +
+ +
+ + PP( rows: int = 10, cols: int = 10, densities: Tuple[float, ...] = (0.2, 0.1), neighborhood: str = 'moore', params: Dict[str, object] = None, cell_params: Dict[str, object] = None, seed: Optional[int] = None, synchronous: bool = True, directed_hunting: bool = False) + + + +
+ +
480    def __init__(
+481        self,
+482        rows: int = 10,
+483        cols: int = 10,
+484        densities: Tuple[float, ...] = (0.2, 0.1),
+485        neighborhood: str = "moore",
+486        params: Dict[str, object] = None,
+487        cell_params: Dict[str, object] = None,
+488        seed: Optional[int] = None,
+489        synchronous: bool = True,
+490        directed_hunting: bool = False,  # New directed hunting option
+491    ) -> None:
+492        """
+493        Initialize the Predator-Prey CA with validated parameters and kernels.
+494        """
+495        # Allowed params and defaults
+496        _defaults = {
+497            "prey_death": 0.05,
+498            "predator_death": 0.1,
+499            "prey_birth": 0.25,
+500            "predator_birth": 0.2,
+501        }
+502
+503        # Validate user-supplied params: only allowed keys
+504        if params is None:
+505            merged_params = dict(_defaults)
+506        else:
+507            if not isinstance(params, dict):
+508                raise TypeError("params must be a dict or None")
+509            extra = set(params.keys()) - set(_defaults.keys())
+510            if extra:
+511                raise ValueError(f"Unexpected parameter keys: {sorted(list(extra))}")
+512            # Do not override user-set values: start from defaults then update with user values
+513            merged_params = dict(_defaults)
+514            merged_params.update(params)
+515
+516        # Validate numerical ranges
+517        for k, v in merged_params.items():
+518            if not isinstance(v, (int, float)):
+519                raise TypeError(f"Parameter '{k}' must be a number between 0 and 1")
+520            if not (0.0 <= float(v) <= 1.0):
+521                raise ValueError(f"Parameter '{k}' must be between 0 and 1")
+522
+523        # Call base initializer with merged params
+524        super().__init__(
+525            rows, cols, densities, neighborhood, merged_params, cell_params, seed
+526        )
+527
+528        self.synchronous: bool = bool(synchronous)
+529        self.directed_hunting: bool = bool(directed_hunting)
+530
+531        # set human-friendly species names for PP
+532        self.species_names = ("prey", "predator")
+533
+534        if seed is not None:
+535            # This sets the seed for all @njit functions globally
+536            set_numba_seed(seed)
+537
+538        self._kernel = PPKernel(
+539            rows, cols, neighborhood, directed_hunting=directed_hunting
+540        )
+
+ + +

Initialize the Predator-Prey CA with validated parameters and kernels.

+
+ + +
+
+ +
+ + def + validate(self) -> None: + + + +
+ +
544    def validate(self) -> None:
+545        """
+546        Validate Predator-Prey specific invariants and spatial parameter arrays.
+547
+548        Extends the base CA validation to ensure that numerical parameters are
+549        within the [0, 1] probability range and that evolved parameter maps
+550        (e.g., prey_death) correctly align with the species locations.
+551
+552        Raises
+553        ------
+554        ValueError
+555            If grid shapes, parameter ranges, or species masks are inconsistent.
+556        TypeError
+557            If parameters are non-numeric.
+558        """
+559        super().validate()
+560
+561        # Validate global params
+562        for k, v in (self.params or {}).items():
+563            if not isinstance(v, (int, float)):
+564                raise TypeError(f"Parameter '{k}' must be numeric")
+565            if not (0.0 <= float(v) <= 1.0):
+566                raise ValueError(f"Parameter '{k}' must be between 0 and 1")
+567
+568        # Validate per-cell evolve arrays
+569        for pname, meta in (self._evolve_info or {}).items():
+570            arr = self.cell_params.get(pname)
+571            if not isinstance(arr, np.ndarray):
+572                # absent or non-array per-cell params are allowed; skip
+573                continue
+574            # shape already checked in super().validate(), but be explicit
+575            if arr.shape != self.grid.shape:
+576                raise ValueError(f"cell_params['{pname}'] must match grid shape")
+577            # expected non-NaN positions correspond to species stored in metadata
+578            species = None
+579            if isinstance(meta, dict) and "species" in meta:
+580                species = int(meta.get("species"))
+581            else:
+582                # try to infer species from parameter name using species_names
+583                species = self._infer_species_from_param_name(pname)
+584                if species is None:
+585                    raise ValueError(
+586                        f"cell_params['{pname}'] missing species metadata and could not infer from name"
+587                    )
+588            nonnan = ~np.isnan(arr)
+589            expected = self.grid == species
+590            if not np.array_equal(nonnan, expected):
+591                raise ValueError(
+592                    f"cell_params['{pname}'] non-NaN entries must match positions of species {species}"
+593                )
+594            # values must be within configured range where not NaN
+595            mn = float(meta.get("min", 0.0))
+596            mx = float(meta.get("max", 1.0))
+597            vals = arr[~np.isnan(arr)]
+598            if vals.size > 0:
+599                if np.any(vals < mn) or np.any(vals > mx):
+600                    raise ValueError(
+601                        f"cell_params['{pname}'] contains values outside [{mn}, {mx}]"
+602                    )
+
+ + +

Validate Predator-Prey specific invariants and spatial parameter arrays.

+ +

Extends the base CA validation to ensure that numerical parameters are +within the [0, 1] probability range and that evolved parameter maps +(e.g., prey_death) correctly align with the species locations.

+ +
Raises
+ +
    +
  • ValueError: If grid shapes, parameter ranges, or species masks are inconsistent.
  • +
  • TypeError: If parameters are non-numeric.
  • +
+
+ + +
+
+ +
+ + def + update_async(self) -> None: + + + +
+ +
604    def update_async(self) -> None:
+605        """
+606        Execute an asynchronous update using the optimized Numba kernel.
+607
+608        This method retrieves the evolved parameter maps and delegates the
+609        stochastic transitions to the `PPKernel`. Asynchronous updates
+610        typically handle cell-by-cell logic where changes can be
+611        immediately visible to neighbors.
+612        """
+613        # Get the evolved prey death map
+614        # Fallback to a full array of the global param if it doesn't exist yet
+615        p_death_arr = self.cell_params.get("prey_death")
+616        if p_death_arr is None:
+617            p_death_arr = np.full(
+618                self.grid.shape, self.params["prey_death"], dtype=np.float64
+619            )
+620
+621        meta = self._evolve_info.get(
+622            "prey_death", {"sd": 0.05, "min": 0.001, "max": 0.1}
+623        )
+624
+625        # Call the optimized kernel (uses pre-allocated buffers)
+626        self._kernel.update(
+627            self.grid,
+628            p_death_arr,
+629            float(self.params["prey_birth"]),
+630            float(self.params["prey_death"]),
+631            float(self.params["predator_birth"]),
+632            float(self.params["predator_death"]),
+633            float(meta["sd"]),
+634            float(meta["min"]),
+635            float(meta["max"]),
+636            self._evolution_stopped,
+637        )
+
+ + +

Execute an asynchronous update using the optimized Numba kernel.

+ +

This method retrieves the evolved parameter maps and delegates the +stochastic transitions to the PPKernel. Asynchronous updates +typically handle cell-by-cell logic where changes can be +immediately visible to neighbors.

+
+ + +
+
+ +
+ + def + update(self) -> None: + + + +
+ +
639    def update(self) -> None:
+640        """
+641        Dispatch the simulation step based on the configured update mode.
+642        """
+643        self.update_async()
+
+ + +

Dispatch the simulation step based on the configured update mode.

+
+ + +
+
+
Inherited Members
+
+ +
+
+
+
+ + \ No newline at end of file diff --git a/docs/models/config.html b/docs/models/config.html new file mode 100644 index 0000000..19d4700 --- /dev/null +++ b/docs/models/config.html @@ -0,0 +1,1050 @@ + + + + + + + models.config API documentation + + + + + + + + + +
+
+

+models.config

+ +

Experiment Configuration

+ +

This module provides the configuration dataclass and pre-defined phase +configurations for Predator-Prey Hydra Effect experiments.

+ +
Classes
+ +

Config + Central configuration dataclass with all experiment parameters.

+ +
Functions
+ +
+
get_phase_config: Retrieve configuration for a specific experimental phase.
+
+
+ +
Phase Configurations
+ +
    +
  • PHASE1_CONFIG: Parameter sweep to find critical point
  • +
  • PHASE2_CONFIG: Self-organization (evolution toward criticality)
  • +
  • PHASE3_CONFIG: Finite-size scaling at critical point
  • +
  • PHASE4_CONFIG: Sensitivity analysis (4D parameter sweep)
  • +
  • PHASE5_CONFIG: Directed hunting comparison
  • +
+ +
Example
+ +
+
from models.config import Config, get_phase_config
+
+# Use predefined phase config
+cfg = get_phase_config(1)
+
+# Create custom config
+cfg = Config(grid_size=200, n_replicates=10)
+
+# Generate parameter sweep values
+prey_deaths = cfg.get_prey_deaths()
+
+
+
+ + + + + +
  1#!/usr/bin/env python3
+  2"""
+  3Experiment Configuration
+  4========================
+  5
+  6This module provides the configuration dataclass and pre-defined phase
+  7configurations for Predator-Prey Hydra Effect experiments.
+  8
+  9Classes
+ 10-------
+ 11Config
+ 12    Central configuration dataclass with all experiment parameters.
+ 13
+ 14Functions
+ 15---------
+ 16```python
+ 17get_phase_config: Retrieve configuration for a specific experimental phase.
+ 18```
+ 19
+ 20Phase Configurations
+ 21--------------------
+ 22- ``PHASE1_CONFIG``: Parameter sweep to find critical point
+ 23- ``PHASE2_CONFIG``: Self-organization (evolution toward criticality)
+ 24- ``PHASE3_CONFIG``: Finite-size scaling at critical point
+ 25- ``PHASE4_CONFIG``: Sensitivity analysis (4D parameter sweep)
+ 26- ``PHASE5_CONFIG``: Directed hunting comparison
+ 27
+ 28Example
+ 29-------
+ 30```python
+ 31from models.config import Config, get_phase_config
+ 32
+ 33# Use predefined phase config
+ 34cfg = get_phase_config(1)
+ 35
+ 36# Create custom config
+ 37cfg = Config(grid_size=200, n_replicates=10)
+ 38
+ 39# Generate parameter sweep values
+ 40prey_deaths = cfg.get_prey_deaths()
+ 41```
+ 42"""
+ 43from dataclasses import dataclass
+ 44from typing import Tuple
+ 45import numpy as np
+ 46
+ 47
+ 48@dataclass
+ 49class Config:
+ 50    """
+ 51    Central configuration for Predator-Prey Hydra Effect experiments.
+ 52
+ 53    Attributes
+ 54    ----------
+ 55    grid_size : int
+ 56        Side length of the square simulation grid.
+ 57    densities : Tuple[float, float]
+ 58        Initial population fractions for (prey, predator).
+ 59    grid_sizes : Tuple[int, ...]
+ 60        Grid dimensions for Finite-Size Scaling (FSS) analysis (Phase 3).
+ 61    prey_birth : float
+ 62        Global birth rate for prey species.
+ 63    prey_death : float
+ 64        Global death rate for prey species.
+ 65    predator_birth : float
+ 66        Global birth rate for predator species.
+ 67    predator_death : float
+ 68        Global death rate for predator species.
+ 69    critical_prey_birth : float
+ 70        Critical birth rate identified from Phase 1.
+ 71    critical_prey_death : float
+ 72        Critical death rate identified from Phase 1.
+ 73    prey_death_range : Tuple[float, float]
+ 74        Bounds for prey death rate sweep.
+ 75    n_prey_death : int
+ 76        Number of points in prey death rate sweep.
+ 77    n_replicates : int
+ 78        Independent stochastic runs per parameter set.
+ 79    warmup_steps : int
+ 80        Iterations before data collection begins.
+ 81    measurement_steps : int
+ 82        Iterations for collecting statistics.
+ 83    evolve_sd : float
+ 84        Standard deviation for parameter mutation.
+ 85    evolve_min : float
+ 86        Lower bound for evolving parameters.
+ 87    evolve_max : float
+ 88        Upper bound for evolving parameters.
+ 89    directed_hunting : bool
+ 90        Toggle for targeted predator movement.
+ 91    save_timeseries : bool
+ 92        Toggle for recording population time series.
+ 93    timeseries_subsample : int
+ 94        Subsample rate for time series data.
+ 95    collect_pcf : bool
+ 96        Toggle for Pair Correlation Function analysis.
+ 97    pcf_sample_rate : float
+ 98        Fraction of runs that compute PCFs.
+ 99    pcf_max_distance : float
+100        Maximum radial distance for PCF.
+101    pcf_n_bins : int
+102        Number of bins in PCF histogram.
+103    min_density_for_analysis : float
+104        Population threshold for spatial analysis.
+105    n_jobs : int
+106        CPU cores for parallelization (-1 = all).
+107    """
+108
+109    # Grid settings
+110    grid_size: int = 1000
+111    densities: Tuple[float, float] = (0.30, 0.15)
+112    grid_sizes: Tuple[int, ...] = (50, 100, 250, 500, 1000, 2500)
+113
+114    # Species parameters
+115    prey_birth: float = 0.2
+116    prey_death: float = 0.05
+117    predator_birth: float = 0.8
+118    predator_death: float = 0.05
+119
+120    # Critical point (from Phase 1)
+121    critical_prey_birth: float = 0.20
+122    critical_prey_death: float = 0.0968
+123
+124    # Parameter sweep settings
+125    prey_death_range: Tuple[float, float] = (0.0, 0.2)
+126    n_prey_death: int = 20
+127
+128    # Replication
+129    n_replicates: int = 15
+130
+131    # Simulation timing
+132    warmup_steps: int = 300
+133    measurement_steps: int = 500
+134
+135    # Evolution settings
+136    evolve_sd: float = 0.10
+137    evolve_min: float = 0.0
+138    evolve_max: float = 0.10
+139
+140    # Model variant
+141    directed_hunting: bool = False
+142
+143    # Time series collection
+144    save_timeseries: bool = False
+145    timeseries_subsample: int = 10
+146
+147    # PCF settings
+148    collect_pcf: bool = True
+149    pcf_sample_rate: float = 0.2
+150    pcf_max_distance: float = 20.0
+151    pcf_n_bins: int = 20
+152
+153    # Analysis thresholds
+154    min_density_for_analysis: float = 0.002
+155
+156    # Parallelization
+157    n_jobs: int = -1
+158
+159    def get_prey_deaths(self) -> np.ndarray:
+160        """Generate array of prey death rates for parameter sweep."""
+161        return np.linspace(
+162            self.prey_death_range[0], self.prey_death_range[1], self.n_prey_death
+163        )
+164
+165    def get_warmup_steps(self, L: int) -> int:
+166        """Get warmup steps (can be extended for size-dependent scaling)."""
+167        return self.warmup_steps
+168
+169    def get_measurement_steps(self, L: int) -> int:
+170        """Get measurement steps (can be extended for size-dependent scaling)."""
+171        return self.measurement_steps
+172
+173    def estimate_runtime(self, n_cores: int = 32) -> str:
+174        """
+175        Estimate wall-clock time for the experiment.
+176
+177        Parameters
+178        ----------
+179        n_cores : int
+180            Number of available CPU cores.
+181
+182        Returns
+183        -------
+184        str
+185            Human-readable runtime estimate.
+186        """
+187        ref_size = 100
+188        ref_steps_per_sec = 1182
+189
+190        size_scaling = (self.grid_size / ref_size) ** 2
+191        actual_steps_per_sec = ref_steps_per_sec / size_scaling
+192
+193        total_steps = self.warmup_steps + self.measurement_steps
+194        base_time_s = total_steps / actual_steps_per_sec
+195
+196        pcf_time_s = (0.008 * size_scaling) if self.collect_pcf else 0
+197
+198        n_sims = self.n_prey_death * self.n_replicates
+199
+200        total_seconds = n_sims * (base_time_s + pcf_time_s * self.pcf_sample_rate)
+201        total_seconds /= n_cores
+202
+203        hours = total_seconds / 3600
+204        core_hours = n_sims * (base_time_s + pcf_time_s * self.pcf_sample_rate) / 3600
+205
+206        return f"{n_sims:,} sims, ~{hours:.1f}h on {n_cores} cores (~{core_hours:.0f} core-hours)"
+207
+208
+209# =============================================================================
+210# Phase Configurations
+211# =============================================================================
+212
+213PHASE1_CONFIG = Config(
+214    grid_size=1000,
+215    n_prey_death=20,
+216    prey_birth=0.2,
+217    prey_death_range=(0.0963, 0.0973),
+218    predator_birth=0.8,
+219    predator_death=0.05,
+220    n_replicates=30,
+221    warmup_steps=1000,
+222    measurement_steps=1000,
+223    collect_pcf=False,
+224    save_timeseries=False,
+225    directed_hunting=False,
+226)
+227
+228PHASE2_CONFIG = Config(
+229    grid_size=1000,
+230    n_prey_death=10,
+231    n_replicates=10,
+232    warmup_steps=1000,
+233    measurement_steps=10000,
+234    evolve_sd=0.01,
+235    evolve_min=0.0,
+236    evolve_max=0.20,
+237    collect_pcf=False,
+238    save_timeseries=False,
+239)
+240
+241PHASE3_CONFIG = Config(
+242    grid_sizes=(50, 100, 250, 500, 1000, 2500),
+243    n_replicates=20,
+244    warmup_steps=1000,
+245    measurement_steps=1000,
+246    critical_prey_birth=0.20,
+247    critical_prey_death=0.0968,
+248    collect_pcf=True,
+249    pcf_sample_rate=1.0,
+250    save_timeseries=False,
+251    directed_hunting=False,
+252)
+253
+254PHASE4_CONFIG = Config(
+255    grid_size=250,
+256    n_replicates=10,
+257    warmup_steps=500,
+258    measurement_steps=500,
+259    collect_pcf=False,
+260    save_timeseries=False,
+261    directed_hunting=False,
+262)
+263
+264PHASE5_CONFIG = Config(
+265    grid_size=250,
+266    n_replicates=10,
+267    warmup_steps=500,
+268    measurement_steps=500,
+269    collect_pcf=False,
+270    save_timeseries=False,
+271    directed_hunting=True,
+272)
+273
+274PHASE_CONFIGS = {
+275    1: PHASE1_CONFIG,
+276    2: PHASE2_CONFIG,
+277    3: PHASE3_CONFIG,
+278    4: PHASE4_CONFIG,
+279    5: PHASE5_CONFIG,
+280}
+281
+282
+283def get_phase_config(phase: int) -> Config:
+284    """
+285    Retrieve configuration for a specific experimental phase.
+286
+287    Parameters
+288    ----------
+289    phase : int
+290        Phase number (1-5).
+291
+292    Returns
+293    -------
+294    Config
+295        Configuration instance for the requested phase.
+296
+297    Raises
+298    ------
+299    ValueError
+300        If phase number is invalid.
+301    """
+302    if phase not in PHASE_CONFIGS:
+303        raise ValueError(
+304            f"Unknown phase {phase}. Valid phases: {list(PHASE_CONFIGS.keys())}"
+305        )
+306    return PHASE_CONFIGS[phase]
+
+ + +
+
+ +
+
@dataclass
+ + class + Config: + + + +
+ +
 49@dataclass
+ 50class Config:
+ 51    """
+ 52    Central configuration for Predator-Prey Hydra Effect experiments.
+ 53
+ 54    Attributes
+ 55    ----------
+ 56    grid_size : int
+ 57        Side length of the square simulation grid.
+ 58    densities : Tuple[float, float]
+ 59        Initial population fractions for (prey, predator).
+ 60    grid_sizes : Tuple[int, ...]
+ 61        Grid dimensions for Finite-Size Scaling (FSS) analysis (Phase 3).
+ 62    prey_birth : float
+ 63        Global birth rate for prey species.
+ 64    prey_death : float
+ 65        Global death rate for prey species.
+ 66    predator_birth : float
+ 67        Global birth rate for predator species.
+ 68    predator_death : float
+ 69        Global death rate for predator species.
+ 70    critical_prey_birth : float
+ 71        Critical birth rate identified from Phase 1.
+ 72    critical_prey_death : float
+ 73        Critical death rate identified from Phase 1.
+ 74    prey_death_range : Tuple[float, float]
+ 75        Bounds for prey death rate sweep.
+ 76    n_prey_death : int
+ 77        Number of points in prey death rate sweep.
+ 78    n_replicates : int
+ 79        Independent stochastic runs per parameter set.
+ 80    warmup_steps : int
+ 81        Iterations before data collection begins.
+ 82    measurement_steps : int
+ 83        Iterations for collecting statistics.
+ 84    evolve_sd : float
+ 85        Standard deviation for parameter mutation.
+ 86    evolve_min : float
+ 87        Lower bound for evolving parameters.
+ 88    evolve_max : float
+ 89        Upper bound for evolving parameters.
+ 90    directed_hunting : bool
+ 91        Toggle for targeted predator movement.
+ 92    save_timeseries : bool
+ 93        Toggle for recording population time series.
+ 94    timeseries_subsample : int
+ 95        Subsample rate for time series data.
+ 96    collect_pcf : bool
+ 97        Toggle for Pair Correlation Function analysis.
+ 98    pcf_sample_rate : float
+ 99        Fraction of runs that compute PCFs.
+100    pcf_max_distance : float
+101        Maximum radial distance for PCF.
+102    pcf_n_bins : int
+103        Number of bins in PCF histogram.
+104    min_density_for_analysis : float
+105        Population threshold for spatial analysis.
+106    n_jobs : int
+107        CPU cores for parallelization (-1 = all).
+108    """
+109
+110    # Grid settings
+111    grid_size: int = 1000
+112    densities: Tuple[float, float] = (0.30, 0.15)
+113    grid_sizes: Tuple[int, ...] = (50, 100, 250, 500, 1000, 2500)
+114
+115    # Species parameters
+116    prey_birth: float = 0.2
+117    prey_death: float = 0.05
+118    predator_birth: float = 0.8
+119    predator_death: float = 0.05
+120
+121    # Critical point (from Phase 1)
+122    critical_prey_birth: float = 0.20
+123    critical_prey_death: float = 0.0968
+124
+125    # Parameter sweep settings
+126    prey_death_range: Tuple[float, float] = (0.0, 0.2)
+127    n_prey_death: int = 20
+128
+129    # Replication
+130    n_replicates: int = 15
+131
+132    # Simulation timing
+133    warmup_steps: int = 300
+134    measurement_steps: int = 500
+135
+136    # Evolution settings
+137    evolve_sd: float = 0.10
+138    evolve_min: float = 0.0
+139    evolve_max: float = 0.10
+140
+141    # Model variant
+142    directed_hunting: bool = False
+143
+144    # Time series collection
+145    save_timeseries: bool = False
+146    timeseries_subsample: int = 10
+147
+148    # PCF settings
+149    collect_pcf: bool = True
+150    pcf_sample_rate: float = 0.2
+151    pcf_max_distance: float = 20.0
+152    pcf_n_bins: int = 20
+153
+154    # Analysis thresholds
+155    min_density_for_analysis: float = 0.002
+156
+157    # Parallelization
+158    n_jobs: int = -1
+159
+160    def get_prey_deaths(self) -> np.ndarray:
+161        """Generate array of prey death rates for parameter sweep."""
+162        return np.linspace(
+163            self.prey_death_range[0], self.prey_death_range[1], self.n_prey_death
+164        )
+165
+166    def get_warmup_steps(self, L: int) -> int:
+167        """Get warmup steps (can be extended for size-dependent scaling)."""
+168        return self.warmup_steps
+169
+170    def get_measurement_steps(self, L: int) -> int:
+171        """Get measurement steps (can be extended for size-dependent scaling)."""
+172        return self.measurement_steps
+173
+174    def estimate_runtime(self, n_cores: int = 32) -> str:
+175        """
+176        Estimate wall-clock time for the experiment.
+177
+178        Parameters
+179        ----------
+180        n_cores : int
+181            Number of available CPU cores.
+182
+183        Returns
+184        -------
+185        str
+186            Human-readable runtime estimate.
+187        """
+188        ref_size = 100
+189        ref_steps_per_sec = 1182
+190
+191        size_scaling = (self.grid_size / ref_size) ** 2
+192        actual_steps_per_sec = ref_steps_per_sec / size_scaling
+193
+194        total_steps = self.warmup_steps + self.measurement_steps
+195        base_time_s = total_steps / actual_steps_per_sec
+196
+197        pcf_time_s = (0.008 * size_scaling) if self.collect_pcf else 0
+198
+199        n_sims = self.n_prey_death * self.n_replicates
+200
+201        total_seconds = n_sims * (base_time_s + pcf_time_s * self.pcf_sample_rate)
+202        total_seconds /= n_cores
+203
+204        hours = total_seconds / 3600
+205        core_hours = n_sims * (base_time_s + pcf_time_s * self.pcf_sample_rate) / 3600
+206
+207        return f"{n_sims:,} sims, ~{hours:.1f}h on {n_cores} cores (~{core_hours:.0f} core-hours)"
+
+ + +

Central configuration for Predator-Prey Hydra Effect experiments.

+ +
Attributes
+ +
    +
  • grid_size (int): +Side length of the square simulation grid.
  • +
  • densities (Tuple[float, float]): +Initial population fractions for (prey, predator).
  • +
  • grid_sizes (Tuple[int, ...]): +Grid dimensions for Finite-Size Scaling (FSS) analysis (Phase 3).
  • +
  • prey_birth (float): +Global birth rate for prey species.
  • +
  • prey_death (float): +Global death rate for prey species.
  • +
  • predator_birth (float): +Global birth rate for predator species.
  • +
  • predator_death (float): +Global death rate for predator species.
  • +
  • critical_prey_birth (float): +Critical birth rate identified from Phase 1.
  • +
  • critical_prey_death (float): +Critical death rate identified from Phase 1.
  • +
  • prey_death_range (Tuple[float, float]): +Bounds for prey death rate sweep.
  • +
  • n_prey_death (int): +Number of points in prey death rate sweep.
  • +
  • n_replicates (int): +Independent stochastic runs per parameter set.
  • +
  • warmup_steps (int): +Iterations before data collection begins.
  • +
  • measurement_steps (int): +Iterations for collecting statistics.
  • +
  • evolve_sd (float): +Standard deviation for parameter mutation.
  • +
  • evolve_min (float): +Lower bound for evolving parameters.
  • +
  • evolve_max (float): +Upper bound for evolving parameters.
  • +
  • directed_hunting (bool): +Toggle for targeted predator movement.
  • +
  • save_timeseries (bool): +Toggle for recording population time series.
  • +
  • timeseries_subsample (int): +Subsample rate for time series data.
  • +
  • collect_pcf (bool): +Toggle for Pair Correlation Function analysis.
  • +
  • pcf_sample_rate (float): +Fraction of runs that compute PCFs.
  • +
  • pcf_max_distance (float): +Maximum radial distance for PCF.
  • +
  • pcf_n_bins (int): +Number of bins in PCF histogram.
  • +
  • min_density_for_analysis (float): +Population threshold for spatial analysis.
  • +
  • n_jobs (int): +CPU cores for parallelization (-1 = all).
  • +
+
+ + +
+ +
+ + def + get_prey_deaths(self) -> numpy.ndarray: + + + +
+ +
160    def get_prey_deaths(self) -> np.ndarray:
+161        """Generate array of prey death rates for parameter sweep."""
+162        return np.linspace(
+163            self.prey_death_range[0], self.prey_death_range[1], self.n_prey_death
+164        )
+
+ + +

Generate array of prey death rates for parameter sweep.

+
+ + +
+
+ +
+ + def + get_warmup_steps(self, L: int) -> int: + + + +
+ +
166    def get_warmup_steps(self, L: int) -> int:
+167        """Get warmup steps (can be extended for size-dependent scaling)."""
+168        return self.warmup_steps
+
+ + +

Get warmup steps (can be extended for size-dependent scaling).

+
+ + +
+
+ +
+ + def + get_measurement_steps(self, L: int) -> int: + + + +
+ +
170    def get_measurement_steps(self, L: int) -> int:
+171        """Get measurement steps (can be extended for size-dependent scaling)."""
+172        return self.measurement_steps
+
+ + +

Get measurement steps (can be extended for size-dependent scaling).

+
+ + +
+
+ +
+ + def + estimate_runtime(self, n_cores: int = 32) -> str: + + + +
+ +
174    def estimate_runtime(self, n_cores: int = 32) -> str:
+175        """
+176        Estimate wall-clock time for the experiment.
+177
+178        Parameters
+179        ----------
+180        n_cores : int
+181            Number of available CPU cores.
+182
+183        Returns
+184        -------
+185        str
+186            Human-readable runtime estimate.
+187        """
+188        ref_size = 100
+189        ref_steps_per_sec = 1182
+190
+191        size_scaling = (self.grid_size / ref_size) ** 2
+192        actual_steps_per_sec = ref_steps_per_sec / size_scaling
+193
+194        total_steps = self.warmup_steps + self.measurement_steps
+195        base_time_s = total_steps / actual_steps_per_sec
+196
+197        pcf_time_s = (0.008 * size_scaling) if self.collect_pcf else 0
+198
+199        n_sims = self.n_prey_death * self.n_replicates
+200
+201        total_seconds = n_sims * (base_time_s + pcf_time_s * self.pcf_sample_rate)
+202        total_seconds /= n_cores
+203
+204        hours = total_seconds / 3600
+205        core_hours = n_sims * (base_time_s + pcf_time_s * self.pcf_sample_rate) / 3600
+206
+207        return f"{n_sims:,} sims, ~{hours:.1f}h on {n_cores} cores (~{core_hours:.0f} core-hours)"
+
+ + +

Estimate wall-clock time for the experiment.

+ +
Parameters
+ +
    +
  • n_cores (int): +Number of available CPU cores.
  • +
+ +
Returns
+ +
    +
  • str: Human-readable runtime estimate.
  • +
+
+ + +
+
+
+ +
+ + def + get_phase_config(phase: int) -> Config: + + + +
+ +
284def get_phase_config(phase: int) -> Config:
+285    """
+286    Retrieve configuration for a specific experimental phase.
+287
+288    Parameters
+289    ----------
+290    phase : int
+291        Phase number (1-5).
+292
+293    Returns
+294    -------
+295    Config
+296        Configuration instance for the requested phase.
+297
+298    Raises
+299    ------
+300    ValueError
+301        If phase number is invalid.
+302    """
+303    if phase not in PHASE_CONFIGS:
+304        raise ValueError(
+305            f"Unknown phase {phase}. Valid phases: {list(PHASE_CONFIGS.keys())}"
+306        )
+307    return PHASE_CONFIGS[phase]
+
+ + +

Retrieve configuration for a specific experimental phase.

+ +
Parameters
+ +
    +
  • phase (int): +Phase number (1-5).
  • +
+ +
Returns
+ +
    +
  • Config: Configuration instance for the requested phase.
  • +
+ +
Raises
+ +
    +
  • ValueError: If phase number is invalid.
  • +
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/models/numba_optimized.html b/docs/models/numba_optimized.html new file mode 100644 index 0000000..59dc16d --- /dev/null +++ b/docs/models/numba_optimized.html @@ -0,0 +1,3319 @@ + + + + + + + models.numba_optimized API documentation + + + + + + + + + +
+
+

+models.numba_optimized

+ +

Numba-Optimized Kernels

+ +

This module provides Numba-accelerated kernels for the predator-prey +cellular automaton, including update kernels and spatial analysis functions.

+ +
Classes
+ +

PPKernel + Wrapper for predator-prey update kernels with pre-allocated buffers.

+ +
Cluster Analysis
+ +
+
measure_cluster_sizes_fast # Fast cluster size measurement (sizes only).
+detect_clusters_fast # Full cluster detection with labels.
+get_cluster_stats_fast # Comprehensive cluster statistics.
+
+
+ +
Pair Correlation Functions
+ +
+
compute_pcf_periodic_fast # PCF for two position sets with periodic boundaries.
compute_all_pcfs_fast # Compute prey-prey, pred-pred, and prey-pred PCFs.
+
+
+ +
Utilities
+ +
+
set_numba_seed # Seed Numba's internal RNG.
+warmup_numba_kernels # Pre-compile kernels to avoid first-run latency.
+
+
+ +
Example
+ +
+
from models.numba_optimized import (
+    PPKernel,
+    get_cluster_stats_fast,
+    compute_all_pcfs_fast,
+)
+
+# Cluster analysis
+stats = get_cluster_stats_fast(grid, species=1)
+print(f"Largest cluster: {stats['largest']}")
+
+# PCF computation
+pcfs = compute_all_pcfs_fast(grid, max_distance=20.0)
+prey_prey_dist, prey_prey_gr, _ = pcfs['prey_prey']
+
+
+
+ + + + + +
   1#!/usr/bin/env python3
+   2"""
+   3Numba-Optimized Kernels
+   4=======================
+   5
+   6This module provides Numba-accelerated kernels for the predator-prey
+   7cellular automaton, including update kernels and spatial analysis functions.
+   8
+   9Classes
+  10-------
+  11PPKernel
+  12    Wrapper for predator-prey update kernels with pre-allocated buffers.
+  13
+  14Cluster Analysis
+  15----------------
+  16```python
+  17measure_cluster_sizes_fast # Fast cluster size measurement (sizes only).
+  18detect_clusters_fast # Full cluster detection with labels.
+  19get_cluster_stats_fast # Comprehensive cluster statistics.
+  20```
+  21
+  22Pair Correlation Functions
+  23--------------------------
+  24```python
+  25compute_pcf_periodic_fast # PCF for two position sets with periodic boundaries.
+  26compute_all_pcfs_fast # Compute prey-prey, pred-pred, and prey-pred PCFs.
+  27```
+  28
+  29Utilities
+  30---------
+  31```python
+  32set_numba_seed # Seed Numba's internal RNG.
+  33warmup_numba_kernels # Pre-compile kernels to avoid first-run latency.
+  34```
+  35
+  36Example
+  37-------
+  38```python
+  39from models.numba_optimized import (
+  40    PPKernel,
+  41    get_cluster_stats_fast,
+  42    compute_all_pcfs_fast,
+  43)
+  44
+  45# Cluster analysis
+  46stats = get_cluster_stats_fast(grid, species=1)
+  47print(f"Largest cluster: {stats['largest']}")
+  48
+  49# PCF computation
+  50pcfs = compute_all_pcfs_fast(grid, max_distance=20.0)
+  51prey_prey_dist, prey_prey_gr, _ = pcfs['prey_prey']
+  52```
+  53"""
+  54
+  55import numpy as np
+  56from typing import Tuple, Dict, Optional
+  57
try:
    from numba import njit, prange

    NUMBA_AVAILABLE = True
except ImportError:
    NUMBA_AVAILABLE = False

    def njit(*args, **kwargs):
        """No-op stand-in for ``numba.njit`` when Numba is unavailable.

        Mirrors the real decorator's two call forms:

        * bare ``@njit`` — the function itself is the single positional
          argument and is returned unchanged;
        * parameterized ``@njit(cache=True)`` — returns a decorator that
          passes the function through untouched.

        The previous fallback handled only the parameterized form; a bare
        ``@njit`` would have returned the inner decorator instead of the
        wrapped function.
        """
        if len(args) == 1 and callable(args[0]) and not kwargs:
            # Bare form: @njit
            return args[0]

        def decorator(func):
            return func

        # Parameterized form: @njit(cache=True)
        return decorator

    def prange(*args):
        """Fallback for ``numba.prange``: a plain serial ``range``."""
        return range(*args)
+  74
+  75# ============================================================================
+  76# RNG SEEDING
+  77# ============================================================================
+  78
+  79
@njit(cache=True)
def set_numba_seed(seed: int) -> None:
    """
    Synchronize Numba's internal RNG state with the given seed.

    Numba-compiled code draws random numbers from its own generator state,
    separate from the interpreter's: a plain ``np.random.seed()`` call made
    in ordinary Python code leaves jit-compiled functions unaffected.
    Because this helper is itself jitted, the ``np.random.seed`` call below
    executes inside the JIT context and seeds the generator that the
    compiled kernels actually use, making their stochastic behavior
    reproducible.

    Parameters
    ----------
    seed : int
        Value used to initialize the random number generator.

    Returns
    -------
    None
    """
    np.random.seed(seed)
+ 107
+ 108# ============================================================================
+ 109# PREDATOR-PREY KERNELS
+ 110# ============================================================================
+ 111
+ 112
@njit(cache=True)
def _pp_async_kernel_random(
    grid: np.ndarray,
    prey_death_arr: np.ndarray,
    p_birth_val: float,
    p_death_val: float,
    pred_birth_val: float,
    pred_death_val: float,
    dr_arr: np.ndarray,
    dc_arr: np.ndarray,
    evolve_sd: float,
    evolve_min: float,
    evolve_max: float,
    evolution_stopped: bool,
    occupied_buffer: np.ndarray,
) -> np.ndarray:
    """
    Asynchronous predator-prey update kernel with random neighbor selection.

    This Numba-accelerated kernel performs an asynchronous update of the
    simulation grid. It identifies all occupied cells, shuffles them to
    ensure unbiased processing, and applies stochastic rules for prey
    mortality, prey reproduction (with optional parameter evolution),
    predator mortality, and predation.

    Parameters
    ----------
    grid : np.ndarray
        2D integer array representing the simulation grid (0: Empty, 1: Prey, 2: Predator).
    prey_death_arr : np.ndarray
        2D float array storing the individual prey death rates for evolution tracking.
    p_birth_val : float
        Base probability of prey reproduction into an adjacent empty cell.
    p_death_val : float
        Base probability of prey death. Unused in this kernel: mortality is
        governed by the per-cell rates in `prey_death_arr`.
    pred_birth_val : float
        Probability of a predator reproducing after consuming prey.
    pred_death_val : float
        Probability of a predator dying.
    dr_arr : np.ndarray
        Array of row offsets defining the neighborhood.
    dc_arr : np.ndarray
        Array of column offsets defining the neighborhood.
    evolve_sd : float
        Standard deviation of the mutation applied to the prey death rate during reproduction.
    evolve_min : float
        Lower bound for the evolved prey death rate.
    evolve_max : float
        Upper bound for the evolved prey death rate.
    evolution_stopped : bool
        If True, offspring inherit the parent's death rate without mutation.
    occupied_buffer : np.ndarray
        Pre-allocated 2D array used to store and shuffle coordinates of occupied cells.

    Returns
    -------
    grid : np.ndarray
        The updated simulation grid (also modified in place).

    Notes
    -----
    The kernel uses periodic boundary conditions. The Fisher-Yates shuffle on
    `occupied_buffer` ensures that the asynchronous updates do not introduce
    directional bias. Both `grid` and `prey_death_arr` are mutated in place;
    the order of RNG draws is part of the kernel's reproducible behavior.
    """
    rows, cols = grid.shape
    n_shifts = len(dr_arr)

    # Collect occupied cells. Only coordinates are stored: the grid is
    # updated in place, so each cell's state is re-read when processed.
    count = 0
    for r in range(rows):
        for c in range(cols):
            if grid[r, c] != 0:
                occupied_buffer[count, 0] = r
                occupied_buffer[count, 1] = c
                count += 1

    # Fisher-Yates shuffle over the first `count` rows of the buffer,
    # randomizing the processing order to avoid directional sweep bias.
    for i in range(count - 1, 0, -1):
        j = np.random.randint(0, i + 1)
        occupied_buffer[i, 0], occupied_buffer[j, 0] = (
            occupied_buffer[j, 0],
            occupied_buffer[i, 0],
        )
        occupied_buffer[i, 1], occupied_buffer[j, 1] = (
            occupied_buffer[j, 1],
            occupied_buffer[i, 1],
        )

    # Process each occupied cell
    for i in range(count):
        r = occupied_buffer[i, 0]
        c = occupied_buffer[i, 1]

        state = grid[r, c]
        if state == 0:
            # Cell was vacated earlier in this sweep (individual died or
            # prey was eaten): skip it.
            continue

        # One random neighbor is drawn up front and reused by whichever
        # branch (prey reproduction / predation) fires below.
        nbi = np.random.randint(0, n_shifts)
        nr = (r + dr_arr[nbi]) % rows
        nc = (c + dc_arr[nbi]) % cols

        if state == 1:  # PREY
            # Death is tested against this individual's own rate, not
            # the global p_death_val.
            if np.random.random() < prey_death_arr[r, c]:
                grid[r, c] = 0
                prey_death_arr[r, c] = np.nan  # NaN marks "no prey here"
            elif grid[nr, nc] == 0:
                if np.random.random() < p_birth_val:
                    grid[nr, nc] = 1
                    parent_val = prey_death_arr[r, c]
                    if not evolution_stopped:
                        # Offspring rate = parent rate + Gaussian mutation,
                        # clamped to [evolve_min, evolve_max].
                        child_val = parent_val + np.random.normal(0, evolve_sd)
                        if child_val < evolve_min:
                            child_val = evolve_min
                        if child_val > evolve_max:
                            child_val = evolve_max
                        prey_death_arr[nr, nc] = child_val
                    else:
                        prey_death_arr[nr, nc] = parent_val

        elif state == 2:  # PREDATOR
            if np.random.random() < pred_death_val:
                grid[r, c] = 0
            elif grid[nr, nc] == 1:
                if np.random.random() < pred_birth_val:
                    # Successful hunt: the prey cell becomes a predator.
                    grid[nr, nc] = 2
                    prey_death_arr[nr, nc] = np.nan

    return grid
+ 243
+ 244
@njit(cache=True)
def _pp_async_kernel_directed(
    grid: np.ndarray,
    prey_death_arr: np.ndarray,
    p_birth_val: float,
    p_death_val: float,
    pred_birth_val: float,
    pred_death_val: float,
    dr_arr: np.ndarray,
    dc_arr: np.ndarray,
    evolve_sd: float,
    evolve_min: float,
    evolve_max: float,
    evolution_stopped: bool,
    occupied_buffer: np.ndarray,
) -> np.ndarray:
    """
    Asynchronous predator-prey update kernel with directed behavior.

    This kernel implements "intelligent" species behavior: prey actively search
    for empty spaces to reproduce, and predators actively search for nearby
    prey to hunt. A two-pass approach is used to stochastically select a
    valid target from the neighborhood without heap allocation.

    Parameters
    ----------
    grid : np.ndarray
        2D integer array representing the simulation grid (0: Empty, 1: Prey, 2: Predator).
    prey_death_arr : np.ndarray
        2D float array storing individual prey mortality rates for evolution.
    p_birth_val : float
        Probability of prey reproduction attempt.
    p_death_val : float
        Base probability of prey mortality. Unused in this kernel: mortality
        is governed by the per-cell rates in `prey_death_arr`.
    pred_birth_val : float
        Probability of a predator reproduction attempt (hunting success).
    pred_death_val : float
        Probability of predator mortality.
    dr_arr : np.ndarray
        Row offsets defining the spatial neighborhood (e.g., Moore or von Neumann).
    dc_arr : np.ndarray
        Column offsets defining the spatial neighborhood.
    evolve_sd : float
        Standard deviation for mutations in prey death rates.
    evolve_min : float
        Minimum allowable value for evolved prey death rates.
    evolve_max : float
        Maximum allowable value for evolved prey death rates.
    evolution_stopped : bool
        If True, prevents mutation during prey reproduction.
    occupied_buffer : np.ndarray
        Pre-allocated array for storing and shuffling active cell coordinates.

    Returns
    -------
    grid : np.ndarray
        The updated simulation grid (also modified in place).

    Notes
    -----
    The directed behavior significantly changes the system dynamics compared to
    random neighbor selection, often leading to different critical thresholds
    and spatial patterning. Periodic boundary conditions are applied. Both
    `grid` and `prey_death_arr` are mutated in place.
    """
    rows, cols = grid.shape
    n_shifts = len(dr_arr)

    # Collect occupied cells. Only coordinates are stored: the grid is
    # updated in place, so each cell's state is re-read when processed.
    count = 0
    for r in range(rows):
        for c in range(cols):
            if grid[r, c] != 0:
                occupied_buffer[count, 0] = r
                occupied_buffer[count, 1] = c
                count += 1

    # Fisher-Yates shuffle over the first `count` rows of the buffer,
    # randomizing the processing order to avoid directional sweep bias.
    for i in range(count - 1, 0, -1):
        j = np.random.randint(0, i + 1)
        occupied_buffer[i, 0], occupied_buffer[j, 0] = (
            occupied_buffer[j, 0],
            occupied_buffer[i, 0],
        )
        occupied_buffer[i, 1], occupied_buffer[j, 1] = (
            occupied_buffer[j, 1],
            occupied_buffer[i, 1],
        )

    # Process each occupied cell
    for i in range(count):
        r = occupied_buffer[i, 0]
        c = occupied_buffer[i, 1]

        state = grid[r, c]
        if state == 0:
            # Cell was vacated earlier in this sweep (individual died or
            # prey was eaten): skip it.
            continue

        if state == 1:  # PREY - directed reproduction into empty cells
            # Check for death first (against this individual's own rate)
            if np.random.random() < prey_death_arr[r, c]:
                grid[r, c] = 0
                prey_death_arr[r, c] = np.nan  # NaN marks "no prey here"
                continue

            # Attempt reproduction with directed selection
            if np.random.random() < p_birth_val:
                # Pass 1: Count empty neighbors
                empty_count = 0
                for k in range(n_shifts):
                    check_r = (r + dr_arr[k]) % rows
                    check_c = (c + dc_arr[k]) % cols
                    if grid[check_r, check_c] == 0:
                        empty_count += 1

                # Pass 2: Select random empty neighbor. Re-scanning instead
                # of storing candidates keeps the kernel allocation-free.
                if empty_count > 0:
                    target_idx = np.random.randint(0, empty_count)
                    found = 0
                    nr, nc = r, c  # Initialize (will be overwritten)
                    for k in range(n_shifts):
                        check_r = (r + dr_arr[k]) % rows
                        check_c = (c + dc_arr[k]) % cols
                        if grid[check_r, check_c] == 0:
                            if found == target_idx:
                                nr, nc = check_r, check_c
                                break
                            found += 1

                    # Reproduce into selected empty cell
                    grid[nr, nc] = 1
                    parent_val = prey_death_arr[r, c]
                    if not evolution_stopped:
                        # Offspring rate = parent rate + Gaussian mutation,
                        # clamped to [evolve_min, evolve_max].
                        child_val = parent_val + np.random.normal(0, evolve_sd)
                        if child_val < evolve_min:
                            child_val = evolve_min
                        if child_val > evolve_max:
                            child_val = evolve_max
                        prey_death_arr[nr, nc] = child_val
                    else:
                        prey_death_arr[nr, nc] = parent_val

        elif state == 2:  # PREDATOR - directed hunting
            # Check for death first
            if np.random.random() < pred_death_val:
                grid[r, c] = 0
                continue

            # Attempt hunting with directed selection
            if np.random.random() < pred_birth_val:
                # Pass 1: Count prey neighbors
                prey_count = 0
                for k in range(n_shifts):
                    check_r = (r + dr_arr[k]) % rows
                    check_c = (c + dc_arr[k]) % cols
                    if grid[check_r, check_c] == 1:
                        prey_count += 1

                # Pass 2: Select random prey neighbor (same re-scan trick)
                if prey_count > 0:
                    target_idx = np.random.randint(0, prey_count)
                    found = 0
                    nr, nc = r, c  # Initialize (will be overwritten)
                    for k in range(n_shifts):
                        check_r = (r + dr_arr[k]) % rows
                        check_c = (c + dc_arr[k]) % cols
                        if grid[check_r, check_c] == 1:
                            if found == target_idx:
                                nr, nc = check_r, check_c
                                break
                            found += 1

                    # Hunt: prey cell becomes predator
                    grid[nr, nc] = 2
                    prey_death_arr[nr, nc] = np.nan

    return grid
+ 421
+ 422
class PPKernel:
    """
    Stateful front-end for the predator-prey update kernels.

    Holds the neighborhood offset tables and a reusable coordinate buffer
    so that repeated calls to :meth:`update` perform no per-step heap
    allocation inside the simulation loop.

    Parameters
    ----------
    rows : int
        Number of rows in the simulation grid.
    cols : int
        Number of columns in the simulation grid.
    neighborhood : {'moore', 'von_neumann'}, optional
        Connectivity used when selecting neighbors. 'moore' gives the
        8-cell neighborhood (diagonals included); any other value gives
        the 4-cell von Neumann neighborhood. Default is 'moore'.
    directed_hunting : bool, optional
        When True, dispatch to the directed-behavior kernel (species seek
        out targets); when False, neighbors are picked at random.
        Default is False.

    Attributes
    ----------
    rows : int
        Grid row count.
    cols : int
        Grid column count.
    directed_hunting : bool
        Toggle for intelligent behavior logic.
    """

    def __init__(
        self,
        rows: int,
        cols: int,
        neighborhood: str = "moore",
        directed_hunting: bool = False,
    ):
        self.rows = rows
        self.cols = cols
        self.directed_hunting = directed_hunting
        # Worst case is a fully occupied grid, so one (rows*cols, 2)
        # buffer is always large enough and can be reused every step.
        self._occupied_buffer = np.empty((rows * cols, 2), dtype=np.int32)

        if neighborhood == "moore":
            row_offsets = (-1, -1, -1, 0, 0, 1, 1, 1)
            col_offsets = (-1, 0, 1, -1, 1, -1, 0, 1)
        else:  # von Neumann (4-connected)
            row_offsets = (-1, 1, 0, 0)
            col_offsets = (0, 0, -1, 1)
        self._dr = np.array(row_offsets, dtype=np.int32)
        self._dc = np.array(col_offsets, dtype=np.int32)

    def update(
        self,
        grid: np.ndarray,
        prey_death_arr: np.ndarray,
        prey_birth: float,
        prey_death: float,
        pred_birth: float,
        pred_death: float,
        evolve_sd: float = 0.1,
        evolve_min: float = 0.001,
        evolve_max: float = 0.1,
        evolution_stopped: bool = True,
    ) -> np.ndarray:
        """
        Run one full asynchronous sweep over the grid.

        Parameters
        ----------
        grid : np.ndarray
            The current 2D simulation grid.
        prey_death_arr : np.ndarray
            2D array of individual prey mortality rates.
        prey_birth : float
            Prey reproduction probability.
        prey_death : float
            Base prey mortality probability.
        pred_birth : float
            Predator reproduction (hunting success) probability.
        pred_death : float
            Predator mortality probability.
        evolve_sd : float, optional
            Mutation standard deviation (default 0.1).
        evolve_min : float, optional
            Minimum evolved death rate (default 0.001).
        evolve_max : float, optional
            Maximum evolved death rate (default 0.1).
        evolution_stopped : bool, optional
            Whether to disable mutation during this step (default True).

        Returns
        -------
        np.ndarray
            The updated grid after one full asynchronous pass.
        """
        # Pick the kernel once, then make a single call with the shared
        # argument list; both kernels have identical signatures.
        kernel = (
            _pp_async_kernel_directed
            if self.directed_hunting
            else _pp_async_kernel_random
        )
        return kernel(
            grid,
            prey_death_arr,
            prey_birth,
            prey_death,
            pred_birth,
            pred_death,
            self._dr,
            self._dc,
            evolve_sd,
            evolve_min,
            evolve_max,
            evolution_stopped,
            self._occupied_buffer,
        )
+ 552
+ 553# ============================================================================
+ 554# CLUSTER DETECTION (ENHANCED)
+ 555# ============================================================================
+ 556
+ 557
@njit(cache=True)
def _flood_fill(
    grid: np.ndarray,
    visited: np.ndarray,
    start_r: int,
    start_c: int,
    target: int,
    rows: int,
    cols: int,
    moore: bool,
) -> int:
    """
    Perform a stack-based flood fill to measure the size of a connected cluster.

    This Numba-accelerated function identifies all contiguous cells of a
    specific target value starting from a given coordinate. It supports
    both Moore and von Neumann neighborhoods and implements periodic
    boundary conditions (toroidal topology).

    Parameters
    ----------
    grid : np.ndarray
        2D integer array representing the simulation environment.
    visited : np.ndarray
        2D boolean array tracked across calls to avoid re-processing cells.
        Mutated in place.
    start_r : int
        Starting row index for the flood fill. The start cell is counted
        unconditionally; the caller is expected to ensure it holds `target`.
    start_c : int
        Starting column index for the flood fill.
    target : int
        The cell value (e.g., 1 for Prey, 2 for Predator) to include in the cluster.
    rows : int
        Total number of rows in the grid.
    cols : int
        Total number of columns in the grid.
    moore : bool
        If True, use a Moore neighborhood (8 neighbors). If False, use a
        von Neumann neighborhood (4 neighbors).

    Returns
    -------
    size : int
        The total number of connected cells belonging to the cluster.

    Notes
    -----
    The function uses a manual stack implementation to avoid recursion limit
    issues and is optimized for use within JIT-compiled loops. Cells are
    marked visited when *pushed* (not when popped), so each cell enters the
    stack at most once and the stack never exceeds rows * cols entries.
    """
    # A cluster can never exceed the grid, so this stack bound is safe.
    max_stack = rows * cols
    stack_r = np.empty(max_stack, dtype=np.int32)
    stack_c = np.empty(max_stack, dtype=np.int32)
    stack_ptr = 0

    stack_r[stack_ptr] = start_r
    stack_c[stack_ptr] = start_c
    stack_ptr += 1
    visited[start_r, start_c] = True

    size = 0

    if moore:
        dr = np.array([-1, -1, -1, 0, 0, 1, 1, 1], dtype=np.int32)
        dc = np.array([-1, 0, 1, -1, 1, -1, 0, 1], dtype=np.int32)
        n_neighbors = 8
    else:
        dr = np.array([-1, 1, 0, 0], dtype=np.int32)
        dc = np.array([0, 0, -1, 1], dtype=np.int32)
        n_neighbors = 4

    while stack_ptr > 0:
        stack_ptr -= 1
        r = stack_r[stack_ptr]
        c = stack_c[stack_ptr]
        size += 1

        for k in range(n_neighbors):
            # Periodic wrap: the grid is a torus.
            nr = (r + dr[k]) % rows
            nc = (c + dc[k]) % cols

            if not visited[nr, nc] and grid[nr, nc] == target:
                visited[nr, nc] = True
                stack_r[stack_ptr] = nr
                stack_c[stack_ptr] = nc
                stack_ptr += 1

    return size
+ 645
+ 646
@njit(cache=True)
def _measure_clusters(grid: np.ndarray, species: int, moore: bool = True) -> np.ndarray:
    """
    Collect the sizes of every connected cluster of one species.

    Scans the grid row by row; each time an unvisited cell of the target
    species is found, a flood fill consumes its entire component and the
    resulting cell count is recorded.

    Parameters
    ----------
    grid : np.ndarray
        2D integer array representing the simulation environment.
    species : int
        Target species identifier (e.g., 1 for Prey, 2 for Predator).
    moore : bool, optional
        If True, use 8-way (Moore) connectivity; otherwise 4-way
        (von Neumann). Default is True.

    Returns
    -------
    np.ndarray
        1D int32 array with one entry per cluster: its cell count.

    Notes
    -----
    The shared ``seen`` mask guarantees each cell is handed to the flood
    fill at most once, keeping the scan linear in the number of cells.
    """
    n_rows, n_cols = grid.shape
    seen = np.zeros((n_rows, n_cols), dtype=np.bool_)

    # There can never be more clusters than cells, so this bound is safe.
    size_buf = np.empty(n_rows * n_cols, dtype=np.int32)
    found = 0

    for row in range(n_rows):
        for col in range(n_cols):
            if grid[row, col] == species and not seen[row, col]:
                size_buf[found] = _flood_fill(
                    grid, seen, row, col, species, n_rows, n_cols, moore
                )
                found += 1

    return size_buf[:found]
+ 694
+ 695
@njit(cache=True)
def _detect_clusters_numba(
    grid: np.ndarray,
    species: int,
    moore: bool,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Label every connected cluster of a species and record cluster sizes.

    Performs an iterative (stack-based) flood fill from each unlabeled cell
    of the target species, assigning consecutive positive labels starting
    at 1. Periodic (toroidal) boundary conditions are used.

    Parameters
    ----------
    grid : np.ndarray
        2D integer array representing the simulation environment.
    species : int
        Target species identifier whose clusters are labeled.
    moore : bool
        If True, use the Moore neighborhood (8 neighbors); otherwise the
        von Neumann neighborhood (4 neighbors).

    Returns
    -------
    labels : np.ndarray
        2D int32 array where each cell contains its cluster ID
        (0 = cell does not belong to the target species).
    sizes : np.ndarray
        1D int32 array of cluster sizes; index ``i`` holds the size of
        cluster ``i + 1``.

    Notes
    -----
    The ``labels`` array doubles as the visited mask: a nonzero label means
    the cell has already been claimed. The stack buffers are allocated once
    per call and reused across clusters.
    """
    rows, cols = grid.shape
    labels = np.zeros((rows, cols), dtype=np.int32)

    if moore:
        dr = np.array([-1, -1, -1, 0, 0, 1, 1, 1], dtype=np.int32)
        dc = np.array([-1, 0, 1, -1, 1, -1, 0, 1], dtype=np.int32)
        n_neighbors = 8
    else:
        dr = np.array([-1, 1, 0, 0], dtype=np.int32)
        dc = np.array([0, 0, -1, 1], dtype=np.int32)
        n_neighbors = 4

    # Upper bounds: at most one cluster per cell; a cluster (and hence the
    # stack) can never exceed the grid size.
    max_clusters = rows * cols
    sizes = np.empty(max_clusters, dtype=np.int32)
    n_clusters = 0
    current_label = 1

    max_stack = rows * cols
    stack_r = np.empty(max_stack, dtype=np.int32)
    stack_c = np.empty(max_stack, dtype=np.int32)

    for start_r in range(rows):
        for start_c in range(cols):
            if grid[start_r, start_c] != species or labels[start_r, start_c] != 0:
                continue

            # New cluster seed: flood-fill from here.
            stack_ptr = 0
            stack_r[stack_ptr] = start_r
            stack_c[stack_ptr] = start_c
            stack_ptr += 1
            labels[start_r, start_c] = current_label
            size = 0

            while stack_ptr > 0:
                stack_ptr -= 1
                r = stack_r[stack_ptr]
                c = stack_c[stack_ptr]
                size += 1

                for k in range(n_neighbors):
                    # Periodic wrap: the grid is a torus.
                    nr = (r + dr[k]) % rows
                    nc = (c + dc[k]) % cols

                    if grid[nr, nc] == species and labels[nr, nc] == 0:
                        # Label on push so each cell is stacked at most once.
                        labels[nr, nc] = current_label
                        stack_r[stack_ptr] = nr
                        stack_c[stack_ptr] = nc
                        stack_ptr += 1

            sizes[n_clusters] = size
            n_clusters += 1
            current_label += 1

    return labels, sizes[:n_clusters]
+ 763
+ 764
+ 765# ============================================================================
+ 766# PUBLIC API - CLUSTER DETECTION
+ 767# ============================================================================
+ 768
+ 769
def measure_cluster_sizes_fast(
    grid: np.ndarray,
    species: int,
    neighborhood: str = "moore",
) -> np.ndarray:
    """
    Return the size of every cluster of a species, without a label map.

    Thin public wrapper over the JIT-compiled ``_measure_clusters`` kernel.
    Use it when only the size distribution is needed (mean, max, histogram);
    ``detect_clusters_fast`` additionally produces per-cell labels.

    Parameters
    ----------
    grid : np.ndarray
        A 2D array representing the simulation environment.
    species : int
        The target species identifier (e.g., 1 for Prey, 2 for Predator).
    neighborhood : {'moore', 'neumann'}, optional
        The connectivity rule. 'moore' selects 8-way connectivity
        (diagonals included); any other value selects 4-way connectivity.
        Default is 'moore'.

    Returns
    -------
    np.ndarray
        A 1D array of integers, where each element is the cell count of an
        identified cluster.

    Notes
    -----
    The grid is cast to ``int32`` to match the dtype expected by the
    underlying JIT-compiled kernel.

    Examples
    --------
    >>> sizes = measure_cluster_sizes_fast(grid, species=1, neighborhood='moore')
    >>> if sizes.size > 0:
    ...     print(f"Largest cluster: {sizes.max()}")
    """
    use_moore = neighborhood == "moore"
    grid_i32 = np.asarray(grid, dtype=np.int32)
    return _measure_clusters(grid_i32, np.int32(species), use_moore)
+ 813
+ 814
def detect_clusters_fast(
    grid: np.ndarray,
    species: int,
    neighborhood: str = "moore",
) -> Tuple[np.ndarray, Dict[int, int]]:
    """
    Label all clusters of a species and return their sizes.

    Public wrapper over the JIT-compiled ``_detect_clusters_numba`` kernel,
    producing a per-cell label array for spatial analysis plus a mapping
    from cluster ID to cell count. Much faster than pure-Python equivalents
    on large simulation grids.

    Parameters
    ----------
    grid : np.ndarray
        A 2D array representing the simulation environment.
    species : int
        The target species identifier (e.g., 1 for Prey, 2 for Predator).
    neighborhood : {'moore', 'neumann'}, optional
        The connectivity rule. 'moore' selects 8-way connectivity; any
        other value selects 4-way connectivity. Default is 'moore'.

    Returns
    -------
    labels : np.ndarray
        A 2D int32 array where each cell contains its unique cluster ID.
        Cells not belonging to the target species are 0.
    sizes : dict
        A dictionary mapping cluster IDs (starting at 1) to cell counts.

    Notes
    -----
    The underlying kernel uses a stack-based flood fill (no recursion) and
    periodic boundary conditions.

    Examples
    --------
    >>> labels, sizes = detect_clusters_fast(grid, species=1)
    >>> if sizes:
    ...     largest_id = max(sizes, key=sizes.get)
    ...     print(f"Cluster {largest_id} size: {sizes[largest_id]}")
    """
    use_moore = neighborhood == "moore"
    grid_i32 = np.asarray(grid, dtype=np.int32)
    labels, size_arr = _detect_clusters_numba(grid_i32, np.int32(species), use_moore)
    # Cluster IDs are consecutive starting at 1, matching the label array.
    sizes = {label: int(n) for label, n in enumerate(size_arr, start=1)}
    return labels, sizes
+ 862
+ 863
def get_cluster_stats_fast(
    grid: np.ndarray,
    species: int,
    neighborhood: str = "moore",
) -> Dict:
    """
    Compute comprehensive cluster statistics for a species (Numba-accelerated).

    Combines cluster detection and labeling into one summary: the cluster
    size distribution plus the largest-cluster fraction, which often acts
    as an order parameter in percolation-style analyses.

    Parameters
    ----------
    grid : np.ndarray
        2D array describing the simulation environment.
    species : int
        Identifier of the species to analyse (e.g., 1 for Prey, 2 for Predator).
    neighborhood : {'moore', 'neumann'}, optional
        Connectivity rule: 'moore' is 8-way, 'neumann' is 4-way.
        Default is 'moore'.

    Returns
    -------
    stats : dict
        Keys:
        - 'n_clusters': total number of isolated clusters.
        - 'sizes': cluster sizes sorted in descending order.
        - 'largest': size of the biggest cluster.
        - 'largest_fraction': largest size divided by the species' total population.
        - 'mean_size': average cluster size.
        - 'size_distribution': frequency mapping {size: count}.
        - 'labels': 2D array of cluster IDs.
        - 'size_dict': mapping {label_id: size}.

    Examples
    --------
    >>> stats = get_cluster_stats_fast(grid, species=1)
    >>> print(f"Found {stats['n_clusters']} prey clusters.")
    >>> print(f"Order parameter: {stats['largest_fraction']:.3f}")
    """
    labels, size_dict = detect_clusters_fast(grid, species, neighborhood)

    # Start from the empty-grid template; overwrite fields when clusters exist.
    stats = {
        "n_clusters": len(size_dict),
        "sizes": np.array([], dtype=np.int32),
        "largest": 0,
        "largest_fraction": 0.0,
        "mean_size": 0.0,
        "size_distribution": {},
        "labels": labels,
        "size_dict": size_dict,
    }
    if not size_dict:
        return stats

    sizes = np.fromiter(size_dict.values(), dtype=np.int32, count=len(size_dict))
    descending = np.sort(sizes)[::-1]
    total_population = int(sizes.sum())
    biggest = int(descending[0])

    histogram: Dict[int, int] = {}
    for value in sizes.tolist():
        histogram[value] = histogram.get(value, 0) + 1

    stats["sizes"] = descending
    stats["largest"] = biggest
    stats["largest_fraction"] = (
        float(biggest) / total_population if total_population > 0 else 0.0
    )
    stats["mean_size"] = float(sizes.mean())
    stats["size_distribution"] = histogram
    return stats
+ 941
+ 942
+ 943# ============================================================================
+ 944# PCF COMPUTATION (Cell-list accelerated)
+ 945# ============================================================================
+ 946
+ 947
@njit(cache=True)
def _build_cell_list(
    positions: np.ndarray,
    n_cells: int,
    L_row: float,
    L_col: float,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, float, float]:
    """
    Partition points into an n_cells x n_cells spatial hash (counting sort).

    Three passes: (1) tally per-cell occupancy, (2) convert tallies into
    starting offsets in a flat index array, (3) scatter point indices so
    that members of the same cell are contiguous. Avoids heap allocation
    inside the simulation loop.

    Parameters
    ----------
    positions : np.ndarray
        (N, 2) float coordinates within the simulation domain.
    n_cells : int
        Number of cells along one side of the square hash grid.
    L_row : float
        Domain extent along rows (height).
    L_col : float
        Domain extent along columns (width).

    Returns
    -------
    indices : np.ndarray
        Point indices reordered so that each cell's members are contiguous.
    offsets : np.ndarray
        offsets[r, c] is the first slot in `indices` for cell (r, c).
    cell_counts : np.ndarray
        cell_counts[r, c] is the number of points in cell (r, c).
    cell_size_r : float
        Height of one cell.
    cell_size_c : float
        Width of one cell.

    Notes
    -----
    Coordinates are mapped to cells with a modulo, so out-of-domain points
    wrap periodically.
    """
    n_points = len(positions)
    cell_size_r = L_row / n_cells
    cell_size_c = L_col / n_cells

    # Pass 1: occupancy per cell.
    cell_counts = np.zeros((n_cells, n_cells), dtype=np.int32)
    for p in range(n_points):
        row_bin = int(positions[p, 0] / cell_size_r) % n_cells
        col_bin = int(positions[p, 1] / cell_size_c) % n_cells
        cell_counts[row_bin, col_bin] += 1

    # Pass 2: exclusive prefix sum over cells in row-major order.
    offsets = np.zeros((n_cells, n_cells), dtype=np.int32)
    cursor = 0
    for row_bin in range(n_cells):
        for col_bin in range(n_cells):
            offsets[row_bin, col_bin] = cursor
            cursor += cell_counts[row_bin, col_bin]

    # Pass 3: scatter each point index into its cell's slice.
    indices = np.empty(n_points, dtype=np.int32)
    write_pos = np.zeros((n_cells, n_cells), dtype=np.int32)
    for p in range(n_points):
        row_bin = int(positions[p, 0] / cell_size_r) % n_cells
        col_bin = int(positions[p, 1] / cell_size_c) % n_cells
        slot = offsets[row_bin, col_bin] + write_pos[row_bin, col_bin]
        indices[slot] = p
        write_pos[row_bin, col_bin] += 1

    return indices, offsets, cell_counts, cell_size_r, cell_size_c
+1024
+1025
@njit(cache=True)
def _periodic_dist_sq(
    r1: float,
    c1: float,
    r2: float,
    c2: float,
    L_row: float,
    L_col: float,
) -> float:
    """
    Squared separation of two points on a torus (minimum image convention).

    Returns the squared Euclidean distance along the shortest path across
    periodic boundaries; skipping the square root keeps this cheap enough
    for high-frequency spatial queries.

    Parameters
    ----------
    r1 : float
        Row coordinate of the first point.
    c1 : float
        Column coordinate of the first point.
    r2 : float
        Row coordinate of the second point.
    c2 : float
        Column coordinate of the second point.
    L_row : float
        Total height (row extent) of the periodic domain.
    L_col : float
        Total width (column extent) of the periodic domain.

    Returns
    -------
    dist_sq : float
        Squared shortest distance between the points.

    Notes
    -----
    Under the minimum image convention the per-axis separation never
    exceeds half the domain length.
    """
    # min(d, L - d) selects the in-domain or wrapped separation, whichever
    # is shorter — equivalent to the classic "if d > L/2: d = L - d" form.
    dr = abs(r1 - r2)
    dc = abs(c1 - c2)
    dr = min(dr, L_row - dr)
    dc = min(dc, L_col - dc)
    return dr * dr + dc * dc
+1075
+1076
@njit(parallel=True, cache=True)
def _pcf_cell_list(
    pos_i: np.ndarray,
    pos_j: np.ndarray,
    indices_j: np.ndarray,
    offsets_j: np.ndarray,
    counts_j: np.ndarray,
    cell_size_r: float,
    cell_size_c: float,
    L_row: float,
    L_col: float,
    max_distance: float,
    n_bins: int,
    self_correlation: bool,
    n_cells: int,
) -> np.ndarray:
    """
    Compute a Pair Correlation Function (PCF) histogram using spatial cell lists.

    This Numba-accelerated parallel kernel calculates distances between two sets
    of points (pos_i and pos_j). A cell list (spatial hashing) restricts
    distance calculations to neighboring cells within `max_distance`,
    improving the complexity from O(N^2) toward O(N).

    Parameters
    ----------
    pos_i : np.ndarray
        (N, 2) float coordinates of the primary species.
    pos_j : np.ndarray
        (M, 2) float coordinates of the secondary species.
    indices_j : np.ndarray
        Flattened indices of pos_j sorted by cell, from `_build_cell_list`.
    offsets_j : np.ndarray
        2D array of starting offsets for each cell in `indices_j`.
    counts_j : np.ndarray
        2D array of point counts per cell for species J.
    cell_size_r : float
        Height of a single spatial cell.
    cell_size_c : float
        Width of a single spatial cell.
    L_row : float
        Total height of the periodic domain.
    L_col : float
        Total width of the periodic domain.
    max_distance : float
        Maximum radial distance to consider.
    n_bins : int
        Number of bins in the distance histogram.
    self_correlation : bool
        If True, species I and J are the same set; each unordered pair is
        counted once (j <= i skipped) and the histogram doubled afterwards.
    n_cells : int
        Cells per dimension in the spatial hash grid.

    Returns
    -------
    hist : np.ndarray
        Length-`n_bins` array of pair counts per radial bin.

    Notes
    -----
    Uses `prange` with per-thread local histograms to avoid races.
    The neighbor scan is clamped to at most `n_cells` cells per axis:
    with periodic wrapping, a scan window wider than the grid would visit
    some cells twice and double-count their pairs (this occurs in the
    default configuration where n_cells == 4 and the window is 5 cells).
    """
    n_i = len(pos_i)
    bin_width = max_distance / n_bins
    max_dist_sq = max_distance * max_distance
    cells_to_check = int(np.ceil(max_distance / min(cell_size_r, cell_size_c))) + 1

    # BUGFIX: clamp the scan span so each hash cell is visited at most once.
    # Without the clamp, (cell + d) % n_cells revisits cells whenever
    # 2*cells_to_check + 1 > n_cells, inflating the histogram.
    scan = 2 * cells_to_check + 1
    if scan > n_cells:
        scan = n_cells

    hist = np.zeros(n_bins, dtype=np.int64)

    for i in prange(n_i):
        local_hist = np.zeros(n_bins, dtype=np.int64)
        r1, c1 = pos_i[i, 0], pos_i[i, 1]

        cell_r = int(r1 / cell_size_r) % n_cells
        cell_c = int(c1 / cell_size_c) % n_cells

        for a in range(scan):
            ncr = (cell_r - cells_to_check + a) % n_cells
            for b in range(scan):
                ncc = (cell_c - cells_to_check + b) % n_cells

                start = offsets_j[ncr, ncc]
                end = start + counts_j[ncr, ncc]

                for idx in range(start, end):
                    j = indices_j[idx]

                    # Count each unordered pair once; doubled after the loop.
                    if self_correlation and j <= i:
                        continue

                    r2, c2 = pos_j[j, 0], pos_j[j, 1]
                    d_sq = _periodic_dist_sq(r1, c1, r2, c2, L_row, L_col)

                    if 0 < d_sq < max_dist_sq:
                        d = np.sqrt(d_sq)
                        bin_idx = int(d / bin_width)
                        if bin_idx >= n_bins:
                            bin_idx = n_bins - 1
                        local_hist[bin_idx] += 1

        for b in range(n_bins):
            hist[b] += local_hist[b]

    if self_correlation:
        for b in range(n_bins):
            hist[b] *= 2

    return hist
+1190
+1191
def compute_pcf_periodic_fast(
    positions_i: np.ndarray,
    positions_j: np.ndarray,
    grid_shape: Tuple[int, int],
    max_distance: float,
    n_bins: int = 50,
    self_correlation: bool = False,
) -> Tuple[np.ndarray, np.ndarray, int]:
    """
    Compute the Pair Correlation Function g(r) with cell-list acceleration.

    Builds a spatial hash, gathers the raw pair-distance histogram, and
    normalizes it against the expected pair count of an ideal gas of the
    same density over each toroidal annulus.

    Parameters
    ----------
    positions_i : np.ndarray
        (N, 2) coordinates of species I.
    positions_j : np.ndarray
        (M, 2) coordinates of species J.
    grid_shape : tuple of int
        (rows, cols) dimensions of the simulation grid.
    max_distance : float
        Maximum radius for the correlation.
    n_bins : int, optional
        Number of radial bins (default 50).
    self_correlation : bool, optional
        True when correlating a species with itself, to avoid
        self-counting (default False).

    Returns
    -------
    bin_centers : np.ndarray
        Central radial distance of each bin.
    pcf : np.ndarray
        Normalized g(r): 1.0 means no correlation, > 1.0 clustering,
        < 1.0 repulsion.
    total_pairs : int
        Total pairs found within `max_distance`.

    Notes
    -----
    The hash-grid resolution is derived from `max_distance` and the grid
    size so that neighbor scans stay local and the cost remains linear.
    """
    rows, cols = grid_shape
    L_row = float(rows)
    L_col = float(cols)
    area = L_row * L_col

    bin_width = max_distance / n_bins
    bin_centers = np.linspace(bin_width / 2, max_distance - bin_width / 2, n_bins)

    n_i = len(positions_i)
    n_j = len(positions_j)
    if n_i == 0 or n_j == 0:
        # No points of one species: report the uncorrelated baseline.
        return bin_centers, np.ones(n_bins), 0

    # At least 4 cells per side; otherwise size cells near max_distance.
    n_cells = max(4, int(min(rows, cols) / max_distance))

    pos_i = np.ascontiguousarray(positions_i, dtype=np.float64)
    pos_j = np.ascontiguousarray(positions_j, dtype=np.float64)

    indices_j, offsets_j, counts_j, cell_size_r, cell_size_c = _build_cell_list(
        pos_j, n_cells, L_row, L_col
    )

    hist = _pcf_cell_list(
        pos_i,
        pos_j,
        indices_j,
        offsets_j,
        counts_j,
        cell_size_r,
        cell_size_c,
        L_row,
        L_col,
        max_distance,
        n_bins,
        self_correlation,
        n_cells,
    )

    # Ideal-gas pair density: ordered pairs per unit area squared.
    if self_correlation:
        density_product = n_i * (n_i - 1) / (area * area)
    else:
        density_product = n_i * n_j / (area * area)

    # Expected ordered pairs in each annulus of width bin_width.
    annulus_areas = 2 * np.pi * bin_centers * bin_width
    expected = density_product * annulus_areas * area

    # Bins with < 1 expected pair are too noisy to normalize; leave at 1.0.
    pcf = np.ones(n_bins)
    reliable = expected > 1.0
    pcf[reliable] = hist[reliable] / expected[reliable]

    return bin_centers, pcf, int(np.sum(hist))
+1292
+1293
def compute_all_pcfs_fast(
    grid: np.ndarray,
    max_distance: Optional[float] = None,
    n_bins: int = 50,
) -> Dict[str, Tuple[np.ndarray, np.ndarray, int]]:
    """
    Compute the three species Pair Correlation Functions via cell lists.

    Evaluates the two auto-correlations (prey-prey, predator-predator) and
    the prey-predator cross-correlation on a single grid, using the
    Numba-accelerated cell-list PCF under the hood.

    Parameters
    ----------
    grid : np.ndarray
        2D integer array where 1 marks prey and 2 marks predators.
    max_distance : float, optional
        Maximum correlation radius; defaults to a quarter of the smaller
        grid dimension.
    n_bins : int, optional
        Number of radial bins (default 50).

    Returns
    -------
    results : dict
        Keys 'prey_prey', 'pred_pred', 'prey_pred'; each value is
        (bin_centers, pcf_values, pair_count).

    Notes
    -----
    g(r) > 1 at short range indicates aggregation (clustering);
    g(r) < 1 indicates exclusion or repulsion.
    """
    rows, cols = grid.shape
    if max_distance is None:
        max_distance = min(rows, cols) / 4.0

    prey_pos = np.argwhere(grid == 1)
    pred_pos = np.argwhere(grid == 2)

    # (result key, species I positions, species J positions, auto-correlation?)
    pairings = (
        ("prey_prey", prey_pos, prey_pos, True),
        ("pred_pred", pred_pos, pred_pos, True),
        ("prey_pred", prey_pos, pred_pos, False),
    )

    results = {}
    for key, first, second, is_auto in pairings:
        results[key] = compute_pcf_periodic_fast(
            first,
            second,
            (rows, cols),
            max_distance,
            n_bins,
            self_correlation=is_auto,
        )

    return results
+1372
+1373
+1374# ============================================================================
+1375# WARMUP & BENCHMARKS
+1376# ============================================================================
+1377
+1378
def warmup_numba_kernels(grid_size: int = 100, directed_hunting: bool = False):
    """
    Pre-compile all Numba-accelerated kernels to avoid first-run latency.

    Runs one update step and one pass of each analysis routine on a dummy
    grid so that Numba's JIT compilation cost is paid up front, keeping
    subsequent experimental runs accurately timed and at full speed.

    Parameters
    ----------
    grid_size : int, optional
        Side length of the dummy warmup grid (default 100).
    directed_hunting : bool, optional
        If True, also compiles the directed-behavior kernel (default False).

    Returns
    -------
    None

    Notes
    -----
    No-op when `NUMBA_AVAILABLE` is False. Warms `PPKernel` (random mode,
    plus directed mode on request) and the spatial analysis functions
    (`compute_all_pcfs_fast`, `detect_clusters_fast`, and friends).
    """
    if not NUMBA_AVAILABLE:
        return

    set_numba_seed(0)

    # Sparse dummy population: prey every 3rd cell, predators every 5th.
    grid = np.zeros((grid_size, grid_size), dtype=np.int32)
    grid[::3, ::3] = 1
    grid[::5, ::5] = 2

    prey_death_arr = np.full((grid_size, grid_size), 0.05, dtype=np.float64)
    prey_death_arr[grid != 1] = np.nan

    # Compile the random kernel always; the directed kernel on request.
    modes = (False, True) if directed_hunting else (False,)
    for directed in modes:
        kernel = PPKernel(grid_size, grid_size, directed_hunting=directed)
        kernel.update(grid.copy(), prey_death_arr.copy(), 0.2, 0.05, 0.2, 0.1)

    # Compile the analysis kernels.
    compute_all_pcfs_fast(grid, max_distance=20.0, n_bins=20)
    measure_cluster_sizes_fast(grid, 1)
    detect_clusters_fast(grid, 1)
    get_cluster_stats_fast(grid, 1)
+1432
+1433
def benchmark_kernels(grid_size: int = 100, n_runs: int = 20):
    """
    Benchmark the random vs. directed update kernels.

    Measures average wall-clock time per simulation step for the stochastic
    (random neighbor) and heuristic (directed hunting/reproduction) update
    strategies, quantifying the overhead of the directed search logic.

    Parameters
    ----------
    grid_size : int, optional
        Side length of the square simulation grid (default 100).
    n_runs : int, optional
        Number of timed iterations to average over (default 20).

    Returns
    -------
    t_random : float
        Average ms/step for the random kernel.
    t_directed : float
        Average ms/step for the directed kernel.

    Notes
    -----
    Fairness measures: a fixed seed for a reproducible initial grid, a
    Numba warmup before timing so JIT compilation is excluded, and fresh
    grid/death-array copies per iteration so population densities stay
    constant throughout the benchmark.
    """
    import time

    print("=" * 60)
    print(f"KERNEL BENCHMARK ({grid_size}x{grid_size}, {n_runs} runs)")
    print(f"Numba available: {NUMBA_AVAILABLE}")
    print("=" * 60)

    # Reproducible initial state: 30% prey, 15% predators, shuffled placement.
    np.random.seed(42)
    grid = np.zeros((grid_size, grid_size), dtype=np.int32)
    n_prey = int(grid_size * grid_size * 0.30)
    n_pred = int(grid_size * grid_size * 0.15)
    shuffled = np.random.permutation(grid_size * grid_size)
    for flat in shuffled[:n_prey]:
        grid[flat // grid_size, flat % grid_size] = 1
    for flat in shuffled[n_prey : n_prey + n_pred]:
        grid[flat // grid_size, flat % grid_size] = 2

    prey_death_arr = np.full((grid_size, grid_size), 0.05, dtype=np.float64)
    prey_death_arr[grid != 1] = np.nan

    print(f"Initial: {np.sum(grid == 1)} prey, {np.sum(grid == 2)} predators")

    # Compile both kernels up front so timings exclude JIT overhead.
    warmup_numba_kernels(grid_size, directed_hunting=True)

    def time_kernel(directed: bool) -> float:
        # Average ms/step; fresh copies keep the population identical per run.
        kernel = PPKernel(grid_size, grid_size, directed_hunting=directed)
        start = time.perf_counter()
        for _ in range(n_runs):
            kernel.update(grid.copy(), prey_death_arr.copy(), 0.2, 0.05, 0.2, 0.1)
        return (time.perf_counter() - start) / n_runs * 1000

    t_random = time_kernel(False)
    t_directed = time_kernel(True)

    print(f"\nRandom kernel:   {t_random:.2f} ms/step")
    print(f"Directed kernel: {t_directed:.2f} ms/step")
    print(
        f"Overhead:        {t_directed - t_random:.2f} ms (+{100*(t_directed/t_random - 1):.1f}%)"
    )

    return t_random, t_directed
+1515
+1516
def benchmark_cluster_detection(grid_size: int = 100, n_runs: int = 20):
    """
    Benchmark the cluster detection and analysis routines.

    Times three levels of spatial analysis: size measurement only (fastest,
    no label map), full detection (label map plus size dictionary), and
    comprehensive statistics (distributions, means, order parameters).

    Parameters
    ----------
    grid_size : int, optional
        Side length of the square benchmark grid (default 100).
    n_runs : int, optional
        Number of timed iterations to average over (default 20).

    Returns
    -------
    stats : dict
        Result dictionary from the final comprehensive-statistics run.

    Notes
    -----
    Uses a fixed 30% prey density for a representative cluster
    distribution, and pre-warms the Numba kernels so measurements reflect
    execution speed rather than compilation time.
    """
    import time

    print("=" * 60)
    print(f"CLUSTER DETECTION BENCHMARK ({grid_size}x{grid_size})")
    print(f"Numba available: {NUMBA_AVAILABLE}")
    print("=" * 60)

    # Reproducible grid: 30% prey scattered uniformly.
    np.random.seed(42)
    grid = np.zeros((grid_size, grid_size), dtype=np.int32)
    n_prey = int(grid_size * grid_size * 0.30)
    for flat in np.random.permutation(grid_size * grid_size)[:n_prey]:
        grid[flat // grid_size, flat % grid_size] = 1

    print(f"Prey cells: {np.sum(grid == 1)}")

    # Compile all three routines before timing.
    measure_cluster_sizes_fast(grid, 1)
    detect_clusters_fast(grid, 1)
    get_cluster_stats_fast(grid, 1)

    def timed(fn):
        # Returns (average ms per call, result of the last call).
        start = time.perf_counter()
        for _ in range(n_runs):
            result = fn()
        return (time.perf_counter() - start) / n_runs * 1000, result

    t_sizes, sizes = timed(lambda: measure_cluster_sizes_fast(grid, 1))
    print(f"\nmeasure_cluster_sizes_fast: {t_sizes:.2f} ms  ({len(sizes)} clusters)")

    t_detect, (labels, size_dict) = timed(lambda: detect_clusters_fast(grid, 1))
    print(f"detect_clusters_fast:       {t_detect:.2f} ms  ({len(size_dict)} clusters)")

    t_stats, stats = timed(lambda: get_cluster_stats_fast(grid, 1))
    print(f"get_cluster_stats_fast:     {t_stats:.2f} ms")

    print(
        f"\nOverhead for labels: {t_detect - t_sizes:.2f} ms (+{100*(t_detect/t_sizes - 1):.0f}%)"
    )

    return stats
+1591
+1592
if __name__ == "__main__":
    # Standalone entry point: runs both benchmark suites on a 100x100 grid
    # and prints a short summary of the final cluster statistics.
    print("\n" + "=" * 60)
    print("NUMBA-OPTIMIZED PP MODULE - BENCHMARKS")
    print("=" * 60 + "\n")

    # Run kernel benchmarks
    benchmark_kernels(100)

    print("\n")

    # Run cluster benchmarks
    stats = benchmark_cluster_detection(100)
    print(
        f"\nSample stats: largest={stats['largest']}, "
        f"largest_fraction={stats['largest_fraction']:.3f}, "
        f"n_clusters={stats['n_clusters']}"
    )
+
+ + +
+
+ +
+
@njit(cache=True)
+ + def + set_numba_seed(seed: int) -> None: + + + +
+ +
@njit(cache=True)
def set_numba_seed(seed: int) -> None:
    """
    Seed Numba's internal random number generator from within a JIT context.

    This function ensures that Numba's independent random number generator
    is synchronized with the provided seed, enabling reproducibility for
    jit-compiled functions that use NumPy's random operations.

    Parameters
    ----------
    seed : int
        The integer value used to initialize the random number generator.

    Returns
    -------
    None

    Notes
    -----
    Because Numba maintains its own internal state for random number
    generation, calling `np.random.seed()` in standard Python code will not
    affect jit-compiled functions. This helper must be called to bridge
    that gap.
    """
    # Runs in nopython mode, so this seeds Numba's internal RNG state
    # rather than NumPy's global generator (see Notes above).
    np.random.seed(seed)
+
+ + +

Seed Numba's internal random number generator from within a JIT context.

+ +

This function ensures that Numba's independent random number generator +is synchronized with the provided seed, enabling reproducibility for +jit-compiled functions that use NumPy's random operations.

+ +
Parameters
+ +
    +
  • seed (int): +The integer value used to initialize the random number generator.
  • +
+ +
Returns
+ +
    +
  • None
  • +
+ +
Notes
+ +

Because Numba maintains its own internal state for random number +generation, calling np.random.seed() in standard Python code will not +affect jit-compiled functions. This helper must be called to bridge +that gap.

+
+ + +
+
+ +
+ + class + PPKernel: + + + +
+ +
424class PPKernel:
+425    """
+426    Wrapper for predator-prey kernel with pre-allocated buffers.
+427
+428    This class manages the spatial configuration and memory buffers required
+429    for the Numba-accelerated update kernels. By pre-allocating the
+430    `occupied_buffer`, it avoids expensive memory allocations during the
+431    simulation loop.
+432
+433    Parameters
+434    ----------
+435    rows : int
+436        Number of rows in the simulation grid.
+437    cols : int
+438        Number of columns in the simulation grid.
+439    neighborhood : {'moore', 'von_neumann'}, optional
+440        The neighborhood type determining adjacent cells. 'moore' includes
+441        diagonals (8 neighbors), 'von_neumann' does not (4 neighbors).
+442        Default is 'moore'.
+443    directed_hunting : bool, optional
+444        If True, uses the directed behavior kernel where species search for
+445        targets. If False, uses random neighbor selection. Default is False.
+446
+447    Attributes
+448    ----------
+449    rows : int
+450        Grid row count.
+451    cols : int
+452        Grid column count.
+453    directed_hunting : bool
+454        Toggle for intelligent behavior logic.
+455    """
+456
+457    def __init__(
+458        self,
+459        rows: int,
+460        cols: int,
+461        neighborhood: str = "moore",
+462        directed_hunting: bool = False,
+463    ):
+464        self.rows = rows
+465        self.cols = cols
+466        self.directed_hunting = directed_hunting
+467        self._occupied_buffer = np.empty((rows * cols, 2), dtype=np.int32)
+468
+469        if neighborhood == "moore":
+470            self._dr = np.array([-1, -1, -1, 0, 0, 1, 1, 1], dtype=np.int32)
+471            self._dc = np.array([-1, 0, 1, -1, 1, -1, 0, 1], dtype=np.int32)
+472        else:  # von Neumann
+473            self._dr = np.array([-1, 1, 0, 0], dtype=np.int32)
+474            self._dc = np.array([0, 0, -1, 1], dtype=np.int32)
+475
+476    def update(
+477        self,
+478        grid: np.ndarray,
+479        prey_death_arr: np.ndarray,
+480        prey_birth: float,
+481        prey_death: float,
+482        pred_birth: float,
+483        pred_death: float,
+484        evolve_sd: float = 0.1,
+485        evolve_min: float = 0.001,
+486        evolve_max: float = 0.1,
+487        evolution_stopped: bool = True,
+488    ) -> np.ndarray:
+489        """
+490        Execute a single asynchronous update step using the configured kernel.
+491
+492        Parameters
+493        ----------
+494        grid : np.ndarray
+495            The current 2D simulation grid.
+496        prey_death_arr : np.ndarray
+497            2D array of individual prey mortality rates.
+498        prey_birth : float
+499            Prey reproduction probability.
+500        prey_death : float
+501            Base prey mortality probability.
+502        pred_birth : float
+503            Predator reproduction (hunting success) probability.
+504        pred_death : float
+505            Predator mortality probability.
+506        evolve_sd : float, optional
+507            Mutation standard deviation (default 0.1).
+508        evolve_min : float, optional
+509            Minimum evolved death rate (default 0.001).
+510        evolve_max : float, optional
+511            Maximum evolved death rate (default 0.1).
+512        evolution_stopped : bool, optional
+513            Whether to disable mutation during this step (default True).
+514
+515        Returns
+516        -------
+517        np.ndarray
+518            The updated grid after one full asynchronous pass.
+519        """
+520        if self.directed_hunting:
+521            return _pp_async_kernel_directed(
+522                grid,
+523                prey_death_arr,
+524                prey_birth,
+525                prey_death,
+526                pred_birth,
+527                pred_death,
+528                self._dr,
+529                self._dc,
+530                evolve_sd,
+531                evolve_min,
+532                evolve_max,
+533                evolution_stopped,
+534                self._occupied_buffer,
+535            )
+536        else:
+537            return _pp_async_kernel_random(
+538                grid,
+539                prey_death_arr,
+540                prey_birth,
+541                prey_death,
+542                pred_birth,
+543                pred_death,
+544                self._dr,
+545                self._dc,
+546                evolve_sd,
+547                evolve_min,
+548                evolve_max,
+549                evolution_stopped,
+550                self._occupied_buffer,
+551            )
+
+ + +

Wrapper for predator-prey kernel with pre-allocated buffers.

+ +

This class manages the spatial configuration and memory buffers required +for the Numba-accelerated update kernels. By pre-allocating the +occupied_buffer, it avoids expensive memory allocations during the +simulation loop.

+ +
Parameters
+ +
    +
  • rows (int): +Number of rows in the simulation grid.
  • +
  • cols (int): +Number of columns in the simulation grid.
  • +
  • neighborhood ({'moore', 'von_neumann'}, optional): +The neighborhood type determining adjacent cells. 'moore' includes +diagonals (8 neighbors), 'von_neumann' does not (4 neighbors). +Default is 'moore'.
  • +
  • directed_hunting (bool, optional): +If True, uses the directed behavior kernel where species search for +targets. If False, uses random neighbor selection. Default is False.
  • +
+ +
Attributes
+ +
    +
  • rows (int): +Grid row count.
  • +
  • cols (int): +Grid column count.
  • +
  • directed_hunting (bool): +Toggle for intelligent behavior logic.
  • +
+
+ + +
+ +
+ + def + update( self, grid: numpy.ndarray, prey_death_arr: numpy.ndarray, prey_birth: float, prey_death: float, pred_birth: float, pred_death: float, evolve_sd: float = 0.1, evolve_min: float = 0.001, evolve_max: float = 0.1, evolution_stopped: bool = True) -> numpy.ndarray: + + + +
+ +
476    def update(
+477        self,
+478        grid: np.ndarray,
+479        prey_death_arr: np.ndarray,
+480        prey_birth: float,
+481        prey_death: float,
+482        pred_birth: float,
+483        pred_death: float,
+484        evolve_sd: float = 0.1,
+485        evolve_min: float = 0.001,
+486        evolve_max: float = 0.1,
+487        evolution_stopped: bool = True,
+488    ) -> np.ndarray:
+489        """
+490        Execute a single asynchronous update step using the configured kernel.
+491
+492        Parameters
+493        ----------
+494        grid : np.ndarray
+495            The current 2D simulation grid.
+496        prey_death_arr : np.ndarray
+497            2D array of individual prey mortality rates.
+498        prey_birth : float
+499            Prey reproduction probability.
+500        prey_death : float
+501            Base prey mortality probability.
+502        pred_birth : float
+503            Predator reproduction (hunting success) probability.
+504        pred_death : float
+505            Predator mortality probability.
+506        evolve_sd : float, optional
+507            Mutation standard deviation (default 0.1).
+508        evolve_min : float, optional
+509            Minimum evolved death rate (default 0.001).
+510        evolve_max : float, optional
+511            Maximum evolved death rate (default 0.1).
+512        evolution_stopped : bool, optional
+513            Whether to disable mutation during this step (default True).
+514
+515        Returns
+516        -------
+517        np.ndarray
+518            The updated grid after one full asynchronous pass.
+519        """
+520        if self.directed_hunting:
+521            return _pp_async_kernel_directed(
+522                grid,
+523                prey_death_arr,
+524                prey_birth,
+525                prey_death,
+526                pred_birth,
+527                pred_death,
+528                self._dr,
+529                self._dc,
+530                evolve_sd,
+531                evolve_min,
+532                evolve_max,
+533                evolution_stopped,
+534                self._occupied_buffer,
+535            )
+536        else:
+537            return _pp_async_kernel_random(
+538                grid,
+539                prey_death_arr,
+540                prey_birth,
+541                prey_death,
+542                pred_birth,
+543                pred_death,
+544                self._dr,
+545                self._dc,
+546                evolve_sd,
+547                evolve_min,
+548                evolve_max,
+549                evolution_stopped,
+550                self._occupied_buffer,
+551            )
+
+ + +

Execute a single asynchronous update step using the configured kernel.

+ +
Parameters
+ +
    +
  • grid (np.ndarray): +The current 2D simulation grid.
  • +
  • prey_death_arr (np.ndarray): +2D array of individual prey mortality rates.
  • +
  • prey_birth (float): +Prey reproduction probability.
  • +
  • prey_death (float): +Base prey mortality probability.
  • +
  • pred_birth (float): +Predator reproduction (hunting success) probability.
  • +
  • pred_death (float): +Predator mortality probability.
  • +
  • evolve_sd (float, optional): +Mutation standard deviation (default 0.1).
  • +
  • evolve_min (float, optional): +Minimum evolved death rate (default 0.001).
  • +
  • evolve_max (float, optional): +Maximum evolved death rate (default 0.1).
  • +
  • evolution_stopped (bool, optional): +Whether to disable mutation during this step (default True).
  • +
+ +
Returns
+ +
    +
  • np.ndarray: The updated grid after one full asynchronous pass.
  • +
+
+ + +
+
+
+ +
+ + def + measure_cluster_sizes_fast( grid: numpy.ndarray, species: int, neighborhood: str = 'moore') -> numpy.ndarray: + + + +
+ +
771def measure_cluster_sizes_fast(
+772    grid: np.ndarray,
+773    species: int,
+774    neighborhood: str = "moore",
+775) -> np.ndarray:
+776    """
+777    Measure cluster sizes for a specific species using Numba-accelerated flood fill.
+778
+779    This function provides a high-performance interface for calculating cluster
+780    size statistics without the overhead of generating a full label map. It is
+781    optimized for large-scale simulation analysis where only distribution
+782    metrics (e.g., mean size, max size) are required.
+783
+784    Parameters
+785    ----------
+786    grid : np.ndarray
+787        A 2D array representing the simulation environment.
+788    species : int
+789        The target species identifier (e.g., 1 for Prey, 2 for Predator).
+790    neighborhood : {'moore', 'neumann'}, optional
+791        The connectivity rule. 'moore' uses 8-way connectivity (including diagonals);
+792        'neumann' uses 4-way connectivity. Default is 'moore'.
+793
+794    Returns
+795    -------
+796    cluster_sizes : np.ndarray
+797        A 1D array of integers, where each element is the cell count of an
+798        identified cluster.
+799
+800    Notes
+801    -----
+802    The input grid is cast to `int32` to ensure compatibility with the
+803    underlying JIT-compiled `_measure_clusters` kernel.
+804
+805    Examples
+806    --------
+807    >>> sizes = measure_cluster_sizes_fast(grid, species=1, neighborhood='moore')
+808    >>> if sizes.size > 0:
+809    ...     print(f"Largest cluster: {sizes.max()}")
+810    """
+811    grid_int = np.asarray(grid, dtype=np.int32)
+812    moore = neighborhood == "moore"
+813    return _measure_clusters(grid_int, np.int32(species), moore)
+
+ + +

Measure cluster sizes for a specific species using Numba-accelerated flood fill.

+ +

This function provides a high-performance interface for calculating cluster +size statistics without the overhead of generating a full label map. It is +optimized for large-scale simulation analysis where only distribution +metrics (e.g., mean size, max size) are required.

+ +
Parameters
+ +
    +
  • grid (np.ndarray): +A 2D array representing the simulation environment.
  • +
  • species (int): +The target species identifier (e.g., 1 for Prey, 2 for Predator).
  • +
  • neighborhood ({'moore', 'neumann'}, optional): +The connectivity rule. 'moore' uses 8-way connectivity (including diagonals); +'neumann' uses 4-way connectivity. Default is 'moore'.
  • +
+ +
Returns
+ +
    +
  • cluster_sizes (np.ndarray): +A 1D array of integers, where each element is the cell count of an +identified cluster.
  • +
+ +
Notes
+ +

The input grid is cast to int32 to ensure compatibility with the +underlying JIT-compiled _measure_clusters kernel.

+ +
Examples
+ +
+
>>> sizes = measure_cluster_sizes_fast(grid, species=1, neighborhood='moore')
+>>> if sizes.size > 0:
+...     print(f"Largest cluster: {sizes.max()}")
+
+
+
+ + +
+
+ +
+ + def + detect_clusters_fast( grid: numpy.ndarray, species: int, neighborhood: str = 'moore') -> Tuple[numpy.ndarray, Dict[int, int]]: + + + +
+ +
816def detect_clusters_fast(
+817    grid: np.ndarray,
+818    species: int,
+819    neighborhood: str = "moore",
+820) -> Tuple[np.ndarray, Dict[int, int]]:
+821    """
+822    Perform full cluster detection with labels using Numba acceleration.
+823
+824    This function returns a label array for spatial analysis and a dictionary
+825    of cluster sizes. It is significantly faster than standard Python or
+826    SciPy equivalents for large simulation grids.
+827
+828    Parameters
+829    ----------
+830    grid : np.ndarray
+831        A 2D array representing the simulation environment.
+832    species : int
+833        The target species identifier (e.g., 1 for Prey, 2 for Predator).
+834    neighborhood : {'moore', 'neumann'}, optional
+835        The connectivity rule. 'moore' uses 8-way connectivity; 'neumann'
+836        uses 4-way connectivity. Default is 'moore'.
+837
+838    Returns
+839    -------
+840    labels : np.ndarray
+841        A 2D int32 array where each cell contains its unique cluster ID.
+842        Cells not belonging to the target species are 0.
+843    sizes : dict
+844        A dictionary mapping cluster IDs to their respective cell counts.
+845
+846    Notes
+847    -----
+848    The underlying Numba kernel uses a stack-based flood fill to avoid
+849    recursion limits and handles periodic boundary conditions.
+850
+851    Examples
+852    --------
+853    >>> labels, sizes = detect_clusters_fast(grid, species=1)
+854    >>> if sizes:
+855    ...     largest_id = max(sizes, key=sizes.get)
+856    ...     print(f"Cluster {largest_id} size: {sizes[largest_id]}")
+857    """
+858    grid_int = np.asarray(grid, dtype=np.int32)
+859    moore = neighborhood == "moore"
+860    labels, sizes_arr = _detect_clusters_numba(grid_int, np.int32(species), moore)
+861    sizes_dict = {i + 1: int(sizes_arr[i]) for i in range(len(sizes_arr))}
+862    return labels, sizes_dict
+
+ + +

Perform full cluster detection with labels using Numba acceleration.

+ +

This function returns a label array for spatial analysis and a dictionary +of cluster sizes. It is significantly faster than standard Python or +SciPy equivalents for large simulation grids.

+ +
Parameters
+ +
    +
  • grid (np.ndarray): +A 2D array representing the simulation environment.
  • +
  • species (int): +The target species identifier (e.g., 1 for Prey, 2 for Predator).
  • +
  • neighborhood ({'moore', 'neumann'}, optional): +The connectivity rule. 'moore' uses 8-way connectivity; 'neumann' +uses 4-way connectivity. Default is 'moore'.
  • +
+ +
Returns
+ +
    +
  • labels (np.ndarray): +A 2D int32 array where each cell contains its unique cluster ID. +Cells not belonging to the target species are 0.
  • +
  • sizes (dict): +A dictionary mapping cluster IDs to their respective cell counts.
  • +
+ +
Notes
+ +

The underlying Numba kernel uses a stack-based flood fill to avoid +recursion limits and handles periodic boundary conditions.

+ +
Examples
+ +
+
>>> labels, sizes = detect_clusters_fast(grid, species=1)
+>>> if sizes:
+...     largest_id = max(sizes, key=sizes.get)
+...     print(f"Cluster {largest_id} size: {sizes[largest_id]}")
+
+
+
+ + +
+
+ +
+ + def + get_cluster_stats_fast(grid: numpy.ndarray, species: int, neighborhood: str = 'moore') -> Dict: + + + +
+ +
865def get_cluster_stats_fast(
+866    grid: np.ndarray,
+867    species: int,
+868    neighborhood: str = "moore",
+869) -> Dict:
+870    """
+871    Compute comprehensive cluster statistics for a species using Numba acceleration.
+872
+873    This function integrates cluster detection and labeling to provide a
+874    full suite of spatial metrics. It calculates the cluster size distribution
+875    and the largest cluster fraction, which often serves as an order
+876    parameter in percolation theory and Phase 1-3 analyses.
+877
+878    Parameters
+879    ----------
+880    grid : np.ndarray
+881        A 2D array representing the simulation environment.
+882    species : int
+883        The target species identifier (e.g., 1 for Prey, 2 for Predator).
+884    neighborhood : {'moore', 'neumann'}, optional
+885        The connectivity rule. 'moore' uses 8-way connectivity; 'neumann'
+886        uses 4-way connectivity. Default is 'moore'.
+887
+888    Returns
+889    -------
+890    stats : dict
+891        A dictionary containing:
+892        - 'n_clusters': Total count of isolated clusters.
+893        - 'sizes': Sorted array (descending) of all cluster sizes.
+894        - 'largest': Size of the single largest cluster.
+895        - 'largest_fraction': Size of the largest cluster divided by
+896          the total population of the species.
+897        - 'mean_size': Average size of all clusters.
+898        - 'size_distribution': Frequency mapping of {size: count}.
+899        - 'labels': 2D array of unique cluster IDs.
+900        - 'size_dict': Mapping of {label_id: size}.
+901
+902    Examples
+903    --------
+904    >>> stats = get_cluster_stats_fast(grid, species=1)
+905    >>> print(f"Found {stats['n_clusters']} prey clusters.")
+906    >>> print(f"Order parameter: {stats['largest_fraction']:.3f}")
+907    """
+908    labels, size_dict = detect_clusters_fast(grid, species, neighborhood)
+909
+910    if len(size_dict) == 0:
+911        return {
+912            "n_clusters": 0,
+913            "sizes": np.array([], dtype=np.int32),
+914            "largest": 0,
+915            "largest_fraction": 0.0,
+916            "mean_size": 0.0,
+917            "size_distribution": {},
+918            "labels": labels,
+919            "size_dict": size_dict,
+920        }
+921
+922    sizes = np.array(list(size_dict.values()), dtype=np.int32)
+923    sizes_sorted = np.sort(sizes)[::-1]
+924    total_pop = int(np.sum(sizes))
+925    largest = int(sizes_sorted[0])
+926
+927    size_dist = {}
+928    for s in sizes:
+929        s_int = int(s)
+930        size_dist[s_int] = size_dist.get(s_int, 0) + 1
+931
+932    return {
+933        "n_clusters": len(size_dict),
+934        "sizes": sizes_sorted,
+935        "largest": largest,
+936        "largest_fraction": float(largest) / total_pop if total_pop > 0 else 0.0,
+937        "mean_size": float(np.mean(sizes)),
+938        "size_distribution": size_dist,
+939        "labels": labels,
+940        "size_dict": size_dict,
+941    }
+
+ + +

Compute comprehensive cluster statistics for a species using Numba acceleration.

+ +

This function integrates cluster detection and labeling to provide a +full suite of spatial metrics. It calculates the cluster size distribution +and the largest cluster fraction, which often serves as an order +parameter in percolation theory and Phase 1-3 analyses.

+ +
Parameters
+ +
    +
  • grid (np.ndarray): +A 2D array representing the simulation environment.
  • +
  • species (int): +The target species identifier (e.g., 1 for Prey, 2 for Predator).
  • +
  • neighborhood ({'moore', 'neumann'}, optional): +The connectivity rule. 'moore' uses 8-way connectivity; 'neumann' +uses 4-way connectivity. Default is 'moore'.
  • +
+ +
Returns
+ +
    +
  • stats (dict): +A dictionary containing: +
      +
    • 'n_clusters': Total count of isolated clusters.
    • +
    • 'sizes': Sorted array (descending) of all cluster sizes.
    • +
    • 'largest': Size of the single largest cluster.
    • +
    • 'largest_fraction': Size of the largest cluster divided by +the total population of the species.
    • +
    • 'mean_size': Average size of all clusters.
    • +
    • 'size_distribution': Frequency mapping of {size: count}.
    • +
    • 'labels': 2D array of unique cluster IDs.
    • +
    • 'size_dict': Mapping of {label_id: size}.
    • +
  • +
+ +
Examples
+ +
+
>>> stats = get_cluster_stats_fast(grid, species=1)
+>>> print(f"Found {stats['n_clusters']} prey clusters.")
+>>> print(f"Order parameter: {stats['largest_fraction']:.3f}")
+
+
+
+ + +
+
+ +
+ + def + compute_pcf_periodic_fast( positions_i: numpy.ndarray, positions_j: numpy.ndarray, grid_shape: Tuple[int, int], max_distance: float, n_bins: int = 50, self_correlation: bool = False) -> Tuple[numpy.ndarray, numpy.ndarray, int]: + + + +
+ +
1193def compute_pcf_periodic_fast(
+1194    positions_i: np.ndarray,
+1195    positions_j: np.ndarray,
+1196    grid_shape: Tuple[int, int],
+1197    max_distance: float,
+1198    n_bins: int = 50,
+1199    self_correlation: bool = False,
+1200) -> Tuple[np.ndarray, np.ndarray, int]:
+1201    """
+1202    Compute the Pair Correlation Function (PCF) using cell-list acceleration.
+1203
+1204    This high-level function coordinates the spatial hashing and histogram
+1205    calculation to determine the $g(r)$ function. It normalizes the resulting
+1206    histogram by the expected number of pairs in an ideal gas of the same
+1207    density, accounting for the toroidal area of each radial bin.
+1208
+1209    Parameters
+1210    ----------
+1211    positions_i : np.ndarray
+1212        (N, 2) array of coordinates for species I.
+1213    positions_j : np.ndarray
+1214        (M, 2) array of coordinates for species J.
+1215    grid_shape : tuple of int
+1216        The (rows, cols) dimensions of the simulation grid.
+1217    max_distance : float
+1218        The maximum radius to calculate correlations for.
+1219    n_bins : int, optional
+1220        Number of bins for the radial distribution (default 50).
+1221    self_correlation : bool, optional
+1222        Set to True if computing the correlation of a species with itself
+1223        to avoid self-counting (default False).
+1224
+1225    Returns
+1226    -------
+1227    bin_centers : np.ndarray
+1228        The central radial distance for each histogram bin.
+1229    pcf : np.ndarray
+1230        The normalized $g(r)$ values. A value of 1.0 indicates no spatial
+1231        correlation; > 1.0 indicates clustering; < 1.0 indicates repulsion.
+1232    total_pairs : int
+1233        The total count of pairs found within the `max_distance`.
+1234
+1235    Notes
+1236    -----
+1237    The function dynamically determines the optimal number of cells for the
+1238    spatial hash based on the `max_distance` and grid dimensions to maintain
+1239    linear time complexity.
+1240    """
+1241    rows, cols = grid_shape
+1242    L_row, L_col = float(rows), float(cols)
+1243    area = L_row * L_col
+1244
+1245    bin_width = max_distance / n_bins
+1246    bin_centers = np.linspace(bin_width / 2, max_distance - bin_width / 2, n_bins)
+1247
+1248    if len(positions_i) == 0 or len(positions_j) == 0:
+1249        return bin_centers, np.ones(n_bins), 0
+1250
+1251    n_cells = max(4, int(min(rows, cols) / max_distance))
+1252
+1253    pos_i = np.ascontiguousarray(positions_i, dtype=np.float64)
+1254    pos_j = np.ascontiguousarray(positions_j, dtype=np.float64)
+1255
+1256    indices_j, offsets_j, counts_j, cell_size_r, cell_size_c = _build_cell_list(
+1257        pos_j, n_cells, L_row, L_col
+1258    )
+1259
+1260    hist = _pcf_cell_list(
+1261        pos_i,
+1262        pos_j,
+1263        indices_j,
+1264        offsets_j,
+1265        counts_j,
+1266        cell_size_r,
+1267        cell_size_c,
+1268        L_row,
+1269        L_col,
+1270        max_distance,
+1271        n_bins,
+1272        self_correlation,
+1273        n_cells,
+1274    )
+1275
+1276    n_i, n_j = len(positions_i), len(positions_j)
+1277    if self_correlation:
+1278        density_product = n_i * (n_i - 1) / (area * area)
+1279    else:
+1280        density_product = n_i * n_j / (area * area)
+1281
+1282    expected = np.zeros(n_bins)
+1283    for i in range(n_bins):
+1284        r = bin_centers[i]
+1285        annulus_area = 2 * np.pi * r * bin_width
+1286        expected[i] = density_product * annulus_area * area
+1287
+1288    pcf = np.ones(n_bins)
+1289    mask = expected > 1.0
+1290    pcf[mask] = hist[mask] / expected[mask]
+1291
+1292    return bin_centers, pcf, int(np.sum(hist))
+
+ + +

Compute the Pair Correlation Function (PCF) using cell-list acceleration.

+ +

This high-level function coordinates the spatial hashing and histogram +calculation to determine the $g(r)$ function. It normalizes the resulting +histogram by the expected number of pairs in an ideal gas of the same +density, accounting for the toroidal area of each radial bin.

+ +
Parameters
+ +
    +
  • positions_i (np.ndarray): +(N, 2) array of coordinates for species I.
  • +
  • positions_j (np.ndarray): +(M, 2) array of coordinates for species J.
  • +
  • grid_shape (tuple of int): +The (rows, cols) dimensions of the simulation grid.
  • +
  • max_distance (float): +The maximum radius to calculate correlations for.
  • +
  • n_bins (int, optional): +Number of bins for the radial distribution (default 50).
  • +
  • self_correlation (bool, optional): +Set to True if computing the correlation of a species with itself +to avoid self-counting (default False).
  • +
+ +
Returns
+ +
    +
  • bin_centers (np.ndarray): +The central radial distance for each histogram bin.
  • +
  • pcf (np.ndarray): +The normalized $g(r)$ values. A value of 1.0 indicates no spatial +correlation; > 1.0 indicates clustering; < 1.0 indicates repulsion.
  • +
  • total_pairs (int): +The total count of pairs found within the max_distance.
  • +
+ +
Notes
+ +

The function dynamically determines the optimal number of cells for the +spatial hash based on the max_distance and grid dimensions to maintain +linear time complexity.

+
+ + +
+
+ +
+ + def + compute_all_pcfs_fast( grid: numpy.ndarray, max_distance: Optional[float] = None, n_bins: int = 50) -> Dict[str, Tuple[numpy.ndarray, numpy.ndarray, int]]: + + + +
+ +
1295def compute_all_pcfs_fast(
+1296    grid: np.ndarray,
+1297    max_distance: Optional[float] = None,
+1298    n_bins: int = 50,
+1299) -> Dict[str, Tuple[np.ndarray, np.ndarray, int]]:
+1300    """
+1301    Compute all three species Pair Correlation Functions (PCFs) using cell-list acceleration.
+1302
+1303    This function calculates the spatial auto-correlations (Prey-Prey,
+1304    Predator-Predator) and the cross-correlation (Prey-Predator) for a given
+1305    simulation grid. It identifies particle positions and leverages
+1306    Numba-accelerated cell lists to handle the computations efficiently.
+1307
+1308    Parameters
+1309    ----------
+1310    grid : np.ndarray
+1311        2D integer array where 1 represents prey and 2 represents predators.
+1312    max_distance : float, optional
+1313        The maximum radial distance for the correlation. Defaults to 1/4
+1314        of the minimum grid dimension if not provided.
+1315    n_bins : int, optional
+1316        Number of distance bins for the histogram. Default is 50.
+1317
+1318    Returns
+1319    -------
+1320    results : dict
+1321        A dictionary with keys 'prey_prey', 'pred_pred', and 'prey_pred'.
+1322        Each value is a tuple containing:
+1323        - bin_centers (np.ndarray): Radial distances.
+1324        - pcf_values (np.ndarray): Normalized g(r) values.
+1325        - pair_count (int): Total number of pairs found.
+1326
+1327    Notes
+1328    -----
+1329    The PCF provides insight into the spatial organization of the system.
+1330    g(r) > 1 at short distances indicates aggregation (clustering),
+1331    while g(r) < 1 indicates exclusion or repulsion.
+1332    """
+1333    rows, cols = grid.shape
+1334    if max_distance is None:
+1335        max_distance = min(rows, cols) / 4.0
+1336
+1337    prey_pos = np.argwhere(grid == 1)
+1338    pred_pos = np.argwhere(grid == 2)
+1339
+1340    results = {}
+1341
+1342    dist, pcf, n = compute_pcf_periodic_fast(
+1343        prey_pos,
+1344        prey_pos,
+1345        (rows, cols),
+1346        max_distance,
+1347        n_bins,
+1348        self_correlation=True,
+1349    )
+1350    results["prey_prey"] = (dist, pcf, n)
+1351
+1352    dist, pcf, n = compute_pcf_periodic_fast(
+1353        pred_pos,
+1354        pred_pos,
+1355        (rows, cols),
+1356        max_distance,
+1357        n_bins,
+1358        self_correlation=True,
+1359    )
+1360    results["pred_pred"] = (dist, pcf, n)
+1361
+1362    dist, pcf, n = compute_pcf_periodic_fast(
+1363        prey_pos,
+1364        pred_pos,
+1365        (rows, cols),
+1366        max_distance,
+1367        n_bins,
+1368        self_correlation=False,
+1369    )
+1370    results["prey_pred"] = (dist, pcf, n)
+1371
+1372    return results
+
+ + +

Compute all three species Pair Correlation Functions (PCFs) using cell-list acceleration.

+ +

This function calculates the spatial auto-correlations (Prey-Prey, +Predator-Predator) and the cross-correlation (Prey-Predator) for a given +simulation grid. It identifies particle positions and leverages +Numba-accelerated cell lists to handle the computations efficiently.

+ +
Parameters
+ +
    +
  • grid (np.ndarray): +2D integer array where 1 represents prey and 2 represents predators.
  • +
  • max_distance (float, optional): +The maximum radial distance for the correlation. Defaults to 1/4 +of the minimum grid dimension if not provided.
  • +
  • n_bins (int, optional): +Number of distance bins for the histogram. Default is 50.
  • +
+ +
Returns
+ +
    +
  • results (dict): +A dictionary with keys 'prey_prey', 'pred_pred', and 'prey_pred'. +Each value is a tuple containing: +
      +
    • bin_centers (np.ndarray): Radial distances.
    • +
    • pcf_values (np.ndarray): Normalized g(r) values.
    • +
    • pair_count (int): Total number of pairs found.
    • +
  • +
+ +
Notes
+ +

The PCF provides insight into the spatial organization of the system. +g(r) > 1 at short distances indicates aggregation (clustering), +while g(r) < 1 indicates exclusion or repulsion.

+
+ + +
+
+ +
+ + def + warmup_numba_kernels(grid_size: int = 100, directed_hunting: bool = False): + + + +
+ +
1380def warmup_numba_kernels(grid_size: int = 100, directed_hunting: bool = False):
+1381    """
+1382    Pre-compile all Numba-accelerated kernels to avoid first-run latency.
+1383
+1384    This function executes a single step of the simulation and each analysis
+1385    routine on a dummy grid. Because Numba uses Just-In-Time (JIT) compilation,
+1386    the first call to a decorated function incurs a compilation overhead.
+1387    Running this warmup ensures that subsequent experimental runs are timed
+1388    accurately and perform at full speed.
+1389
+1390    Parameters
+1391    ----------
+1392    grid_size : int, optional
+1393        The side length of the dummy grid used for warmup (default 100).
+1394    directed_hunting : bool, optional
+1395        If True, also warms up the directed behavior update kernel (default False).
+1396
+1397    Returns
+1398    -------
+1399    None
+1400
+1401    Notes
+1402    -----
+1403    This function checks for `NUMBA_AVAILABLE` before execution. It warms up
+1404    the `PPKernel` (random and optionally directed), as well as the
+1405    spatial analysis functions (`compute_all_pcfs_fast`, `detect_clusters_fast`, etc.).
+1406    """
+1407    if not NUMBA_AVAILABLE:
+1408        return
+1409
+1410    set_numba_seed(0)
+1411
+1412    grid = np.zeros((grid_size, grid_size), dtype=np.int32)
+1413    grid[::3, ::3] = 1
+1414    grid[::5, ::5] = 2
+1415
+1416    prey_death_arr = np.full((grid_size, grid_size), 0.05, dtype=np.float64)
+1417    prey_death_arr[grid != 1] = np.nan
+1418
+1419    # Always warmup random kernel
+1420    kernel_random = PPKernel(grid_size, grid_size, directed_hunting=False)
+1421    kernel_random.update(grid.copy(), prey_death_arr.copy(), 0.2, 0.05, 0.2, 0.1)
+1422
+1423    # Warmup directed kernel if requested
+1424    if directed_hunting:
+1425        kernel_directed = PPKernel(grid_size, grid_size, directed_hunting=True)
+1426        kernel_directed.update(grid.copy(), prey_death_arr.copy(), 0.2, 0.05, 0.2, 0.1)
+1427
+1428    # Warmup analysis functions
+1429    _ = compute_all_pcfs_fast(grid, max_distance=20.0, n_bins=20)
+1430    _ = measure_cluster_sizes_fast(grid, 1)
+1431    _ = detect_clusters_fast(grid, 1)
+1432    _ = get_cluster_stats_fast(grid, 1)
+
+ + +

Pre-compile all Numba-accelerated kernels to avoid first-run latency.

+ +

This function executes a single step of the simulation and each analysis +routine on a dummy grid. Because Numba uses Just-In-Time (JIT) compilation, +the first call to a decorated function incurs a compilation overhead. +Running this warmup ensures that subsequent experimental runs are timed +accurately and perform at full speed.

+ +
Parameters
+ +
    +
  • grid_size (int, optional): +The side length of the dummy grid used for warmup (default 100).
  • +
  • directed_hunting (bool, optional): +If True, also warms up the directed behavior update kernel (default False).
  • +
+ +
Returns
+ +
    +
  • None
  • +
+ +
Notes
+ +

This function checks for NUMBA_AVAILABLE before execution. It warms up +the PPKernel (random and optionally directed), as well as the +spatial analysis functions (compute_all_pcfs_fast, detect_clusters_fast, etc.).

+
+ + +
+
+ +
+ + def + benchmark_kernels(grid_size: int = 100, n_runs: int = 20): + + + +
+ +
1435def benchmark_kernels(grid_size: int = 100, n_runs: int = 20):
+1436    """
+1437    Benchmark the execution performance of random vs. directed update kernels.
+1438
+1439    This utility measures the average time per simulation step for both the
+1440    stochastic (random neighbor) and heuristic (directed hunting/reproduction)
+1441    update strategies. It accounts for the computational overhead introduced
+1442    by the "intelligent" search logic used in directed mode.
+1443
+1444    Parameters
+1445    ----------
+1446    grid_size : int, optional
+1447        The side length of the square simulation grid (default 100).
+1448    n_runs : int, optional
+1449        The number of iterations to perform for averaging performance (default 20).
+1450
+1451    Returns
+1452    -------
+1453    t_random : float
+1454        Average time per step for the random kernel in milliseconds.
+1455    t_directed : float
+1456        Average time per step for the directed kernel in milliseconds.
+1457
+1458    Notes
+1459    -----
+1460    The function ensures a fair comparison by:
+1461    1. Using a fixed seed for reproducible initial grid states.
+1462    2. Warming up Numba kernels before timing to exclude JIT compilation latency.
+1463    3. Copying the grid and death arrays for each iteration to maintain
+1464       consistent population densities throughout the benchmark.
+1465    """
+1466    import time
+1467
+1468    print("=" * 60)
+1469    print(f"KERNEL BENCHMARK ({grid_size}x{grid_size}, {n_runs} runs)")
+1470    print(f"Numba available: {NUMBA_AVAILABLE}")
+1471    print("=" * 60)
+1472
+1473    np.random.seed(42)
+1474    grid = np.zeros((grid_size, grid_size), dtype=np.int32)
+1475    n_prey = int(grid_size * grid_size * 0.30)
+1476    n_pred = int(grid_size * grid_size * 0.15)
+1477    positions = np.random.permutation(grid_size * grid_size)
+1478    for pos in positions[:n_prey]:
+1479        grid[pos // grid_size, pos % grid_size] = 1
+1480    for pos in positions[n_prey : n_prey + n_pred]:
+1481        grid[pos // grid_size, pos % grid_size] = 2
+1482
+1483    prey_death_arr = np.full((grid_size, grid_size), 0.05, dtype=np.float64)
+1484    prey_death_arr[grid != 1] = np.nan
+1485
+1486    print(f"Initial: {np.sum(grid == 1)} prey, {np.sum(grid == 2)} predators")
+1487
+1488    # Warmup both kernels
+1489    warmup_numba_kernels(grid_size, directed_hunting=True)
+1490
+1491    # Benchmark random kernel
+1492    kernel_random = PPKernel(grid_size, grid_size, directed_hunting=False)
+1493    t0 = time.perf_counter()
+1494    for _ in range(n_runs):
+1495        test_grid = grid.copy()
+1496        test_arr = prey_death_arr.copy()
+1497        kernel_random.update(test_grid, test_arr, 0.2, 0.05, 0.2, 0.1)
+1498    t_random = (time.perf_counter() - t0) / n_runs * 1000
+1499
+1500    # Benchmark directed kernel
+1501    kernel_directed = PPKernel(grid_size, grid_size, directed_hunting=True)
+1502    t0 = time.perf_counter()
+1503    for _ in range(n_runs):
+1504        test_grid = grid.copy()
+1505        test_arr = prey_death_arr.copy()
+1506        kernel_directed.update(test_grid, test_arr, 0.2, 0.05, 0.2, 0.1)
+1507    t_directed = (time.perf_counter() - t0) / n_runs * 1000
+1508
+1509    print(f"\nRandom kernel:   {t_random:.2f} ms/step")
+1510    print(f"Directed kernel: {t_directed:.2f} ms/step")
+1511    print(
+1512        f"Overhead:        {t_directed - t_random:.2f} ms (+{100*(t_directed/t_random - 1):.1f}%)"
+1513    )
+1514
+1515    return t_random, t_directed
+
+ + +

Benchmark the execution performance of random vs. directed update kernels.

+ +

This utility measures the average time per simulation step for both the +stochastic (random neighbor) and heuristic (directed hunting/reproduction) +update strategies. It accounts for the computational overhead introduced +by the "intelligent" search logic used in directed mode.

+ +
Parameters
+ +
    +
  • grid_size (int, optional): +The side length of the square simulation grid (default 100).
  • +
  • n_runs (int, optional): +The number of iterations to perform for averaging performance (default 20).
  • +
+ +
Returns
+ +
    +
  • t_random (float): +Average time per step for the random kernel in milliseconds.
  • +
  • t_directed (float): +Average time per step for the directed kernel in milliseconds.
  • +
+ +
Notes
+ +

The function ensures a fair comparison by:

+ +
    +
  1. Using a fixed seed for reproducible initial grid states.
  2. +
  3. Warming up Numba kernels before timing to exclude JIT compilation latency.
  4. +
  5. Copying the grid and death arrays for each iteration to maintain +consistent population densities throughout the benchmark.
  6. +
+
+ + +
+
+ +
+ + def + benchmark_cluster_detection(grid_size: int = 100, n_runs: int = 20): + + + +
+ +
1518def benchmark_cluster_detection(grid_size: int = 100, n_runs: int = 20):
+1519    """
+1520    Benchmark the performance of different cluster detection and analysis routines.
+1521
+1522    This function evaluates three levels of spatial analysis:
+1523    1. Size measurement only (fastest, no label map).
+1524    2. Full detection (returns label map and size dictionary).
+1525    3. Comprehensive statistics (calculates distributions, means, and order parameters).
+1526
+1527    Parameters
+1528    ----------
+1529    grid_size : int, optional
+1530        Side length of the square grid for benchmarking (default 100).
+1531    n_runs : int, optional
+1532        Number of iterations to average for performance results (default 20).
+1533
+1534    Returns
+1535    -------
+1536    stats : dict
+1537        The result dictionary from the final comprehensive statistics run.
+1538
+1539    Notes
+1540    -----
+1541    The benchmark uses a fixed prey density of 30% to ensure a representative
+1542    distribution of clusters. It pre-warms the Numba kernels to ensure that
+1543    the measurements reflect execution speed rather than compilation time.
+1544    """
+1545    import time
+1546
+1547    print("=" * 60)
+1548    print(f"CLUSTER DETECTION BENCHMARK ({grid_size}x{grid_size})")
+1549    print(f"Numba available: {NUMBA_AVAILABLE}")
+1550    print("=" * 60)
+1551
+1552    np.random.seed(42)
+1553    grid = np.zeros((grid_size, grid_size), dtype=np.int32)
+1554    n_prey = int(grid_size * grid_size * 0.30)
+1555    positions = np.random.permutation(grid_size * grid_size)[:n_prey]
+1556    for pos in positions:
+1557        grid[pos // grid_size, pos % grid_size] = 1
+1558
+1559    print(f"Prey cells: {np.sum(grid == 1)}")
+1560
+1561    # Warmup
+1562    _ = measure_cluster_sizes_fast(grid, 1)
+1563    _ = detect_clusters_fast(grid, 1)
+1564    _ = get_cluster_stats_fast(grid, 1)
+1565
+1566    # Benchmark sizes only
+1567    t0 = time.perf_counter()
+1568    for _ in range(n_runs):
+1569        sizes = measure_cluster_sizes_fast(grid, 1)
+1570    t_sizes = (time.perf_counter() - t0) / n_runs * 1000
+1571    print(f"\nmeasure_cluster_sizes_fast: {t_sizes:.2f} ms  ({len(sizes)} clusters)")
+1572
+1573    # Benchmark full detection
+1574    t0 = time.perf_counter()
+1575    for _ in range(n_runs):
+1576        labels, size_dict = detect_clusters_fast(grid, 1)
+1577    t_detect = (time.perf_counter() - t0) / n_runs * 1000
+1578    print(f"detect_clusters_fast:       {t_detect:.2f} ms  ({len(size_dict)} clusters)")
+1579
+1580    # Benchmark full stats
+1581    t0 = time.perf_counter()
+1582    for _ in range(n_runs):
+1583        stats = get_cluster_stats_fast(grid, 1)
+1584    t_stats = (time.perf_counter() - t0) / n_runs * 1000
+1585    print(f"get_cluster_stats_fast:     {t_stats:.2f} ms")
+1586
+1587    print(
+1588        f"\nOverhead for labels: {t_detect - t_sizes:.2f} ms (+{100*(t_detect/t_sizes - 1):.0f}%)"
+1589    )
+1590
+1591    return stats
+
+ + +

Benchmark the performance of different cluster detection and analysis routines.

+ +

This function evaluates three levels of spatial analysis:

+ +
    +
  1. Size measurement only (fastest, no label map).
  2. +
  3. Full detection (returns label map and size dictionary).
  4. +
  5. Comprehensive statistics (calculates distributions, means, and order parameters).
  6. +
+ +
Parameters
+ +
    +
  • grid_size (int, optional): +Side length of the square grid for benchmarking (default 100).
  • +
  • n_runs (int, optional): +Number of iterations to average for performance results (default 20).
  • +
+ +
Returns
+ +
    +
  • stats (dict): +The result dictionary from the final comprehensive statistics run.
  • +
+ +
Notes
+ +

The benchmark uses a fixed prey density of 30% to ensure a representative +distribution of clusters. It pre-warms the Numba kernels to ensure that +the measurements reflect execution speed rather than compilation time.

+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/search.js b/docs/search.js new file mode 100644 index 0000000..47dc945 --- /dev/null +++ b/docs/search.js @@ -0,0 +1,46 @@ +window.pdocSearch = (function(){ +/** elasticlunr - http://weixsong.github.io * Copyright (C) 2017 Oliver Nightingale * Copyright (C) 2017 Wei Song * MIT Licensed */!function(){function e(e){if(null===e||"object"!=typeof e)return e;var t=e.constructor();for(var n in e)e.hasOwnProperty(n)&&(t[n]=e[n]);return t}var t=function(e){var n=new t.Index;return n.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),e&&e.call(n,n),n};t.version="0.9.5",lunr=t,t.utils={},t.utils.warn=function(e){return function(t){e.console&&console.warn&&console.warn(t)}}(this),t.utils.toString=function(e){return void 0===e||null===e?"":e.toString()},t.EventEmitter=function(){this.events={}},t.EventEmitter.prototype.addListener=function(){var e=Array.prototype.slice.call(arguments),t=e.pop(),n=e;if("function"!=typeof t)throw new TypeError("last argument must be a function");n.forEach(function(e){this.hasHandler(e)||(this.events[e]=[]),this.events[e].push(t)},this)},t.EventEmitter.prototype.removeListener=function(e,t){if(this.hasHandler(e)){var n=this.events[e].indexOf(t);-1!==n&&(this.events[e].splice(n,1),0==this.events[e].length&&delete this.events[e])}},t.EventEmitter.prototype.emit=function(e){if(this.hasHandler(e)){var t=Array.prototype.slice.call(arguments,1);this.events[e].forEach(function(e){e.apply(void 0,t)},this)}},t.EventEmitter.prototype.hasHandler=function(e){return e in this.events},t.tokenizer=function(e){if(!arguments.length||null===e||void 0===e)return[];if(Array.isArray(e)){var n=e.filter(function(e){return null===e||void 0===e?!1:!0});n=n.map(function(e){return t.utils.toString(e).toLowerCase()});var i=[];return n.forEach(function(e){var n=e.split(t.tokenizer.seperator);i=i.concat(n)},this),i}return 
e.toString().trim().toLowerCase().split(t.tokenizer.seperator)},t.tokenizer.defaultSeperator=/[\s\-]+/,t.tokenizer.seperator=t.tokenizer.defaultSeperator,t.tokenizer.setSeperator=function(e){null!==e&&void 0!==e&&"object"==typeof e&&(t.tokenizer.seperator=e)},t.tokenizer.resetSeperator=function(){t.tokenizer.seperator=t.tokenizer.defaultSeperator},t.tokenizer.getSeperator=function(){return t.tokenizer.seperator},t.Pipeline=function(){this._queue=[]},t.Pipeline.registeredFunctions={},t.Pipeline.registerFunction=function(e,n){n in t.Pipeline.registeredFunctions&&t.utils.warn("Overwriting existing registered function: "+n),e.label=n,t.Pipeline.registeredFunctions[n]=e},t.Pipeline.getRegisteredFunction=function(e){return e in t.Pipeline.registeredFunctions!=!0?null:t.Pipeline.registeredFunctions[e]},t.Pipeline.warnIfFunctionNotRegistered=function(e){var n=e.label&&e.label in this.registeredFunctions;n||t.utils.warn("Function is not registered with pipeline. This may cause problems when serialising the index.\n",e)},t.Pipeline.load=function(e){var n=new t.Pipeline;return e.forEach(function(e){var i=t.Pipeline.getRegisteredFunction(e);if(!i)throw new Error("Cannot load un-registered function: "+e);n.add(i)}),n},t.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach(function(e){t.Pipeline.warnIfFunctionNotRegistered(e),this._queue.push(e)},this)},t.Pipeline.prototype.after=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i+1,0,n)},t.Pipeline.prototype.before=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i,0,n)},t.Pipeline.prototype.remove=function(e){var t=this._queue.indexOf(e);-1!==t&&this._queue.splice(t,1)},t.Pipeline.prototype.run=function(e){for(var t=[],n=e.length,i=this._queue.length,o=0;n>o;o++){for(var 
r=e[o],s=0;i>s&&(r=this._queue[s](r,o,e),void 0!==r&&null!==r);s++);void 0!==r&&null!==r&&t.push(r)}return t},t.Pipeline.prototype.reset=function(){this._queue=[]},t.Pipeline.prototype.get=function(){return this._queue},t.Pipeline.prototype.toJSON=function(){return this._queue.map(function(e){return t.Pipeline.warnIfFunctionNotRegistered(e),e.label})},t.Index=function(){this._fields=[],this._ref="id",this.pipeline=new t.Pipeline,this.documentStore=new t.DocumentStore,this.index={},this.eventEmitter=new t.EventEmitter,this._idfCache={},this.on("add","remove","update",function(){this._idfCache={}}.bind(this))},t.Index.prototype.on=function(){var e=Array.prototype.slice.call(arguments);return this.eventEmitter.addListener.apply(this.eventEmitter,e)},t.Index.prototype.off=function(e,t){return this.eventEmitter.removeListener(e,t)},t.Index.load=function(e){e.version!==t.version&&t.utils.warn("version mismatch: current "+t.version+" importing "+e.version);var n=new this;n._fields=e.fields,n._ref=e.ref,n.documentStore=t.DocumentStore.load(e.documentStore),n.pipeline=t.Pipeline.load(e.pipeline),n.index={};for(var i in e.index)n.index[i]=t.InvertedIndex.load(e.index[i]);return n},t.Index.prototype.addField=function(e){return this._fields.push(e),this.index[e]=new t.InvertedIndex,this},t.Index.prototype.setRef=function(e){return this._ref=e,this},t.Index.prototype.saveDocument=function(e){return this.documentStore=new t.DocumentStore(e),this},t.Index.prototype.addDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.addDoc(i,e),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));this.documentStore.addFieldLength(i,n,o.length);var r={};o.forEach(function(e){e in r?r[e]+=1:r[e]=1},this);for(var s in r){var 
u=r[s];u=Math.sqrt(u),this.index[n].addToken(s,{ref:i,tf:u})}},this),n&&this.eventEmitter.emit("add",e,this)}},t.Index.prototype.removeDocByRef=function(e){if(e&&this.documentStore.isDocStored()!==!1&&this.documentStore.hasDoc(e)){var t=this.documentStore.getDoc(e);this.removeDoc(t,!1)}},t.Index.prototype.removeDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.hasDoc(i)&&(this.documentStore.removeDoc(i),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));o.forEach(function(e){this.index[n].removeToken(e,i)},this)},this),n&&this.eventEmitter.emit("remove",e,this))}},t.Index.prototype.updateDoc=function(e,t){var t=void 0===t?!0:t;this.removeDocByRef(e[this._ref],!1),this.addDoc(e,!1),t&&this.eventEmitter.emit("update",e,this)},t.Index.prototype.idf=function(e,t){var n="@"+t+"/"+e;if(Object.prototype.hasOwnProperty.call(this._idfCache,n))return this._idfCache[n];var i=this.index[t].getDocFreq(e),o=1+Math.log(this.documentStore.length/(i+1));return this._idfCache[n]=o,o},t.Index.prototype.getFields=function(){return this._fields.slice()},t.Index.prototype.search=function(e,n){if(!e)return[];e="string"==typeof e?{any:e}:JSON.parse(JSON.stringify(e));var i=null;null!=n&&(i=JSON.stringify(n));for(var o=new t.Configuration(i,this.getFields()).get(),r={},s=Object.keys(e),u=0;u0&&t.push(e);for(var i in n)"docs"!==i&&"df"!==i&&this.expandToken(e+i,t,n[i]);return t},t.InvertedIndex.prototype.toJSON=function(){return{root:this.root}},t.Configuration=function(e,n){var e=e||"";if(void 0==n||null==n)throw new Error("fields should not be null");this.config={};var i;try{i=JSON.parse(e),this.buildUserConfig(i,n)}catch(o){t.utils.warn("user configuration parse failed, will use default configuration"),this.buildDefaultConfig(n)}},t.Configuration.prototype.buildDefaultConfig=function(e){this.reset(),e.forEach(function(e){this.config[e]={boost:1,bool:"OR",expand:!1}},this)},t.Configuration.prototype.buildUserConfig=function(e,n){var 
i="OR",o=!1;if(this.reset(),"bool"in e&&(i=e.bool||i),"expand"in e&&(o=e.expand||o),"fields"in e)for(var r in e.fields)if(n.indexOf(r)>-1){var s=e.fields[r],u=o;void 0!=s.expand&&(u=s.expand),this.config[r]={boost:s.boost||0===s.boost?s.boost:1,bool:s.bool||i,expand:u}}else t.utils.warn("field name in user configuration not found in index instance fields");else this.addAllFields2UserConfig(i,o,n)},t.Configuration.prototype.addAllFields2UserConfig=function(e,t,n){n.forEach(function(n){this.config[n]={boost:1,bool:e,expand:t}},this)},t.Configuration.prototype.get=function(){return this.config},t.Configuration.prototype.reset=function(){this.config={}},lunr.SortedSet=function(){this.length=0,this.elements=[]},lunr.SortedSet.load=function(e){var t=new this;return t.elements=e,t.length=e.length,t},lunr.SortedSet.prototype.add=function(){var e,t;for(e=0;e1;){if(r===e)return o;e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o]}return r===e?o:-1},lunr.SortedSet.prototype.locationFor=function(e){for(var t=0,n=this.elements.length,i=n-t,o=t+Math.floor(i/2),r=this.elements[o];i>1;)e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o];return r>e?o:e>r?o+1:void 0},lunr.SortedSet.prototype.intersect=function(e){for(var t=new lunr.SortedSet,n=0,i=0,o=this.length,r=e.length,s=this.elements,u=e.elements;;){if(n>o-1||i>r-1)break;s[n]!==u[i]?s[n]u[i]&&i++:(t.add(s[n]),n++,i++)}return t},lunr.SortedSet.prototype.clone=function(){var e=new lunr.SortedSet;return e.elements=this.toArray(),e.length=e.elements.length,e},lunr.SortedSet.prototype.union=function(e){var t,n,i;this.length>=e.length?(t=this,n=e):(t=e,n=this),i=t.clone();for(var o=0,r=n.toArray();oCellular Automaton Framework\n\n

This module provides the base cellular automaton class and the\nPredator-Prey (PP) implementation with Numba-accelerated kernels.

\n\n
Classes
\n\n

CA: Abstract base class for spatial cellular automata.

\n\n

PP: Predator-Prey model with configurable hunting behavior.

\n\n
Example
\n\n
\n
from models.CA import PP\n\n# Basic usage\nmodel = PP(rows=100, cols=100, densities=(0.3, 0.15), seed=42)\nmodel.run(steps=1000)\n\n# With evolution enabled\nmodel = PP(rows=100, cols=100, seed=42)\nmodel.evolve("prey_death", sd=0.05, min_val=0.01, max_val=0.15)\nmodel.run(steps=500)\n\n# With directed hunting\nmodel = PP(rows=100, cols=100, directed_hunting=True, seed=42)\n
\n
\n"}, "models.CA.CA": {"fullname": "models.CA.CA", "modulename": "models.CA", "qualname": "CA", "kind": "class", "doc": "

Base cellular automaton class for spatial simulations.

\n\n

This class provides a framework for multi-species cellular automata with\nsupport for global parameters, per-cell evolving parameters, and\ngrid initialization based on density.

\n\n
Attributes
\n\n
    \n
  • grid (np.ndarray):\n2D numpy array containing integers in range [0, n_species].
  • \n
  • params (Dict[str, Any]):\nGlobal parameters shared by all cells.
  • \n
  • cell_params (Dict[str, Any]):\nLocal per-cell parameters, typically stored as numpy arrays matching the grid shape.
  • \n
  • neighborhood (str):\nThe adjacency rule used ('neumann' or 'moore').
  • \n
  • generator (np.random.Generator):\nThe random number generator instance for reproducibility.
  • \n
  • species_names (Tuple[str, ...]):\nHuman-readable names for each species state.
  • \n
\n"}, "models.CA.CA.__init__": {"fullname": "models.CA.CA.__init__", "modulename": "models.CA", "qualname": "CA.__init__", "kind": "function", "doc": "

Initialize the cellular automaton grid and configurations.

\n\n
Parameters
\n\n
    \n
  • rows (int):\nNumber of rows in the grid (must be > 0).
  • \n
  • cols (int):\nNumber of columns in the grid (must be > 0).
  • \n
  • densities (Tuple[float, ...]):\nInitial density for each species. Length defines n_species.\nValues must sum to <= 1.0.
  • \n
  • neighborhood ({'neumann', 'moore'}):\nType of neighborhood connectivity.
  • \n
  • params (Dict[str, Any]):\nInitial global parameter values.
  • \n
  • cell_params (Dict[str, Any]):\nInitial local per-cell parameters.
  • \n
  • seed (int, optional):\nSeed for the random number generator.
  • \n
\n", "signature": "(\trows: int,\tcols: int,\tdensities: Tuple[float, ...],\tneighborhood: str,\tparams: Dict[str, object],\tcell_params: Dict[str, object],\tseed: Optional[int] = None)"}, "models.CA.CA.rows": {"fullname": "models.CA.CA.rows", "modulename": "models.CA", "qualname": "CA.rows", "kind": "variable", "doc": "

int: Number of rows in the grid.

\n", "annotation": ": int"}, "models.CA.CA.cols": {"fullname": "models.CA.CA.cols", "modulename": "models.CA", "qualname": "CA.cols", "kind": "variable", "doc": "

int: Number of columns in the grid.

\n", "annotation": ": int"}, "models.CA.CA.densities": {"fullname": "models.CA.CA.densities", "modulename": "models.CA", "qualname": "CA.densities", "kind": "variable", "doc": "

Tuple[float, ...]: Initial density fraction for each species.

\n", "annotation": ": Tuple[float, ...]"}, "models.CA.CA.n_species": {"fullname": "models.CA.CA.n_species", "modulename": "models.CA", "qualname": "CA.n_species", "kind": "variable", "doc": "

int: Number of distinct species states (excluding empty state 0).

\n", "annotation": ": int"}, "models.CA.CA.validate": {"fullname": "models.CA.CA.validate", "modulename": "models.CA", "qualname": "CA.validate", "kind": "function", "doc": "

Validate core CA invariants and grid dimensions.

\n\n

Checks that the neighborhood is valid, the grid matches initialized dimensions,\nand that local parameter arrays match the grid shape.

\n\n
Raises
\n\n
    \n
  • ValueError: If any structural invariant is violated.
  • \n
\n", "signature": "(self) -> None:", "funcdef": "def"}, "models.CA.CA.evolve": {"fullname": "models.CA.CA.evolve", "modulename": "models.CA", "qualname": "CA.evolve", "kind": "function", "doc": "

Enable per-cell evolution for a specific parameter on a given species.

\n\n

This method initializes a spatial parameter array (local parameter map)\nfor a global parameter. It allows individual cells to carry their own\nvalues for that parameter, which can then mutate and evolve during\nthe simulation.

\n\n
Parameters
\n\n
    \n
  • param (str):\nThe name of the global parameter to enable for evolution.\nMust exist in self.params.
  • \n
  • species (int, optional):\nThe 1-based index of the species to which this parameter applies.\nIf None, the method attempts to infer the species from the\nparameter name prefix.
  • \n
  • sd (float, default 0.05):\nThe standard deviation of the Gaussian mutation applied during\ninheritance/reproduction.
  • \n
  • min_val (float, optional):\nThe minimum allowable value for the parameter (clamping).\nDefaults to 0.01 if not provided.
  • \n
  • max_val (float, optional):\nThe maximum allowable value for the parameter (clamping).\nDefaults to 0.99 if not provided.
  • \n
\n\n
Raises
\n\n
    \n
  • ValueError: If the parameter is not in self.params, the species cannot be\ninferred, or the species index is out of bounds.
  • \n
\n\n
Notes
\n\n

The local parameter is stored in self.cell_params as a 2D numpy\narray initialized with the current global value for all cells of\nthe target species, and NaN elsewhere.

\n", "signature": "(\tself,\tparam: str,\tspecies: Optional[int] = None,\tsd: float = 0.05,\tmin_val: Optional[float] = None,\tmax_val: Optional[float] = None) -> None:", "funcdef": "def"}, "models.CA.CA.update": {"fullname": "models.CA.CA.update", "modulename": "models.CA", "qualname": "CA.update", "kind": "function", "doc": "

Perform one update step of the cellular automaton.

\n\n

This is an abstract method that defines the transition rules of the\nsystem. It must be implemented by concrete subclasses to specify\nhow cell states and parameters change over time based on their\ncurrent state and neighborhood.

\n\n
Raises
\n\n
    \n
  • NotImplementedError: If called directly on the base class instead of an implementation.
  • \n
\n\n
Returns
\n\n
    \n
  • None
  • \n
\n\n
Notes
\n\n

In a typical implementation, this method handles the logic for\nstochastic transitions, movement, or predator-prey interactions.

\n", "signature": "(self) -> None:", "funcdef": "def"}, "models.CA.CA.run": {"fullname": "models.CA.CA.run", "modulename": "models.CA", "qualname": "CA.run", "kind": "function", "doc": "

Execute the cellular automaton simulation for a specified number of steps.

\n\n

This method drives the simulation loop, calling update() at each\niteration. It manages visualization updates, directory creation for\ndata persistence, and handles the freezing of evolving parameters\nat a specific time step.

\n\n
Parameters
\n\n
    \n
  • steps (int):\nThe total number of iterations to run (must be non-negative).
  • \n
  • stop_evolution_at (int, optional):\nThe 1-based iteration index after which parameter mutation is\ndisabled. Useful for observing system stability after a period\nof adaptation.
  • \n
  • snapshot_iters (List[int], optional):\nA list of specific 1-based iteration indices at which to save\nthe current grid state to the results directory.
  • \n
\n\n
Returns
\n\n
    \n
  • None
  • \n
\n\n
Notes
\n\n

If snapshots are requested, a results directory is automatically created\nusing a timestamped subfolder (e.g., 'results/run-1700000000/').\nVisualization errors are logged but do not terminate the simulation.

\n", "signature": "(\tself,\tsteps: int,\tstop_evolution_at: Optional[int] = None,\tsnapshot_iters: Optional[list] = None) -> None:", "funcdef": "def"}, "models.CA.PP": {"fullname": "models.CA.PP", "modulename": "models.CA", "qualname": "PP", "kind": "class", "doc": "

Predator-Prey Cellular Automaton model with Numba-accelerated kernels.

\n\n

This model simulates a stochastic predator-prey system where species\ninteract on a 2D grid. It supports evolving per-cell death rates,\nperiodic boundary conditions, and both random and directed hunting\nbehaviors.

\n\n
Parameters
\n\n
    \n
  • rows (int, default 10):\nNumber of rows in the simulation grid.
  • \n
  • cols (int, default 10):\nNumber of columns in the simulation grid.
  • \n
  • densities (Tuple[float, ...], default (0.2, 0.1)):\nInitial population densities for (prey, predator).
  • \n
  • neighborhood ({'moore', 'neumann'}, default 'moore'):\nThe neighborhood type for cell interactions.
  • \n
  • params (Dict[str, object], optional):\nGlobal parameters: \"prey_death\", \"predator_death\", \"prey_birth\",\n\"predator_birth\".
  • \n
  • cell_params (Dict[str, object], optional):\nInitial local parameter maps (2D arrays).
  • \n
  • seed (int, optional):\nRandom seed for reproducibility.
  • \n
  • synchronous (bool, default True):\nIf True, updates the entire grid at once. If False, updates\ncells asynchronously.
  • \n
  • directed_hunting (bool, default False):\nIf True, predators selectively hunt prey rather than choosing\nneighbors at random.
  • \n
\n\n
Attributes
\n\n
    \n
  • species_names (Tuple[str, ...]):\nLabels for the species ('prey', 'predator').
  • \n
  • synchronous (bool):\nCurrent update mode.
  • \n
  • directed_hunting (bool):\nCurrent hunting strategy logic.
  • \n
\n", "bases": "CA"}, "models.CA.PP.__init__": {"fullname": "models.CA.PP.__init__", "modulename": "models.CA", "qualname": "PP.__init__", "kind": "function", "doc": "

Initialize the Predator-Prey CA with validated parameters and kernels.

\n", "signature": "(\trows: int = 10,\tcols: int = 10,\tdensities: Tuple[float, ...] = (0.2, 0.1),\tneighborhood: str = 'moore',\tparams: Dict[str, object] = None,\tcell_params: Dict[str, object] = None,\tseed: Optional[int] = None,\tsynchronous: bool = True,\tdirected_hunting: bool = False)"}, "models.CA.PP.validate": {"fullname": "models.CA.PP.validate", "modulename": "models.CA", "qualname": "PP.validate", "kind": "function", "doc": "

Validate Predator-Prey specific invariants and spatial parameter arrays.

\n\n

Extends the base CA validation to ensure that numerical parameters are\nwithin the [0, 1] probability range and that evolved parameter maps\n(e.g., prey_death) correctly align with the species locations.

\n\n
Raises
\n\n
    \n
  • ValueError: If grid shapes, parameter ranges, or species masks are inconsistent.
  • \n
  • TypeError: If parameters are non-numeric.
  • \n
\n", "signature": "(self) -> None:", "funcdef": "def"}, "models.CA.PP.update_async": {"fullname": "models.CA.PP.update_async", "modulename": "models.CA", "qualname": "PP.update_async", "kind": "function", "doc": "

Execute an asynchronous update using the optimized Numba kernel.

\n\n

This method retrieves the evolved parameter maps and delegates the\nstochastic transitions to the PPKernel. Asynchronous updates\ntypically handle cell-by-cell logic where changes can be\nimmediately visible to neighbors.

\n", "signature": "(self) -> None:", "funcdef": "def"}, "models.CA.PP.update": {"fullname": "models.CA.PP.update", "modulename": "models.CA", "qualname": "PP.update", "kind": "function", "doc": "

Dispatch the simulation step based on the configured update mode.

\n", "signature": "(self) -> None:", "funcdef": "def"}, "models.config": {"fullname": "models.config", "modulename": "models.config", "kind": "module", "doc": "

Experiment Configuration

\n\n

This module provides the configuration dataclass and pre-defined phase\nconfigurations for Predator-Prey Hydra Effect experiments.

\n\n
Classes
\n\n

Config\n Central configuration dataclass with all experiment parameters.

\n\n
Functions
\n\n
\n
get_phase_config: Retrieve configuration for a specific experimental phase.\n
\n
\n\n
Phase Configurations
\n\n
    \n
  • PHASE1_CONFIG: Parameter sweep to find critical point
  • \n
  • PHASE2_CONFIG: Self-organization (evolution toward criticality)
  • \n
  • PHASE3_CONFIG: Finite-size scaling at critical point
  • \n
  • PHASE4_CONFIG: Sensitivity analysis (4D parameter sweep)
  • \n
  • PHASE5_CONFIG: Directed hunting comparison
  • \n
\n\n
Example
\n\n
\n
from models.config import Config, get_phase_config\n\n# Use predefined phase config\ncfg = get_phase_config(1)\n\n# Create custom config\ncfg = Config(grid_size=200, n_replicates=10)\n\n# Generate parameter sweep values\nprey_deaths = cfg.get_prey_deaths()\n
\n
\n"}, "models.config.Config": {"fullname": "models.config.Config", "modulename": "models.config", "qualname": "Config", "kind": "class", "doc": "

Central configuration for Predator-Prey Hydra Effect experiments.

\n\n
Attributes
\n\n
    \n
  • grid_size (int):\nSide length of the square simulation grid.
  • \n
  • densities (Tuple[float, float]):\nInitial population fractions for (prey, predator).
  • \n
  • grid_sizes (Tuple[int, ...]):\nGrid dimensions for Finite-Size Scaling (FSS) analysis (Phase 3).
  • \n
  • prey_birth (float):\nGlobal birth rate for prey species.
  • \n
  • prey_death (float):\nGlobal death rate for prey species.
  • \n
  • predator_birth (float):\nGlobal birth rate for predator species.
  • \n
  • predator_death (float):\nGlobal death rate for predator species.
  • \n
  • critical_prey_birth (float):\nCritical birth rate identified from Phase 1.
  • \n
  • critical_prey_death (float):\nCritical death rate identified from Phase 1.
  • \n
  • prey_death_range (Tuple[float, float]):\nBounds for prey death rate sweep.
  • \n
  • n_prey_death (int):\nNumber of points in prey death rate sweep.
  • \n
  • n_replicates (int):\nIndependent stochastic runs per parameter set.
  • \n
  • warmup_steps (int):\nIterations before data collection begins.
  • \n
  • measurement_steps (int):\nIterations for collecting statistics.
  • \n
  • evolve_sd (float):\nStandard deviation for parameter mutation.
  • \n
  • evolve_min (float):\nLower bound for evolving parameters.
  • \n
  • evolve_max (float):\nUpper bound for evolving parameters.
  • \n
  • directed_hunting (bool):\nToggle for targeted predator movement.
  • \n
  • save_timeseries (bool):\nToggle for recording population time series.
  • \n
  • timeseries_subsample (int):\nSubsample rate for time series data.
  • \n
  • collect_pcf (bool):\nToggle for Pair Correlation Function analysis.
  • \n
  • pcf_sample_rate (float):\nFraction of runs that compute PCFs.
  • \n
  • pcf_max_distance (float):\nMaximum radial distance for PCF.
  • \n
  • pcf_n_bins (int):\nNumber of bins in PCF histogram.
  • \n
  • min_density_for_analysis (float):\nPopulation threshold for spatial analysis.
  • \n
  • n_jobs (int):\nCPU cores for parallelization (-1 = all).
  • \n
\n"}, "models.config.Config.get_prey_deaths": {"fullname": "models.config.Config.get_prey_deaths", "modulename": "models.config", "qualname": "Config.get_prey_deaths", "kind": "function", "doc": "

Generate array of prey death rates for parameter sweep.

\n", "signature": "(self) -> numpy.ndarray:", "funcdef": "def"}, "models.config.Config.get_warmup_steps": {"fullname": "models.config.Config.get_warmup_steps", "modulename": "models.config", "qualname": "Config.get_warmup_steps", "kind": "function", "doc": "

Get warmup steps (can be extended for size-dependent scaling).

\n", "signature": "(self, L: int) -> int:", "funcdef": "def"}, "models.config.Config.get_measurement_steps": {"fullname": "models.config.Config.get_measurement_steps", "modulename": "models.config", "qualname": "Config.get_measurement_steps", "kind": "function", "doc": "

Get measurement steps (can be extended for size-dependent scaling).

\n", "signature": "(self, L: int) -> int:", "funcdef": "def"}, "models.config.Config.estimate_runtime": {"fullname": "models.config.Config.estimate_runtime", "modulename": "models.config", "qualname": "Config.estimate_runtime", "kind": "function", "doc": "

Estimate wall-clock time for the experiment.

\n\n
Parameters
\n\n
    \n
  • n_cores (int):\nNumber of available CPU cores.
  • \n
\n\n
Returns
\n\n
    \n
  • str: Human-readable runtime estimate.
  • \n
\n", "signature": "(self, n_cores: int = 32) -> str:", "funcdef": "def"}, "models.config.get_phase_config": {"fullname": "models.config.get_phase_config", "modulename": "models.config", "qualname": "get_phase_config", "kind": "function", "doc": "

Retrieve configuration for a specific experimental phase.

\n\n
Parameters
\n\n
    \n
  • phase (int):\nPhase number (1-5).
  • \n
\n\n
Returns
\n\n
    \n
  • Config: Configuration instance for the requested phase.
  • \n
\n\n
Raises
\n\n
    \n
  • ValueError: If phase number is invalid.
  • \n
\n", "signature": "(phase: int) -> models.config.Config:", "funcdef": "def"}, "models.numba_optimized": {"fullname": "models.numba_optimized", "modulename": "models.numba_optimized", "kind": "module", "doc": "

Numba-Optimized Kernels

\n\n

This module provides Numba-accelerated kernels for the predator-prey\ncellular automaton, including update kernels and spatial analysis functions.

\n\n
Classes
\n\n

PPKernel\n Wrapper for predator-prey update kernels with pre-allocated buffers.

\n\n
Cluster Analysis
\n\n
\n
measure_cluster_sizes_fast # Fast cluster size measurement (sizes only).\ndetect_clusters_fast # Full cluster detection with labels.\nget_cluster_stats_fast # Comprehensive cluster statistics.\n
\n
\n\n
Pair Correlation Functions
\n\n
\n
compute_pcf_periodic_fast # PCF for two position sets with periodic boundaries.\ncompute_all_pcfs_fast #Compute prey-prey, pred-pred, and prey-pred PCFs.\n
\n
\n\n
Utilities
\n\n
\n
set_numba_seed # Seed Numba's internal RNG.\nwarmup_numba_kernels # Pre-compile kernels to avoid first-run latency.\n
\n
\n\n
Example
\n\n
\n
from models.numba_optimized import (\n    PPKernel,\n    get_cluster_stats_fast,\n    compute_all_pcfs_fast,\n)\n\n# Cluster analysis\nstats = get_cluster_stats_fast(grid, species=1)\nprint(f"Largest cluster: {stats['largest']}")\n\n# PCF computation\npcfs = compute_all_pcfs_fast(grid, max_distance=20.0)\nprey_prey_dist, prey_prey_gr, _ = pcfs['prey_prey']\n
\n
\n"}, "models.numba_optimized.set_numba_seed": {"fullname": "models.numba_optimized.set_numba_seed", "modulename": "models.numba_optimized", "qualname": "set_numba_seed", "kind": "function", "doc": "

Seed Numba's internal random number generator from within a JIT context.

\n\n

This function ensures that Numba's independent random number generator\nis synchronized with the provided seed, enabling reproducibility for\njit-compiled functions that use NumPy's random operations.

\n\n
Parameters
\n\n
    \n
  • seed (int):\nThe integer value used to initialize the random number generator.
  • \n
\n\n
Returns
\n\n
    \n
  • None
  • \n
\n\n
Notes
\n\n

Because Numba maintains its own internal state for random number\ngeneration, calling np.random.seed() in standard Python code will not\naffect jit-compiled functions. This helper must be called to bridge\nthat gap.

\n", "signature": "(seed: int) -> None:", "funcdef": "def"}, "models.numba_optimized.PPKernel": {"fullname": "models.numba_optimized.PPKernel", "modulename": "models.numba_optimized", "qualname": "PPKernel", "kind": "class", "doc": "

Wrapper for predator-prey kernel with pre-allocated buffers.

\n\n

This class manages the spatial configuration and memory buffers required\nfor the Numba-accelerated update kernels. By pre-allocating the\noccupied_buffer, it avoids expensive memory allocations during the\nsimulation loop.

\n\n
Parameters
\n\n
    \n
  • rows (int):\nNumber of rows in the simulation grid.
  • \n
  • cols (int):\nNumber of columns in the simulation grid.
  • \n
  • neighborhood ({'moore', 'von_neumann'}, optional):\nThe neighborhood type determining adjacent cells. 'moore' includes\ndiagonals (8 neighbors), 'von_neumann' does not (4 neighbors).\nDefault is 'moore'.
  • \n
  • directed_hunting (bool, optional):\nIf True, uses the directed behavior kernel where species search for\ntargets. If False, uses random neighbor selection. Default is False.
  • \n
\n\n
Attributes
\n\n
    \n
  • rows (int):\nGrid row count.
  • \n
  • cols (int):\nGrid column count.
  • \n
  • directed_hunting (bool):\nToggle for intelligent behavior logic.
  • \n
\n"}, "models.numba_optimized.PPKernel.update": {"fullname": "models.numba_optimized.PPKernel.update", "modulename": "models.numba_optimized", "qualname": "PPKernel.update", "kind": "function", "doc": "

Execute a single asynchronous update step using the configured kernel.

\n\n
Parameters
\n\n
    \n
  • grid (np.ndarray):\nThe current 2D simulation grid.
  • \n
  • prey_death_arr (np.ndarray):\n2D array of individual prey mortality rates.
  • \n
  • prey_birth (float):\nPrey reproduction probability.
  • \n
  • prey_death (float):\nBase prey mortality probability.
  • \n
  • pred_birth (float):\nPredator reproduction (hunting success) probability.
  • \n
  • pred_death (float):\nPredator mortality probability.
  • \n
  • evolve_sd (float, optional):\nMutation standard deviation (default 0.1).
  • \n
  • evolve_min (float, optional):\nMinimum evolved death rate (default 0.001).
  • \n
  • evolve_max (float, optional):\nMaximum evolved death rate (default 0.1).
  • \n
  • evolution_stopped (bool, optional):\nWhether to disable mutation during this step (default True).
  • \n
\n\n
Returns
\n\n
    \n
  • np.ndarray: The updated grid after one full asynchronous pass.
  • \n
\n", "signature": "(\tself,\tgrid: numpy.ndarray,\tprey_death_arr: numpy.ndarray,\tprey_birth: float,\tprey_death: float,\tpred_birth: float,\tpred_death: float,\tevolve_sd: float = 0.1,\tevolve_min: float = 0.001,\tevolve_max: float = 0.1,\tevolution_stopped: bool = True) -> numpy.ndarray:", "funcdef": "def"}, "models.numba_optimized.measure_cluster_sizes_fast": {"fullname": "models.numba_optimized.measure_cluster_sizes_fast", "modulename": "models.numba_optimized", "qualname": "measure_cluster_sizes_fast", "kind": "function", "doc": "

Measure cluster sizes for a specific species using Numba-accelerated flood fill.

\n\n

This function provides a high-performance interface for calculating cluster\nsize statistics without the overhead of generating a full label map. It is\noptimized for large-scale simulation analysis where only distribution\nmetrics (e.g., mean size, max size) are required.

\n\n
Parameters
\n\n
    \n
  • grid (np.ndarray):\nA 2D array representing the simulation environment.
  • \n
  • species (int):\nThe target species identifier (e.g., 1 for Prey, 2 for Predator).
  • \n
  • neighborhood ({'moore', 'neumann'}, optional):\nThe connectivity rule. 'moore' uses 8-way connectivity (including diagonals);\n'neumann' uses 4-way connectivity. Default is 'moore'.
  • \n
\n\n
Returns
\n\n
    \n
  • cluster_sizes (np.ndarray):\nA 1D array of integers, where each element is the cell count of an\nidentified cluster.
  • \n
\n\n
Notes
\n\n

The input grid is cast to int32 to ensure compatibility with the\nunderlying JIT-compiled _measure_clusters kernel.

\n\n
Examples
\n\n
\n
>>> sizes = measure_cluster_sizes_fast(grid, species=1, neighborhood='moore')\n>>> if sizes.size > 0:\n...     print(f"Largest cluster: {sizes.max()}")\n
\n
\n", "signature": "(\tgrid: numpy.ndarray,\tspecies: int,\tneighborhood: str = 'moore') -> numpy.ndarray:", "funcdef": "def"}, "models.numba_optimized.detect_clusters_fast": {"fullname": "models.numba_optimized.detect_clusters_fast", "modulename": "models.numba_optimized", "qualname": "detect_clusters_fast", "kind": "function", "doc": "

Perform full cluster detection with labels using Numba acceleration.

\n\n

This function returns a label array for spatial analysis and a dictionary\nof cluster sizes. It is significantly faster than standard Python or\nSciPy equivalents for large simulation grids.

\n\n
Parameters
\n\n
    \n
  • grid (np.ndarray):\nA 2D array representing the simulation environment.
  • \n
  • species (int):\nThe target species identifier (e.g., 1 for Prey, 2 for Predator).
  • \n
  • neighborhood ({'moore', 'neumann'}, optional):\nThe connectivity rule. 'moore' uses 8-way connectivity; 'neumann'\nuses 4-way connectivity. Default is 'moore'.
  • \n
\n\n
Returns
\n\n
    \n
  • labels (np.ndarray):\nA 2D int32 array where each cell contains its unique cluster ID.\nCells not belonging to the target species are 0.
  • \n
  • sizes (dict):\nA dictionary mapping cluster IDs to their respective cell counts.
  • \n
\n\n
Notes
\n\n

The underlying Numba kernel uses a stack-based flood fill to avoid\nrecursion limits and handles periodic boundary conditions.

\n\n
Examples
\n\n
\n
>>> labels, sizes = detect_clusters_fast(grid, species=1)\n>>> if sizes:\n...     largest_id = max(sizes, key=sizes.get)\n...     print(f"Cluster {largest_id} size: {sizes[largest_id]}")\n
\n
\n", "signature": "(\tgrid: numpy.ndarray,\tspecies: int,\tneighborhood: str = 'moore') -> Tuple[numpy.ndarray, Dict[int, int]]:", "funcdef": "def"}, "models.numba_optimized.get_cluster_stats_fast": {"fullname": "models.numba_optimized.get_cluster_stats_fast", "modulename": "models.numba_optimized", "qualname": "get_cluster_stats_fast", "kind": "function", "doc": "

Compute comprehensive cluster statistics for a species using Numba acceleration.

\n\n

This function integrates cluster detection and labeling to provide a\nfull suite of spatial metrics. It calculates the cluster size distribution\nand the largest cluster fraction, which often serves as an order\nparameter in percolation theory and Phase 1-3 analyses.

\n\n
Parameters
\n\n
    \n
  • grid (np.ndarray):\nA 2D array representing the simulation environment.
  • \n
  • species (int):\nThe target species identifier (e.g., 1 for Prey, 2 for Predator).
  • \n
  • neighborhood ({'moore', 'neumann'}, optional):\nThe connectivity rule. 'moore' uses 8-way connectivity; 'neumann'\nuses 4-way connectivity. Default is 'moore'.
  • \n
\n\n
Returns
\n\n
    \n
  • stats (dict):\nA dictionary containing:\n
      \n
    • 'n_clusters': Total count of isolated clusters.
    • \n
    • 'sizes': Sorted array (descending) of all cluster sizes.
    • \n
    • 'largest': Size of the single largest cluster.
    • \n
    • 'largest_fraction': Size of the largest cluster divided by\nthe total population of the species.
    • \n
    • 'mean_size': Average size of all clusters.
    • \n
    • 'size_distribution': Frequency mapping of {size: count}.
    • \n
    • 'labels': 2D array of unique cluster IDs.
    • \n
    • 'size_dict': Mapping of {label_id: size}.
    • \n
  • \n
\n\n
Examples
\n\n
\n
>>> stats = get_cluster_stats_fast(grid, species=1)\n>>> print(f"Found {stats['n_clusters']} prey clusters.")\n>>> print(f"Order parameter: {stats['largest_fraction']:.3f}")\n
\n
\n", "signature": "(grid: numpy.ndarray, species: int, neighborhood: str = 'moore') -> Dict:", "funcdef": "def"}, "models.numba_optimized.compute_pcf_periodic_fast": {"fullname": "models.numba_optimized.compute_pcf_periodic_fast", "modulename": "models.numba_optimized", "qualname": "compute_pcf_periodic_fast", "kind": "function", "doc": "

Compute the Pair Correlation Function (PCF) using cell-list acceleration.

\n\n

This high-level function coordinates the spatial hashing and histogram\ncalculation to determine the $g(r)$ function. It normalizes the resulting\nhistogram by the expected number of pairs in an ideal gas of the same\ndensity, accounting for the toroidal area of each radial bin.

\n\n
Parameters
\n\n
    \n
  • positions_i (np.ndarray):\n(N, 2) array of coordinates for species I.
  • \n
  • positions_j (np.ndarray):\n(M, 2) array of coordinates for species J.
  • \n
  • grid_shape (tuple of int):\nThe (rows, cols) dimensions of the simulation grid.
  • \n
  • max_distance (float):\nThe maximum radius to calculate correlations for.
  • \n
  • n_bins (int, optional):\nNumber of bins for the radial distribution (default 50).
  • \n
  • self_correlation (bool, optional):\nSet to True if computing the correlation of a species with itself\nto avoid self-counting (default False).
  • \n
\n\n
Returns
\n\n
    \n
  • bin_centers (np.ndarray):\nThe central radial distance for each histogram bin.
  • \n
  • pcf (np.ndarray):\nThe normalized $g(r)$ values. A value of 1.0 indicates no spatial\ncorrelation; > 1.0 indicates clustering; < 1.0 indicates repulsion.
  • \n
  • total_pairs (int):\nThe total count of pairs found within the max_distance.
  • \n
\n\n
Notes
\n\n

The function dynamically determines the optimal number of cells for the\nspatial hash based on the max_distance and grid dimensions to maintain\nlinear time complexity.

\n", "signature": "(\tpositions_i: numpy.ndarray,\tpositions_j: numpy.ndarray,\tgrid_shape: Tuple[int, int],\tmax_distance: float,\tn_bins: int = 50,\tself_correlation: bool = False) -> Tuple[numpy.ndarray, numpy.ndarray, int]:", "funcdef": "def"}, "models.numba_optimized.compute_all_pcfs_fast": {"fullname": "models.numba_optimized.compute_all_pcfs_fast", "modulename": "models.numba_optimized", "qualname": "compute_all_pcfs_fast", "kind": "function", "doc": "

Compute all three species Pair Correlation Functions (PCFs) using cell-list acceleration.

\n\n

This function calculates the spatial auto-correlations (Prey-Prey,\nPredator-Predator) and the cross-correlation (Prey-Predator) for a given\nsimulation grid. It identifies particle positions and leverages\nNumba-accelerated cell lists to handle the computations efficiently.

\n\n
Parameters
\n\n
    \n
  • grid (np.ndarray):\n2D integer array where 1 represents prey and 2 represents predators.
  • \n
  • max_distance (float, optional):\nThe maximum radial distance for the correlation. Defaults to 1/4\nof the minimum grid dimension if not provided.
  • \n
  • n_bins (int, optional):\nNumber of distance bins for the histogram. Default is 50.
  • \n
\n\n
Returns
\n\n
    \n
  • results (dict):\nA dictionary with keys 'prey_prey', 'pred_pred', and 'prey_pred'.\nEach value is a tuple containing:\n
      \n
    • bin_centers (np.ndarray): Radial distances.
    • \n
    • pcf_values (np.ndarray): Normalized g(r) values.
    • \n
    • pair_count (int): Total number of pairs found.
    • \n
  • \n
\n\n
Notes
\n\n

The PCF provides insight into the spatial organization of the system.\ng(r) > 1 at short distances indicates aggregation (clustering),\nwhile g(r) < 1 indicates exclusion or repulsion.

\n", "signature": "(\tgrid: numpy.ndarray,\tmax_distance: Optional[float] = None,\tn_bins: int = 50) -> Dict[str, Tuple[numpy.ndarray, numpy.ndarray, int]]:", "funcdef": "def"}, "models.numba_optimized.warmup_numba_kernels": {"fullname": "models.numba_optimized.warmup_numba_kernels", "modulename": "models.numba_optimized", "qualname": "warmup_numba_kernels", "kind": "function", "doc": "

Pre-compile all Numba-accelerated kernels to avoid first-run latency.

\n\n

This function executes a single step of the simulation and each analysis\nroutine on a dummy grid. Because Numba uses Just-In-Time (JIT) compilation,\nthe first call to a decorated function incurs a compilation overhead.\nRunning this warmup ensures that subsequent experimental runs are timed\naccurately and perform at full speed.

\n\n
Parameters
\n\n
    \n
  • grid_size (int, optional):\nThe side length of the dummy grid used for warmup (default 100).
  • \n
  • directed_hunting (bool, optional):\nIf True, also warms up the directed behavior update kernel (default False).
  • \n
\n\n
Returns
\n\n
    \n
  • None
  • \n
\n\n
Notes
\n\n

This function checks for NUMBA_AVAILABLE before execution. It warms up\nthe PPKernel (random and optionally directed), as well as the\nspatial analysis functions (compute_all_pcfs_fast, detect_clusters_fast, etc.).

\n", "signature": "(grid_size: int = 100, directed_hunting: bool = False):", "funcdef": "def"}, "models.numba_optimized.benchmark_kernels": {"fullname": "models.numba_optimized.benchmark_kernels", "modulename": "models.numba_optimized", "qualname": "benchmark_kernels", "kind": "function", "doc": "

Benchmark the execution performance of random vs. directed update kernels.

\n\n

This utility measures the average time per simulation step for both the\nstochastic (random neighbor) and heuristic (directed hunting/reproduction)\nupdate strategies. It accounts for the computational overhead introduced\nby the \"intelligent\" search logic used in directed mode.

\n\n
Parameters
\n\n
    \n
  • grid_size (int, optional):\nThe side length of the square simulation grid (default 100).
  • \n
  • n_runs (int, optional):\nThe number of iterations to perform for averaging performance (default 20).
  • \n
\n\n
Returns
\n\n
    \n
  • t_random (float):\nAverage time per step for the random kernel in milliseconds.
  • \n
  • t_directed (float):\nAverage time per step for the directed kernel in milliseconds.
  • \n
\n\n
Notes
\n\n

The function ensures a fair comparison by:

\n\n
    \n
  1. Using a fixed seed for reproducible initial grid states.
  2. \n
  3. Warming up Numba kernels before timing to exclude JIT compilation latency.
  4. \n
  5. Copying the grid and death arrays for each iteration to maintain\nconsistent population densities throughout the benchmark.
  6. \n
\n", "signature": "(grid_size: int = 100, n_runs: int = 20):", "funcdef": "def"}, "models.numba_optimized.benchmark_cluster_detection": {"fullname": "models.numba_optimized.benchmark_cluster_detection", "modulename": "models.numba_optimized", "qualname": "benchmark_cluster_detection", "kind": "function", "doc": "

Benchmark the performance of different cluster detection and analysis routines.

\n\n

This function evaluates three levels of spatial analysis:

\n\n
    \n
  1. Size measurement only (fastest, no label map).
  2. \n
  3. Full detection (returns label map and size dictionary).
  4. \n
  5. Comprehensive statistics (calculates distributions, means, and order parameters).
  6. \n
\n\n
Parameters
\n\n
    \n
  • grid_size (int, optional):\nSide length of the square grid for benchmarking (default 100).
  • \n
  • n_runs (int, optional):\nNumber of iterations to average for performance results (default 20).
  • \n
\n\n
Returns
\n\n
    \n
  • stats (dict):\nThe result dictionary from the final comprehensive statistics run.
  • \n
\n\n
Notes
\n\n

The benchmark uses a fixed prey density of 30% to ensure a representative\ndistribution of clusters. It pre-warms the Numba kernels to ensure that\nthe measurements reflect execution speed rather than compilation time.

\n", "signature": "(grid_size: int = 100, n_runs: int = 20):", "funcdef": "def"}, "experiments": {"fullname": "experiments", "modulename": "experiments", "kind": "module", "doc": "

Predator-Prey Hydra Effect Experiments

\n\n

HPC-ready experiment runner for investigating the Hydra effect in\npredator-prey cellular automata.

\n\n
Experimental Phases
\n\n
    \n
  • Phase 1: Parameter sweep to find critical point (bifurcation + cluster analysis)
  • \n
  • Phase 2: Self-organization analysis (evolution toward criticality)
  • \n
  • Phase 3: Finite-size scaling at critical point
  • \n
  • Phase 4: Sensitivity analysis across parameter regimes
  • \n
  • Phase 5: Model extensions (directed hunting comparison)
  • \n
\n\n
Functions
\n\n
\n
run_single_simulation # Execute one simulation run and collect metrics.\nrun_phase1, run_phase2, run_phase3, run_phase4, run_phase5  # Phase-specific experiment runners.\n
\n
\n\n
Utilities
\n\n
\n
generate_unique_seed # Deterministic seed generation from parameters.\ncount_populations # Count species populations on grid.\nget_evolved_stats # Statistics for evolved parameters.\naverage_pcfs # Average multiple PCF measurements.\nsave_results_jsonl, load_results_jsonl, save_results_npz # I/O functions for experiment results.\n
\n
\n\n
Command Line Usage
\n\n
\n
python experiments.py --phase 1                    # Run phase 1\npython experiments.py --phase 1 --dry-run          # Estimate runtime\npython experiments.py --phase all                  # Run all phases\npython experiments.py --phase 1 --output results/  # Custom output\n
\n
\n\n
Programmatic Usage
\n\n
\n
from experiments import run_single_simulation, run_phase1\nfrom models.config import PHASE1_CONFIG\n\n# Single simulation\nresult = run_single_simulation(\n    prey_birth=0.2,\n    prey_death=0.05,\n    predator_birth=0.8,\n    predator_death=0.1,\n    grid_size=100,\n    seed=42,\n    cfg=PHASE1_CONFIG,\n)\n\n# Full phase (writes to output directory)\nimport logging\nresults = run_phase1(PHASE1_CONFIG, Path("results/"), logging.getLogger())\n
\n
\n"}, "experiments.generate_unique_seed": {"fullname": "experiments.generate_unique_seed", "modulename": "experiments", "qualname": "generate_unique_seed", "kind": "function", "doc": "

Create a deterministic seed from a dictionary of parameters and a repetition index.

\n\n

This function serializes the input dictionary into a sorted JSON string,\nappends the repetition count, and hashes the resulting string using SHA-256.\nThe first 8 characters of the hex digest are then converted to an integer\nto provide a stable, unique seed for random number generators.

\n\n
Parameters
\n\n
    \n
  • params (dict):\nA dictionary of configuration parameters. Keys are sorted to ensure\ndeterminism regardless of insertion order.
  • \n
  • rep (int):\nThe repetition or iteration index, used to ensure different seeds\nare generated for the same parameter set across multiple runs.
  • \n
\n\n
Returns
\n\n
    \n
  • int: A unique integer seed derived from the input parameters.
  • \n
\n\n
Examples
\n\n
\n
>>> params = {'learning_rate': 0.01, 'batch_size': 32}\n>>> generate_unique_seed(params, 1)\n3432571217\n>>> generate_unique_seed(params, 2)\n3960013583\n
\n
\n", "signature": "(params: dict, rep: int) -> int:", "funcdef": "def"}, "experiments.count_populations": {"fullname": "experiments.count_populations", "modulename": "experiments", "qualname": "count_populations", "kind": "function", "doc": "

Count the number of empty, prey, and predator cells in the simulation grid.

\n\n
Parameters
\n\n
    \n
  • grid (np.ndarray):\nA 2D NumPy array representing the simulation environment, where:\n
      \n
    • 0: Empty cell
    • \n
    • 1: Prey
    • \n
    • 2: Predator
    • \n
  • \n
\n\n
Returns
\n\n
    \n
  • empty_count (int):\nTotal number of cells with a value of 0.
  • \n
  • prey_count (int):\nTotal number of cells with a value of 1.
  • \n
  • predator_count (int):\nTotal number of cells with a value of 2.
  • \n
\n\n
Examples
\n\n
\n
>>> grid = np.array([[0, 1], [2, 1]])\n>>> count_populations(grid)\n(1, 2, 1)\n
\n
\n", "signature": "(grid: numpy.ndarray) -> Tuple[int, int, int]:", "funcdef": "def"}, "experiments.get_evolved_stats": {"fullname": "experiments.get_evolved_stats", "modulename": "experiments", "qualname": "get_evolved_stats", "kind": "function", "doc": "

Get statistics of an evolved parameter from the model.

\n\n

This function retrieves parameter values from the model's internal storage,\nfilters out NaN values, and calculates basic descriptive statistics.

\n\n
Parameters
\n\n
    \n
  • model (object):\nThe simulation model instance containing a cell_params attribute\nwith a .get() method.
  • \n
  • param (str):\nThe name of the parameter to calculate statistics for.
  • \n
\n\n
Returns
\n\n
    \n
  • stats (dict):\nA dictionary containing the following keys:\n
      \n
    • 'mean': Arithmetic mean of valid values.
    • \n
    • 'std': Standard deviation of valid values.
    • \n
    • 'min': Minimum valid value.
    • \n
    • 'max': Maximum valid value.
    • \n
    • 'n': Count of non-NaN values.\nIf no valid data is found, all stats return NaN and n returns 0.
    • \n
  • \n
\n\n
Examples
\n\n
\n
>>> stats = get_evolved_stats(my_model, "speed")\n>>> print(stats['mean'])\n1.25\n
\n
\n", "signature": "(model, param: str) -> Dict:", "funcdef": "def"}, "experiments.average_pcfs": {"fullname": "experiments.average_pcfs", "modulename": "experiments", "qualname": "average_pcfs", "kind": "function", "doc": "

Average multiple Pair Correlation Function (PCF) measurements and calculate standard error.

\n\n
Parameters
\n\n
    \n
  • pcf_list (list of tuple):\nA list where each element is a tuple containing:\n
      \n
    • distances (np.ndarray): The radial distances (r).
    • \n
    • pcf_values (np.ndarray): The correlation values g(r).
    • \n
    • count (int): Metadata or weight (not used in current calculation).
    • \n
  • \n
\n\n
Returns
\n\n
    \n
  • distances (np.ndarray):\nThe radial distances from the first entry in the list.
  • \n
  • pcf_mean (np.ndarray):\nThe element-wise mean of the PCF values across all measurements.
  • \n
  • pcf_se (np.ndarray):\nThe standard error of the mean for the PCF values.
  • \n
\n\n
Examples
\n\n
\n
>>> data = [(np.array([0, 1]), np.array([1.0, 2.0]), 10),\n...         (np.array([0, 1]), np.array([1.2, 1.8]), 12)]\n>>> dist, mean, se = average_pcfs(data)\n>>> mean\narray([1.1, 1.9])\n
\n
\n", "signature": "(\tpcf_list: List[Tuple[numpy.ndarray, numpy.ndarray, int]]) -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:", "funcdef": "def"}, "experiments.save_results_jsonl": {"fullname": "experiments.save_results_jsonl", "modulename": "experiments", "qualname": "save_results_jsonl", "kind": "function", "doc": "

Save a list of dictionaries to a file in JSON Lines (JSONL) format.

\n\n

Each dictionary in the list is serialized into a single JSON string and\nwritten as a new line. Non-serializable objects are converted to strings\nusing the default string representation.

\n\n
Parameters
\n\n
    \n
  • results (list of dict):\nThe collection of result dictionaries to be saved.
  • \n
  • output_path (Path):\nThe file system path (pathlib.Path) where the JSONL file will be created.
  • \n
\n\n
Returns
\n\n
    \n
  • None
  • \n
\n\n
Notes
\n\n

The file is opened in 'w' (write) mode, which will overwrite any existing\ncontent at the specified path.

\n\n
Examples
\n\n
\n
>>> data = [{"id": 1, "score": 0.95}, {"id": 2, "score": 0.88}]\n>>> save_results_jsonl(data, Path("results.jsonl"))\n
\n
\n", "signature": "(results: List[Dict], output_path: pathlib.Path):", "funcdef": "def"}, "experiments.save_results_npz": {"fullname": "experiments.save_results_npz", "modulename": "experiments", "qualname": "save_results_npz", "kind": "function", "doc": "

Save simulation results to a compressed NumPy (.npz) binary file.

\n\n

This function flattens a list of result dictionaries into a single\ndictionary of NumPy arrays, prefixing keys with the run index to\nmaintain data separation. The resulting file is compressed to\nreduce storage space.

\n\n
Parameters
\n\n
    \n
  • results (list of dict):\nA list where each dictionary contains key-value pairs of\nsimulation data (e.g., arrays, lists, or scalars).
  • \n
  • output_path (Path):\nThe file system path (pathlib.Path) where the compressed\nNPZ file will be saved.
  • \n
\n\n
Returns
\n\n
    \n
  • None
  • \n
\n\n
Notes
\n\n

The keys in the saved file follow the format 'run_{index}_{original_key}'.\nValues are automatically converted to NumPy arrays if they are not\nalready.

\n\n
Examples
\n\n
\n
>>> results = [{"energy": [1, 2]}, {"energy": [3, 4]}]\n>>> save_results_npz(results, Path("output.npz"))\n
\n
\n", "signature": "(results: List[Dict], output_path: pathlib.Path):", "funcdef": "def"}, "experiments.load_results_jsonl": {"fullname": "experiments.load_results_jsonl", "modulename": "experiments", "qualname": "load_results_jsonl", "kind": "function", "doc": "

Load simulation results from a JSON Lines (JSONL) formatted file.

\n\n

This function reads a file line-by-line, parsing each line as an\nindependent JSON object and aggregating them into a list of dictionaries.

\n\n
Parameters
\n\n
    \n
  • input_path (Path):\nThe file system path (pathlib.Path) to the JSONL file.
  • \n
\n\n
Returns
\n\n
    \n
  • results (list of dict):\nA list of dictionaries reconstructed from the file content.
  • \n
\n\n
Raises
\n\n
    \n
  • FileNotFoundError: If the specified input path does not exist.
  • \n
  • json.JSONDecodeError: If a line in the file is not valid JSON.
  • \n
\n\n
Examples
\n\n
\n
>>> data = load_results_jsonl(Path("results.jsonl"))\n>>> len(data)\n2\n
\n
\n", "signature": "(input_path: pathlib.Path) -> List[Dict]:", "funcdef": "def"}, "experiments.run_single_simulation": {"fullname": "experiments.run_single_simulation", "modulename": "experiments", "qualname": "run_single_simulation", "kind": "function", "doc": "

Run a single Predator-Prey (PP) simulation and collect comprehensive metrics.

\n\n

This function initializes a Cellular Automata model, executes a warmup phase\nto reach steady state, and then performs a measurement phase to track\npopulation dynamics, spatial clustering, and evolutionary changes.

\n\n
Parameters
\n\n
    \n
  • prey_birth (float):\nThe probability or rate of prey reproduction.
  • \n
  • prey_death (float):\nThe base probability or rate of prey mortality.
  • \n
  • predator_birth (float):\nThe probability or rate of predator reproduction upon consuming prey.
  • \n
  • predator_death (float):\nThe probability or rate of predator mortality.
  • \n
  • grid_size (int):\nThe side length of the square simulation grid.
  • \n
  • seed (int):\nRandom seed for ensuring reproducibility of the simulation run.
  • \n
  • cfg (Config):\nA configuration object containing simulation hyperparameters (densities,\nsampling rates, timing, etc.).
  • \n
  • with_evolution (bool, optional):\nIf True, enables the evolution of the 'prey_death' parameter within\nthe model (default is False).
  • \n
  • compute_pcf (bool, optional):\nExplicit toggle for Pair Correlation Function calculation. If None,\nit is determined by cfg.pcf_sample_rate (default is None).
  • \n
\n\n
Returns
\n\n
    \n
  • result (dict):\nA dictionary containing simulation results including:\n
      \n
    • Input parameters and survival flags.
    • \n
    • Population mean and standard deviation for both species.
    • \n
    • Cluster statistics (number of clusters, sizes, largest fractions).
    • \n
    • Evolutionary statistics (mean, std, min, max, and final values).
    • \n
    • PCF data and spatial indices (segregation and clustering).
    • \n
    • Optional time series for populations and evolved parameters.
    • \n
  • \n
\n\n
Notes
\n\n

The function relies on several external utilities: count_populations,\nget_evolved_stats, get_cluster_stats_fast, compute_all_pcfs_fast,\nand average_pcfs.

\n", "signature": "(\tprey_birth: float,\tprey_death: float,\tpredator_birth: float,\tpredator_death: float,\tgrid_size: int,\tseed: int,\tcfg: models.config.Config,\twith_evolution: bool = False,\tcompute_pcf: Optional[bool] = None) -> Dict:", "funcdef": "def"}, "experiments.run_phase1": {"fullname": "experiments.run_phase1", "modulename": "experiments", "qualname": "run_phase1", "kind": "function", "doc": "

Execute Phase 1 of the simulation: a parameter sweep to identify critical points.

\n\n

This function performs a 1D sweep across varying prey mortality rates while\nkeeping other parameters fixed. It utilizes parallel execution via joblib\nand saves results incrementally to a JSONL file to ensure data integrity\nduring long-running batches.

\n\n
Parameters
\n\n
    \n
  • cfg (Config):\nConfiguration object containing simulation hyperparameters, sweep\nranges, and execution settings (n_jobs, grid_size, etc.).
  • \n
  • output_dir (Path):\nDirectory where result files (JSONL) and metadata (JSON) will be stored.
  • \n
  • logger (logging.Logger):\nLogger instance for tracking simulation progress and recording\noperational metadata.
  • \n
\n\n
Returns
\n\n
    \n
  • all_results (list of dict):\nA list of dictionaries containing the metrics collected from every\nindividual simulation run in the sweep.
  • \n
\n\n
Notes
\n\n

The function performs the following steps:

\n\n
    \n
  1. Pre-warms Numba kernels for performance.
  2. \n
  3. Generates a deterministic set of simulation jobs using unique seeds.
  4. \n
  5. Executes simulations in parallel using a generator for memory efficiency.
  6. \n
  7. Records metadata including a timestamp and a serialized snapshot of\nthe configuration.
  8. \n
\n", "signature": "(\tcfg: models.config.Config,\toutput_dir: pathlib.Path,\tlogger: logging.Logger) -> List[Dict]:", "funcdef": "def"}, "experiments.run_phase2": {"fullname": "experiments.run_phase2", "modulename": "experiments", "qualname": "run_phase2", "kind": "function", "doc": "

Execute Phase 2 of the simulation: self-organization and criticality analysis.

\n\n

This phase tests the Self-Organized Criticality (SOC) hypothesis by\ninitializing simulations at different points in the parameter space and\nobserving whether evolutionary pressure drives the system toward a\ncommon critical point, regardless of initial prey mortality rates.

\n\n
Parameters
\n\n
    \n
  • cfg (Config):\nConfiguration object containing simulation hyperparameters, evolution\nsettings, and execution constraints.
  • \n
  • output_dir (Path):\nDirectory where result files (JSONL) and metadata (JSON) will be stored.
  • \n
  • logger (logging.Logger):\nLogger instance for tracking progress and evolutionary convergence.
  • \n
\n\n
Returns
\n\n
    \n
  • all_results (list of dict):\nA list of dictionaries containing metrics from the evolutionary\nsimulation runs.
  • \n
\n\n
Notes
\n\n

The function captures:

\n\n
    \n
  1. Convergence of 'prey_death' across multiple replicates.
  2. \n
  3. Final steady-state population distributions.
  4. \n
  5. Incremental saving of results to prevent data loss.
  6. \n
\n", "signature": "(\tcfg: models.config.Config,\toutput_dir: pathlib.Path,\tlogger: logging.Logger) -> List[Dict]:", "funcdef": "def"}, "experiments.run_phase3": {"fullname": "experiments.run_phase3", "modulename": "experiments", "qualname": "run_phase3", "kind": "function", "doc": "

Phase 3: Finite-size scaling at critical point.

\n\n
    \n
  • Multiple grid sizes at (critical_prey_birth, critical_prey_death)
  • \n
  • Analyze cluster size cutoffs vs L
  • \n
\n", "signature": "(\tcfg: models.config.Config,\toutput_dir: pathlib.Path,\tlogger: logging.Logger) -> List[Dict]:", "funcdef": "def"}, "experiments.run_phase4": {"fullname": "experiments.run_phase4", "modulename": "experiments", "qualname": "run_phase4", "kind": "function", "doc": "

Execute Phase 3 of the simulation: Finite-Size Scaling (FSS) analysis.

\n\n

This phase investigates how spatial structures, specifically cluster size\ncutoffs, scale with the system size (L) at the critical point identified\nin Phase 1. This is essential for determining the universality class of\nthe phase transition.

\n\n
Parameters
\n\n
    \n
  • cfg (Config):\nConfiguration object containing critical point parameters, the list of\ngrid sizes to test, and execution settings.
  • \n
  • output_dir (Path):\nDirectory where result files (JSONL) and FSS metadata (JSON) will be\nstored.
  • \n
  • logger (logging.Logger):\nLogger instance for tracking progress across different grid sizes.
  • \n
\n\n
Returns
\n\n
    \n
  • all_results (list of dict):\nA list of dictionaries containing metrics and cluster statistics for\neach grid size and replicate.
  • \n
\n\n
Notes
\n\n

The function performs the following:

\n\n
    \n
  1. Iterates through multiple grid sizes defined in cfg.grid_sizes.
  2. \n
  3. Generates parallel jobs for each size using critical birth/death rates.
  4. \n
  5. Saves results incrementally to allow for post-simulation analysis of\npower-law exponents.
  6. \n
\n", "signature": "(\tcfg: models.config.Config,\toutput_dir: pathlib.Path,\tlogger: logging.Logger) -> List[Dict]:", "funcdef": "def"}, "experiments.run_phase5": {"fullname": "experiments.run_phase5", "modulename": "experiments", "qualname": "run_phase5", "kind": "function", "doc": "

Execute Phase 5 of the simulation: Global 4D parameter sweep with directed hunting.

\n\n

This phase performs a comprehensive sensitivity analysis by varying four key\nparameters (prey birth/death and predator birth/death) while directed\nhunting is enabled. The results allow for a direct comparison with Phase 4\nto determine how predator search behavior shifts the system's critical\nthresholds and stability.

\n\n
Parameters
\n\n
    \n
  • cfg (Config):\nConfiguration object containing simulation hyperparameters, parallel\nexecution settings, and the fixed grid size for this phase.
  • \n
  • output_dir (Path):\nDirectory where the result JSONL file and execution metadata will\nbe stored.
  • \n
  • logger (logging.Logger):\nLogger instance for tracking the progress of the high-volume\nsimulation batch.
  • \n
\n\n
Returns
\n\n
    \n
  • all_results (list of dict):\nA list of dictionaries containing metrics for every simulation in\nthe 4D parameter grid.
  • \n
\n\n
Notes
\n\n

The function utilizes a Cartesian product of parameter ranges to build a\njob list of over 13,000 unique parameter sets (multiplied by replicates).\nSeeds are uniquely generated to distinguish these runs from other phases\neven if parameter values overlap.

\n", "signature": "(\tcfg: models.config.Config,\toutput_dir: pathlib.Path,\tlogger: logging.Logger) -> List[Dict]:", "funcdef": "def"}, "experiments.main": {"fullname": "experiments.main", "modulename": "experiments", "qualname": "main", "kind": "function", "doc": "

Organize the predator-prey experimental suite across multiple phases.

\n\n

This entry point handles command-line arguments, sets up logging and output\ndirectories, and executes the requested simulation phases (1-5). It\nsupports parallel execution, dry runs for runtime estimation, and\nautomated configuration persistence.

\n\n
Notes
\n\n

The script dynamically retrieves phase-specific configurations using\nget_phase_config and dispatches execution to the corresponding runner\nin the PHASE_RUNNERS mapping.

\n", "signature": "():", "funcdef": "def"}}, "docInfo": {"models.CA": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 369}, "models.CA.CA": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 151}, "models.CA.CA.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 132, "bases": 0, "doc": 138}, "models.CA.CA.rows": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "models.CA.CA.cols": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "models.CA.CA.densities": {"qualname": 2, "fullname": 4, "annotation": 3, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "models.CA.CA.n_species": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 13}, "models.CA.CA.validate": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 52}, "models.CA.CA.evolve": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 119, "bases": 0, "doc": 267}, "models.CA.CA.update": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 110}, "models.CA.CA.run": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 186}, "models.CA.PP": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 270}, "models.CA.PP.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 229, "bases": 0, "doc": 13}, "models.CA.PP.validate": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 81}, "models.CA.PP.update_async": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, 
"signature": 14, "bases": 0, "doc": 48}, "models.CA.PP.update": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 13}, "models.config": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 284}, "models.config.Config": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 419}, "models.config.Config.get_prey_deaths": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 12}, "models.config.Config.get_warmup_steps": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 13}, "models.config.Config.get_measurement_steps": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 13}, "models.config.Config.estimate_runtime": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 32, "bases": 0, "doc": 48}, "models.config.get_phase_config": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 66}, "models.numba_optimized": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 412}, "models.numba_optimized.set_numba_seed": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 126}, "models.numba_optimized.PPKernel": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 192}, "models.numba_optimized.PPKernel.update": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 181, "bases": 0, "doc": 194}, "models.numba_optimized.measure_cluster_sizes_fast": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 63, "bases": 0, "doc": 327}, "models.numba_optimized.detect_clusters_fast": {"qualname": 3, "fullname": 
6, "annotation": 0, "default_value": 0, "signature": 85, "bases": 0, "doc": 353}, "models.numba_optimized.get_cluster_stats_fast": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 400}, "models.numba_optimized.compute_pcf_periodic_fast": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 145, "bases": 0, "doc": 301}, "models.numba_optimized.compute_all_pcfs_fast": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 107, "bases": 0, "doc": 235}, "models.numba_optimized.warmup_numba_kernels": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 178}, "models.numba_optimized.benchmark_kernels": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 208}, "models.numba_optimized.benchmark_cluster_detection": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 170}, "experiments": {"qualname": 0, "fullname": 1, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 608}, "experiments.generate_unique_seed": {"qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 263}, "experiments.count_populations": {"qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 206}, "experiments.get_evolved_stats": {"qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 255}, "experiments.average_pcfs": {"qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 86, "bases": 0, "doc": 344}, "experiments.save_results_jsonl": {"qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 38, "bases": 0, "doc": 259}, "experiments.save_results_npz": {"qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 38, 
"bases": 0, "doc": 262}, "experiments.load_results_jsonl": {"qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 200}, "experiments.run_single_simulation": {"qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 145, "bases": 0, "doc": 360}, "experiments.run_phase1": {"qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 69, "bases": 0, "doc": 225}, "experiments.run_phase2": {"qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 69, "bases": 0, "doc": 187}, "experiments.run_phase3": {"qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 69, "bases": 0, "doc": 36}, "experiments.run_phase4": {"qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 69, "bases": 0, "doc": 214}, "experiments.run_phase5": {"qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 69, "bases": 0, "doc": 216}, "experiments.main": {"qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 7, "bases": 0, "doc": 82}}, "length": 50, "save": true}, "index": {"qualname": {"root": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "a": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.rows": {"tf": 1}, "models.CA.CA.cols": {"tf": 1}, "models.CA.CA.densities": {"tf": 1}, "models.CA.CA.n_species": {"tf": 1}, "models.CA.CA.validate": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}}, "df": 10}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.cols": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {"models.config.Config": {"tf": 1}, "models.config.Config.get_prey_deaths": {"tf": 1}, 
"models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}}, "df": 6}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"experiments.count_populations": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 3, "s": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.rows": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.run": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 7, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config.estimate_runtime": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": 
{"experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 3}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.densities": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "s": {"docs": {"models.config.Config.get_prey_deaths": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {"models.CA.CA.n_species": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "z": {"docs": {"experiments.save_results_npz": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.n_species": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": 
{"models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.validate": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}}, "df": 2}}}}}}}}, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1, "d": {"docs": {"experiments.get_evolved_stats": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config.estimate_runtime": {"tf": 1}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.update": {"tf": 1}, 
"models.CA.PP.update_async": {"tf": 1}, "models.CA.PP.update": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 4}}}}}, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {"models.CA.PP": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.CA.PP.update": {"tf": 1}}, "df": 5, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 2}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"models.config.Config.get_prey_deaths": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"1": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1}, "2": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1}, "3": {"docs": {"experiments.run_phase3": {"tf": 1}}, "df": 1}, "4": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1}, "5": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}, "docs": {"models.config.get_phase_config": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "f": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1, "s": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 
0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"experiments.count_populations": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"models.CA.PP.update_async": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"experiments.average_pcfs": {"tf": 1}}, "df": 1}}}}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"models.config.Config.get_prey_deaths": {"tf": 1}, "models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"models.config.Config.get_warmup_steps": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 1, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": 
{"models.config.Config.get_measurement_steps": {"tf": 1}}, "df": 1}}}}}}}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"experiments.main": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 5}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 2}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 2}}}}}}}}}, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "l": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"experiments.load_results_jsonl": {"tf": 1}}, "df": 1}}}}}}, "fullname": {"root": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}}, "df": 2, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA": {"tf": 1}, 
"models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.rows": {"tf": 1}, "models.CA.CA.cols": {"tf": 1}, "models.CA.CA.densities": {"tf": 1}, "models.CA.CA.n_species": {"tf": 1}, "models.CA.CA.validate": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.CA.PP.update": {"tf": 1}, "models.config": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.config.Config.get_prey_deaths": {"tf": 1}, "models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 35}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 1, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.config.Config.get_measurement_steps": {"tf": 1}}, "df": 1}}}}}}}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"experiments.main": {"tf": 1}}, "df": 1}}}}, 
"c": {"docs": {}, "df": 0, "a": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA": {"tf": 1.4142135623730951}, "models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.CA.rows": {"tf": 1.4142135623730951}, "models.CA.CA.cols": {"tf": 1.4142135623730951}, "models.CA.CA.densities": {"tf": 1.4142135623730951}, "models.CA.CA.n_species": {"tf": 1.4142135623730951}, "models.CA.CA.validate": {"tf": 1.4142135623730951}, "models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.CA.CA.update": {"tf": 1.4142135623730951}, "models.CA.CA.run": {"tf": 1.4142135623730951}, "models.CA.PP": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.CA.PP.update": {"tf": 1}}, "df": 16}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.cols": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {"models.config": {"tf": 1}, "models.config.Config": {"tf": 1.4142135623730951}, "models.config.Config.get_prey_deaths": {"tf": 1.4142135623730951}, "models.config.Config.get_warmup_steps": {"tf": 1.4142135623730951}, "models.config.Config.get_measurement_steps": {"tf": 1.4142135623730951}, "models.config.Config.estimate_runtime": {"tf": 1.4142135623730951}, "models.config.get_phase_config": {"tf": 1.4142135623730951}}, "df": 7}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"experiments.count_populations": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 
1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 3, "s": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.rows": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.run": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 7, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config.estimate_runtime": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 3}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.densities": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "s": {"docs": {"models.config.Config.get_prey_deaths": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, 
"n": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {"models.CA.CA.n_species": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 12}}}}, "p": {"docs": {}, "df": 0, "z": {"docs": {"experiments.save_results_npz": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.n_species": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "z": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.validate": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}}, "df": 2}}}}}}}}, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1, "d": {"docs": {"experiments.get_evolved_stats": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config.estimate_runtime": {"tf": 1}}, "df": 1}}}}}}}, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"experiments": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.count_populations": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.average_pcfs": 
{"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 15}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.CA.PP.update": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 4}}}}}, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {"models.CA.PP": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.CA.PP.update": {"tf": 1}}, "df": 5, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 2}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"models.config.Config.get_prey_deaths": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"1": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1}, "2": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1}, "3": {"docs": {"experiments.run_phase3": {"tf": 1}}, "df": 1}, "4": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1}, "5": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}, "docs": 
{"models.config.get_phase_config": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "f": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1, "s": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"experiments.count_populations": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"models.CA.PP.update_async": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"experiments.average_pcfs": {"tf": 1}}, "df": 1}}}}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"models.config.Config.get_prey_deaths": {"tf": 1}, "models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}}}}}}, "w": {"docs": {}, 
"df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"models.config.Config.get_warmup_steps": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 12}}}}}}}}}, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 5}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 2}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, 
"df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 2}}}}}}}}}, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "l": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"experiments.load_results_jsonl": {"tf": 1}}, "df": 1}}}}}}, "annotation": {"root": {"docs": {"models.CA.CA.rows": {"tf": 1}, "models.CA.CA.cols": {"tf": 1}, "models.CA.CA.densities": {"tf": 1.4142135623730951}, "models.CA.CA.n_species": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.rows": {"tf": 1}, "models.CA.CA.cols": {"tf": 1}, "models.CA.CA.n_species": {"tf": 1}}, "df": 3}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.densities": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "default_value": {"root": {"docs": {}, "df": 0}}, "signature": {"root": {"0": {"0": {"1": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "5": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1}, "docs": {"models.CA.CA.evolve": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel.update": {"tf": 1.7320508075688772}}, "df": 3}, "1": {"0": {"0": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": 
{"tf": 1}}, "df": 3}, "docs": {"models.CA.PP.__init__": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {"models.CA.PP.__init__": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}}, "df": 2}, "2": {"0": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 2}, "docs": {"models.CA.PP.__init__": {"tf": 1}}, "df": 1}, "3": {"2": {"docs": {"models.config.Config.estimate_runtime": {"tf": 1}}, "df": 1}, "9": {"docs": {"models.CA.PP.__init__": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}}, "df": 4}, "docs": {}, "df": 0}, "5": {"0": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"models.CA.CA.__init__": {"tf": 10.488088481701515}, "models.CA.CA.validate": {"tf": 3.4641016151377544}, "models.CA.CA.evolve": {"tf": 9.848857801796104}, "models.CA.CA.update": {"tf": 3.4641016151377544}, "models.CA.CA.run": {"tf": 7.874007874011811}, "models.CA.PP.__init__": {"tf": 13.74772708486752}, "models.CA.PP.validate": {"tf": 3.4641016151377544}, "models.CA.PP.update_async": {"tf": 3.4641016151377544}, "models.CA.PP.update": {"tf": 3.4641016151377544}, "models.config.Config.get_prey_deaths": {"tf": 4}, "models.config.Config.get_warmup_steps": {"tf": 4.47213595499958}, "models.config.Config.get_measurement_steps": {"tf": 4.47213595499958}, "models.config.Config.estimate_runtime": {"tf": 5.0990195135927845}, "models.config.get_phase_config": {"tf": 4.898979485566356}, "models.numba_optimized.set_numba_seed": {"tf": 4}, "models.numba_optimized.PPKernel.update": {"tf": 11.789826122551595}, "models.numba_optimized.measure_cluster_sizes_fast": 
{"tf": 7.14142842854285}, "models.numba_optimized.detect_clusters_fast": {"tf": 8.306623862918075}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 6.6332495807108}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 10.723805294763608}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 9.327379053088816}, "models.numba_optimized.warmup_numba_kernels": {"tf": 5.830951894845301}, "models.numba_optimized.benchmark_kernels": {"tf": 5.830951894845301}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 5.830951894845301}, "experiments.generate_unique_seed": {"tf": 4.898979485566356}, "experiments.count_populations": {"tf": 5.916079783099616}, "experiments.get_evolved_stats": {"tf": 4.47213595499958}, "experiments.average_pcfs": {"tf": 8.366600265340756}, "experiments.save_results_jsonl": {"tf": 5.5677643628300215}, "experiments.save_results_npz": {"tf": 5.5677643628300215}, "experiments.load_results_jsonl": {"tf": 5}, "experiments.run_single_simulation": {"tf": 10.677078252031311}, "experiments.run_phase1": {"tf": 7.483314773547883}, "experiments.run_phase2": {"tf": 7.483314773547883}, "experiments.run_phase3": {"tf": 7.483314773547883}, "experiments.run_phase4": {"tf": 7.483314773547883}, "experiments.run_phase5": {"tf": 7.483314773547883}, "experiments.main": {"tf": 2.6457513110645907}}, "df": 38, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}}, "df": 2}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, 
"experiments.save_results_npz": {"tf": 1}}, "df": 2}}}}}}}, "i": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.__init__": {"tf": 1.7320508075688772}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.run": {"tf": 1.4142135623730951}, "models.CA.PP.__init__": {"tf": 1.7320508075688772}, "models.config.Config.get_warmup_steps": {"tf": 1.4142135623730951}, "models.config.Config.get_measurement_steps": {"tf": 1.4142135623730951}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 2}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 1.4142135623730951}, "experiments.count_populations": {"tf": 1.7320508075688772}, "experiments.average_pcfs": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 21}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"experiments.load_results_jsonl": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": 
{"models.config.Config.estimate_runtime": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {"models.config.get_phase_config": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1.4142135623730951}, "experiments.run_phase3": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1.4142135623730951}}, "df": 7}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}}, "df": 2}}}, "f": {"docs": {}, "df": 0, "g": {"docs": {"experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 6}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}}, "df": 2}}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1.7320508075688772}, "experiments.run_single_simulation": {"tf": 
1.4142135623730951}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.PP.__init__": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 16}}, "r": {"docs": {"experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 5, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.PP.__init__": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 2}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2}}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, 
"experiments.count_populations": {"tf": 1}, "experiments.average_pcfs": {"tf": 1.4142135623730951}}, "df": 7}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.PP.__init__": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 2}}}}, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1.7320508075688772}, "models.CA.PP.__init__": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 2.6457513110645907}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.run_single_simulation": {"tf": 2}}, "df": 7}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.PP.__init__": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 4}}}}}, "n": {"docs": {"models.config.Config.estimate_runtime": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 5, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 5}}}}}}}}}}}, "o": {"docs": {}, 
"df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.validate": {"tf": 1}, "models.CA.CA.evolve": {"tf": 2}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1.7320508075688772}, "models.CA.PP.__init__": {"tf": 1.7320508075688772}, "models.CA.PP.validate": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.CA.PP.update": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 12}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {"models.config.Config.get_prey_deaths": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1.7320508075688772}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 2}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}, "experiments.count_populations": {"tf": 1}, "experiments.average_pcfs": {"tf": 2.23606797749979}}, "df": 9}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"models.config.Config.get_prey_deaths": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1.7320508075688772}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 2}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}, "experiments.count_populations": {"tf": 1}, "experiments.average_pcfs": {"tf": 2.23606797749979}}, "df": 9}}}}}}}, 
"s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA.__init__": {"tf": 1.7320508075688772}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1.7320508075688772}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 9}, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 4}}, "l": {"docs": {}, "df": 0, "f": {"docs": {"models.CA.CA.validate": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.CA.PP.update": {"tf": 1}, "models.config.Config.get_prey_deaths": {"tf": 1}, "models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 13}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.evolve": {"tf": 1}, 
"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 4}}}}}}, "d": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 2}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}}}}}, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.PP.__init__": {"tf": 1}}, "df": 1}}}}}}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 4}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 2, "s": {"docs": {"models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.PP.__init__": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"experiments.save_results_jsonl": {"tf": 1.4142135623730951}, "experiments.save_results_npz": {"tf": 1.4142135623730951}, "experiments.load_results_jsonl": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}, 
"experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 8, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 8}}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"models.config.get_phase_config": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1.7320508075688772}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 2}, "d": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}}, "df": 1, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "f": {"docs": {"experiments.average_pcfs": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.PP.__init__": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, 
"o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1.7320508075688772}, "models.CA.CA.run": {"tf": 1.4142135623730951}, "models.CA.PP.__init__": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 6}}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 7}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "x": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 4}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.PP.__init__": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 4}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"experiments.get_evolved_stats": {"tf": 1}}, "df": 1, "s": {"docs": {"models.config.get_phase_config": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 7}}}}}}, "v": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA.evolve": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.run": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 3}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}, "r": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 1}}}, "l": {"docs": {"models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.run": {"tf": 1}, "experiments.average_pcfs": {"tf": 1.4142135623730951}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 10}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1.4142135623730951}, "experiments.run_phase3": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1.4142135623730951}}, "df": 5}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, 
"experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 5}}}}}}}, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.PP.__init__": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 5}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 2}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2}}}}, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.CA.PP.__init__": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 2}}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.count_populations": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 11}}}}, "j": {"docs": 
{"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}}, "bases": {"root": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {"models.CA.PP": {"tf": 1}}, "df": 1}}}}, "doc": {"root": {"0": {"0": {"0": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}, "1": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "1": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}}, "df": 3}, "5": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "experiments": {"tf": 1}}, "df": 3}, "docs": {"models.CA": {"tf": 2.23606797749979}, "models.CA.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1.7320508075688772}, "models.CA.CA.n_species": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1.7320508075688772}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.CA.PP.validate": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1.7320508075688772}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}, "experiments": {"tf": 2}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.count_populations": {"tf": 1.7320508075688772}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.average_pcfs": {"tf": 2}, "experiments.save_results_jsonl": {"tf": 1.4142135623730951}}, "df": 18}, "1": {"0": {"0": {"0": {"docs": {"models.CA": {"tf": 1}}, "df": 1}, "docs": {"models.CA": {"tf": 2.449489742783178}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments": 
{"tf": 1}}, "df": 5}, "docs": {"models.CA.PP": {"tf": 1.4142135623730951}, "models.config": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}}, "df": 3}, "2": {"docs": {"experiments.average_pcfs": {"tf": 1}}, "df": 1}, "3": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}, "5": {"docs": {"models.CA": {"tf": 1.4142135623730951}}, "df": 1}, "7": {"0": {"0": {"0": {"0": {"0": {"0": {"0": {"0": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.run": {"tf": 1.4142135623730951}, "models.CA.PP": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.config": {"tf": 1}, "models.config.Config": {"tf": 1.7320508075688772}, "models.config.get_phase_config": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}, "experiments": {"tf": 2.449489742783178}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.count_populations": {"tf": 2.449489742783178}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.average_pcfs": {"tf": 2.8284271247461903}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 25, "d": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "experiments.run_phase1": 
{"tf": 1}}, "df": 2}, "/": {"4": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "2": {"0": {"0": {"docs": {"models.config": {"tf": 1}}, "df": 1}, "docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 3}, "5": {"6": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}, "docs": {"experiments.get_evolved_stats": {"tf": 1}}, "df": 1}, "docs": {"models.CA.PP": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.count_populations": {"tf": 2}, "experiments.average_pcfs": {"tf": 1.4142135623730951}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 14, "d": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.count_populations": {"tf": 1}}, "df": 9}}, "3": {"0": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 1}, "2": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}, "4": {"3": {"2": {"5": {"7": {"1": {"2": {"1": {"7": 
{"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"6": {"0": {"0": {"1": {"3": {"5": {"8": {"3": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"models.numba_optimized": {"tf": 2.23606797749979}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 2}, "experiments.generate_unique_seed": {"tf": 2}, "experiments.get_evolved_stats": {"tf": 1.4142135623730951}}, "df": 5}, "docs": {"models.CA": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 7, "f": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 1}}, "4": {"2": {"docs": {"models.CA": {"tf": 1.7320508075688772}, "experiments": {"tf": 1}}, "df": 2}, "docs": {"models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 7, "d": {"docs": {"models.config": {"tf": 1}, "experiments.run_phase5": {"tf": 1.4142135623730951}}, "df": 2}}, "5": {"0": {"0": {"docs": {"models.CA": {"tf": 1}}, "df": 1}, "docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2}, "docs": 
{"models.config.get_phase_config": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_phase5": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 4}, "8": {"8": {"docs": {"experiments.save_results_jsonl": {"tf": 1}}, "df": 1}, "docs": {"models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}}, "df": 7}, "9": {"5": {"docs": {"experiments.save_results_jsonl": {"tf": 1}}, "df": 1}, "9": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1}, "docs": {"experiments.average_pcfs": {"tf": 1}}, "df": 1}, "docs": {"models.CA": {"tf": 16.1245154965971}, "models.CA.CA": {"tf": 7}, "models.CA.CA.__init__": {"tf": 7.483314773547883}, "models.CA.CA.rows": {"tf": 1.7320508075688772}, "models.CA.CA.cols": {"tf": 1.7320508075688772}, "models.CA.CA.densities": {"tf": 2}, "models.CA.CA.n_species": {"tf": 1.7320508075688772}, "models.CA.CA.validate": {"tf": 4.123105625617661}, "models.CA.CA.evolve": {"tf": 8.12403840463596}, "models.CA.CA.update": {"tf": 5.656854249492381}, "models.CA.CA.run": {"tf": 6.928203230275509}, "models.CA.PP": {"tf": 9.695359714832659}, "models.CA.PP.__init__": {"tf": 1.7320508075688772}, "models.CA.PP.validate": {"tf": 4.69041575982343}, "models.CA.PP.update_async": {"tf": 2.8284271247461903}, "models.CA.PP.update": {"tf": 1.7320508075688772}, "models.config": {"tf": 13}, "models.config.Config": {"tf": 12.884098726725126}, "models.config.Config.get_prey_deaths": {"tf": 1.7320508075688772}, "models.config.Config.get_warmup_steps": {"tf": 1.7320508075688772}, "models.config.Config.get_measurement_steps": {"tf": 1.7320508075688772}, "models.config.Config.estimate_runtime": {"tf": 5.0990195135927845}, "models.config.get_phase_config": {"tf": 6.082762530298219}, 
"models.numba_optimized": {"tf": 15.620499351813308}, "models.numba_optimized.set_numba_seed": {"tf": 6}, "models.numba_optimized.PPKernel": {"tf": 8}, "models.numba_optimized.PPKernel.update": {"tf": 8.94427190999916}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 12.449899597988733}, "models.numba_optimized.detect_clusters_fast": {"tf": 13.341664064126334}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 13.820274961085254}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 9.1104335791443}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 7.874007874011811}, "models.numba_optimized.warmup_numba_kernels": {"tf": 6.855654600401044}, "models.numba_optimized.benchmark_kernels": {"tf": 7.681145747868608}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 7.280109889280518}, "experiments": {"tf": 19.390719429665317}, "experiments.generate_unique_seed": {"tf": 10.862780491200215}, "experiments.count_populations": {"tf": 10.770329614269007}, "experiments.get_evolved_stats": {"tf": 11.269427669584644}, "experiments.average_pcfs": {"tf": 14.2828568570857}, "experiments.save_results_jsonl": {"tf": 11.445523142259598}, "experiments.save_results_npz": {"tf": 11.090536506409418}, "experiments.load_results_jsonl": {"tf": 9.848857801796104}, "experiments.run_single_simulation": {"tf": 10.770329614269007}, "experiments.run_phase1": {"tf": 7.874007874011811}, "experiments.run_phase2": {"tf": 7.681145747868608}, "experiments.run_phase3": {"tf": 3.4641016151377544}, "experiments.run_phase4": {"tf": 7.810249675906654}, "experiments.run_phase5": {"tf": 6.855654600401044}, "experiments.main": {"tf": 3.872983346207417}}, "df": 50, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA": {"tf": 1.7320508075688772}, "models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.CA.CA.update": {"tf": 1}, "models.CA.PP": {"tf": 
1.7320508075688772}, "models.CA.PP.update_async": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}, "experiments.count_populations": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 12, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"models.CA": {"tf": 1.7320508075688772}, "models.CA.CA": {"tf": 1.4142135623730951}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 9}}}}, "s": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.CA.PP": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "experiments.count_populations": {"tf": 2}}, "df": 7}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.config": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"models.CA": {"tf": 1.4142135623730951}, "models.CA.CA": {"tf": 1.4142135623730951}, "models.CA.CA.update": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "experiments.run_phase4": {"tf": 
1}}, "df": 5, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA": {"tf": 1}, "models.config": {"tf": 1}, "models.numba_optimized": {"tf": 1}}, "df": 3}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.CA.CA.evolve": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"models.config.Config.estimate_runtime": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized": {"tf": 3.1622776601683795}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 2.449489742783178}, "models.numba_optimized.detect_clusters_fast": {"tf": 2.23606797749979}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 3}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1.4142135623730951}}, "df": 9, "s": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 2.23606797749979}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 7}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}}, "a": {"docs": {"models.CA": {"tf": 1.4142135623730951}, "models.CA.CA.validate": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}, "models.CA.PP.validate": {"tf": 
1}}, "df": 4, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}}, "df": 4, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.CA.CA.run": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 2}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 3}}}, "e": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}}, "df": 3, "s": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 4}}}}}}}}, "s": 
{"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {"models.config": {"tf": 3.7416573867739413}, "models.config.get_phase_config": {"tf": 1}, "experiments": {"tf": 2}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 9, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.CA": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.config": {"tf": 2}, "models.config.Config": {"tf": 1}, "models.config.get_phase_config": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 11, "s": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.config": {"tf": 1.4142135623730951}, "experiments.main": {"tf": 1}}, "df": 3}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.PP.update": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, 
"g": {"docs": {"models.CA.CA": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1.4142135623730951}, "experiments.average_pcfs": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1.4142135623730951}}, "df": 10}}}, "s": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.7320508075688772}}, "df": 4}}}}}}}}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.update": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.PP": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 2}}}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": 
{"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"experiments.generate_unique_seed": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}}, "df": 3}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"experiments.run_phase2": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {"models.CA": {"tf": 1.7320508075688772}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 5}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}}, "df": 1, "s": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.cols": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}}, "df": 4}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"models.config.Config": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.config.Config": {"tf": 1}, 
"experiments.save_results_jsonl": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.config.Config": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.validate": {"tf": 1}}, "df": 1, "s": {"docs": {"models.config.Config": {"tf": 1}, "models.config.Config.estimate_runtime": {"tf": 1.4142135623730951}}, "df": 2}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.PP.validate": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 2}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}, "experiments.average_pcfs": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1}}, "df": 6, "s": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2}}}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.main": {"tf": 1}}, "df": 1}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.config": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 4}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": 
{"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 1}}}}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized": {"tf": 2.23606797749979}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 7}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized": {"tf": 1}}, "df": 1, "s": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 5}}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"experiments.save_results_npz": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized": {"tf": 1}, 
"models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 2, "d": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 3}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"experiments": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.count_populations": {"tf": 2.23606797749979}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 11, "s": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": 
{"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.validate": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.update": {"tf": 1}}, "df": 1, "s": {"docs": {"models.CA.PP.update_async": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.CA.PP": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}}, "df": 6}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, 
"df": 0, "m": {"docs": {"models.config": {"tf": 1}, "experiments": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"models.config": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}}, "df": 2, "d": {"docs": {"models.CA.CA.run": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.config": {"tf": 1.4142135623730951}, "models.config.Config": {"tf": 2}, "experiments": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1.7320508075688772}, "experiments.run_phase4": {"tf": 1.7320508075688772}, "experiments.run_phase5": {"tf": 1}}, "df": 8, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"models.config": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_phase2": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "g": {"docs": {"models.config": {"tf": 1.7320508075688772}, "experiments": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1}}, "df": 7}}, "p": {"docs": {}, "df": 0, "u": {"docs": {"models.config.Config": {"tf": 
1}, "models.config.Config.estimate_runtime": {"tf": 1}}, "df": 2}}}, "a": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 2.23606797749979}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 2.449489742783178}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.config": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 2.23606797749979}, "models.numba_optimized.detect_clusters_fast": {"tf": 2.449489742783178}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 2}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.warmup_numba_kernels": {"tf": 2}, "models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 2.6457513110645907}, "experiments.count_populations": {"tf": 2}, "experiments.get_evolved_stats": {"tf": 1.7320508075688772}, "experiments.average_pcfs": {"tf": 1.4142135623730951}, "experiments.save_results_jsonl": {"tf": 2}, "experiments.save_results_npz": {"tf": 2}, "experiments.load_results_jsonl": {"tf": 2.23606797749979}, "experiments.run_single_simulation": {"tf": 2.449489742783178}, "experiments.run_phase1": {"tf": 2.8284271247461903}, "experiments.run_phase2": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 2.23606797749979}}, "df": 29, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA": {"tf": 1.4142135623730951}, 
"models.CA.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.numba_optimized": {"tf": 1}}, "df": 7}}, "a": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 4}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.CA.run": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"experiments.main": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {"models.CA.CA.update": {"tf": 1.4142135623730951}, "models.CA.PP.update_async": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 8, "d": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.validate": {"tf": 1.4142135623730951}, "models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.CA.CA.update": {"tf": 1.4142135623730951}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.CA.PP.__init__": {"tf": 1}, "models.CA.PP.validate": {"tf": 1.4142135623730951}, "models.CA.PP.update_async": {"tf": 1}, "models.config": {"tf": 1}, "models.numba_optimized": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": 
{"tf": 2}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.7320508075688772}, "models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.7320508075688772}, "experiments": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1.4142135623730951}, "experiments.count_populations": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1.4142135623730951}, "experiments.average_pcfs": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_single_simulation": {"tf": 3.1622776601683795}, "experiments.run_phase1": {"tf": 2.23606797749979}, "experiments.run_phase2": {"tf": 2.23606797749979}, "experiments.run_phase4": {"tf": 2}, "experiments.run_phase5": {"tf": 2}, "experiments.main": {"tf": 2}}, "df": 34}, "y": {"docs": {"models.CA.CA": {"tf": 1.4142135623730951}, "models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.CA.validate": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}}, "df": 4}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"models.config": {"tf": 1}, "models.config.Config": {"tf": 2}, "models.numba_optimized": {"tf": 1.7320508075688772}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments": {"tf": 1.7320508075688772}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1}}, "df": 11}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 1}}}, "z": {"docs": {}, "df": 0, "e": {"docs": {"experiments.run_phase3": {"tf": 1}}, "df": 1}}}}}}, "c": 
{"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 7}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 4}}}}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"experiments": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 7}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, 
"c": {"docs": {}, "df": 0, "t": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA.update": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {"models.CA.CA.run": {"tf": 2}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.config": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 1}}, "df": 10, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"experiments.get_evolved_stats": {"tf": 1}}, "df": 1, "s": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}}, "df": 4}}}}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.config.Config.get_prey_deaths": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.count_populations": {"tf": 1.4142135623730951}, "experiments.average_pcfs": {"tf": 2.23606797749979}}, "df": 11, "s": {"docs": {"models.CA.CA": {"tf": 
1}, "models.CA.CA.validate": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.save_results_npz": {"tf": 1.7320508075688772}}, "df": 6}}}}, "e": {"docs": {"models.CA.CA.run": {"tf": 1.4142135623730951}, "models.CA.PP.validate": {"tf": 1.7320508075688772}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1.7320508075688772}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1}}, "df": 9, "a": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"experiments.get_evolved_stats": {"tf": 1}}, "df": 1}}}}}}}}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"experiments.main": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.config": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized": {"tf": 1.7320508075688772}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "experiments": {"tf": 1.4142135623730951}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": 
{"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 16, "o": {"docs": {}, "df": 0, "w": {"docs": {"experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 2, "s": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.evolve": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.PP.validate": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "y": {"docs": {"experiments.save_results_npz": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 6, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.PP.update_async": {"tf": 1.4142135623730951}, 
"models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.PP": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.CA": {"tf": 1}}, "df": 1}}, "t": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}}}}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1}, "d": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA.run": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 2}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config.estimate_runtime": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 2}}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized": {"tf": 1}, 
"models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 4, "s": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1.7320508075688772}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments": {"tf": 1.4142135623730951}, "experiments.average_pcfs": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1}}, "df": 6}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.load_results_jsonl": {"tf": 1}}, "df": 1}}}}}}}}}}}, "f": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}}, "df": 4, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{"models.CA.CA.densities": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.7320508075688772}}, "df": 3, "s": {"docs": {"models.config.Config": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 2}}}}}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.config": {"tf": 1}, "models.config.Config": {"tf": 1.4142135623730951}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments": {"tf": 1.7320508075688772}, "experiments.generate_unique_seed": {"tf": 1.4142135623730951}, "experiments.get_evolved_stats": {"tf": 1.4142135623730951}, "experiments.average_pcfs": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 15}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA": {"tf": 2.23606797749979}, "models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.CA.densities": {"tf": 1}, "models.CA.CA.evolve": {"tf": 2.6457513110645907}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1.7320508075688772}, "models.CA.PP": {"tf": 2}, "models.config": {"tf": 1.4142135623730951}, "models.config.Config": {"tf": 4.47213595499958}, "models.config.Config.get_prey_deaths": {"tf": 1}, "models.config.Config.get_warmup_steps": {"tf": 1}, 
"models.config.Config.get_measurement_steps": {"tf": 1}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.config.get_phase_config": {"tf": 1.4142135623730951}, "models.numba_optimized": {"tf": 1.7320508075688772}, "models.numba_optimized.set_numba_seed": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 2}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 2.23606797749979}, "models.numba_optimized.detect_clusters_fast": {"tf": 2}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 2.6457513110645907}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_kernels": {"tf": 2.6457513110645907}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments": {"tf": 1.7320508075688772}, "experiments.generate_unique_seed": {"tf": 1.4142135623730951}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.run_single_simulation": {"tf": 2}, "experiments.run_phase1": {"tf": 1.7320508075688772}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 2.23606797749979}, "experiments.run_phase5": {"tf": 2}, "experiments.main": {"tf": 1}}, "df": 36, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"experiments.load_results_jsonl": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 
4}}, "r": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"experiments.save_results_npz": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.get_evolved_stats": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 3}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.evolve": {"tf": 1.7320508075688772}, "models.config.Config": {"tf": 3.7416573867739413}, "models.numba_optimized.PPKernel.update": {"tf": 2.6457513110645907}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 2}}, "df": 7}}, "o": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"experiments.save_results_npz": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.PP": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 5}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized": {"tf": 3.1622776601683795}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, 
"models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 6, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 2}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.7320508075688772}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.7320508075688772}, "experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 20, "s": {"docs": {"models.config": {"tf": 1}, "models.numba_optimized": {"tf": 1.4142135623730951}, "models.numba_optimized.set_numba_seed": {"tf": 1.4142135623730951}, 
"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments": {"tf": 1.4142135623730951}}, "df": 6}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments": {"tf": 1}}, "df": 8}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"models.config": {"tf": 1}, "experiments": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.config": {"tf": 1}, "models.config.Config": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 5}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 3}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}}, "df": 4}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 2}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"experiments.get_evolved_stats": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {"experiments.save_results_jsonl": {"tf": 2}, "experiments.save_results_npz": {"tf": 2.23606797749979}, "experiments.load_results_jsonl": 
{"tf": 2.449489742783178}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 5, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"experiments.load_results_jsonl": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "s": {"docs": {"experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 3}}}, "x": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 4}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {"models.config.Config": {"tf": 1}, "experiments.run_phase4": {"tf": 1.4142135623730951}}, "df": 2}}}, "t": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}}, "df": 1, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.CA.CA.update": {"tf": 1.4142135623730951}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.config": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, 
"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.7320508075688772}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1.4142135623730951}, "experiments.main": {"tf": 1}}, "df": 30}}, "e": {"docs": {"models.CA": {"tf": 1.4142135623730951}, "models.CA.CA": {"tf": 1.7320508075688772}, "models.CA.CA.__init__": {"tf": 2}, "models.CA.CA.rows": {"tf": 1}, "models.CA.CA.cols": {"tf": 1}, "models.CA.CA.validate": {"tf": 1.7320508075688772}, "models.CA.CA.evolve": {"tf": 4.47213595499958}, "models.CA.CA.update": {"tf": 2.23606797749979}, "models.CA.CA.run": {"tf": 2.8284271247461903}, "models.CA.PP": {"tf": 2.23606797749979}, "models.CA.PP.__init__": {"tf": 1}, "models.CA.PP.validate": {"tf": 1.7320508075688772}, "models.CA.PP.update_async": {"tf": 2}, "models.CA.PP.update": {"tf": 1.4142135623730951}, "models.config": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1.7320508075688772}, "models.numba_optimized.PPKernel": {"tf": 2.8284271247461903}, "models.numba_optimized.PPKernel.update": {"tf": 1.7320508075688772}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 2.6457513110645907}, "models.numba_optimized.detect_clusters_fast": {"tf": 2.23606797749979}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 3}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 
4.47213595499958}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 3.1622776601683795}, "models.numba_optimized.warmup_numba_kernels": {"tf": 2.6457513110645907}, "models.numba_optimized.benchmark_kernels": {"tf": 3.605551275463989}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 2.6457513110645907}, "experiments": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 2.8284271247461903}, "experiments.count_populations": {"tf": 1.7320508075688772}, "experiments.get_evolved_stats": {"tf": 2.449489742783178}, "experiments.average_pcfs": {"tf": 3.1622776601683795}, "experiments.save_results_jsonl": {"tf": 2.6457513110645907}, "experiments.save_results_npz": {"tf": 2.6457513110645907}, "experiments.load_results_jsonl": {"tf": 2.23606797749979}, "experiments.run_single_simulation": {"tf": 3.3166247903554}, "experiments.run_phase1": {"tf": 2.449489742783178}, "experiments.run_phase2": {"tf": 2.449489742783178}, "experiments.run_phase4": {"tf": 2.8284271247461903}, "experiments.run_phase5": {"tf": 3}, "experiments.main": {"tf": 2.23606797749979}}, "df": 44, "i": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 3}}, "n": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 3}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"experiments.save_results_npz": {"tf": 1}}, "df": 1}, "m": {"docs": {"experiments.load_results_jsonl": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "e": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.validate": {"tf": 1.4142135623730951}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.PP.validate": {"tf": 
1.4142135623730951}, "models.config.Config": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1.7320508075688772}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 8}, "n": {"docs": {"models.CA.PP": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 3}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"models.config.Config": {"tf": 1}}, "df": 1, "s": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"models.CA": {"tf": 1}, "models.CA.PP": {"tf": 1.7320508075688772}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 7}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.update": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 2, "s": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}}, "df": 2}}}}}}}}, "c": {"docs": {}, "df": 0, 
"k": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 4}}}}}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA.update": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.PP.validate": {"tf": 1}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.average_pcfs": {"tf": 1.4142135623730951}}, "df": 3, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.PP": {"tf": 1}}, "df": 2}}}, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.densities": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.config.Config": {"tf": 1.4142135623730951}}, "df": 4}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.config.Config": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.evolve": {"tf": 2.449489742783178}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1.7320508075688772}, 
"models.CA.PP.validate": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1.4142135623730951}, "models.config": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 2.23606797749979}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_kernels": {"tf": 1.7320508075688772}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.7320508075688772}, "experiments": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 2}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1.7320508075688772}, "experiments.save_results_npz": {"tf": 2}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1.7320508075688772}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1.7320508075688772}, "experiments.main": {"tf": 1}}, "df": 30, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA.run": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.count_populations": {"tf": 1.7320508075688772}}, "df": 5}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"models.config": {"tf": 1}, "experiments": 
{"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 3}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config": {"tf": 1.7320508075688772}, "models.numba_optimized.PPKernel": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 3}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.config.Config": {"tf": 1}}, "df": 1}}, "s": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.config.Config": {"tf": 1.4142135623730951}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1.7320508075688772}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 9, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 
0, "s": {"docs": {"models.config.Config": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "d": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1, "s": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "o": {"docs": {"models.numba_optimized": {"tf": 1}}, "df": 1}}}, "m": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.CA": {"tf": 1}, "models.config": {"tf": 1}, "models.numba_optimized": {"tf": 1}}, "df": 3}}}, "e": {"docs": {"models.CA.PP": {"tf": 1}, "models.CA.PP.update": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}}, "df": 4, "l": {"docs": {"models.CA": {"tf": 2.6457513110645907}, "models.CA.PP": {"tf": 1.4142135623730951}, "experiments": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 2.23606797749979}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 5, "s": {"docs": {"models.CA": {"tf": 1}, "models.config": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "experiments": {"tf": 1}}, "df": 4}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 1.7320508075688772}, 
"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 2}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.7320508075688772}}, "df": 7}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.config.Config": {"tf": 1}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1.7320508075688772}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 4}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.config.Config": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 6, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 4}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "x": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.config.Config": {"tf": 1.4142135623730951}, 
"models.numba_optimized": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 11, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 6}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"models.CA.CA.validate": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.CA.CA": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.validate": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}}, "df": 3, "s": {"docs": {"models.CA.PP": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}}, "df": 3}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "experiments.main": {"tf": 1}}, "df": 3}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.run": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}}, 
"df": 2}}}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.PP.validate": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}}, "df": 3, "s": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"models.CA.CA": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"experiments": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 7}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.__init__": {"tf": 1.7320508075688772}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 5}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.CA.CA.update": {"tf": 
1.4142135623730951}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 5}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 8}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"experiments.average_pcfs": {"tf": 1}, "experiments.run_phase1": {"tf": 1.7320508075688772}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 5}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.7320508075688772}}, "df": 2, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.config.Config": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 5, "s": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments": {"tf": 1}, "experiments.average_pcfs": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "s": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 
1.7320508075688772}, "experiments.average_pcfs": {"tf": 2.23606797749979}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 5, "s": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}}, "df": 2}}}}}, "y": {"docs": {"experiments.get_evolved_stats": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}}, "df": 2, "s": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA": {"tf": 1}, "models.config": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 6}, "d": {"docs": {"models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 3}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.PP.validate": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 2}, "experiments.run_single_simulation": {"tf": 2}}, "df": 3}}}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"experiments": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": 
{"experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 4}}}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {"models.config": {"tf": 1}, "models.numba_optimized": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 6, "d": {"docs": {"models.numba_optimized": {"tf": 1.7320508075688772}, "models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}}, "df": 3, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"models.CA": {"tf": 1.4142135623730951}, "models.CA.CA.update": {"tf": 1}, "models.CA.PP": {"tf": 2.449489742783178}, "models.CA.PP.__init__": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.config": {"tf": 1}, "models.config.Config": {"tf": 2.6457513110645907}, "models.numba_optimized": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}, "experiments": {"tf": 2}, "experiments.count_populations": {"tf": 1.7320508075688772}, "experiments.run_single_simulation": {"tf": 2.23606797749979}, "experiments.run_phase5": {"tf": 1.4142135623730951}, "experiments.main": {"tf": 1}}, "df": 19, "s": {"docs": {"models.CA.PP": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": 
{"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.config": {"tf": 1}}, "df": 1}}}}}}}, "y": {"docs": {"models.CA": {"tf": 1.7320508075688772}, "models.CA.CA.update": {"tf": 1}, "models.CA.PP": {"tf": 2.6457513110645907}, "models.CA.PP.__init__": {"tf": 1}, "models.CA.PP.validate": {"tf": 1.4142135623730951}, "models.config": {"tf": 1.7320508075688772}, "models.config.Config": {"tf": 3.4641016151377544}, "models.config.Config.get_prey_deaths": {"tf": 1}, "models.numba_optimized": {"tf": 3.3166247903554}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 2.449489742783178}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 2.6457513110645907}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments": {"tf": 2}, "experiments.count_populations": {"tf": 1.7320508075688772}, "experiments.run_single_simulation": {"tf": 2.6457513110645907}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1.4142135623730951}, "experiments.run_phase3": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 24}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.save_results_npz": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": 
{"experiments.run_phase2": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 5}}}}, "p": {"docs": {"models.CA": {"tf": 2.449489742783178}, "experiments.run_single_simulation": {"tf": 1}}, "df": 2, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.PP.update_async": {"tf": 1}, "models.numba_optimized": {"tf": 1.4142135623730951}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 3}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.validate": {"tf": 1}, "models.CA.CA.evolve": {"tf": 3.4641016151377544}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.CA.PP.validate": {"tf": 1.7320508075688772}, "models.CA.PP.update_async": {"tf": 1}, "models.config": {"tf": 1.7320508075688772}, "models.config.Config": {"tf": 1.4142135623730951}, "models.config.Config.get_prey_deaths": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "experiments": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1.7320508075688772}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase5": {"tf": 2.23606797749979}}, "df": 18, "s": {"docs": {"models.CA.CA": 
{"tf": 2}, "models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1.4142135623730951}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.CA.PP.__init__": {"tf": 1}, "models.CA.PP.validate": {"tf": 1.4142135623730951}, "models.config": {"tf": 1}, "models.config.Config": {"tf": 1.4142135623730951}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 2}, "experiments.count_populations": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.7320508075688772}, "experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1.4142135623730951}}, "df": 36}}}}}, "s": {"docs": {"models.CA.CA": {"tf": 1.4142135623730951}, "models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.CA.evolve": {"tf": 1.7320508075688772}, "models.CA.PP": {"tf": 1.4142135623730951}, 
"experiments.generate_unique_seed": {"tf": 2}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 6}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.config.Config": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.load_results_jsonl": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}, "experiments.average_pcfs": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 6, "s": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}}, "df": 3}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"experiments": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 2.449489742783178}, "experiments.save_results_npz": {"tf": 2.23606797749979}, "experiments.load_results_jsonl": {"tf": 2.449489742783178}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, 
"experiments.run_phase5": {"tf": 1}}, "df": 8, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 3}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA": {"tf": 1.4142135623730951}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1.7320508075688772}}, "df": 6, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 4, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}}, "df": 4}}}}, "s": {"docs": {"experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 4}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.run": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 2}}}}}}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "c": {"docs": {"models.CA.PP": {"tf": 1}, "models.numba_optimized": {"tf": 1.4142135623730951}, 
"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 3}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.PP": {"tf": 1}, "models.config.Config": {"tf": 1.7320508075688772}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1}}, "df": 6, "s": {"docs": {"experiments": {"tf": 1.4142135623730951}, "experiments.count_populations": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.config": {"tf": 1.4142135623730951}, "experiments": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1.4142135623730951}, "experiments.main": {"tf": 1}}, "df": 6, "s": {"docs": {"models.config.Config": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized": {"tf": 1}}, "df": 1, "s": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1}}, "w": {"docs": {}, "df": 
0, "e": {"docs": {}, "df": 0, "r": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"1": {"docs": {"models.config": {"tf": 1}, "experiments": {"tf": 2.449489742783178}}, "df": 2}, "2": {"docs": {"models.config": {"tf": 1}, "experiments": {"tf": 1}}, "df": 2}, "3": {"docs": {"models.config": {"tf": 1}, "experiments": {"tf": 1}}, "df": 2}, "4": {"docs": {"models.config": {"tf": 1}, "experiments": {"tf": 1}}, "df": 2}, "5": {"docs": {"models.config": {"tf": 1}, "experiments": {"tf": 1}}, "df": 2}, "docs": {"models.config": {"tf": 2.6457513110645907}, "models.config.Config": {"tf": 1.7320508075688772}, "models.config.get_phase_config": {"tf": 2.23606797749979}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments": {"tf": 3.4641016151377544}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1.4142135623730951}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 2}, "experiments.run_phase5": {"tf": 2}, "experiments.main": {"tf": 1.7320508075688772}}, "df": 12, "s": {"docs": {"experiments": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1}, "experiments.main": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "c": {"docs": {}, "df": 0, "f": {"docs": {"models.config.Config": {"tf": 2.449489742783178}, "models.numba_optimized": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}, "experiments": {"tf": 1}, "experiments.average_pcfs": {"tf": 2.6457513110645907}, "experiments.run_single_simulation": {"tf": 1.7320508075688772}}, "df": 7, "s": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized": {"tf": 2.449489742783178}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, 
"models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 7}}}, "y": {"docs": {"experiments": {"tf": 2}}, "df": 1, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "experiments": {"tf": 2}}, "df": 3}}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"models.CA": {"tf": 1.4142135623730951}, "models.CA.CA": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 6, "d": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1.4142135623730951}, "models.CA.PP.update": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 7}}, "i": {"docs": {}, "df": 0, "c": {"docs": {"models.CA": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 2}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"experiments.generate_unique_seed": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {"models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1.4142135623730951}, 
"experiments.save_results_npz": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 14, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"models.CA": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 4, "s": {"docs": {"models.CA.PP": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 3}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"models.config.Config": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 
1}}}}}}}}}}}, "y": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase5": {"tf": 1.4142135623730951}}, "df": 11}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"models.config.Config": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.config.Config": {"tf": 1}}, "df": 2}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.PP": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"models.CA.PP": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.PP": {"tf": 2}, "models.config.Config": {"tf": 1.7320508075688772}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 7}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}}, "df": 1, "s": {"docs": 
{"models.numba_optimized": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"models.CA.PP": {"tf": 1.4142135623730951}, "models.config.Config": {"tf": 2.449489742783178}, "models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}, "experiments": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase3": {"tf": 1}}, "df": 6, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}, "n": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2, "s": {"docs": {"models.config.Config": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}}, "df": 3}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"experiments.save_results_npz": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"experiments": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}}, 
"df": 1, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA.update": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.update": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"models.CA": {"tf": 1}, "models.config": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "experiments": {"tf": 1.7320508075688772}}, "df": 4}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.PP.update_async": {"tf": 1}}, "df": 1}}}}}}}}}}, "n": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.CA.rows": {"tf": 1}, "models.CA.CA.cols": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1.7320508075688772}, "models.CA.CA.update": {"tf": 1}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.config.Config": {"tf": 1.4142135623730951}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1.7320508075688772}, "experiments": {"tf": 1}, "experiments.count_populations": {"tf": 1}, "experiments.average_pcfs": {"tf": 1.4142135623730951}, "experiments.save_results_jsonl": {"tf": 1.7320508075688772}, "experiments.save_results_npz": {"tf": 1}, 
"experiments.load_results_jsonl": {"tf": 1}, "experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 25, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA.__init__": {"tf": 1.7320508075688772}, "models.CA.CA.densities": {"tf": 1}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.config.Config": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 6, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 3, "d": {"docs": {"models.CA.CA.validate": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}}, "df": 2}, "s": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1}}}}}}}}}}, "t": {"3": {"2": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"models.CA.CA.__init__": {"tf": 1.7320508075688772}, "models.CA.CA.rows": {"tf": 1}, "models.CA.CA.cols": {"tf": 1}, "models.CA.CA.n_species": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.run": {"tf": 1.4142135623730951}, "models.CA.PP": {"tf": 1.7320508075688772}, "models.config.Config": {"tf": 2.8284271247461903}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}, 
"models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 2}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 1.4142135623730951}, "experiments.count_populations": {"tf": 1.7320508075688772}, "experiments.average_pcfs": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 24, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1.4142135623730951}}, "df": 3, "s": {"docs": {"models.CA.CA": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.PP": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.CA.PP": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": 
{"models.numba_optimized": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1.4142135623730951}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 3}}}, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 2}}}}}}}}, "o": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 5}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 7}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.update": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, 
"n": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.validate": {"tf": 1}}, "df": 1, "s": {"docs": {"models.CA.CA.validate": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}}, "df": 2}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"models.config.get_phase_config": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1}}}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 3}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.run": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {"models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.CA.CA.run": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1.4142135623730951}, "experiments.save_results_npz": {"tf": 1.4142135623730951}}, "df": 4}, "p": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 3}}}}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.PP.validate": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": 
{"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"experiments.run_phase1": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 2}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1.4142135623730951}, "experiments.load_results_jsonl": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1}}, "df": 4}}}}, "s": {"docs": {"models.CA.CA.validate": {"tf": 1.4142135623730951}, "models.CA.CA.evolve": {"tf": 1.7320508075688772}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1.4142135623730951}, "models.config.get_phase_config": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 2}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1.4142135623730951}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.7320508075688772}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 19, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {"models.CA.CA.validate": {"tf": 1}, "models.CA.CA.evolve": {"tf": 2}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 1.7320508075688772}, "models.CA.PP.validate": {"tf": 1.4142135623730951}, "models.config.get_phase_config": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1}}, "df": 18}, "t": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 16, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.run": {"tf": 
1.7320508075688772}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}}, "df": 3, "s": {"docs": {"models.CA.CA.run": {"tf": 1}, "models.config.Config": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 4}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "f": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 2}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1.4142135623730951}}, "df": 3, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.config.Config": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 3}, "r": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 3}, "s": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 2}}, 
"/": {"docs": {}, "df": 0, "o": {"docs": {"experiments": {"tf": 1}}, "df": 1}}}, "w": {"docs": {"experiments.save_results_jsonl": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"models.CA": {"tf": 2}, "models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.config": {"tf": 1}, "models.numba_optimized": {"tf": 1.7320508075688772}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.count_populations": {"tf": 1.7320508075688772}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1.4142135623730951}}, "df": 20, "i": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.PP.validate": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 4}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1.4142135623730951}, "experiments.save_results_npz": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 7}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"experiments.average_pcfs": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, 
"df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.CA.CA.run": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}}, "df": 4}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.PP": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.count_populations": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 14}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 2}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"models.config.Config": {"tf": 1}, "models.config.Config.get_warmup_steps": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1}}, "df": 5}}, "s": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 3}, 
"i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"models.config.Config.estimate_runtime": {"tf": 1}}, "df": 1}}, "y": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}}, "df": 3}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"experiments.save_results_jsonl": {"tf": 1}}, "df": 1, "s": {"docs": {"experiments": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"experiments.save_results_jsonl": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"experiments.average_pcfs": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1}, "models.config": {"tf": 1}, "models.config.Config": {"tf": 2}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}}, "df": 12, "u": 
{"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {"models.CA": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.numba_optimized": {"tf": 2.449489742783178}, "models.numba_optimized.set_numba_seed": {"tf": 1.7320508075688772}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.7320508075688772}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 14}, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1.7320508075688772}, "models.CA.CA.rows": {"tf": 1}, "models.CA.CA.cols": {"tf": 1}, "models.CA.CA.n_species": {"tf": 1}, "models.CA.CA.run": {"tf": 1.4142135623730951}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.config.Config": {"tf": 1.4142135623730951}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.config.get_phase_config": {"tf": 1.4142135623730951}, "models.numba_optimized.set_numba_seed": {"tf": 2}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.count_populations": {"tf": 2}, "experiments.run_single_simulation": {"tf": 1}}, "df": 19}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.CA": {"tf": 1.4142135623730951}, "models.CA.CA.evolve": {"tf": 1}, 
"models.numba_optimized.set_numba_seed": {"tf": 1}, "experiments.count_populations": {"tf": 1}, "experiments.save_results_npz": {"tf": 1.7320508075688772}}, "df": 5}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"models.CA.PP.validate": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.PP.validate": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"models.CA.CA": {"tf": 1.4142135623730951}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1.7320508075688772}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 2}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}, "experiments.count_populations": {"tf": 1.4142135623730951}, "experiments.average_pcfs": {"tf": 3}}, "df": 10, "z": {"docs": {"experiments": {"tf": 1}, "experiments.save_results_npz": {"tf": 2}}, "df": 2}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.CA": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1.7320508075688772}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 2}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}, "experiments.count_populations": {"tf": 1}, "experiments.average_pcfs": {"tf": 2.23606797749979}}, "df": 9}}}}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, 
"o": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 2, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.CA.validate": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 9}}}}, "s": {"docs": {"models.CA.PP": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}}, "df": 7}}}}}, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {"experiments.save_results_jsonl": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.evolve": {"tf": 1.4142135623730951}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 2, "s": {"docs": {"models.CA.CA": {"tf": 1.4142135623730951}, "models.CA.PP": {"tf": 1}}, "df": 2}}}, "n": {"docs": {"models.CA.CA.evolve": {"tf": 1}, 
"experiments.get_evolved_stats": {"tf": 1.7320508075688772}}, "df": 2}}, "o": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 3, "n": {"docs": {"models.CA.CA.run": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}}, "df": 4, "e": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 8}}, "t": {"docs": {"models.CA.CA.evolve": {"tf": 1.7320508075688772}, "models.CA.CA.run": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1.4142135623730951}}, "df": 9, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": 
{"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 19}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA.update": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}, "d": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2}}}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.PP.update_async": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}}, "df": 7, "s": {"docs": {"models.CA": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.CA.PP.__init__": {"tf": 1}, "models.numba_optimized": {"tf": 2.449489742783178}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, 
"models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 9}}}}}, "y": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}, "experiments.save_results_npz": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1}}, "df": 3, "s": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.save_results_npz": {"tf": 1.4142135623730951}}, "df": 4}}, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1.7320508075688772}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 4, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 1}}, "df": 15}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"experiments.save_results_npz": {"tf": 1}, 
"experiments.run_phase2": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA": {"tf": 2}, "models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.CA.densities": {"tf": 1}, "models.CA.CA.n_species": {"tf": 1}, "models.CA.CA.evolve": {"tf": 2.6457513110645907}, "models.CA.PP": {"tf": 1.7320508075688772}, "models.CA.PP.validate": {"tf": 1.4142135623730951}, "models.config.Config": {"tf": 2}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 2}, "models.numba_optimized.detect_clusters_fast": {"tf": 2}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 2.23606797749979}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 17}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.run": {"tf": 1.4142135623730951}, "models.CA.PP.validate": {"tf": 1}, "models.config": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "experiments": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 8, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.run": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 3}}}, "y": {"docs": {"models.CA.CA.update": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 
1}}, "df": 3}}}}, "e": {"docs": {"experiments.average_pcfs": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA": {"tf": 1.7320508075688772}, "models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.numba_optimized": {"tf": 1.4142135623730951}, "models.numba_optimized.set_numba_seed": {"tf": 2}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments": {"tf": 1.7320508075688772}, "experiments.generate_unique_seed": {"tf": 2.23606797749979}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 9, "s": {"docs": {"experiments.generate_unique_seed": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 3}}}, "l": {"docs": {}, "df": 0, "f": {"docs": {"models.CA.CA.evolve": {"tf": 1.7320508075688772}, "models.config": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "experiments": {"tf": 1}, "experiments.run_phase2": {"tf": 1.4142135623730951}}, "df": 5}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.PP": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"models.config": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 3}}}}}}}}}, "t": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 5, 
"s": {"docs": {"models.numba_optimized": {"tf": 1}, "experiments.run_phase5": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 3}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 4}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.config.Config": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}, "d": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"experiments.save_results_jsonl": {"tf": 1}}, "df": 1}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 3}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"experiments.save_results_npz": {"tf": 1}}, "df": 1}}}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP.update": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1.7320508075688772}}, "df": 6, "s": {"docs": {"models.CA": {"tf": 1.4142135623730951}, "models.CA.CA.run": {"tf": 1.4142135623730951}, "models.config.Config": {"tf": 1.4142135623730951}, "models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 6}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "y": {"docs": {"experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 6}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"experiments.get_evolved_stats": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}}, "df": 2}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 5}}}}}}}, "p": 
{"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.config.Config.estimate_runtime": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 4, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA.validate": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.PP": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.generate_unique_seed": {"tf": 1.4142135623730951}, "experiments.save_results_jsonl": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"experiments.save_results_jsonl": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.n_species": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 7, "s": {"docs": {"models.CA.CA.n_species": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized": {"tf": 1}, 
"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1.7320508075688772}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 1}}, "df": 9}}}}}}, "s": {"docs": {"models.numba_optimized": {"tf": 2.23606797749979}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 2.23606797749979}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 2.23606797749979}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 6}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.average_pcfs": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1}}, "df": 8}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.CA.run": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1}}}, "d": {"docs": {"experiments.get_evolved_stats": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 2}}, "d": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.config.Config": {"tf": 1}, 
"models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 4}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.run": {"tf": 1.7320508075688772}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.CA.PP.update": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1.7320508075688772}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}, "experiments": {"tf": 2.23606797749979}, "experiments.count_populations": {"tf": 1.4142135623730951}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.save_results_npz": {"tf": 1.4142135623730951}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_single_simulation": {"tf": 2.23606797749979}, "experiments.run_phase1": {"tf": 2.23606797749979}, "experiments.run_phase2": {"tf": 1.7320508075688772}, "experiments.run_phase4": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 2}, "experiments.main": {"tf": 1}}, "df": 25, "s": {"docs": {"models.CA.CA": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 3}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.PP": {"tf": 1}}, "df": 1}}}}}}}, "z": {"docs": {}, "df": 0, "e": {"docs": {"models.config": {"tf": 1.4142135623730951}, "models.config.Config": {"tf": 1.4142135623730951}, 
"models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 2}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 3}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.7320508075688772}, "experiments": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase3": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 2.23606797749979}, "experiments.run_phase5": {"tf": 1}}, "df": 18, "s": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 2.449489742783178}, "models.numba_optimized.detect_clusters_fast": {"tf": 2.6457513110645907}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 2}}, "df": 8}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 5}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments": {"tf": 2}, "experiments.save_results_jsonl": {"tf": 1}, 
"experiments.save_results_npz": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 7}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA": {"tf": 1}}, "df": 1, "s": {"docs": {"models.CA.PP": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {"models.CA.CA.__init__": {"tf": 1}}, "df": 1}, "b": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.update": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": 
{"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.validate": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 3, "s": {"docs": {"models.CA.PP.validate": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 10}}}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.PP": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 1}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.run": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 2, "s": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.run": {"tf": 1}, "models.config.Config": {"tf": 1}, "experiments": {"tf": 1.4142135623730951}, "experiments.save_results_jsonl": {"tf": 1.4142135623730951}, "experiments.save_results_npz": {"tf": 1.4142135623730951}}, "df": 5, "d": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1.4142135623730951}}, "df": 2}, "s": {"docs": {"experiments.run_phase1": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}}, "df": 2}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"models.config": {"tf": 1.7320508075688772}, "models.config.Config": {"tf": 1.4142135623730951}, "models.config.Config.get_prey_deaths": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_phase1": {"tf": 2}, "experiments.run_phase5": {"tf": 1}}, "df": 6}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, 
"df": 0, "g": {"docs": {"models.config": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 7}}}, "e": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 2}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"experiments.save_results_npz": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"experiments.save_results_jsonl": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"experiments.main": {"tf": 1}}, "df": 1}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 4}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1.4142135623730951}}, "df": 2}}}}, "c": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.PP": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.CA": {"tf": 1.7320508075688772}, "models.CA.PP": {"tf": 2}, "models.config": {"tf": 1}, "models.config.Config": {"tf": 1}, 
"models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_phase5": {"tf": 1.4142135623730951}}, "df": 9, "/": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA": {"tf": 1}, "models.config.Config.estimate_runtime": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"models.CA.CA.update": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.PP.update_async": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2, "s": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 4}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {"models.config": {"tf": 1}, "models.config.Config": {"tf": 1}, "experiments": {"tf": 1.4142135623730951}}, "df": 
3}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 4}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 3}}}}}}}, "g": {"docs": {}, "df": 0, "h": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}}}}}, "x": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "c": {"docs": {"experiments": {"tf": 1}}, "df": 1}}}, "e": {"docs": 
{"models.CA.CA.run": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}}, "df": 6, "x": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.CA": {"tf": 1}, "models.config": {"tf": 1}, "models.numba_optimized": {"tf": 1}}, "df": 3, "s": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.count_populations": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 10}}}}}}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.CA.CA.n_species": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.save_results_jsonl": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": 
{"models.CA.CA.run": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 8, "s": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 4}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1.4142135623730951}, "experiments.main": {"tf": 1.4142135623730951}}, "df": 8}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.PP.validate": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"experiments": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.config": {"tf": 1.4142135623730951}, "models.config.Config.estimate_runtime": {"tf": 1}, "experiments": {"tf": 
1.7320508075688772}}, "df": 3, "s": {"docs": {"models.config": {"tf": 1}, "models.config.Config": {"tf": 1}, "experiments": {"tf": 2.449489742783178}}, "df": 3}, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.config": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 5}}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1}}}}}}}}, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.CA.CA.run": {"tf": 1}, "models.config": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1}}, "df": 8, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1.7320508075688772}}, "df": 2}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, 
"models.config.Config": {"tf": 1.7320508075688772}, "models.numba_optimized.PPKernel.update": {"tf": 1.7320508075688772}}, "df": 4, "d": {"docs": {"models.CA.PP.validate": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}, "experiments": {"tf": 1.4142135623730951}, "experiments.get_evolved_stats": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 6}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.config.Config": {"tf": 1.4142135623730951}}, "df": 4}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"experiments.run_phase1": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 2}}, "n": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.evolve": {"tf": 1.4142135623730951}}, "df": 1, "d": {"docs": {"models.CA": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 2}, "s": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.PP": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "y": {"docs": {"experiments.average_pcfs": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, 
"e": {"docs": {"models.CA.PP.validate": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}}, "df": 5, "s": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.count_populations": {"tf": 1}}, "df": 4}}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "y": {"docs": {"experiments.save_results_npz": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.densities": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, 
"experiments.load_results_jsonl": {"tf": 1}, "experiments.run_phase4": {"tf": 1.4142135623730951}}, "df": 15}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.CA.n_species": {"tf": 1}, "experiments.count_populations": {"tf": 1.7320508075688772}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "experiments.average_pcfs": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"experiments.average_pcfs": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"models.config": {"tf": 1}, "models.config.Config": {"tf": 1}, "experiments": {"tf": 1.4142135623730951}}, "df": 3}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "y": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config.estimate_runtime": {"tf": 1.4142135623730951}, "experiments": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "o": {"docs": 
{}, "df": 0, "n": {"docs": {"experiments.main": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1}}}}}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 3}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"models.CA": {"tf": 1}, "experiments": {"tf": 1.4142135623730951}}, "df": 2}}}, "e": {"docs": {"models.config": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 2, "d": {"docs": {"models.CA.CA": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}}, "df": 6}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 
1}}, "df": 6}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.CA.CA.run": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 14}}}}, "p": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.CA.PP.update": {"tf": 1}, "models.numba_optimized": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}}, "df": 10, "s": {"docs": {"models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.CA.PP.update_async": {"tf": 1}}, "df": 3}, "d": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.config.Config": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}, "t": {"docs": 
{}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 3}}}, "y": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_phase1": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 2}}}}}}}}, "i": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 2}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 6, "l": {"docs": {}, "df": 0, "y": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "r": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}, "experiments.average_pcfs": {"tf": 1.4142135623730951}}, "df": 3, "o": {"docs": {}, "df": 0, "w": {"docs": {"models.numba_optimized.PPKernel": 
{"tf": 1}}, "df": 1, "s": {"docs": {"models.CA": {"tf": 1.7320508075688772}, "models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.CA.rows": {"tf": 1}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 6}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 1, "s": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {"models.CA": {"tf": 1.4142135623730951}, "models.CA.CA.run": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments": {"tf": 3.7416573867739413}, "experiments.save_results_npz": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}}, "df": 9, "s": {"docs": {"models.config.Config": {"tf": 1.4142135623730951}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase5": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 8}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config.estimate_runtime": {"tf": 1}, "experiments": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 3}}}}, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "r": {"docs": 
{"experiments": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 2, "s": {"docs": {"experiments": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 4, "s": {"docs": {"models.CA.CA.update": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.config.Config": {"tf": 1}}, "df": 3, "s": {"docs": {"models.CA.PP.validate": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 3}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"models.CA.CA": {"tf": 1.4142135623730951}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.PP": {"tf": 1.7320508075688772}, "models.numba_optimized.set_numba_seed": {"tf": 2.449489742783178}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 2}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 9}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.validate": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 6}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config": {"tf": 3.1622776601683795}, "models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.run_single_simulation": {"tf": 2.23606797749979}}, "df": 4, "s": {"docs": 
{"models.CA.PP": {"tf": 1}, "models.config.Config.get_prey_deaths": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 7}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.PP": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.config.Config": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}, "experiments.average_pcfs": {"tf": 1.4142135623730951}}, "df": 4}}, "u": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 4}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 
0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments.count_populations": {"tf": 1}}, "df": 4}}}, "s": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}}, "df": 1}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"experiments.save_results_jsonl": {"tf": 1}}, "df": 1}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1, "s": {"docs": {"models.config": {"tf": 1}, "models.config.Config": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 4}}}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"experiments.generate_unique_seed": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA": {"tf": 1}, "models.config.Config.estimate_runtime": {"tf": 1}}, "df": 2}}}}, "y": {"docs": {"experiments": {"tf": 1}}, "df": 1}, "s": {"docs": 
{"experiments.load_results_jsonl": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"experiments.get_evolved_stats": {"tf": 1}}, "df": 1, "s": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.CA.CA.run": {"tf": 1}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.count_populations": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1.4142135623730951}, "experiments.average_pcfs": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 26}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"models.config": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}}, "df": 2, "s": {"docs": {"models.CA.PP.update_async": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 3}}}}}}}, 
"s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 9, "s": {"docs": {"models.CA.CA.run": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments": {"tf": 2.6457513110645907}, "experiments.save_results_jsonl": {"tf": 1.7320508075688772}, "experiments.save_results_npz": {"tf": 2.23606797749979}, "experiments.load_results_jsonl": {"tf": 2}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 1.4142135623730951}, "experiments.run_phase5": {"tf": 1.4142135623730951}}, "df": 12, "/": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}}, "df": 3}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.run": {"tf": 1}, 
"models.config.get_phase_config": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 3}}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.config.Config": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 2}}}, "s": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"experiments.load_results_jsonl": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"experiments": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"experiments.generate_unique_seed": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 2}}}}}}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"experiments.save_results_npz": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": 
{}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.config.Config": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 6}}}, "y": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.densities": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 6}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"models.CA": {"tf": 1}, "models.CA.PP": {"tf": 1.7320508075688772}, "models.CA.PP.validate": {"tf": 1}, "models.config.Config": {"tf": 3.1622776601683795}, "models.config.Config.get_prey_deaths": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 2.23606797749979}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1.7320508075688772}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase3": {"tf": 1}}, "df": 11, "s": {"docs": {"models.config": {"tf": 1.4142135623730951}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.update": {"tf": 1}}, "df": 2}, "d": {"docs": {"models.config": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, 
"df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.CA.PP": {"tf": 2.449489742783178}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel.update": {"tf": 2}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}}, "df": 14, "s": {"docs": {"models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 5}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.PP.update_async": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}}, "df": 
2}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}}, "df": 4}}}}}, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 2}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"experiments": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 3}}}, "m": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 2, "s": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}, "d": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"experiments.get_evolved_stats": {"tf": 1}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, 
"o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {"experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA": {"tf": 1.4142135623730951}, "models.CA.PP": {"tf": 1.7320508075688772}, "models.config": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1.7320508075688772}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.7320508075688772}, "models.numba_optimized.benchmark_kernels": {"tf": 2.23606797749979}, "experiments": {"tf": 1}, "experiments.run_phase5": {"tf": 1.4142135623730951}}, "df": 9}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.CA.update": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"models.CA.CA.run": {"tf": 1.7320508075688772}, "experiments": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 6}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"experiments.main": {"tf": 1}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, 
"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 14, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA": {"tf": 1.4142135623730951}, "models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.PP": {"tf": 1.4142135623730951}}, "df": 3}}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments.generate_unique_seed": {"tf": 1.7320508075688772}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1}}, "df": 9}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"experiments.save_results_jsonl": {"tf": 1.4142135623730951}, "experiments.save_results_npz": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 7}}}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}}, "df": 2, "i": {"docs": {}, 
"df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.n_species": {"tf": 1}}, "df": 1}}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"models.config.Config": {"tf": 1.4142135623730951}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 2}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}}, "df": 4, "s": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}, "experiments.average_pcfs": {"tf": 2}}, "df": 2}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 4, "s": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 2}}}}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.PPKernel.update": {"tf": 1}}, "df": 1, "d": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"models.CA.PP.update": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {"experiments.main": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": 
{"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1, "s": {"docs": {"models.CA.CA.validate": {"tf": 1.4142135623730951}, "models.config.Config": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 2}}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 4}}}}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "experiments.run_phase1": {"tf": 1}}, "df": 4}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.CA.run": 
{"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 2}}}}, "y": {"docs": {"experiments": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"models.CA.CA.run": {"tf": 1}, "models.config.Config": {"tf": 1.4142135623730951}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.average_pcfs": {"tf": 1.4142135623730951}, "experiments.save_results_jsonl": {"tf": 1.4142135623730951}, "experiments.save_results_npz": {"tf": 1.4142135623730951}, "experiments.load_results_jsonl": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 10, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"models.config": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "o": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 2}}}, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"experiments.run_single_simulation": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"models.CA": {"tf": 1.4142135623730951}, "models.numba_optimized": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 2}, "experiments": {"tf": 1.4142135623730951}, 
"experiments.get_evolved_stats": {"tf": 1.4142135623730951}, "experiments.save_results_jsonl": {"tf": 3.1622776601683795}, "experiments.save_results_npz": {"tf": 2.449489742783178}, "experiments.load_results_jsonl": {"tf": 1.4142135623730951}}, "df": 10}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.CA": {"tf": 1.4142135623730951}, "models.CA.CA.evolve": {"tf": 1.4142135623730951}}, "df": 2, "u": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.evolve": {"tf": 1.7320508075688772}, "models.numba_optimized.set_numba_seed": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.count_populations": {"tf": 1.7320508075688772}, "experiments.get_evolved_stats": {"tf": 1.4142135623730951}, "experiments.save_results_npz": {"tf": 1}}, "df": 7, "s": {"docs": {"models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.CA.CA.evolve": {"tf": 1}, "models.config": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}, "experiments.get_evolved_stats": {"tf": 2.23606797749979}, "experiments.average_pcfs": {"tf": 2}, "experiments.save_results_npz": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 10}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA.validate": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.config.get_phase_config": {"tf": 1}}, "df": 4}}}}}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.validate": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 2.23606797749979}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.CA.validate": {"tf": 1}, "models.CA.PP.validate": {"tf": 
1}}, "df": 2, "d": {"docs": {"models.CA.PP.__init__": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.PP.validate": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.run_phase1": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.validate": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.run": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.PP.update_async": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}}, "df": 1}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"models.numba_optimized.benchmark_kernels": {"tf": 1}, "experiments.run_phase3": {"tf": 1}}, "df": 2}}, "g": {"docs": {"models.CA.CA.run": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, 
"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}, "experiments.average_pcfs": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}}, "df": 9, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA": {"tf": 1.4142135623730951}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1.7320508075688772}, "models.CA.PP": {"tf": 1}, "models.config.Config": {"tf": 2}, "experiments.run_phase5": {"tf": 1}}, "df": 6}}}}}, "r": {"docs": {"models.numba_optimized": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA": {"tf": 1.7320508075688772}, "models.CA.CA.__init__": {"tf": 1.7320508075688772}, "models.CA.CA.rows": {"tf": 1}, "models.CA.CA.cols": {"tf": 1}, "models.CA.CA.validate": {"tf": 1.7320508075688772}, "models.CA.CA.run": {"tf": 1}, "models.CA.PP": {"tf": 2}, "models.CA.PP.validate": {"tf": 1}, "models.config": {"tf": 1}, "models.config.Config": {"tf": 2}, "models.numba_optimized": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel": {"tf": 2}, "models.numba_optimized.PPKernel.update": {"tf": 1.7320508075688772}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.7320508075688772}, "models.numba_optimized.benchmark_kernels": {"tf": 2}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments": {"tf": 1.4142135623730951}, "experiments.count_populations": {"tf": 2}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase3": 
{"tf": 1}, "experiments.run_phase4": {"tf": 2.23606797749979}, "experiments.run_phase5": {"tf": 1.4142135623730951}}, "df": 28, "s": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA": {"tf": 1.7320508075688772}, "models.CA.CA.__init__": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1.7320508075688772}, "experiments.run_phase1": {"tf": 1}}, "df": 4, "s": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"models.config": {"tf": 1}, "models.config.Config.get_prey_deaths": {"tf": 1}, "experiments": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1.4142135623730951}}, "df": 4, "d": {"docs": {"experiments.generate_unique_seed": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 2}, "s": {"docs": {"experiments.run_phase1": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}, "experiments": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {"models.config": {"tf": 2}, "models.config.Config.get_warmup_steps": {"tf": 1}, "models.config.Config.get_measurement_steps": {"tf": 1}, "models.numba_optimized": {"tf": 1.7320508075688772}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "experiments": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1.7320508075688772}, "experiments.run_single_simulation": {"tf": 1.4142135623730951}, "experiments.main": {"tf": 1}}, "df": 10, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {}, 
"df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"experiments": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {"models.CA.CA.__init__": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 2.6457513110645907}, "models.numba_optimized.detect_clusters_fast": {"tf": 2.449489742783178}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 3}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 3}, "experiments.count_populations": {"tf": 2.449489742783178}, "experiments.get_evolved_stats": {"tf": 2.449489742783178}, "experiments.average_pcfs": {"tf": 3}, "experiments.save_results_jsonl": {"tf": 2.449489742783178}, "experiments.save_results_npz": {"tf": 2.449489742783178}, "experiments.load_results_jsonl": {"tf": 2.449489742783178}}, "df": 13}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.evolve": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 1}, "s": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1.4142135623730951}, "models.CA.PP": {"tf": 1}, "models.CA.PP.update": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 9, "e": {"docs": {"models.CA.CA.update": {"tf": 1}, 
"models.numba_optimized.PPKernel.update": {"tf": 1}, "experiments": {"tf": 1}}, "df": 3}, "c": {"docs": {}, "df": 0, "e": {"docs": {"models.CA.PP": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 3}}}, "r": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1}, "models.CA.CA.update": {"tf": 1}, "models.CA.PP.validate": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}, "experiments.average_pcfs": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}, "experiments.run_single_simulation": {"tf": 2}}, "df": 10, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"models.config": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 4}}}}}, "e": {"docs": {"experiments.main": {"tf": 1}}, "df": 1, "d": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.generate_unique_seed": {"tf": 1}}, "df": 3}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"experiments.save_results_npz": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {"models.CA.CA.__init__": {"tf": 1.7320508075688772}, "models.CA.CA.rows": {"tf": 1}, "models.CA.CA.cols": {"tf": 
1}, "models.CA.CA.n_species": {"tf": 1}, "models.CA.CA.evolve": {"tf": 2.23606797749979}, "models.CA.CA.update": {"tf": 1.7320508075688772}, "models.CA.CA.run": {"tf": 2.23606797749979}, "models.CA.PP": {"tf": 1.4142135623730951}, "models.config.Config": {"tf": 2}, "models.config.Config.get_prey_deaths": {"tf": 1}, "models.config.Config.estimate_runtime": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel.update": {"tf": 1}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 3.1622776601683795}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 3.4641016151377544}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 2}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_kernels": {"tf": 1.7320508075688772}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 2.449489742783178}, "experiments.generate_unique_seed": {"tf": 2}, "experiments.count_populations": {"tf": 2.6457513110645907}, "experiments.get_evolved_stats": {"tf": 2.23606797749979}, "experiments.average_pcfs": {"tf": 1.7320508075688772}, "experiments.save_results_jsonl": {"tf": 1.7320508075688772}, "experiments.save_results_npz": {"tf": 2}, "experiments.load_results_jsonl": {"tf": 1.7320508075688772}, "experiments.run_single_simulation": {"tf": 2.8284271247461903}, "experiments.run_phase1": {"tf": 2.23606797749979}, "experiments.run_phase2": {"tf": 2.449489742783178}, "experiments.run_phase4": {"tf": 2.449489742783178}, "experiments.run_phase5": {"tf": 2.449489742783178}}, "df": 33, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, 
"n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1.7320508075688772}, "models.CA.CA.run": {"tf": 1.4142135623730951}, "models.CA.PP": {"tf": 1.7320508075688772}, "models.numba_optimized.PPKernel": {"tf": 1.4142135623730951}, "models.numba_optimized.PPKernel.update": {"tf": 2}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1.4142135623730951}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_kernels": {"tf": 1.4142135623730951}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}, "experiments.run_single_simulation": {"tf": 1.7320508075688772}}, "df": 15, "l": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.PP.update_async": {"tf": 1}, "models.numba_optimized": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "l": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"experiments.save_results_jsonl": {"tf": 1}}, "df": 1}}}}}, "w": {"docs": {}, "df": 0, "n": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "models.numba_optimized.set_numba_seed": {"tf": 1}}, "df": 2}}, "u": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.evolve": {"tf": 1}, "experiments.get_evolved_stats": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"experiments": {"tf": 1.7320508075688772}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.save_results_npz": {"tf": 1.4142135623730951}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 8}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.CA.CA.update": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 2, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 3}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"experiments.save_results_jsonl": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.CA.CA.run": {"tf": 1}, "experiments.run_phase2": {"tf": 1}}, "df": 2}}}}}}}, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.PP": {"tf": 1.4142135623730951}, "experiments.get_evolved_stats": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 
1}, "experiments.run_single_simulation": {"tf": 1}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 8, "s": {"docs": {"experiments.save_results_jsonl": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.numba_optimized.PPKernel": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"experiments.run_phase1": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {"experiments.run_phase3": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 2, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"models.CA.CA": {"tf": 1}, "models.CA.CA.__init__": {"tf": 1}, "models.CA.CA.validate": {"tf": 1}, "models.CA.CA.evolve": {"tf": 1.4142135623730951}, "models.CA.PP": {"tf": 1}}, "df": 5}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"models.CA.PP.validate": {"tf": 1}}, "df": 1}}}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"models.CA.CA.update": {"tf": 1}, "models.CA.PP": {"tf": 1}, "models.CA.PP.update_async": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 5}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}, "r": {"docs": {"experiments.run_phase1": {"tf": 1.7320508075688772}, "experiments.run_phase2": {"tf": 1.7320508075688772}, "experiments.run_phase4": {"tf": 1.7320508075688772}, "experiments.run_phase5": {"tf": 1.7320508075688772}}, "df": 4}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments": {"tf": 
1.4142135623730951}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}, "experiments.main": {"tf": 1}}, "df": 6}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"models.CA.CA.run": {"tf": 1}, "models.numba_optimized.PPKernel": {"tf": 1}}, "df": 2}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"models.config.Config": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"experiments": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1.4142135623730951}}, "df": 2}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "s": {"docs": {"experiments.run_phase2": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"experiments.load_results_jsonl": {"tf": 1}}, "df": 1, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"models.CA.CA.__init__": {"tf": 1}, "models.config.Config": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}, "models.numba_optimized.benchmark_cluster_detection": {"tf": 1}, "experiments.run_single_simulation": {"tf": 1}}, "df": 6}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1, "s": {"docs": {"models.numba_optimized.benchmark_cluster_detection": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"experiments.generate_unique_seed": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"models.CA.CA.__init__": {"tf": 1}, 
"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.run": {"tf": 1}, "models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}, "models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.average_pcfs": {"tf": 2}, "experiments.save_results_jsonl": {"tf": 1.7320508075688772}, "experiments.save_results_npz": {"tf": 1.7320508075688772}, "experiments.load_results_jsonl": {"tf": 1.7320508075688772}, "experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 1.7320508075688772}, "experiments.run_phase5": {"tf": 1.7320508075688772}}, "df": 11, "[": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"models.CA.CA.run": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"models.numba_optimized.compute_all_pcfs_fast": {"tf": 1}, "experiments.save_results_npz": {"tf": 1}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {"experiments": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 2}, "experiments.main": {"tf": 1}}, "df": 4, "a": {"docs": {}, "df": 0, "r": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1}}, "df": 1}}, "s": {"docs": {"experiments.save_results_jsonl": {"tf": 1}, "experiments.load_results_jsonl": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}, 
"models.numba_optimized.benchmark_cluster_detection": {"tf": 1.4142135623730951}}, "df": 4, "s": {"docs": {"models.CA.PP": {"tf": 1}, "models.numba_optimized": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 4}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"models.numba_optimized.get_cluster_stats_fast": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"models.numba_optimized": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 3}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized": {"tf": 1.4142135623730951}, "models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.detect_clusters_fast": {"tf": 1.7320508075688772}, "models.numba_optimized.get_cluster_stats_fast": {"tf": 2.449489742783178}, "experiments.run_single_simulation": {"tf": 1}}, "df": 5}}}}}, "w": {"docs": {"experiments.run_phase4": {"tf": 1}}, "df": 1}}}, "j": {"docs": {"models.numba_optimized.compute_pcf_periodic_fast": {"tf": 1.4142135623730951}}, "df": 1, "o": {"docs": {}, "df": 0, "b": {"docs": {"experiments.run_phase5": {"tf": 1}}, "df": 1, "s": {"docs": {"models.config.Config": {"tf": 1}, "experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase4": {"tf": 1}}, "df": 3}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {"experiments.run_phase1": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.set_numba_seed": {"tf": 1.7320508075688772}, 
"models.numba_optimized.measure_cluster_sizes_fast": {"tf": 1}, "models.numba_optimized.warmup_numba_kernels": {"tf": 1}, "models.numba_optimized.benchmark_kernels": {"tf": 1}}, "df": 4}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"models.numba_optimized.warmup_numba_kernels": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"experiments.generate_unique_seed": {"tf": 1}, "experiments.save_results_jsonl": {"tf": 1.4142135623730951}, "experiments.load_results_jsonl": {"tf": 2}, "experiments.run_phase1": {"tf": 1}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}}, "df": 6, "l": {"docs": {"experiments": {"tf": 1.4142135623730951}, "experiments.save_results_jsonl": {"tf": 2}, "experiments.load_results_jsonl": {"tf": 2}, "experiments.run_phase1": {"tf": 1.4142135623730951}, "experiments.run_phase2": {"tf": 1}, "experiments.run_phase4": {"tf": 1}, "experiments.run_phase5": {"tf": 1}}, "df": 7}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"experiments.load_results_jsonl": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "pipeline": ["trimmer"], "_isPrebuiltIndex": true}; + + // mirrored in build-search-index.js (part 1) + // Also split on html tags. this is a cheap heuristic, but good enough. 
+ elasticlunr.tokenizer.setSeperator(/[\s\-.;&_'"=,()]+|<[^>]*>/); + + let searchIndex; + if (docs._isPrebuiltIndex) { + console.info("using precompiled search index"); + searchIndex = elasticlunr.Index.load(docs); + } else { + console.time("building search index"); + // mirrored in build-search-index.js (part 2) + searchIndex = elasticlunr(function () { + this.pipeline.remove(elasticlunr.stemmer); + this.pipeline.remove(elasticlunr.stopWordFilter); + this.addField("qualname"); + this.addField("fullname"); + this.addField("annotation"); + this.addField("default_value"); + this.addField("signature"); + this.addField("bases"); + this.addField("doc"); + this.setRef("fullname"); + }); + for (let doc of docs) { + searchIndex.addDoc(doc); + } + console.timeEnd("building search index"); + } + + return (term) => searchIndex.search(term, { + fields: { + qualname: {boost: 4}, + fullname: {boost: 2}, + annotation: {boost: 2}, + default_value: {boost: 2}, + signature: {boost: 2}, + bases: {boost: 2}, + doc: {boost: 1}, + }, + expand: true + }); +})(); \ No newline at end of file diff --git a/scripts/experiments.py b/experiments.py similarity index 62% rename from scripts/experiments.py rename to experiments.py index db7e63c..172d49b 100644 --- a/scripts/experiments.py +++ b/experiments.py @@ -1,31 +1,68 @@ #!/usr/bin/env python3 """ -Predator-Prey Hydra Effect Experiments - HPC Version - -Experimental phases (run sequentially): - Phase 1: Parameter sweep to find critical point (bifurcation + cluster analysis) - Phase 2: Self-organization analysis (evolution toward criticality) - Phase 3: Finite-size scaling at critical point - Phase 4: Sensitivity analysis across parameter regimes - Phase 5: Perturbation analysis (critical slowing down) - Phase 6: Model extensions (directed hunting comparison) - -Usage: - python experiments.py --phase 1 # Run phase 1 - python experiments.py --phase 1 --dry-run # Estimate runtime - python experiments.py --phase all # Run all phases - python 
experiments.py --phase 1 --output results/ # Custom output +Predator-Prey Hydra Effect Experiments +====================================== + +HPC-ready experiment runner for investigating the Hydra effect in +predator-prey cellular automata. + +Experimental Phases +------------------- +- **Phase 1**: Parameter sweep to find critical point (bifurcation + cluster analysis) +- **Phase 2**: Self-organization analysis (evolution toward criticality) +- **Phase 3**: Finite-size scaling at critical point +- **Phase 4**: Sensitivity analysis across parameter regimes +- **Phase 5**: Model extensions (directed hunting comparison) + +Functions +--------- +```python +run_single_simulation # Execute one simulation run and collect metrics. +run_phase1, run_phase2, run_phase3, run_phase4, run_phase5 # Phase-specific experiment runners. +``` + +Utilities +--------- +```python +generate_unique_seed # Deterministic seed generation from parameters. +count_populations # Count species populations on grid. +get_evolved_stats # Statistics for evolved parameters. +average_pcfs # Average multiple PCF measurements. +save_results_jsonl, load_results_jsonl, save_results_npz # I/O functions for experiment results. 
+``` + +Command Line Usage +------------------ +```bash +python experiments.py --phase 1 # Run phase 1 +python experiments.py --phase 1 --dry-run # Estimate runtime +python experiments.py --phase all # Run all phases +python experiments.py --phase 1 --output results/ # Custom output +``` + +Programmatic Usage +------------------ +```python +from experiments import run_single_simulation, run_phase1 +from models.config import PHASE1_CONFIG + +# Single simulation +result = run_single_simulation( + prey_birth=0.2, + prey_death=0.05, + predator_birth=0.8, + predator_death=0.1, + grid_size=100, + seed=42, + cfg=PHASE1_CONFIG, +) + +# Full phase (writes to output directory) +import logging +results = run_phase1(PHASE1_CONFIG, Path("results/"), logging.getLogger()) +``` """ - -# NOTE (1): The soc_analysis script used temporal avalache data to assess SOC. -# This functionality is not yet implemented here. We can still derive that data -# from the full time series using np.diff(prey_timeseries) - -# NOTE (2): Post-processing utilities and plotting are in scripts/analysis.py. This script should -# solely focus on running the experiments and saving raw results. - - import argparse import hashlib import json @@ -55,7 +92,6 @@ from models.numba_optimized import ( compute_all_pcfs_fast, get_cluster_stats_fast, - get_percolating_cluster_fast, warmup_numba_kernels, set_numba_seed, NUMBA_AVAILABLE, @@ -78,18 +114,102 @@ def set_numba_seed(seed): def generate_unique_seed(params: dict, rep: int) -> int: - """Create deterministic seed from parameters.""" + """ + Create a deterministic seed from a dictionary of parameters and a repetition index. + + This function serializes the input dictionary into a sorted JSON string, + appends the repetition count, and hashes the resulting string using SHA-256. + The first 8 characters of the hex digest are then converted to an integer + to provide a stable, unique seed for random number generators. 
+ + Parameters + ---------- + params : dict + A dictionary of configuration parameters. Keys are sorted to ensure + determinism regardless of insertion order. + rep : int + The repetition or iteration index, used to ensure different seeds + are generated for the same parameter set across multiple runs. + + Returns + ------- + int + A unique integer seed derived from the input parameters. + + Examples + -------- + >>> params = {'learning_rate': 0.01, 'batch_size': 32} + >>> generate_unique_seed(params, 1) + 3432571217 + >>> generate_unique_seed(params, 2) + 3960013583 + """ identifier = json.dumps(params, sort_keys=True) + f"_{rep}" return int(hashlib.sha256(identifier.encode()).hexdigest()[:8], 16) def count_populations(grid: np.ndarray) -> Tuple[int, int, int]: - """Count empty, prey, predator cells.""" + """ + Count the number of empty, prey, and predator cells in the simulation grid. + + Parameters + ---------- + grid : np.ndarray + A 2D NumPy array representing the simulation environment, where: + - 0: Empty cell + - 1: Prey + - 2: Predator + + Returns + ------- + empty_count : int + Total number of cells with a value of 0. + prey_count : int + Total number of cells with a value of 1. + predator_count : int + Total number of cells with a value of 2. + + Examples + -------- + >>> grid = np.array([[0, 1], [2, 1]]) + >>> count_populations(grid) + (1, 2, 1) + """ return int(np.sum(grid == 0)), int(np.sum(grid == 1)), int(np.sum(grid == 2)) def get_evolved_stats(model, param: str) -> Dict: - """Get statistics of evolved parameter from model.""" + """ + Get statistics of an evolved parameter from the model. + + This function retrieves parameter values from the model's internal storage, + filters out NaN values, and calculates basic descriptive statistics. + + Parameters + ---------- + model : object + The simulation model instance containing a `cell_params` attribute + with a `.get()` method. + param : str + The name of the parameter to calculate statistics for. 
+ + Returns + ------- + stats : dict + A dictionary containing the following keys: + - 'mean': Arithmetic mean of valid values. + - 'std': Standard deviation of valid values. + - 'min': Minimum valid value. + - 'max': Maximum valid value. + - 'n': Count of non-NaN values. + If no valid data is found, all stats return NaN and n returns 0. + + Examples + -------- + >>> stats = get_evolved_stats(my_model, "speed") + >>> print(stats['mean']) + 1.25 + """ arr = model.cell_params.get(param) if arr is None: return {"mean": np.nan, "std": np.nan, "min": np.nan, "max": np.nan, "n": 0} @@ -106,9 +226,36 @@ def get_evolved_stats(model, param: str) -> Dict: def average_pcfs( - pcf_list: List[Tuple[np.ndarray, np.ndarray, int]] + pcf_list: List[Tuple[np.ndarray, np.ndarray, int]], ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """Average multiple PCF measurements with standard error.""" + """ + Average multiple Pair Correlation Function (PCF) measurements and calculate standard error. + + Parameters + ---------- + pcf_list : list of tuple + A list where each element is a tuple containing: + - distances (np.ndarray): The radial distances (r). + - pcf_values (np.ndarray): The correlation values g(r). + - count (int): Metadata or weight (not used in current calculation). + + Returns + ------- + distances : np.ndarray + The radial distances from the first entry in the list. + pcf_mean : np.ndarray + The element-wise mean of the PCF values across all measurements. + pcf_se : np.ndarray + The standard error of the mean for the PCF values. + + Examples + -------- + >>> data = [(np.array([0, 1]), np.array([1.0, 2.0]), 10), + ... 
(np.array([0, 1]), np.array([1.2, 1.8]), 12)] + >>> dist, mean, se = average_pcfs(data) + >>> mean + array([1.1, 1.9]) + """ if len(pcf_list) == 0: return np.array([]), np.array([]), np.array([]) @@ -122,14 +269,72 @@ def average_pcfs( def save_results_jsonl(results: List[Dict], output_path: Path): - """Save results incrementally to JSONL format.""" + """ + Save a list of dictionaries to a file in JSON Lines (JSONL) format. + + Each dictionary in the list is serialized into a single JSON string and + written as a new line. Non-serializable objects are converted to strings + using the default string representation. + + Parameters + ---------- + results : list of dict + The collection of result dictionaries to be saved. + output_path : Path + The file system path (pathlib.Path) where the JSONL file will be created. + + Returns + ------- + None + + Notes + ----- + The file is opened in 'w' (write) mode, which will overwrite any existing + content at the specified path. + + Examples + -------- + >>> data = [{"id": 1, "score": 0.95}, {"id": 2, "score": 0.88}] + >>> save_results_jsonl(data, Path("results.jsonl")) + """ with open(output_path, "w", encoding="utf-8") as f: for result in results: f.write(json.dumps(result, default=str) + "\n") def save_results_npz(results: List[Dict], output_path: Path): - """Save results to compressed NPZ format.""" + """ + Save simulation results to a compressed NumPy (.npz) binary file. + + This function flattens a list of result dictionaries into a single + dictionary of NumPy arrays, prefixing keys with the run index to + maintain data separation. The resulting file is compressed to + reduce storage space. + + Parameters + ---------- + results : list of dict + A list where each dictionary contains key-value pairs of + simulation data (e.g., arrays, lists, or scalars). + output_path : Path + The file system path (pathlib.Path) where the compressed + NPZ file will be saved. 
+ + Returns + ------- + None + + Notes + ----- + The keys in the saved file follow the format 'run_{index}_{original_key}'. + Values are automatically converted to NumPy arrays if they are not + already. + + Examples + -------- + >>> results = [{"energy": [1, 2]}, {"energy": [3, 4]}] + >>> save_results_npz(results, Path("output.npz")) + """ data = {} for i, res in enumerate(results): for key, val in res.items(): @@ -138,7 +343,35 @@ def save_results_npz(results: List[Dict], output_path: Path): def load_results_jsonl(input_path: Path) -> List[Dict]: - """Load results from JSONL format.""" + """ + Load simulation results from a JSON Lines (JSONL) formatted file. + + This function reads a file line-by-line, parsing each line as an + independent JSON object and aggregating them into a list of dictionaries. + + Parameters + ---------- + input_path : Path + The file system path (pathlib.Path) to the JSONL file. + + Returns + ------- + results : list of dict + A list of dictionaries reconstructed from the file content. + + Raises + ------ + FileNotFoundError + If the specified input path does not exist. + json.JSONDecodeError + If a line in the file is not valid JSON. + + Examples + -------- + >>> data = load_results_jsonl(Path("results.jsonl")) + >>> len(data) + 2 + """ results = [] with open(input_path, "r", encoding="utf-8") as f: for line in f: @@ -163,9 +396,52 @@ def run_single_simulation( compute_pcf: Optional[bool] = None, ) -> Dict: """ - Run a single PP simulation and collect metrics. - - Returns dict with population, cluster, PCF, and evolution metrics. + Run a single Predator-Prey (PP) simulation and collect comprehensive metrics. + + This function initializes a Cellular Automata model, executes a warmup phase + to reach steady state, and then performs a measurement phase to track + population dynamics, spatial clustering, and evolutionary changes. + + Parameters + ---------- + prey_birth : float + The probability or rate of prey reproduction. 
+ prey_death : float + The base probability or rate of prey mortality. + predator_birth : float + The probability or rate of predator reproduction upon consuming prey. + predator_death : float + The probability or rate of predator mortality. + grid_size : int + The side length of the square simulation grid. + seed : int + Random seed for ensuring reproducibility of the simulation run. + cfg : Config + A configuration object containing simulation hyperparameters (densities, + sampling rates, timing, etc.). + with_evolution : bool, optional + If True, enables the evolution of the 'prey_death' parameter within + the model (default is False). + compute_pcf : bool, optional + Explicit toggle for Pair Correlation Function calculation. If None, + it is determined by `cfg.pcf_sample_rate` (default is None). + + Returns + ------- + result : dict + A dictionary containing simulation results including: + - Input parameters and survival flags. + - Population mean and standard deviation for both species. + - Cluster statistics (number of clusters, sizes, largest fractions). + - Evolutionary statistics (mean, std, min, max, and final values). + - PCF data and spatial indices (segregation and clustering). + - Optional time series for populations and evolved parameters. + + Notes + ----- + The function relies on several external utilities: `count_populations`, + `get_evolved_stats`, `get_cluster_stats_fast`, `compute_all_pcfs_fast`, + and `average_pcfs`. """ from models.CA import PP @@ -189,7 +465,6 @@ def run_single_simulation( "predator_birth": predator_birth, }, seed=seed, - synchronous=cfg.synchronous, directed_hunting=cfg.directed_hunting, ) @@ -334,28 +609,6 @@ def run_single_simulation( result["pcf_prey_pred"] = pcf_cr.tolist() # Short-range indices - """ - NOTE: The Pair Correlation function measures spatial correlation at distance r. 
- g(r) = 1: random (poisson distribution) - g(r) > 1: clustering (more pairs than random) - g(r) < 1: segregation (fewer pairs than random) - - prey_clustering_index: Do prey clump together? - pred_clustering_index: Do predators clump together? - segregation_index: Are prey and predators segregated? - - For the Hydra effect model: - segregation_index < 1: Prey and predators are spatially separated - prey_clustering_index > 1: Prey form clusters - pred_clustering_index > 1: Predators form clusters - - High segregation (low segregation index): prey can reproduce in predator-free zones - High prey clustering: prey form groups that can survive predation - At criticality: expect sepcific balance where clusters are large enough to sustain but - fragmented enough to avoid total predation. - - If segregation_index = 1 approx, no Hydra effect -> follow mean field dynamics. - """ short_mask = dist < 3.0 if np.any(short_mask): result["segregation_index"] = float(np.mean(pcf_cr[short_mask])) @@ -372,11 +625,38 @@ def run_single_simulation( def run_phase1(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]: """ - Phase 1: Parameter sweep to find critical point. - - - 2D sweep of prey_birth prey_death - - Both with and without evolution - - Outputs: bifurcation data, cluster distributions + Execute Phase 1 of the simulation: a parameter sweep to identify critical points. + + This function performs a 1D sweep across varying prey mortality rates while + keeping other parameters fixed. It utilizes parallel execution via joblib + and saves results incrementally to a JSONL file to ensure data integrity + during long-running batches. + + Parameters + ---------- + cfg : Config + Configuration object containing simulation hyperparameters, sweep + ranges, and execution settings (n_jobs, grid_size, etc.). + output_dir : Path + Directory where result files (JSONL) and metadata (JSON) will be stored. 
+ logger : logging.Logger + Logger instance for tracking simulation progress and recording + operational metadata. + + Returns + ------- + all_results : list of dict + A list of dictionaries containing the metrics collected from every + individual simulation run in the sweep. + + Notes + ----- + The function performs the following steps: + 1. Pre-warms Numba kernels for performance. + 2. Generates a deterministic set of simulation jobs using unique seeds. + 3. Executes simulations in parallel using a generator for memory efficiency. + 4. Records metadata including a timestamp and a serialized snapshot of + the configuration. """ from joblib import Parallel, delayed @@ -439,14 +719,35 @@ def run_phase1(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Di def run_phase2(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]: """ - Phase 2: Self-organization analysis. - - SOC Hypothesis: Prey evolve toward critical critical point regardless of initial conditions. - - NOTE: Test is currently start evo from different intial prey_death values (?) - If SOC holds, then all runs converge to the same final prey_death near critical point. - - FIXME: This run script needs to be adjusted + Execute Phase 2 of the simulation: self-organization and criticality analysis. + + This phase tests the Self-Organized Criticality (SOC) hypothesis by + initializing simulations at different points in the parameter space and + observing whether evolutionary pressure drives the system toward a + common critical point, regardless of initial prey mortality rates. + + Parameters + ---------- + cfg : Config + Configuration object containing simulation hyperparameters, evolution + settings, and execution constraints. + output_dir : Path + Directory where result files (JSONL) and metadata (JSON) will be stored. + logger : logging.Logger + Logger instance for tracking progress and evolutionary convergence. 
+ + Returns + ------- + all_results : list of dict + A list of dictionaries containing metrics from the evolutionary + simulation runs. + + Notes + ----- + The function captures: + 1. Convergence of 'prey_death' across multiple replicates. + 2. Final steady-state population distributions. + 3. Incremental saving of results to prevent data loss. """ from joblib import Parallel, delayed @@ -568,12 +869,37 @@ def run_phase3(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Di def run_phase4(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]: """ - Phase 4: Global Sensitivity Analysis. - Vary: prey_birth, prey_death, predator_birth, predator_death - - prey_death: 10 values from 0.05 to 0.95 - - prey_birth, predator_birth, predator_death: 11 values each from 0 to 1 - Reps: 10 - Grid size: 250 + Execute Phase 3 of the simulation: Finite-Size Scaling (FSS) analysis. + + This phase investigates how spatial structures, specifically cluster size + cutoffs, scale with the system size (L) at the critical point identified + in Phase 1. This is essential for determining the universality class of + the phase transition. + + Parameters + ---------- + cfg : Config + Configuration object containing critical point parameters, the list of + grid sizes to test, and execution settings. + output_dir : Path + Directory where result files (JSONL) and FSS metadata (JSON) will be + stored. + logger : logging.Logger + Logger instance for tracking progress across different grid sizes. + + Returns + ------- + all_results : list of dict + A list of dictionaries containing metrics and cluster statistics for + each grid size and replicate. + + Notes + ----- + The function performs the following: + 1. Iterates through multiple grid sizes defined in `cfg.grid_sizes`. + 2. Generates parallel jobs for each size using critical birth/death rates. + 3. Saves results incrementally to allow for post-simulation analysis of + power-law exponents. 
""" from joblib import Parallel, delayed import itertools @@ -666,83 +992,38 @@ def run_phase4(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Di def run_phase5(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]: """ - Phase 5: Perturbation analysis (critical slowing down). - - - Points around critical point - - Full time series for autocorrelation analysis - - Measure relaxation times - """ - from joblib import Parallel, delayed - - warmup_numba_kernels(cfg.grid_size, directed_hunting=cfg.directed_hunting) - - pb = cfg.critical_prey_birth - base_pd = cfg.critical_prey_death - - jobs = [] - for offset in cfg.prey_death_offsets: - pd = base_pd + offset - if pd <= 0: - continue - - for rep in range(cfg.n_replicates): - params = {"offset": offset, "phase": 5} - seed = generate_unique_seed(params, rep) - jobs.append( - ( - pb, - pd, - cfg.predator_birth, - cfg.predator_death, - cfg.grid_size, - seed, - cfg, - False, - ) - ) - - logger.info(f"Phase 5: {len(jobs):,} simulations") - logger.info(f" prey_death offsets: {cfg.prey_death_offsets}") - logger.info(f" Base critical point: pb={pb}, pd={base_pd}") - - output_jsonl = output_dir / "phase5_results.jsonl" - all_results = [] - - with open(output_jsonl, "w", encoding="utf-8") as f: - executor = Parallel(n_jobs=cfg.n_jobs, return_as="generator") - tasks = (delayed(run_single_simulation)(*job) for job in jobs) - - for result in tqdm(executor(tasks), total=len(jobs), desc="Phase 5"): - f.write(json.dumps(result, default=str) + "\n") - f.flush() - all_results.append(result) - - meta = { - "phase": 5, - "description": "Perturbation analysis / critical slowing down", - "n_sims": len(all_results), - "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"), - } - with open(output_dir / "phase5_metadata.json", "w") as f: - json.dump(meta, f, indent=2, default=str) - - logger.info(f"Phase 5 complete. 
Results: {output_jsonl}") - return all_results - - -def run_phase6(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Dict]: - """ - Phase 6: Model Extensions - Directed Hunting Comparison. - Same 4D sweep as Phase 4, but with directed_hunting=True. - Vary: prey_birth, prey_death, predator_birth, predator_death - - prey_death: 10 values from 0.05 to 0.95 - - prey_birth, predator_birth, predator_death: 11 values each from 0 to 1 - Reps: 10 - Grid size: 250 - Compare results with Phase 4 to assess impact of directed hunting on: - - Critical point location - - Hydra effect persistence - - SOC signatures + Execute Phase 5 of the simulation: Global 4D parameter sweep with directed hunting. + + This phase performs a comprehensive sensitivity analysis by varying four key + parameters (prey birth/death and predator birth/death) while directed + hunting is enabled. The results allow for a direct comparison with Phase 4 + to determine how predator search behavior shifts the system's critical + thresholds and stability. + + Parameters + ---------- + cfg : Config + Configuration object containing simulation hyperparameters, parallel + execution settings, and the fixed grid size for this phase. + output_dir : Path + Directory where the result JSONL file and execution metadata will + be stored. + logger : logging.Logger + Logger instance for tracking the progress of the high-volume + simulation batch. + + Returns + ------- + all_results : list of dict + A list of dictionaries containing metrics for every simulation in + the 4D parameter grid. + + Notes + ----- + The function utilizes a Cartesian product of parameter ranges to build a + job list of over 13,000 unique parameter sets (multiplied by replicates). + Seeds are uniquely generated to distinguish these runs from other phases + even if parameter values overlap. 
""" from joblib import Parallel, delayed import itertools @@ -754,7 +1035,7 @@ def run_phase6(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Di other_param_values = np.linspace(0.0, 1.0, 11) # 11 values for the rest # Logging - logger.info(f"Phase 6: Full 4D Parameter Sweep (Directed Hunting)") + logger.info(f"Phase 5: Full 4D Parameter Sweep (Directed Hunting)") logger.info(f" prey_death: 10 values from 0.05 to 0.95") logger.info(f" prey_birth, pred_birth, pred_death: 11 values each from 0 to 1") logger.info(f" Grid Size: {cfg.grid_size}") @@ -801,7 +1082,7 @@ def run_phase6(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Di f" Total simulations: {len(jobs):,}" ) # 11 * 10 * 11 * 11 * n_reps = 13,310 * n_reps - output_jsonl = output_dir / "phase6_results.jsonl" + output_jsonl = output_dir / "phase5_results.jsonl" all_results = [] with open(output_jsonl, "w", encoding="utf-8") as f: @@ -817,7 +1098,7 @@ def run_phase6(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Di # Save Metadata meta = { - "phase": 6, + "phase": 5, "description": "Global 4D Sensitivity Analysis with Directed Hunting", "prey_death_values": prey_death_values.tolist(), "other_param_values": other_param_values.tolist(), @@ -835,7 +1116,7 @@ def run_phase6(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Di with open(output_dir / "phase6_metadata.json", "w") as f: json.dump(meta, f, indent=2, default=str) - logger.info(f"Phase 6 complete. Results: {output_jsonl}") + logger.info(f"Phase 5 complete. Results: {output_jsonl}") return all_results @@ -849,11 +1130,24 @@ def run_phase6(cfg: Config, output_dir: Path, logger: logging.Logger) -> List[Di 3: run_phase3, 4: run_phase4, 5: run_phase5, - 6: run_phase6, } def main(): + """ + Organize the predator-prey experimental suite across multiple phases. + + This entry point handles command-line arguments, sets up logging and output + directories, and executes the requested simulation phases (1-5). 
It + supports parallel execution, dry runs for runtime estimation, and + automated configuration persistence. + + Notes + ----- + The script dynamically retrieves phase-specific configurations using + `get_phase_config` and dispatches execution to the corresponding runner + in the `PHASE_RUNNERS` mapping. + """ parser = argparse.ArgumentParser( description="Predator-Prey Hydra Effect Experiments", formatter_class=argparse.RawDescriptionHelpFormatter, @@ -863,8 +1157,7 @@ def main(): 2 Self-organization (evolution toward criticality) 3 Finite-size scaling at critical point 4 Sensitivity analysis across parameter regimes - 5 Perturbation analysis (critical slowing down) - 6 Model extensions (directed hunting comparison) + 5 Model extensions (directed hunting comparison) """, ) parser.add_argument( diff --git a/genai_usage/GEN_AI.md b/genai_usage/GEN_AI.md new file mode 100644 index 0000000..c35c60d --- /dev/null +++ b/genai_usage/GEN_AI.md @@ -0,0 +1,3 @@ +All our prompts will go here. + +A reflection of our GENAI usage should also be placed here. \ No newline at end of file diff --git a/docs/kimon_prompts.md b/genai_usage/kimon_prompts.md similarity index 99% rename from docs/kimon_prompts.md rename to genai_usage/kimon_prompts.md index c43602a..7e91867 100644 --- a/docs/kimon_prompts.md +++ b/genai_usage/kimon_prompts.md @@ -369,7 +369,8 @@ For parallel simulations, use different seeds per worker (e.g., base_seed + work Help me create a skeletal version of the updated experiments script for HPC that meets tha phase requirements outlined. The config class has been migrated to config.py. - ## Data Post-processing -Help me load and parse the data according to job and experimental phase number. The data will be analyzed in a jupyter notebook rather than a py file for usability. \ No newline at end of file +Help me load and parse the data according to job and experimental phase number. The data will be analyzed in a jupyter notebook rather than a py file for usability. 
+ + diff --git a/docs/sary_prompts.md b/genai_usage/sary_prompts.md similarity index 100% rename from docs/sary_prompts.md rename to genai_usage/sary_prompts.md diff --git a/docs/sofronia_prompts.md b/genai_usage/sofronia_prompts.md similarity index 100% rename from docs/sofronia_prompts.md rename to genai_usage/sofronia_prompts.md diff --git a/docs/prompts.md b/genai_usage/storm_prompts.md similarity index 100% rename from docs/prompts.md rename to genai_usage/storm_prompts.md diff --git a/scripts/analysis.py b/misc/analysis.py similarity index 100% rename from scripts/analysis.py rename to misc/analysis.py diff --git a/misc/benchmark.py b/misc/benchmark.py index 83a51f1..2dda897 100644 --- a/misc/benchmark.py +++ b/misc/benchmark.py @@ -4,7 +4,7 @@ Measures and compares performance of: 1. Numba-optimized kernel vs pure Python baseline -2. Cell-list PCF vs brute-force PCF +2. Cell-list PCF vs brute-force PCF 3. Grid size scaling behavior 4. Random vs directed hunting overhead 5. Full simulation pipeline diff --git a/misc/mean_field.py b/misc/mean_field.py deleted file mode 100644 index 188ee04..0000000 --- a/misc/mean_field.py +++ /dev/null @@ -1,201 +0,0 @@ -#!/usr/bin/env python3 - -import numpy as np -import matplotlib.pyplot as plt -from typing import Tuple, List, Dict, Optional -from scipy.integrate import solve_ivp -from scipy.integrate import odeint - - -class MeanFieldModel: - """ - Mean-field (non-spatial) predator-prey model. 
- - Equations: - dR/dt = R * (b - d_r - c*C - e*R) - dC/dt = C * (a*R - d_c - q*C) - - where: - R: Prey population density - C: Predator population density - b: Prey birth rate - d_r: Prey death rate - c: Consumption rate of prey by predators - e: Intraspecific competition among prey - a: Conversion efficiency of prey into predator offspring - d_c: Predator death rate - q: Intraspecific competition among predators - """ - - def __init__( - self, - birth: float = 0.2, - consumption: float = 0.8, - predator_death: float = 0.045, - conversion: float = 1.0, - prey_competition: float = 0.1, - predator_competition: float = 0.05, - ): - """ - Initialize the mean-field model with given parameters. - Args: - birth (float): Prey birth rate (b) - consumption (float): Consumption rate of prey by predators (c) - predator_death (float): Predator death rate (d_c) - conversion (float): Conversion efficiency of prey into predator offspring (a) - prey_competition (float): Intraspecific competition among prey (e) - predator_competition (float): Intraspecific competition among predators (q) - """ - self.birth = birth - self.consumption = consumption - self.predator_death = predator_death - self.conversion = conversion - self.pred_benifit = self.consumption * self.conversion - self.prey_competition = prey_competition - self.predator_competition = predator_competition - - def ode_system(self, Z: np.ndarray, t: float, prey_death: float) -> list: - """ - Mean-field ODE system for predator prey dynamics. 
- """ - R, C = Z - - R = np.maximum(R, 0) - C = np.maximum(C, 0) - - # Net prey growth rate - r = self.birth - prey_death - - # Prey dynamics: growth - predation - competition - dR = R * (r - self.consumption * C - self.prey_competition * R) - - # Predator dynamics: growth from predation - death - competition - dC = C * ( - self.conversion * self.consumption * R - - self.predator_death - - self.predator_competition * C - ) - - return [dR, dC] - - def solve( - self, - prey_death: float = 0.5, - R0: float = 0.5, - C0: float = 0.2, - t_max: float = 500, - n_points: int = 1000, - ) -> Tuple[np.ndarray, np.ndarray]: - """ - Solve the mean-field ODE system. - - Args: - prey_death (float): Prey death rate (d) - Z0 (Tuple[float, float]): Initial conditions (prey, predator) - t_max (float): Maximum time - n_points (int): Number of time points - - Returns: - Tuple[np.ndarray, np.ndarray]: Time points and solution array - """ - t = np.linspace(0, t_max, n_points) - Z0 = [R0, C0] - - sol = odeint(self.ode_system, Z0, t, args=(prey_death,)) - - return t, sol - - def equilibrium(self, prey_death: float) -> Tuple[float, float]: - """ - Calculate the equilibrium densities of the system. 
- - Args: - prey_death (float): Prey death rate (d) - - Returns: - Tuple[float, float]: Equilibrium populations (prey, predator) - """ - r = self.birth - prey_death - c = self.consumption - a = self.pred_benifit - e = self.prey_competition - q = self.predator_competition - d_c = self.predator_death - - if r <= 0: - return (0.0, 0.0) - - R_prey = r / e - - # Check if predator can invade - predator_invasion_fitness = a * R_prey - d_c - if predator_invasion_fitness <= 0: - return (R_prey, 0.0) # Predator cannot persist - - # Coexistence equilibrium - R_n = r * q + d_c * c - R_d = c * a + e * q - - if R_d <= 0: - return (R_prey, 0.0) - - R_star = R_n / R_d - C_star = (a * R_star - d_c) / q - - if R_star < 0 or C_star < 0: - if r > 0: - return (R_prey, 0.0) - else: - return (0.0, 0.0) - - return (R_star, C_star) - - def equilibrium_numerical( - self, prey_death: float, t_max: float = 1000 - ) -> Tuple[float, float]: - """ - Find equilibrium densities numerically by solving ODEs over a long time. - """ - t, Z = self.solve(prey_death=prey_death, t_max=t_max) - R_eq = max(0, np.mean(Z[-100:, 0])) - C_eq = max(0, np.mean(Z[-100:, 1])) - return (R_eq, C_eq) - - def sweep_death_rate( - self, d_r_values: np.ndarray, method: str = "analytical" - ) -> Dict[str, np.ndarray]: - """ - Sweep prey death rate and record equilibrium densities. 
- """ - n = len(d_r_values) - R_eq = np.zeros(n) - C_eq = np.zeros(n) - - for i, d_r in enumerate(d_r_values): - if method == "analytical": - R_eq[i], C_eq[i] = self.equilibrium(d_r) - else: - R_eq[i], C_eq[i] = self.equilibrium_numerical(d_r) - - return { - "d_r": d_r_values, - "R_eq": R_eq, - "C_eq": C_eq, - "net_growth": self.birth - d_r_values, - } - - -if __name__ == "__main__": - print("Mean-Field Model Module") - mf = MeanFieldModel() - - print("Model Parameters:") - print(f"Birth rate: {mf.birth}") - print(f"Consumption rate: {mf.consumption}") - print(f"Predator death rate: {mf.predator_death}") - print(f"Conversion efficiency: {mf.conversion}") - print(f"Prey competition: {mf.prey_competition}") - print(f"Predator competition: {mf.predator_competition}") - - d_r_values = np.linspace(0.01, 0.15, 50) - results = mf.sweep_death_rate(d_r_values) diff --git a/misc/profile_sim.py b/misc/profile_sim.py deleted file mode 100644 index af28176..0000000 --- a/misc/profile_sim.py +++ /dev/null @@ -1,26 +0,0 @@ -import cProfile, pstats -from pathlib import Path -import sys - -# Ensure we can find our modules -sys.path.insert(0, str(Path(__file__).parent.parent)) -from scripts.experiments import Config, run_single_simulation - -# 1. Setup a single simulation configuration -cfg = Config() -cfg.default_grid = 150 -cfg.warmup_steps = 200 -cfg.measurement_steps = 300 - -# 2. Profile the function -profiler = cProfile.Profile() -profiler.enable() - -# Run a single simulation (no parallelization) -run_single_simulation(0.2, 0.05, 150, 42, True, cfg) - -profiler.disable() - -# 3. 
Print the top 15 time-consumers -stats = pstats.Stats(profiler).sort_stats("tottime") -stats.print_stats(15) diff --git a/misc/soc_analysis.py b/misc/soc_analysis.py index 45c7ff6..7f7cbf9 100644 --- a/misc/soc_analysis.py +++ b/misc/soc_analysis.py @@ -26,7 +26,6 @@ # Import directly from models to avoid __init__ issues from models.CA import PP - # ============================================================================ # 1. STRESS METRIC & PERTURBATION DYNAMICS # ============================================================================ diff --git a/models/CA.py b/models/CA.py index aef57d3..f69db56 100644 --- a/models/CA.py +++ b/models/CA.py @@ -1,8 +1,34 @@ -"""Cellular automaton base class. +#!/usr/bin/env python3 +""" +Cellular Automaton Framework +============================ + +This module provides the base cellular automaton class and the +Predator-Prey (PP) implementation with Numba-accelerated kernels. + +Classes +------- +CA: Abstract base class for spatial cellular automata. + +PP: Predator-Prey model with configurable hunting behavior. + +Example +------- +```python +from models.CA import PP -Defines a CA class with initialization, neighbor counting, update (to override), -and run loop. Uses a numpy Generator for all randomness and supports -Neumann and Moore neighborhoods with periodic boundaries. +# Basic usage +model = PP(rows=100, cols=100, densities=(0.3, 0.15), seed=42) +model.run(steps=1000) + +# With evolution enabled +model = PP(rows=100, cols=100, seed=42) +model.evolve("prey_death", sd=0.05, min_val=0.01, max_val=0.15) +model.run(steps=500) + +# With directed hunting +model = PP(rows=100, cols=100, directed_hunting=True, seed=42) +``` """ from typing import Tuple, Dict, Optional @@ -22,15 +48,27 @@ class CA: - """Base cellular automaton class. + """ + Base cellular automaton class for spatial simulations. 
+ + This class provides a framework for multi-species cellular automata with + support for global parameters, per-cell evolving parameters, and + grid initialization based on density. Attributes - - n_species: number of distinct (non-zero) states - - grid: 2D numpy array containing integers in {0, 1, ..., n_species} - - neighborhood: either "neumann" or "moore" - - generator: numpy.random.Generator used for all randomness - - params: global parameters dict - - cell_params: local (per-cell) parameters dict + ---------- + grid : np.ndarray + 2D numpy array containing integers in range [0, n_species]. + params : Dict[str, Any] + Global parameters shared by all cells. + cell_params : Dict[str, Any] + Local per-cell parameters, typically stored as numpy arrays matching the grid shape. + neighborhood : str + The adjacency rule used ('neumann' or 'moore'). + generator : np.random.Generator + The random number generator instance for reproducibility. + species_names : Tuple[str, ...] + Human-readable names for each species state. """ # Default colormap spec (string or sequence); resolved in `visualize` at runtime @@ -39,19 +77,23 @@ class CA: # Read-only accessors for size/densities (protected attributes set in __init__) @property def rows(self) -> int: + """int: Number of rows in the grid.""" return getattr(self, "_rows") @property def cols(self) -> int: + """int: Number of columns in the grid.""" return getattr(self, "_cols") @property def densities(self) -> Tuple[float, ...]: + """Tuple[float, ...]: Initial density fraction for each species.""" return tuple(getattr(self, "_densities")) # make n_species protected with read-only property @property def n_species(self) -> int: + """int: Number of distinct species states (excluding empty state 0).""" return int(getattr(self, "_n_species")) def __init__( @@ -64,22 +106,26 @@ def __init__( cell_params: Dict[str, object], seed: Optional[int] = None, ) -> None: - """Initialize the cellular automaton. 
- - Args: - - rows (int): number of rows (>0) - - cols (int): number of columns (>0) - - densities (tuple of floats): initial density for each species. The - length of this tuple defines `n_species`. Values must be >=0 and sum - to at most 1. Each value gives the fraction of the grid to set to - that species (state values are 1..n_species). - - neighborhood (str): either "neumann" (4-neighbors) or "moore" - (8-neighbors). - - params (dict): global parameters. - - cell_params (dict): local per-cell parameters. - - seed (Optional[int]): seed for the numpy random generator. - - Returns: None + """ + Initialize the cellular automaton grid and configurations. + + Parameters + ---------- + rows : int + Number of rows in the grid (must be > 0). + cols : int + Number of columns in the grid (must be > 0). + densities : Tuple[float, ...] + Initial density for each species. Length defines `n_species`. + Values must sum to <= 1.0. + neighborhood : {'neumann', 'moore'} + Type of neighborhood connectivity. + params : Dict[str, Any] + Initial global parameter values. + cell_params : Dict[str, Any] + Initial local per-cell parameters. + seed : int, optional + Seed for the random number generator. """ assert isinstance(rows, int) and rows > 0, "rows must be positive int" assert isinstance(cols, int) and cols > 0, "cols must be positive int" @@ -142,12 +188,16 @@ def __init__( self.grid[r, c] = i + 1 def validate(self) -> None: - """Validate core CA invariants. + """ + Validate core CA invariants and grid dimensions. + + Checks that the neighborhood is valid, the grid matches initialized dimensions, + and that local parameter arrays match the grid shape. - Checks that `neighborhood` is valid, that `self.grid` has the - texpected shape `(rows, cols)`, and that any numpy arrays in - `self.cell_params` have matching shapes. Raises `ValueError` on - validation failure. + Raises + ------ + ValueError + If any structural invariant is violated. 
""" if self.neighborhood not in ("neumann", "moore"): raise ValueError("neighborhood must be 'neumann' or 'moore'") @@ -164,10 +214,29 @@ def validate(self) -> None: raise ValueError(f"cell_params['{k}'] must have shape equal to grid") def _infer_species_from_param_name(self, param_name: str) -> Optional[int]: - """Infer species index (1-based) from a parameter name using `species_names`. - - Returns the 1-based species index if a matching prefix is found, - otherwise `None`. + """ + Infer the 1-based species index from a parameter name using `species_names`. + + This method checks if the given parameter name starts with any of the + defined species names followed by an underscore (e.g., 'prey_birth'). + It is used to automatically route global parameters to the correct + species' local parameter arrays. + + Parameters + ---------- + param_name : str + The name of the parameter to check. + + Returns + ------- + Optional[int] + The 1-based index of the species if a matching prefix is found; + otherwise, None. + + Notes + ----- + The method expects `self.species_names` to be a collection of strings. + If `param_name` is not a string or no match is found, it returns None. """ if not isinstance(param_name, str): return None @@ -184,13 +253,44 @@ def evolve( min_val: Optional[float] = None, max_val: Optional[float] = None, ) -> None: - """Enable per-cell evolution for `param` on `species`. - - If `species` is None, attempt to infer the species using - `_infer_species_from_param_name(param)` which matches against - `self.species_names`. This keeps `CA` free of domain-specific - (predator/prey) logic while preserving backward compatibility when - subclasses set `species_names` (e.g. `('prey','predator')`). + """ + Enable per-cell evolution for a specific parameter on a given species. + + This method initializes a spatial parameter array (local parameter map) + for a global parameter. 
It allows individual cells to carry their own + values for that parameter, which can then mutate and evolve during + the simulation. + + Parameters + ---------- + param : str + The name of the global parameter to enable for evolution. + Must exist in `self.params`. + species : int, optional + The 1-based index of the species to which this parameter applies. + If None, the method attempts to infer the species from the + parameter name prefix. + sd : float, default 0.05 + The standard deviation of the Gaussian mutation applied during + inheritance/reproduction. + min_val : float, optional + The minimum allowable value for the parameter (clamping). + Defaults to 0.01 if not provided. + max_val : float, optional + The maximum allowable value for the parameter (clamping). + Defaults to 0.99 if not provided. + + Raises + ------ + ValueError + If the parameter is not in `self.params`, the species cannot be + inferred, or the species index is out of bounds. + + Notes + ----- + The local parameter is stored in `self.cell_params` as a 2D numpy + array initialized with the current global value for all cells of + the target species, and `NaN` elsewhere. """ if min_val is None: min_val = 0.01 @@ -219,13 +319,27 @@ def evolve( } def update(self) -> None: - """Perform one update step. - - This base implementation must be overridden by subclasses. It raises - NotImplementedError to indicate it should be provided by concrete - models that inherit from `CA`. - - Returns: None + """ + Perform one update step of the cellular automaton. + + This is an abstract method that defines the transition rules of the + system. It must be implemented by concrete subclasses to specify + how cell states and parameters change over time based on their + current state and neighborhood. + + Raises + ------ + NotImplementedError + If called directly on the base class instead of an implementation. 
+ + Returns + ------- + None + + Notes + ----- + In a typical implementation, this method handles the logic for + stochastic transitions, movement, or predator-prey interactions. """ raise NotImplementedError( "Override update() in a subclass to define CA dynamics" @@ -237,12 +351,35 @@ def run( stop_evolution_at: Optional[int] = None, snapshot_iters: Optional[list] = None, ) -> None: - """Run the CA for a number of steps. - - Args: - - steps (int): number of iterations to run (must be non-negative). - - Returns: None + """ + Execute the cellular automaton simulation for a specified number of steps. + + This method drives the simulation loop, calling `update()` at each + iteration. It manages visualization updates, directory creation for + data persistence, and handles the freezing of evolving parameters + at a specific time step. + + Parameters + ---------- + steps : int + The total number of iterations to run (must be non-negative). + stop_evolution_at : int, optional + The 1-based iteration index after which parameter mutation is + disabled. Useful for observing system stability after a period + of adaptation. + snapshot_iters : List[int], optional + A list of specific 1-based iteration indices at which to save + the current grid state to the results directory. + + Returns + ------- + None + + Notes + ----- + If snapshots are requested, a results directory is automatically created + using a timestamped subfolder (e.g., 'results/run-1700000000/'). + Visualization errors are logged but do not terminate the simulation. """ assert ( isinstance(steps, int) and steps >= 0 @@ -294,19 +431,46 @@ def run( class PP(CA): - """Predator-prey CA. - - States: 0 = empty, 1 = prey, 2 = predator - - Parameters (in `params` dict). Allowed keys and defaults: - - "prey_death": 0.05 - - "predator_death": 0.1 - - "prey_birth": 0.25 - - "predator_birth": 0.2 + """ + Predator-Prey Cellular Automaton model with Numba-accelerated kernels. 
+ + This model simulates a stochastic predator-prey system where species + interact on a 2D grid. It supports evolving per-cell death rates, + periodic boundary conditions, and both random and directed hunting + behaviors. + + Parameters + ---------- + rows : int, default 10 + Number of rows in the simulation grid. + cols : int, default 10 + Number of columns in the simulation grid. + densities : Tuple[float, ...], default (0.2, 0.1) + Initial population densities for (prey, predator). + neighborhood : {'moore', 'neumann'}, default 'moore' + The neighborhood type for cell interactions. + params : Dict[str, object], optional + Global parameters: "prey_death", "predator_death", "prey_birth", + "predator_birth". + cell_params : Dict[str, object], optional + Initial local parameter maps (2D arrays). + seed : int, optional + Random seed for reproducibility. + synchronous : bool, default True + If True, updates the entire grid at once. If False, updates + cells asynchronously. + directed_hunting : bool, default False + If True, predators selectively hunt prey rather than choosing + neighbors at random. - The constructor validates parameters are in [0,1] and raises if - other user-supplied params are present. The `synchronous` flag - chooses the update mode (default True -> synchronous updates). + Attributes + ---------- + species_names : Tuple[str, ...] + Labels for the species ('prey', 'predator'). + synchronous : bool + Current update mode. + directed_hunting : bool + Current hunting strategy logic. """ # Default colors: 0=empty black, 1=prey green, 2=predator red @@ -324,6 +488,9 @@ def __init__( synchronous: bool = True, directed_hunting: bool = False, # New directed hunting option ) -> None: + """ + Initialize the Predator-Prey CA with validated parameters and kernels. 
+ """ # Allowed params and defaults _defaults = { "prey_death": 0.05, @@ -374,13 +541,19 @@ def __init__( # Remove PP-specific evolve wrapper; use CA.evolve with optional species def validate(self) -> None: - """Validate PP-specific invariants in addition to base CA checks. - - Checks: - - each global parameter is numeric and in [0,1] - - per-cell evolved parameter arrays (in `_evolve_info`) have non-NaN - positions matching the species grid and contain values within the - configured min/max range (or are NaN). + """ + Validate Predator-Prey specific invariants and spatial parameter arrays. + + Extends the base CA validation to ensure that numerical parameters are + within the [0, 1] probability range and that evolved parameter maps + (e.g., prey_death) correctly align with the species locations. + + Raises + ------ + ValueError + If grid shapes, parameter ranges, or species masks are inconsistent. + TypeError + If parameters are non-numeric. """ super().validate() @@ -428,6 +601,14 @@ def validate(self) -> None: ) def update_async(self) -> None: + """ + Execute an asynchronous update using the optimized Numba kernel. + + This method retrieves the evolved parameter maps and delegates the + stochastic transitions to the `PPKernel`. Asynchronous updates + typically handle cell-by-cell logic where changes can be + immediately visible to neighbors. + """ # Get the evolved prey death map # Fallback to a full array of the global param if it doesn't exist yet p_death_arr = self.cell_params.get("prey_death") @@ -455,8 +636,7 @@ def update_async(self) -> None: ) def update(self) -> None: - """Dispatch to synchronous or asynchronous update mode.""" - if self.synchronous: - self.update_sync() - else: - self.update_async() + """ + Dispatch the simulation step based on the configured update mode. 
+ """ + self.update_async() diff --git a/models/__init__.py b/models/__init__.py index 279bbd8..cf70414 100644 --- a/models/__init__.py +++ b/models/__init__.py @@ -1,2 +1,90 @@ -from ..misc.mean_field import MeanFieldModel +""" +Predator-Prey Cellular Automaton Models +======================================= + +This package provides a Numba-accelerated cellular automaton framework +for simulating predator-prey dynamics with spatial structure. + +Main Components +--------------- +- `PP` : Predator-Prey cellular automaton model +- `Config` : Configuration dataclass for experiments +- `PPKernel` : Low-level Numba-optimized update kernel + +Spatial Analysis +---------------- +- `get_cluster_stats_fast` : Comprehensive cluster statistics +- `detect_clusters_fast` : Cluster detection with labels +- `measure_cluster_sizes_fast` : Fast cluster size measurement +- `compute_all_pcfs_fast` : Pair correlation functions + +Example +------- +```python +from models import PP, Config + +# Create a model with default parameters +model = PP(rows=100, cols=100, seed=42) + +# Run simulation +for _ in range(1000): + model.update() + +# Or use the run method +model.run(steps=1000) +``` + +For experiments, use the configuration system: + +```python +from models import Config, get_phase_config + +# Use predefined phase config +cfg = get_phase_config(1) + +# Or create custom config +cfg = Config(grid_size=200, n_replicates=10) +``` +""" + +# Core model classes +from models.CA import CA, PP + +# Configuration +from models.config import * + +# Numba-optimized components +from models.numba_optimized import * + +__all__ = [ + # Core + "CA", + "PP", + # Config + "Config", + "get_phase_config", + "PHASE_CONFIGS", + "PHASE1_CONFIG", + "PHASE2_CONFIG", + "PHASE3_CONFIG", + "PHASE4_CONFIG", + "PHASE5_CONFIG", + # Numba kernel + "PPKernel", + # Cluster analysis + "measure_cluster_sizes_fast", + "detect_clusters_fast", + "get_cluster_stats_fast", + # PCF analysis + "compute_pcf_periodic_fast", + 
"compute_all_pcfs_fast", + # Utilities + "set_numba_seed", + "warmup_numba_kernels", + "NUMBA_AVAILABLE", +] + +__version__ = "1.0.0" + from .CA import CA, PP + diff --git a/models/config.py b/models/config.py index 669fe38..eed429c 100644 --- a/models/config.py +++ b/models/config.py @@ -1,196 +1,189 @@ #!/usr/bin/env python3 """ -Configuration for Predator-Prey Hydra Effect Experiments - -Single Config dataclass with pre-defined instances for each experimental phase. - -Usage: - from config import PHASE1_CONFIG, PHASE2_CONFIG, Config - - # Use pre-defined config - cfg = PHASE1_CONFIG - - # Or create custom config - cfg = Config(grid_size=150, n_replicates=20) - - # Or modify existing - cfg = Config(**{**asdict(PHASE1_CONFIG), 'n_replicates': 30}) - - - -NOTE: Saving snapshots of the grid can be implemented with the following logic: - - final_grid: cluster analysis verfication for every n_stps. - - For Phase 3, save fro all grif sizes - - Add to config: - save_final_grid: bool = False - save_grid_timeseries: bool = False # Very costly, use sparingly - grid_timeseries_subsample: int = N # Save every N steps - snapshot_sample_rate: float = 0.0X # Only X% of runs save snapshots - - For run_single_simulation(): - # After cluster analysis - if cfg.save_final_grid: - # Only save for a sample of runs - if np.random.random() < cfg.snapshot_sample_rate: - result["final_grid"] = model.grid.tolist() # JSON-serializable - - # For grid timeseries (use very sparingly): - if cfg.save_grid_timeseries: - grid_snapshots = [] - - # Inside measurement loop: - if cfg.save_grid_timeseries and step % cfg.grid_timeseries_subsample == 0: - grid_snapshots.append(model.grid.copy()) - - # After loop: - if cfg.save_grid_timeseries and grid_snapshots: - # Save separately to avoid bloating JSONL - snapshot_path = output_dir / f"snapshots_{seed}.npz" - np.savez_compressed(snapshot_path, grids=np.array(grid_snapshots)) - result["snapshot_file"] = str(snapshot_path) - - - OR create separate 
snapshot runs using some sort of SNAPSHOT_CONFIG. +Experiment Configuration +======================== + +This module provides the configuration dataclass and pre-defined phase +configurations for Predator-Prey Hydra Effect experiments. + +Classes +------- +Config + Central configuration dataclass with all experiment parameters. + +Functions +--------- +```python +get_phase_config: Retrieve configuration for a specific experimental phase. +```` + +Phase Configurations +-------------------- +- ``PHASE1_CONFIG``: Parameter sweep to find critical point +- ``PHASE2_CONFIG``: Self-organization (evolution toward criticality) +- ``PHASE3_CONFIG``: Finite-size scaling at critical point +- ``PHASE4_CONFIG``: Sensitivity analysis (4D parameter sweep) +- ``PHASE5_CONFIG``: Directed hunting comparison + +Example +------- +```python +from models.config import Config, get_phase_config + +# Use predefined phase config +cfg = get_phase_config(1) + +# Create custom config +cfg = Config(grid_size=200, n_replicates=10) + +# Generate parameter sweep values +prey_deaths = cfg.get_prey_deaths() +``` """ - -from dataclasses import dataclass, field, asdict -from typing import Tuple, Optional +from dataclasses import dataclass +from typing import Tuple import numpy as np @dataclass class Config: - """Central configuration for all experiments.""" + """ + Central configuration for Predator-Prey Hydra Effect experiments. + + Attributes + ---------- + grid_size : int + Side length of the square simulation grid. + densities : Tuple[float, float] + Initial population fractions for (prey, predator). + grid_sizes : Tuple[int, ...] + Grid dimensions for Finite-Size Scaling (FSS) analysis (Phase 3). + prey_birth : float + Global birth rate for prey species. + prey_death : float + Global death rate for prey species. + predator_birth : float + Global birth rate for predator species. + predator_death : float + Global death rate for predator species. 
+ critical_prey_birth : float + Critical birth rate identified from Phase 1. + critical_prey_death : float + Critical death rate identified from Phase 1. + prey_death_range : Tuple[float, float] + Bounds for prey death rate sweep. + n_prey_death : int + Number of points in prey death rate sweep. + n_replicates : int + Independent stochastic runs per parameter set. + warmup_steps : int + Iterations before data collection begins. + measurement_steps : int + Iterations for collecting statistics. + evolve_sd : float + Standard deviation for parameter mutation. + evolve_min : float + Lower bound for evolving parameters. + evolve_max : float + Upper bound for evolving parameters. + directed_hunting : bool + Toggle for targeted predator movement. + save_timeseries : bool + Toggle for recording population time series. + timeseries_subsample : int + Subsample rate for time series data. + collect_pcf : bool + Toggle for Pair Correlation Function analysis. + pcf_sample_rate : float + Fraction of runs that compute PCFs. + pcf_max_distance : float + Maximum radial distance for PCF. + pcf_n_bins : int + Number of bins in PCF histogram. + min_density_for_analysis : float + Population threshold for spatial analysis. + n_jobs : int + CPU cores for parallelization (-1 = all). + """ # Grid settings - grid_size: int = 1000 # FIXME: Decide default configuration - densities: Tuple[float, float] = ( - 0.30, - 0.15, - ) # (prey, predator) #FIXME: Default densities - - # For FSS experiments: multiple grid sizes + grid_size: int = 1000 + densities: Tuple[float, float] = (0.30, 0.15) grid_sizes: Tuple[int, ...] 
= (50, 100, 250, 500, 1000, 2500) - # Default/fixed parameters + # Species parameters prey_birth: float = 0.2 prey_death: float = 0.05 - predator_birth: float = 0.8 # FIXME: Default predator death rate - predator_death: float = 0.05 # FIXME: Default predator death rate + predator_birth: float = 0.8 + predator_death: float = 0.05 - # Critical point (UPDATE AFTER PHASE 1) + # Critical point (from Phase 1) critical_prey_birth: float = 0.20 - critical_prey_death: float = 0.947 + critical_prey_death: float = 0.0968 - # Prey parameter sweep (Phase 1) + # Parameter sweep settings prey_death_range: Tuple[float, float] = (0.0, 0.2) - n_prey_birth: int = 15 # FIXME: Decide number of grid points along prey axes - n_prey_death: int = 5 - - # Predator parameter sweep (Phase 4 sensitivity) - predator_birth_values: Tuple[float, ...] = ( - 0.15, - 0.20, - 0.25, - 0.30, - ) # FIXME: Bogus values for now - predator_death_values: Tuple[float, ...] = ( - 0.05, - 0.10, - 0.15, - 0.20, - ) # FIXME: Bogus values for now - - # Perturbation offsets from critical point (Phase 5) - prey_death_offsets: Tuple[float, ...] = ( - -0.02, - -0.01, - 0.0, - 0.01, - 0.02, - ) # FIXME: Bogus values for now - - # Number of replicates per parameter configuration - n_replicates: int = 15 # FIXME: Decide number of indep. runs per parameter config - - # Simulation steps - warmup_steps: int = 300 # FIXME: Steps to run before measuring - measurement_steps: int = 500 # FIXME: Decide measurement steps - - # Evo - with_evolution: bool = False + n_prey_death: int = 20 + + # Replication + n_replicates: int = 15 + + # Simulation timing + warmup_steps: int = 300 + measurement_steps: int = 500 + + # Evolution settings evolve_sd: float = 0.10 evolve_min: float = 0.0 evolve_max: float = 0.10 - # Sensitivity: mutation strength values to test - sensitivity_sd_values: Tuple[float, ...] 
= ( - 0.02, - 0.05, - 0.10, - 0.15, - 0.20, - ) # FIXME: Don't know if we use yet - - # Update mode - synchronous: bool = False # Always False for this model + # Model variant directed_hunting: bool = False - # For Phase 6: compare model variants - directed_hunting_values: Tuple[bool, ...] = (False, True) - - # Temporal data collection (time series) + # Time series collection save_timeseries: bool = False - timeseries_subsample: int = 10 # FIXME: Save every how many steps + timeseries_subsample: int = 10 # PCF settings collect_pcf: bool = True - pcf_sample_rate: float = 0.2 # Fraction of runs to compute PCF + pcf_sample_rate: float = 0.2 pcf_max_distance: float = 20.0 pcf_n_bins: int = 20 - # Cluster analysis - min_density_for_analysis: float = ( - 0.002 # FIXME: Minimum prey density (fraction of grid) to analyze clusters/PCF - ) - - # Perturbation settings (Phase 5) - perturbation_magnitude: float = ( - 0.1 # FIXME: Fractional change to apply at perturbation time - ) + # Analysis thresholds + min_density_for_analysis: float = 0.002 # Parallelization - n_jobs: int = -1 # Use all available cores by default - - # Helpers - def get_prey_births(self) -> np.ndarray: - """Generate prey birth rate sweep values.""" - return np.linspace( - self.prey_birth_range[0], self.prey_birth_range[1], self.n_prey_birth - ) + n_jobs: int = -1 def get_prey_deaths(self) -> np.ndarray: - """Generate prey death rate sweep values.""" + """Generate array of prey death rates for parameter sweep.""" return np.linspace( self.prey_death_range[0], self.prey_death_range[1], self.n_prey_death ) - def get_warmup_steps( - self, L: int - ) -> int: # FIXME: This method will be updated depending on Sary's results. 
- """Scale warmup with grid size.""" + def get_warmup_steps(self, L: int) -> int: + """Get warmup steps (can be extended for size-dependent scaling).""" return self.warmup_steps def get_measurement_steps(self, L: int) -> int: - """Scale measurement with grid size.""" + """Get measurement steps (can be extended for size-dependent scaling).""" return self.measurement_steps def estimate_runtime(self, n_cores: int = 32) -> str: - """Estimate total runtime based on benchmark data.""" - # Benchmark: ~1182 steps/sec for 100x100 grid + """ + Estimate wall-clock time for the experiment. + + Parameters + ---------- + n_cores : int + Number of available CPU cores. + + Returns + ------- + str + Human-readable runtime estimate. + """ ref_size = 100 ref_steps_per_sec = 1182 @@ -200,13 +193,9 @@ def estimate_runtime(self, n_cores: int = 32) -> str: total_steps = self.warmup_steps + self.measurement_steps base_time_s = total_steps / actual_steps_per_sec - # PCF overhead (~8ms for 100x100) pcf_time_s = (0.008 * size_scaling) if self.collect_pcf else 0 - # Count simulations - n_sims = self.n_prey_birth * self.n_prey_death * self.n_replicates - if self.with_evolution: - n_sims *= 2 # Both evo and non-evo runs + n_sims = self.n_prey_death * self.n_replicates total_seconds = n_sims * (base_time_s + pcf_time_s * self.pcf_sample_rate) total_seconds /= n_cores @@ -217,11 +206,9 @@ def estimate_runtime(self, n_cores: int = 32) -> str: return f"{n_sims:,} sims, ~{hours:.1f}h on {n_cores} cores (~{core_hours:.0f} core-hours)" -############################################################################################ -# Experimental Phase Configurations -############################################################################################ - -# FIXME: These configs are arbitraty and should be finalized before running experiments. 
+# ============================================================================= +# Phase Configurations +# ============================================================================= PHASE1_CONFIG = Config( grid_size=1000, @@ -234,79 +221,53 @@ def estimate_runtime(self, n_cores: int = 32) -> str: warmup_steps=1000, measurement_steps=1000, collect_pcf=False, - pcf_sample_rate=0.2, save_timeseries=False, directed_hunting=False, ) -# Phase 2: Self-organization (evolution toward criticality) PHASE2_CONFIG = Config( grid_size=1000, - n_prey_birth=1, # Fixed at cfg.prey_birth (0.2) + n_prey_death=10, n_replicates=10, - warmup_steps=1000, # Shorter warmup (evolution starts immediately) - measurement_steps=10000, # Longer measurement to see convergence - # Evolution settings - with_evolution=True, - evolve_sd=0.01, # Smaller mutation rate for smoother convergence + warmup_steps=1000, + measurement_steps=10000, + evolve_sd=0.01, evolve_min=0.0, - evolve_max=0.20, # Allow full range + evolve_max=0.20, collect_pcf=False, - save_timeseries=False, # Track evolution trajectory + save_timeseries=False, ) -# Phase 3: Finite-size scaling at critical point PHASE3_CONFIG = Config( grid_sizes=(50, 100, 250, 500, 1000, 2500), n_replicates=20, warmup_steps=1000, measurement_steps=1000, - critical_prey_birth=0.20, # Add explicitly - critical_prey_death=0.947, # Add explicitly - verify from Phase 1! 
+ critical_prey_birth=0.20, + critical_prey_death=0.0968, collect_pcf=True, pcf_sample_rate=1.0, save_timeseries=False, - with_evolution=False, directed_hunting=False, ) -# Phase 4: Sensitivity analysis PHASE4_CONFIG = Config( - grid_size=250, # As requested - n_replicates=10, # As requested - warmup_steps=500, # As requested - measurement_steps=500, # As requested - with_evolution=False, + grid_size=250, + n_replicates=10, + warmup_steps=500, + measurement_steps=500, collect_pcf=False, save_timeseries=False, - timeseries_subsample=10, directed_hunting=False, ) - -# Phase 5: Perturbation analysis (critical slowing down) PHASE5_CONFIG = Config( - grid_size=100, - prey_death_offsets=(-0.02, -0.01, 0.0, 0.01, 0.02), # FIXME: Is this what we vary? - n_replicates=20, - warmup_steps=500, - measurement_steps=2000, - perturbation_magnitude=0.1, - collect_pcf=False, - save_timeseries=True, - timeseries_subsample=1, # Full resolution for autocorrelation -) - -# Phase 6: Model extensions (directed reproduction); same config as phase 4 but with directed reproduction -PHASE6_CONFIG = Config( grid_size=250, n_replicates=10, warmup_steps=500, measurement_steps=500, - with_evolution=False, collect_pcf=False, save_timeseries=False, - timeseries_subsample=10, directed_hunting=True, ) @@ -316,14 +277,30 @@ def estimate_runtime(self, n_cores: int = 32) -> str: 3: PHASE3_CONFIG, 4: PHASE4_CONFIG, 5: PHASE5_CONFIG, - 6: PHASE6_CONFIG, } def get_phase_config(phase: int) -> Config: - """Get config for a specific phase.""" + """ + Retrieve configuration for a specific experimental phase. + + Parameters + ---------- + phase : int + Phase number (1-5). + + Returns + ------- + Config + Configuration instance for the requested phase. + + Raises + ------ + ValueError + If phase number is invalid. + """ if phase not in PHASE_CONFIGS: raise ValueError( f"Unknown phase {phase}. 
Valid phases: {list(PHASE_CONFIGS.keys())}" ) - return PHASE_CONFIGS[phase] + return PHASE_CONFIGS[phase] \ No newline at end of file diff --git a/models/numba_optimized.py b/models/numba_optimized.py index 90fba55..90e1ad1 100644 --- a/models/numba_optimized.py +++ b/models/numba_optimized.py @@ -1,30 +1,55 @@ #!/usr/bin/env python3 """ -Numba-optimized kernels for predator-prey cellular automaton. - -Added full cluster detection with labels - -Key additions: -- detect_clusters_fast(): Returns (labels, sizes_dict) like Hoshen-Kopelman -- get_cluster_stats_fast(): Full statistics including largest_fraction -- get_percolating_cluster_fast(): Percolation detection for phase transitions - -Optimizations: -1. Cell-list PCF: O(N) average instead of O(N²) brute force -2. Pre-allocated work buffers for async kernel -3. Consistent dtypes throughout -4. cache=True for persistent JIT compilation - -Usage: - from numba_optimized import ( - PPKernel, - compute_all_pcfs_fast, - measure_cluster_sizes_fast, # Sizes only (fastest) - detect_clusters_fast, # Labels + sizes dict - get_cluster_stats_fast, # Full statistics - get_percolating_cluster_fast, # Percolation detection - NUMBA_AVAILABLE - ) +Numba-Optimized Kernels +======================= + +This module provides Numba-accelerated kernels for the predator-prey +cellular automaton, including update kernels and spatial analysis functions. + +Classes +------- +PPKernel + Wrapper for predator-prey update kernels with pre-allocated buffers. + +Cluster Analysis +---------------- +```python +measure_cluster_sizes_fast # Fast cluster size measurement (sizes only). +detect_clusters_fast # Full cluster detection with labels. +get_cluster_stats_fast # Comprehensive cluster statistics. +``` + +Pair Correlation Functions +-------------------------- +```python +compute_pcf_periodic_fast # PCF for two position sets with periodic boundaries. +compute_all_pcfs_fast #Compute prey-prey, pred-pred, and prey-pred PCFs. 
+``` + +Utilities +--------- +```python +set_numba_seed # Seed Numba's internal RNG. +warmup_numba_kernels # Pre-compile kernels to avoid first-run latency. +``` + +Example +------- +```python +from models.numba_optimized import ( + PPKernel, + get_cluster_stats_fast, + compute_all_pcfs_fast, +) + +# Cluster analysis +stats = get_cluster_stats_fast(grid, species=1) +print(f"Largest cluster: {stats['largest']}") + +# PCF computation +pcfs = compute_all_pcfs_fast(grid, max_distance=20.0) +prey_prey_dist, prey_prey_gr, _ = pcfs['prey_prey'] +``` """ import numpy as np @@ -54,7 +79,29 @@ def prange(*args): @njit(cache=True) def set_numba_seed(seed: int) -> None: - """Seed Numba's internal RNG from within a JIT context.""" + """ + Seed Numba's internal random number generator from within a JIT context. + + This function ensures that Numba's independent random number generator + is synchronized with the provided seed, enabling reproducibility for + jit-compiled functions that use NumPy's random operations. + + Parameters + ---------- + seed : int + The integer value used to initialize the random number generator. + + Returns + ------- + None + + Notes + ----- + Because Numba maintains its own internal state for random number + generation, calling `np.random.seed()` in standard Python code will not + affect jit-compiled functions. This helper must be called to bridge + that gap. + """ np.random.seed(seed) @@ -79,7 +126,55 @@ def _pp_async_kernel_random( evolution_stopped: bool, occupied_buffer: np.ndarray, ) -> np.ndarray: - """Asynchronous predator-prey update kernel with random neighbor selection.""" + """ + Asynchronous predator-prey update kernel with random neighbor selection. + + This Numba-accelerated kernel performs an asynchronous update of the + simulation grid. 
It identifies all occupied cells, shuffles them to + ensure unbiased processing, and applies stochastic rules for prey + mortality, prey reproduction (with optional parameter evolution), + predator mortality, and predation. + + Parameters + ---------- + grid : np.ndarray + 2D integer array representing the simulation grid (0: Empty, 1: Prey, 2: Predator). + prey_death_arr : np.ndarray + 2D float array storing the individual prey death rates for evolution tracking. + p_birth_val : float + Base probability of prey reproduction into an adjacent empty cell. + p_death_val : float + Base probability of prey death (though individual rates in `prey_death_arr` are used). + pred_birth_val : float + Probability of a predator reproducing after consuming prey. + pred_death_val : float + Probability of a predator dying. + dr_arr : np.ndarray + Array of row offsets defining the neighborhood. + dc_arr : np.ndarray + Array of column offsets defining the neighborhood. + evolve_sd : float + Standard deviation of the mutation applied to the prey death rate during reproduction. + evolve_min : float + Lower bound for the evolved prey death rate. + evolve_max : float + Upper bound for the evolved prey death rate. + evolution_stopped : bool + If True, offspring inherit the parent's death rate without mutation. + occupied_buffer : np.ndarray + Pre-allocated 2D array used to store and shuffle coordinates of occupied cells. + + Returns + ------- + grid : np.ndarray + The updated simulation grid. + + Notes + ----- + The kernel uses periodic boundary conditions. The Fisher-Yates shuffle on + `occupied_buffer` ensures that the asynchronous updates do not introduce + directional bias. + """ rows, cols = grid.shape n_shifts = len(dr_arr) @@ -166,12 +261,50 @@ def _pp_async_kernel_directed( """ Asynchronous predator-prey update kernel with directed behavior. 
- Directed behavior: - - Prey: Searches all neighbors for empty cells, randomly picks one to reproduce into - - Predator: Searches all neighbors for prey, randomly picks one to hunt - - This makes both species more "intelligent" compared to random neighbor selection. - Uses efficient two-pass counting approach (Numba-compatible, no heap allocation). + This kernel implements "intelligent" species behavior: prey actively search + for empty spaces to reproduce, and predators actively search for nearby + prey to hunt. A two-pass approach is used to stochastically select a + valid target from the neighborhood without heap allocation. + + Parameters + ---------- + grid : np.ndarray + 2D integer array representing the simulation grid (0: Empty, 1: Prey, 2: Predator). + prey_death_arr : np.ndarray + 2D float array storing individual prey mortality rates for evolution. + p_birth_val : float + Probability of prey reproduction attempt. + p_death_val : float + Base probability of prey mortality. + pred_birth_val : float + Probability of a predator reproduction attempt (hunting success). + pred_death_val : float + Probability of predator mortality. + dr_arr : np.ndarray + Row offsets defining the spatial neighborhood (e.g., Moore or von Neumann). + dc_arr : np.ndarray + Column offsets defining the spatial neighborhood. + evolve_sd : float + Standard deviation for mutations in prey death rates. + evolve_min : float + Minimum allowable value for evolved prey death rates. + evolve_max : float + Maximum allowable value for evolved prey death rates. + evolution_stopped : bool + If True, prevents mutation during prey reproduction. + occupied_buffer : np.ndarray + Pre-allocated array for storing and shuffling active cell coordinates. + + Returns + ------- + grid : np.ndarray + The updated simulation grid. 
+ + Notes + ----- + The directed behavior significantly changes the system dynamics compared to + random neighbor selection, often leading to different critical thresholds + and spatial patterning. Periodic boundary conditions are applied. """ rows, cols = grid.shape n_shifts = len(dr_arr) @@ -288,7 +421,37 @@ def _pp_async_kernel_directed( class PPKernel: - """Wrapper for predator-prey kernel with pre-allocated buffers.""" + """ + Wrapper for predator-prey kernel with pre-allocated buffers. + + This class manages the spatial configuration and memory buffers required + for the Numba-accelerated update kernels. By pre-allocating the + `occupied_buffer`, it avoids expensive memory allocations during the + simulation loop. + + Parameters + ---------- + rows : int + Number of rows in the simulation grid. + cols : int + Number of columns in the simulation grid. + neighborhood : {'moore', 'von_neumann'}, optional + The neighborhood type determining adjacent cells. 'moore' includes + diagonals (8 neighbors), 'von_neumann' does not (4 neighbors). + Default is 'moore'. + directed_hunting : bool, optional + If True, uses the directed behavior kernel where species search for + targets. If False, uses random neighbor selection. Default is False. + + Attributes + ---------- + rows : int + Grid row count. + cols : int + Grid column count. + directed_hunting : bool + Toggle for intelligent behavior logic. + """ def __init__( self, @@ -322,6 +485,37 @@ def update( evolve_max: float = 0.1, evolution_stopped: bool = True, ) -> np.ndarray: + """ + Execute a single asynchronous update step using the configured kernel. + + Parameters + ---------- + grid : np.ndarray + The current 2D simulation grid. + prey_death_arr : np.ndarray + 2D array of individual prey mortality rates. + prey_birth : float + Prey reproduction probability. + prey_death : float + Base prey mortality probability. + pred_birth : float + Predator reproduction (hunting success) probability. 
+ pred_death : float + Predator mortality probability. + evolve_sd : float, optional + Mutation standard deviation (default 0.1). + evolve_min : float, optional + Minimum evolved death rate (default 0.001). + evolve_max : float, optional + Maximum evolved death rate (default 0.1). + evolution_stopped : bool, optional + Whether to disable mutation during this step (default True). + + Returns + ------- + np.ndarray + The updated grid after one full asynchronous pass. + """ if self.directed_hunting: return _pp_async_kernel_directed( grid, @@ -372,7 +566,44 @@ def _flood_fill( cols: int, moore: bool, ) -> int: - """Stack-based flood fill with configurable neighborhood and periodic BC.""" + """ + Perform a stack-based flood fill to measure the size of a connected cluster. + + This Numba-accelerated function identifies all contiguous cells of a + specific target value starting from a given coordinate. It supports + both Moore and von Neumann neighborhoods and implements periodic + boundary conditions (toroidal topology). + + Parameters + ---------- + grid : np.ndarray + 2D integer array representing the simulation environment. + visited : np.ndarray + 2D boolean array tracked across calls to avoid re-processing cells. + start_r : int + Starting row index for the flood fill. + start_c : int + Starting column index for the flood fill. + target : int + The cell value (e.g., 1 for Prey, 2 for Predator) to include in the cluster. + rows : int + Total number of rows in the grid. + cols : int + Total number of columns in the grid. + moore : bool + If True, use a Moore neighborhood (8 neighbors). If False, use a + von Neumann neighborhood (4 neighbors). + + Returns + ------- + size : int + The total number of connected cells belonging to the cluster. + + Notes + ----- + The function uses a manual stack implementation to avoid recursion limit + issues and is optimized for use within JIT-compiled loops. 
+ """ max_stack = rows * cols stack_r = np.empty(max_stack, dtype=np.int32) stack_c = np.empty(max_stack, dtype=np.int32) @@ -415,7 +646,36 @@ def _flood_fill( @njit(cache=True) def _measure_clusters(grid: np.ndarray, species: int, moore: bool = True) -> np.ndarray: - """Measure all cluster sizes for a species (sizes only).""" + """ + Identify and measure the sizes of all connected clusters for a specific species. + + This function scans the entire grid and initiates a flood-fill algorithm + whenever an unvisited cell of the target species is encountered. It + returns an array containing the size (cell count) of each identified cluster. + + Parameters + ---------- + grid : np.ndarray + 2D integer array representing the simulation environment. + species : int + The target species identifier (e.g., 1 for Prey, 2 for Predator). + moore : bool, optional + Determines the connectivity logic. If True, uses the Moore neighborhood + (8 neighbors); if False, uses the von Neumann neighborhood (4 neighbors). + Default is True. + + Returns + ------- + cluster_sizes : np.ndarray + A 1D array of integers where each element represents the size of + one connected cluster. + + Notes + ----- + This function is Numba-optimized and utilizes an internal `visited` mask + to ensure each cell is processed only once, maintaining $O(N)$ + complexity relative to the number of cells. + """ rows, cols = grid.shape visited = np.zeros((rows, cols), dtype=np.bool_) @@ -502,65 +762,6 @@ def _detect_clusters_numba( return labels, sizes[:n_clusters] -@njit(cache=True) -def _check_percolation( - labels: np.ndarray, - sizes: np.ndarray, - direction: int, -) -> Tuple[bool, int, int]: - """ - Check for percolating clusters. 
- - Args: - direction: 0=horizontal, 1=vertical, 2=both - - Returns: - percolates, perc_label, perc_size - """ - rows, cols = labels.shape - max_label = len(sizes) - - touches_left = np.zeros(max_label + 1, dtype=np.bool_) - touches_right = np.zeros(max_label + 1, dtype=np.bool_) - touches_top = np.zeros(max_label + 1, dtype=np.bool_) - touches_bottom = np.zeros(max_label + 1, dtype=np.bool_) - - for i in range(rows): - if labels[i, 0] > 0: - touches_left[labels[i, 0]] = True - if labels[i, cols - 1] > 0: - touches_right[labels[i, cols - 1]] = True - - for j in range(cols): - if labels[0, j] > 0: - touches_top[labels[0, j]] = True - if labels[rows - 1, j] > 0: - touches_bottom[labels[rows - 1, j]] = True - - best_label = 0 - best_size = 0 - - for label in range(1, max_label + 1): - percolates_h = touches_left[label] and touches_right[label] - percolates_v = touches_top[label] and touches_bottom[label] - - is_percolating = False - if direction == 0: - is_percolating = percolates_h - elif direction == 1: - is_percolating = percolates_v - else: - is_percolating = percolates_h or percolates_v - - if is_percolating: - cluster_size = sizes[label - 1] - if cluster_size > best_size: - best_size = cluster_size - best_label = label - - return best_label > 0, best_label, best_size - - # ============================================================================ # PUBLIC API - CLUSTER DETECTION # ============================================================================ @@ -572,18 +773,39 @@ def measure_cluster_sizes_fast( neighborhood: str = "moore", ) -> np.ndarray: """ - Measure cluster sizes only (fastest method). - - Use when you only need size statistics, not the label array. - ~25x faster than pure Python. 
- - Args: - grid: 2D array of cell states - species: Target species value (1=prey, 2=predator) - neighborhood: 'moore' (8-connected) or 'neumann' (4-connected) - - Returns: - 1D array of cluster sizes + Measure cluster sizes for a specific species using Numba-accelerated flood fill. + + This function provides a high-performance interface for calculating cluster + size statistics without the overhead of generating a full label map. It is + optimized for large-scale simulation analysis where only distribution + metrics (e.g., mean size, max size) are required. + + Parameters + ---------- + grid : np.ndarray + A 2D array representing the simulation environment. + species : int + The target species identifier (e.g., 1 for Prey, 2 for Predator). + neighborhood : {'moore', 'neumann'}, optional + The connectivity rule. 'moore' uses 8-way connectivity (including diagonals); + 'neumann' uses 4-way connectivity. Default is 'moore'. + + Returns + ------- + cluster_sizes : np.ndarray + A 1D array of integers, where each element is the cell count of an + identified cluster. + + Notes + ----- + The input grid is cast to `int32` to ensure compatibility with the + underlying JIT-compiled `_measure_clusters` kernel. + + Examples + -------- + >>> sizes = measure_cluster_sizes_fast(grid, species=1, neighborhood='moore') + >>> if sizes.size > 0: + ... print(f"Largest cluster: {sizes.max()}") """ grid_int = np.asarray(grid, dtype=np.int32) moore = neighborhood == "moore" @@ -596,23 +818,41 @@ def detect_clusters_fast( neighborhood: str = "moore", ) -> Tuple[np.ndarray, Dict[int, int]]: """ - Full cluster detection with labels (Numba-accelerated). - - Returns both the label array and size dictionary for richer analysis. 
- - Args: - grid: 2D array of cell states - species: Target species value (1=prey, 2=predator) - neighborhood: 'moore' (8-connected) or 'neumann' (4-connected) - - Returns: - labels: 2D array where each cell has its cluster ID (0 = non-target) - sizes: Dict mapping cluster_id -> cluster_size - - Example: - >>> labels, sizes = detect_clusters_fast(grid, species=1) - >>> largest_id = max(sizes, key=sizes.get) - >>> largest_size = sizes[largest_id] + Perform full cluster detection with labels using Numba acceleration. + + This function returns a label array for spatial analysis and a dictionary + of cluster sizes. It is significantly faster than standard Python or + SciPy equivalents for large simulation grids. + + Parameters + ---------- + grid : np.ndarray + A 2D array representing the simulation environment. + species : int + The target species identifier (e.g., 1 for Prey, 2 for Predator). + neighborhood : {'moore', 'neumann'}, optional + The connectivity rule. 'moore' uses 8-way connectivity; 'neumann' + uses 4-way connectivity. Default is 'moore'. + + Returns + ------- + labels : np.ndarray + A 2D int32 array where each cell contains its unique cluster ID. + Cells not belonging to the target species are 0. + sizes : dict + A dictionary mapping cluster IDs to their respective cell counts. + + Notes + ----- + The underlying Numba kernel uses a stack-based flood fill to avoid + recursion limits and handles periodic boundary conditions. + + Examples + -------- + >>> labels, sizes = detect_clusters_fast(grid, species=1) + >>> if sizes: + ... largest_id = max(sizes, key=sizes.get) + ... print(f"Cluster {largest_id} size: {sizes[largest_id]}") """ grid_int = np.asarray(grid, dtype=np.int32) moore = neighborhood == "moore" @@ -627,23 +867,42 @@ def get_cluster_stats_fast( neighborhood: str = "moore", ) -> Dict: """ - Compute comprehensive cluster statistics (Numba-accelerated). 
- - Args: - grid: 2D array of cell states - species: Target species value - neighborhood: 'moore' or 'neumann' - - Returns: - Dictionary with keys: - - 'n_clusters': Total number of clusters - - 'sizes': Array of sizes (sorted descending) - - 'largest': Size of largest cluster - - 'largest_fraction': S_max / N (order parameter for percolation) - - 'mean_size': Mean cluster size - - 'size_distribution': Dict[size -> count] - - 'labels': Cluster label array - - 'size_dict': Dict[label -> size] + Compute comprehensive cluster statistics for a species using Numba acceleration. + + This function integrates cluster detection and labeling to provide a + full suite of spatial metrics. It calculates the cluster size distribution + and the largest cluster fraction, which often serves as an order + parameter in percolation theory and Phase 1-3 analyses. + + Parameters + ---------- + grid : np.ndarray + A 2D array representing the simulation environment. + species : int + The target species identifier (e.g., 1 for Prey, 2 for Predator). + neighborhood : {'moore', 'neumann'}, optional + The connectivity rule. 'moore' uses 8-way connectivity; 'neumann' + uses 4-way connectivity. Default is 'moore'. + + Returns + ------- + stats : dict + A dictionary containing: + - 'n_clusters': Total count of isolated clusters. + - 'sizes': Sorted array (descending) of all cluster sizes. + - 'largest': Size of the single largest cluster. + - 'largest_fraction': Size of the largest cluster divided by + the total population of the species. + - 'mean_size': Average size of all clusters. + - 'size_distribution': Frequency mapping of {size: count}. + - 'labels': 2D array of unique cluster IDs. + - 'size_dict': Mapping of {label_id: size}. 
+ + Examples + -------- + >>> stats = get_cluster_stats_fast(grid, species=1) + >>> print(f"Found {stats['n_clusters']} prey clusters.") + >>> print(f"Order parameter: {stats['largest_fraction']:.3f}") """ labels, size_dict = detect_clusters_fast(grid, species, neighborhood) @@ -681,46 +940,6 @@ def get_cluster_stats_fast( } -def get_percolating_cluster_fast( - grid: np.ndarray, - species: int, - neighborhood: str = "moore", - direction: str = "both", -) -> Tuple[bool, int, int, np.ndarray]: - """ - Detect percolating (spanning) clusters (Numba-accelerated). - - A percolating cluster connects opposite edges of the grid, - indicating a phase transition in percolation theory. - - Args: - grid: 2D array of cell states - species: Target species value - neighborhood: 'moore' or 'neumann' - direction: 'horizontal', 'vertical', or 'both' - - Returns: - percolates: True if a spanning cluster exists - cluster_label: Label of the percolating cluster (0 if none) - cluster_size: Size of the percolating cluster (0 if none) - labels: Full cluster label array - - Example: - >>> percolates, label, size, labels = get_percolating_cluster_fast(grid, 1) - >>> if percolates: - >>> print(f"Prey percolates with {size} cells!") - """ - grid_int = np.asarray(grid, dtype=np.int32) - moore = neighborhood == "moore" - labels, sizes_arr = _detect_clusters_numba(grid_int, np.int32(species), moore) - - dir_map = {"horizontal": 0, "vertical": 1, "both": 2} - dir_int = dir_map.get(direction, 2) - - percolates, perc_label, perc_size = _check_percolation(labels, sizes_arr, dir_int) - return percolates, int(perc_label), int(perc_size), labels - - # ============================================================================ # PCF COMPUTATION (Cell-list accelerated) # ============================================================================ @@ -733,7 +952,48 @@ def _build_cell_list( L_row: float, L_col: float, ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, float, float]: - """Build cell list for 
spatial hashing.""" + """ + Build a cell list for spatial hashing to accelerate neighbor lookups. + + This Numba-optimized function partitions a set of coordinates into a + grid of cells. It uses a three-pass approach to calculate cell occupancy, + compute starting offsets for each cell in a flat index array, and finally + populate that array with position indices. + + Parameters + ---------- + positions : np.ndarray + An (N, 2) float array of coordinates within the simulation domain. + n_cells : int + The number of cells along one dimension of the square grid. + L_row : float + The total height (row extent) of the simulation domain. + L_col : float + The total width (column extent) of the simulation domain. + + Returns + ------- + indices : np.ndarray + A 1D array of original position indices, reordered so that indices + belonging to the same cell are contiguous. + offsets : np.ndarray + A 2D array where `offsets[r, c]` is the starting index in the + `indices` array for cell (r, c). + cell_counts : np.ndarray + A 2D array where `cell_counts[r, c]` is the number of points + contained in cell (r, c). + cell_size_r : float + The calculated height of an individual cell. + cell_size_c : float + The calculated width of an individual cell. + + Notes + ----- + This implementation assumes periodic boundary conditions via the + modulo operator during coordinate-to-cell mapping. It is designed to + eliminate heap allocations within the main simulation loop by using + Numba's efficient array handling. + """ n_pos = len(positions) cell_size_r = L_row / n_cells cell_size_c = L_col / n_cells @@ -772,7 +1032,39 @@ def _periodic_dist_sq( L_row: float, L_col: float, ) -> float: - """Squared periodic distance.""" + """ + Calculate the squared Euclidean distance between two points with periodic boundary conditions. + + This Numba-optimized function accounts for toroidal topology by finding the + shortest path between coordinates across the grid edges. 
Using the squared + distance avoids the computational expense of a square root operation, + making it ideal for high-frequency spatial queries. + + Parameters + ---------- + r1 : float + Row coordinate of the first point. + c1 : float + Column coordinate of the first point. + r2 : float + Row coordinate of the second point. + c2 : float + Column coordinate of the second point. + L_row : float + Total height (row extent) of the periodic domain. + L_col : float + Total width (column extent) of the periodic domain. + + Returns + ------- + dist_sq : float + The squared shortest distance between the two points. + + Notes + ----- + The function applies the minimum image convention, ensuring that the + distance never exceeds half the domain length in any dimension. + """ dr = abs(r1 - r2) dc = abs(c1 - c2) if dr > L_row * 0.5: @@ -798,7 +1090,57 @@ def _pcf_cell_list( self_correlation: bool, n_cells: int, ) -> np.ndarray: - """Compute PCF histogram using cell lists.""" + """ + Compute a Pair Correlation Function (PCF) histogram using spatial cell lists. + + This Numba-accelerated parallel kernel calculates distances between two sets + of points (pos_i and pos_j). It uses a cell list (spatial hashing) to + restrict distance calculations to neighboring cells within the maximum + specified distance, significantly improving performance from $O(N^2)$ + to $O(N)$. + + Parameters + ---------- + pos_i : np.ndarray + (N, 2) float array of coordinates for the primary species. + pos_j : np.ndarray + (M, 2) float array of coordinates for the secondary species. + indices_j : np.ndarray + Flattened indices of pos_j sorted by cell, produced by `_build_cell_list`. + offsets_j : np.ndarray + 2D array of starting offsets for each cell in `indices_j`. + counts_j : np.ndarray + 2D array of particle counts within each cell for species J. + cell_size_r : float + Height of a single spatial cell. + cell_size_c : float + Width of a single spatial cell. 
+ L_row : float + Total height of the periodic domain. + L_col : float + Total width of the periodic domain. + max_distance : float + Maximum radial distance (r) to consider for the correlation. + n_bins : int + Number of bins in the distance histogram. + self_correlation : bool + If True, assumes species I and J are the same and avoids double-counting + or self-interaction. + n_cells : int + Number of cells per dimension in the spatial hash grid. + + Returns + ------- + hist : np.ndarray + A 1D array of length `n_bins` containing the counts of pairs found + at each radial distance. + + Notes + ----- + The kernel uses `prange` for parallel execution across points in `pos_i`. + Local histograms are used per thread to prevent race conditions during + reduction. Periodic boundary conditions are handled via `_periodic_dist_sq`. + """ n_i = len(pos_i) bin_width = max_distance / n_bins max_dist_sq = max_distance * max_distance @@ -855,7 +1197,46 @@ def compute_pcf_periodic_fast( n_bins: int = 50, self_correlation: bool = False, ) -> Tuple[np.ndarray, np.ndarray, int]: - """Cell-list accelerated PCF computation.""" + """ + Compute the Pair Correlation Function (PCF) using cell-list acceleration. + + This high-level function coordinates the spatial hashing and histogram + calculation to determine the $g(r)$ function. It normalizes the resulting + histogram by the expected number of pairs in an ideal gas of the same + density, accounting for the toroidal area of each radial bin. + + Parameters + ---------- + positions_i : np.ndarray + (N, 2) array of coordinates for species I. + positions_j : np.ndarray + (M, 2) array of coordinates for species J. + grid_shape : tuple of int + The (rows, cols) dimensions of the simulation grid. + max_distance : float + The maximum radius to calculate correlations for. + n_bins : int, optional + Number of bins for the radial distribution (default 50). 
+ self_correlation : bool, optional + Set to True if computing the correlation of a species with itself + to avoid self-counting (default False). + + Returns + ------- + bin_centers : np.ndarray + The central radial distance for each histogram bin. + pcf : np.ndarray + The normalized $g(r)$ values. A value of 1.0 indicates no spatial + correlation; > 1.0 indicates clustering; < 1.0 indicates repulsion. + total_pairs : int + The total count of pairs found within the `max_distance`. + + Notes + ----- + The function dynamically determines the optimal number of cells for the + spatial hash based on the `max_distance` and grid dimensions to maintain + linear time complexity. + """ rows, cols = grid_shape L_row, L_col = float(rows), float(cols) area = L_row * L_col @@ -915,7 +1296,39 @@ def compute_all_pcfs_fast( max_distance: Optional[float] = None, n_bins: int = 50, ) -> Dict[str, Tuple[np.ndarray, np.ndarray, int]]: - """Compute all three PCFs using cell-list acceleration.""" + """ + Compute all three species Pair Correlation Functions (PCFs) using cell-list acceleration. + + This function calculates the spatial auto-correlations (Prey-Prey, + Predator-Predator) and the cross-correlation (Prey-Predator) for a given + simulation grid. It identifies particle positions and leverages + Numba-accelerated cell lists to handle the computations efficiently. + + Parameters + ---------- + grid : np.ndarray + 2D integer array where 1 represents prey and 2 represents predators. + max_distance : float, optional + The maximum radial distance for the correlation. Defaults to 1/4 + of the minimum grid dimension if not provided. + n_bins : int, optional + Number of distance bins for the histogram. Default is 50. + + Returns + ------- + results : dict + A dictionary with keys 'prey_prey', 'pred_pred', and 'prey_pred'. + Each value is a tuple containing: + - bin_centers (np.ndarray): Radial distances. + - pcf_values (np.ndarray): Normalized g(r) values. 
+ - pair_count (int): Total number of pairs found. + + Notes + ----- + The PCF provides insight into the spatial organization of the system. + g(r) > 1 at short distances indicates aggregation (clustering), + while g(r) < 1 indicates exclusion or repulsion. + """ rows, cols = grid.shape if max_distance is None: max_distance = min(rows, cols) / 4.0 @@ -964,7 +1377,32 @@ def compute_all_pcfs_fast( def warmup_numba_kernels(grid_size: int = 100, directed_hunting: bool = False): - """Pre-compile all Numba kernels.""" + """ + Pre-compile all Numba-accelerated kernels to avoid first-run latency. + + This function executes a single step of the simulation and each analysis + routine on a dummy grid. Because Numba uses Just-In-Time (JIT) compilation, + the first call to a decorated function incurs a compilation overhead. + Running this warmup ensures that subsequent experimental runs are timed + accurately and perform at full speed. + + Parameters + ---------- + grid_size : int, optional + The side length of the dummy grid used for warmup (default 100). + directed_hunting : bool, optional + If True, also warms up the directed behavior update kernel (default False). + + Returns + ------- + None + + Notes + ----- + This function checks for `NUMBA_AVAILABLE` before execution. It warms up + the `PPKernel` (random and optionally directed), as well as the + spatial analysis functions (`compute_all_pcfs_fast`, `detect_clusters_fast`, etc.). + """ if not NUMBA_AVAILABLE: return @@ -991,11 +1429,39 @@ def warmup_numba_kernels(grid_size: int = 100, directed_hunting: bool = False): _ = measure_cluster_sizes_fast(grid, 1) _ = detect_clusters_fast(grid, 1) _ = get_cluster_stats_fast(grid, 1) - _ = get_percolating_cluster_fast(grid, 1) def benchmark_kernels(grid_size: int = 100, n_runs: int = 20): - """Benchmark random vs directed kernels.""" + """ + Benchmark the execution performance of random vs. directed update kernels. 
+ + This utility measures the average time per simulation step for both the + stochastic (random neighbor) and heuristic (directed hunting/reproduction) + update strategies. It accounts for the computational overhead introduced + by the "intelligent" search logic used in directed mode. + + Parameters + ---------- + grid_size : int, optional + The side length of the square simulation grid (default 100). + n_runs : int, optional + The number of iterations to perform for averaging performance (default 20). + + Returns + ------- + t_random : float + Average time per step for the random kernel in milliseconds. + t_directed : float + Average time per step for the directed kernel in milliseconds. + + Notes + ----- + The function ensures a fair comparison by: + 1. Using a fixed seed for reproducible initial grid states. + 2. Warming up Numba kernels before timing to exclude JIT compilation latency. + 3. Copying the grid and death arrays for each iteration to maintain + consistent population densities throughout the benchmark. + """ import time print("=" * 60) @@ -1049,7 +1515,32 @@ def benchmark_kernels(grid_size: int = 100, n_runs: int = 20): def benchmark_cluster_detection(grid_size: int = 100, n_runs: int = 20): - """Benchmark cluster detection methods.""" + """ + Benchmark the performance of different cluster detection and analysis routines. + + This function evaluates three levels of spatial analysis: + 1. Size measurement only (fastest, no label map). + 2. Full detection (returns label map and size dictionary). + 3. Comprehensive statistics (calculates distributions, means, and order parameters). + + Parameters + ---------- + grid_size : int, optional + Side length of the square grid for benchmarking (default 100). + n_runs : int, optional + Number of iterations to average for performance results (default 20). + + Returns + ------- + stats : dict + The result dictionary from the final comprehensive statistics run. 
+ + Notes + ----- + The benchmark uses a fixed prey density of 30% to ensure a representative + distribution of clusters. It pre-warms the Numba kernels to ensure that + the measurements reflect execution speed rather than compilation time. + """ import time print("=" * 60) @@ -1070,7 +1561,6 @@ def benchmark_cluster_detection(grid_size: int = 100, n_runs: int = 20): _ = measure_cluster_sizes_fast(grid, 1) _ = detect_clusters_fast(grid, 1) _ = get_cluster_stats_fast(grid, 1) - _ = get_percolating_cluster_fast(grid, 1) # Benchmark sizes only t0 = time.perf_counter() @@ -1093,13 +1583,6 @@ def benchmark_cluster_detection(grid_size: int = 100, n_runs: int = 20): t_stats = (time.perf_counter() - t0) / n_runs * 1000 print(f"get_cluster_stats_fast: {t_stats:.2f} ms") - # Benchmark percolation - t0 = time.perf_counter() - for _ in range(n_runs): - perc, label, size, _ = get_percolating_cluster_fast(grid, 1) - t_perc = (time.perf_counter() - t0) / n_runs * 1000 - print(f"get_percolating_cluster_fast: {t_perc:.2f} ms (percolates={perc})") - print( f"\nOverhead for labels: {t_detect - t_sizes:.2f} ms (+{100*(t_detect/t_sizes - 1):.0f}%)" ) diff --git a/notebooks/.DS_Store b/notebooks/.DS_Store new file mode 100644 index 0000000..90df1da Binary files /dev/null and b/notebooks/.DS_Store differ diff --git a/notebooks/plots.ipynb b/notebooks/plots.ipynb index 66b07b4..cd569bf 100644 --- a/notebooks/plots.ipynb +++ b/notebooks/plots.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 43, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -18,7 +18,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -304,7 +304,7 @@ } ], "source": [ - "DATA_ROOT = Path.home() / \"CSS_Project\" / \"hpc_data\"\n", + "DATA_ROOT = Path.home() / \"CSS_Project\" / \"data\"\n", "\n", "\n", "def load_jsonl(filepath):\n", @@ -352,7 +352,7 @@ }, { "cell_type": "code", - "execution_count": 45, + 
"execution_count": 10, "metadata": {}, "outputs": [ { @@ -474,7 +474,7 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -515,7 +515,7 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -597,7 +597,7 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -709,7 +709,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -805,7 +805,7 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -839,7 +839,7 @@ }, { "cell_type": "code", - "execution_count": 51, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -909,7 +909,7 @@ }, { "cell_type": "code", - "execution_count": 52, + "execution_count": 17, "metadata": {}, "outputs": [ { @@ -960,7 +960,7 @@ }, { "cell_type": "code", - "execution_count": 53, + "execution_count": 18, "metadata": {}, "outputs": [ { @@ -1074,7 +1074,7 @@ }, { "cell_type": "code", - "execution_count": 54, + "execution_count": 19, "metadata": {}, "outputs": [ { @@ -1102,7 +1102,7 @@ }, { "cell_type": "code", - "execution_count": 55, + "execution_count": 20, "metadata": {}, "outputs": [ { @@ -1179,7 +1179,7 @@ }, { "cell_type": "code", - "execution_count": 56, + "execution_count": 21, "metadata": {}, "outputs": [], "source": [ @@ -1218,7 +1218,7 @@ }, { "cell_type": "code", - "execution_count": 57, + "execution_count": 22, "metadata": {}, "outputs": [ { @@ -1304,7 +1304,7 @@ }, { "cell_type": "code", - "execution_count": 58, + "execution_count": 23, "metadata": {}, "outputs": [ { @@ -1379,7 +1379,7 @@ }, { "cell_type": "code", - "execution_count": 59, + "execution_count": 24, "metadata": {}, "outputs": [ { @@ -1446,161 +1446,7 @@ }, { "cell_type": "code", - "execution_count": 60, - "metadata": {}, - "outputs": [ - { - 
"data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAABWwAAAG2CAYAAADr489yAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjgsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvwVt1zgAAAAlwSFlzAAAPYQAAD2EBqD+naQAA78FJREFUeJzs3Qd4k1X7BvA76R7QUkrZGwFBkK04QHGgskEQFFBAHJ8DBffCiX6K/v3cIioqQ1A2ihNFBARRlCV779G9R/K/7gOpaUhLW9Kmae/fdeVqM5q+eZO85z3Pec5zLHa73Q4RERERERERERER8TqrtzdARERERERERERERE5SwFZERERERERERESkjFDAVkRERERERERERKSMUMBWREREREREREREpIxQwFZERERERERERESkjFDAVkRERERERERERKSMUMBWREREREREREREpIxQwFZERERERERERESkjFDAVkRERERERERERKSMUMBWypRu3bqhWbNmuZdzzz0XnTt3xgMPPIDY2FiP/q8ffvjB/I/COnLkCL7++muUhCVLluDmm29Gx44dzWX48OFYuXJl7v2rVq0y2+qJfZCTk4NPP/0UnrR///4871vz5s3Rtm1bDB48GD/++GOexw4bNgzPPvtsoZ533bp1+OOPP874f9evX1/k5y7MvnnzzTfRs2fPYj+fiIhrG/fhhx+63Sk8ln3zzTeF2mGebBM8zfW47IrHVef2wvVSXiQmJmL27NlnfNzLL7+Mzz77DN6wZ88enH/++YX+HGVmZqJXr155PqeOz6K7y1tvveWxbeW+vPLKK832jhgxAnv37i3w8X/99Reuv/56tG7d2mzzsmXL8tz/3Xffmdt5/7XXXou5c+fm3sfn7t+/P7Kysjy2/SJSunhOP3nyZFx33XVo1aoVLrjgAtx5553YtGmTx9ozT/dDSgr7YuyTsW/m6J8tXrzYY8//yCOP4Pbbby/V85PNmzeb12G32+GLCvNZ4T7kZ3fmzJmn3Tdp0iRceumlaNOmDe69994z7m/2b7t3724e36dPHxMHccbrvXv3zv18OPe/16xZg1GjRhX5NYpnKGArZc5dd92FX3/91Vx+/vlnvPPOO9i4cSMefPBBr24XD6q//PKLx5+XHRoGpC+//HJ8/vnnmD59Olq2bGkOjOxQeNpPP/2EF154ASWBJ0Z835YuXWoalwsvvBD33HNPnkA3O+tjx44t1PNxH7BDmZ+aNWua/8fAfknsm5EjR3qtIy0ikh+eUPPYV6VKFZ/cSQ0bNsxt510v5QXb9nnz5hX4mH/++Se3I13aGJQcPXo00tPTC/X41NRU0yncunWr28+i8+U///kPIiMjMWDAAI9sK88pnnnmGdx999344osvEBwcjNtuuw3Z2dluH3/s2DHz2ngOwkAsA708t9y9e7e5f+3ateY85IYbbsBXX32FW2+9FU888URuULdevXpo164dPvjgA49sv4iUvv/973+mTzVu3DjTD/noo48QEhKCm2666YwDPsVVlD5OaWHf9f777zfBuDlz5uDLL7/EZZddZm4riUSk0jg/sdls5pg9ZswYWCwWlFds7ypVqoSpU6fmuX3GjBmmz/3cc8+ZzziTygr63PF5Xn/9ddOGz58/3wxWsn/uCMquXr3atJGMRXBwtG/fvqYN3bJli7m/Q4cOCAgIMH8rpU8BWylzwsLCUK1aNXOpXr26OfA7grjMWPGWkhjBYwbp22+/jTfeeAO33HILGjdujHPOOQcPP/ywGf1i8NCXMjzYQXO8b02bNsV9991nsoUnTJhgMnMcjwkPD/fIPvfz8zP/z9/fHyX1WfTVgIiIlF+BgYHm2OerHRUesx3tvOulvCjMOQODugwasiNUmth5YwYp27jCYKeuX79+plOY32fRcWEAmIGR8ePHm0HVgjB4wKzzM+HzcXvZieS5xSuvvGK2hYFcd2bNmoWYmB
gzGM7zKnbqmUnLTi4dPXrUZOkOHToUdevWNYFlPu9vv/2W+xyc9cT/m5ycXIg9JCJlDRNH7rjjDlxxxRXme85kGB472A/hsackFKWPU5rH+6uuugo33nijGSzlMZH7pUePHqcFAn3l/IQzUzMyMsws3PKcIc5ErqefftoMQjKo6sCZWgyoMvDeokULTJw40czMZdaxOwzS8/3ne16/fn0zSMmM8wULFpj7P/74Y1x88cUmiN+oUSMziMzHOs8IY5vIcxYGy6V0KWArPoGBOavVajp5nGrBkZ7XXnvN/HSMKHGKC6cX8KScHQDe7wgSOu4fMmSImU7HE/+dO3fm+R8bNmwwQVNmVZx33nmmY7BixYrcqR7MgmGmhqNzkZKSgpdeeskcLPk/eeLPAKwDt4UdFk6p50HR+T4HjmIxO/SSSy457T4GO5ld7C4Y6Tp11nVKDqcuDBw40LxWHoCff/55sy+47xj8djwHrxNHWDllyDF10HkEjSc1vI37gPuG0zeLgiPZbGgco3jOU0C4D9mh4v7hFA02Bo7RPO7npKQkPProo+Z/O14j9wkfz/fS3VSlhIQEcyLCKSSc+uE8euxu+oljirK7feNaEmHfvn1mdJL/3/HZO378eJ73hftr0KBBuf+fWbsO+b0vIiLOeOzh1E22Y8wS5MAls4SY5eg65ZBZRDx+u04H50k8A1PFOcYXdGym5cuXmxN6Hsv4nDweF2Wa6ZmwE8HSOjxmEjtm3H5He89tZXYNBze5DTyOM0jn7NtvvzXtuOOcYMqUKXleMwdFP/nkE3Tp0sU8hhmbzsdzHu/ZlnDfc9rhU089lSd4V9Dxnu8fpx+yg5VfmQe2X3z81VdfnXsb97vjfejUqZN5/5n1yUCDu8BhQeUI+Dz54fkM919Bj3HN0LrmmmtM5/FMuM3cn66fyeJi5/Dvv/82+8OBARF+vh2fD1d//vmnaaOdsdyU4/F8r/h9cnSKOZtpx44duOiii3IfzwBP7dq185RKEBHfwYAhj5HOiS/sT/LYzGQS4jGcx3hn7sqhsT/IYzWPbQyUHThwwO3/dO5neKKdKahvSmzb/vvf/5qf7EsePnzY7X7gTFXXKfOPPfaYCfQ5sM/1+OOP5/Zx2N9xbGtaWpp5XdxGtkf8X6+++qrbfeBaEsHR/nLWIvcB+80MIjsPbjJ5ic/J/cB2ncfngtonx/R+B8e+5qCc43mYfcr9y/ea/5f7jrNaCnMec6ZzkNLA8wPOIuGAA7fdEVxnn5qvy7lNZHvFAdL8ygg+9NBDZnDYGeMqfM+JGef8jDljfMK5jeX/Y+IcZz9L6VLAVso0HsS3bduG9957zwS4QkNDze08wHBaHk+kGWSLi4szDRpPyHmQZWYqa7G8+OKLuUE8ZlM0aNDAHNTZaPA5HdgJZgPcpEkTM4WRgVRmZ7Dh4jbwJxsp1jnjKBUxY4MHLf4PPicPlnxe54aYz8ODJKfVsXPhig0DO8PuMEuVjWJRRyjZ+eA0B54ccKofO0/8yan9bMAc+4QZy7zOETl23BgcWLRokRl1Y6PsXNuI+5ojptw3bNCKgvuFU5C2b99+2n3MLGajw5MZvpfshDk6UdzPvM4TCu5/5wbMMeLoDl8DO88MSLDx5vM5Bxry427fOGMjxdfOACu3l+8pGzhO0XTOpOL+5gkZt4Oj2Dzh4N8U9L6IiLhioO7QoUOYNm2a6RgxqOTIEHTGaY4MNjkf53j8YaCX7VhxjvEFHZu5TexgshPG52Mngp0KT5ba4Wvi83PQk51tTuVjJ5bXHbit7Hyz/eVrYpvgKCPEADW3l1mhPCfgeQID2877j/uMgUjWgWMmJYOCHBAkHrNZkof7j20RO/DcR459cKbjPc8FGMh1TA11h9mhzHSpU6dO7m2cjcJOJP8f9ys7l+zAs6PuLmvLXTkCx8W53XTF8x92AAuLWTe8BAUFFfi4Xbt2mc8Vz488hedvDBbwvXDGDC53wQniQEVhHs+SSz
xfYNvM/cHzTGc873MedBUR38HjMI/N/B5zAJJBQrZf7JdERUUV6bl4PGY7yoFBtkWFPcadTTtzpr6p63R3Pm+NGjVO2wYGLDlAyEAp68wySYVtS9WqVVGrVq3cx/E46Jj5yWn2DLhyvxGDwr///rvJsGTCEM8B+Jqc11spCNtftuvs93Tt2tW0144BZWZ3MqDL/cv9wNfGx+WH+5+BRL6v7vY1z1t4rsD3jMlUzCzlPuJMFsd5ypnOYwpzDlLSeO7HdonnOUz24UAr2zDHTBd3bRxflzvt27c3n3sHZuJyRokjYczd33JQgvEVB24HBzXVJpa+kplHLHIW/u///i93oQoeJJldwSkPrp1BdtAcBx8+nqUEOBpIDMzyoMogLhs+HvgZ+GQNNHZK2bHigd3RaLIzwIadj3dMTeQoKf8Hg8OVK1c2f8e6aWzkGURmZ5oHU0cWB7MlmdXBABw7NsT7GKDLD4OArE3jSdxeHmB58GV2CDuDDC46XgN/kmPqKfcBs6eYYeWo3cZAJBt0BqgdWJPO3YlAYfB/ussO4v9hEJ7byM4o3x9H5jP3M98z7h9e2GkjNr6c0kM8AXHFIIUjU5bbzI4r36czFXZ3t2+cLVy40IywcjTa0XFmA84Rd56wODJzOILJennEzyODxuzEMgCf3/siIuKKJ8fMEGG7wzaLbQkzZ13xeMjsEAYpmdXCNpMdKkdHqzjH+IKOzWyX+dx8TnJMKXfO1DkTtr+ug2LEACsDlMT/ySwnBkEZBOTxMiIiIvex7KzwuM5ZKNw/bH95rOcxma+Nx17HNvKcgB2dd999N3fQka+D/8MRMOXjHdkkPGdw3O8YNOXrY6YLBx/ZgS7oeM/3ge8bzyfyK/PAzClutwPbSAbH+ZocgUN2kvm+O2fhupt6WlZw/zMA6prd6oz7mEEIYgeZ+9nxWeBn1LWtdtTY5Wt1xuv5lSvgOZ1rcJmPZxvujOcZDIKw88rvWnR0dO55JPG8koELEfE9HEzj9G4O1HHGBc/jeTxnu8c+W2FLwhD7ko7+HI/LLDHA9pjHjIKcTTvDkmxn6psSM015DpAfHo8ZCGW7yICmI0OSA4GcUePoE7Mvw8Cm47m4XQwU8zjN5CLOTnXcx74YB/44UFyYsgTcRscgIc8fmHjDNpBBUWbLsk3gLA7ie+NcnsZdshOTYJzbT8e+5vkDM015H98nBocdz8vzCwaOC3sec6ZzEGcM+vL8gm0S+6GOBd64v7n/ClP6xxnPt/h+OALEfD3MBOZn2RGoLkwb5w7PhdhP5nvJwLTjc/nkk0+aGra88HyKbaNrWUa2iZ5crE4KRwFbKXOYCcuVfYkdMY4AsuPjyjkrhSOSbDidO4AcoWPHlYtMMMDKDpbzCb9zZiv/B7Nh2IDwxJ1/45gW4W5RCz4fpxLw4OncueZ0AudMUgbmCsLG2BGI9GT9JDZAPMhztJYHdjaUbJjd4WvhvnOeLsrX7FyKgScKDDgWFxswd4FpjixzhJONG7OjOfrLBrUgZ9qnrhnL7Dy6K0dRVNxPbKics5z4GWSAg/c5ArbMmHJwPJYNXlHfFxEpf3hcdVf/y3Gb83GXx1znto/H0Pj4eLfPy5Ntx2Ahp+HzmMsOZXGP8QUdmxnwZYeXC17wuRmgZLZOUWqJ8zmYnePKuZ1gIJIlcTg9kucErtmPbO+d/yc7H46ZM2yHXRfy4utg5rBjCiBfs3N7wv/t6JzwnIKdGtcpgsSOrSNgm9/xvjA4G8d5EJQZzeyEss1yDiiy85lfO+Ec/HTlLvhZkvgZ5qABZ50UhK/FsRgbM6L5uXXMNHGXRezolLqWD+J1zt5xh9+bwjye7zmnffLCaab8/DDLzBE84f5nmQoGjd2dh4pI2cYBNV74HeZ0cQZJOSuDxxWWtSss574F2y8G7tj+nSlgezbtDPsIhembnqlfRJzez/q9PE6zPAJrwPK4y3aeA7zcFvZrnWeDMkjoyPblOQZnhXCfsc3n9v
CYyTarMJzbSkc7z/3ARBZmdjoHnNlXd24H3bWd3K+u7QVvc66bzuO9c1apc7tQmPOYM52DOOP+ZMCTsQDO8GQsg6+PA8vvv/8+iooDDa71aB2Dh44ygO7aOMdM5PzwdfL8jvvGuewig+kcqOc5JD9bfO/5GpgV7oxt4okTJ4r8euTsKGArZQ4DW84H9vw4B195cGGHkuUHXLETypNv106y8yIfnF7A6Qb8vxxBZSPJA58jU9NVftMC2XA5N15nmj7IBopTTNzhgZrTYAsz/cK1wWQDwykgnLbATGC+Do7K8nZX3HfsoDjXAnLFfVXc4vFsANjh4cmCu1FfngDwwu3k6CSnpXAkOL9RzDPtU55wOOP7XtCCLvmtMl3Y/8vnd97/rllA5Ji6VJT3RUTKn/xmGzgG7pyPewUdS1yxthpLurAzx6AZM3Qc2UPFOcYXdGxmB5NZqqxzxwsDuexcFiXDlv+vMO08O5bsADHbg5kjzsdh1+M6j8WO47+747XjOO045rtr1xz7l49hx5UlD1xxgLc475Erbqtz25FfG8tOH/fBmYKfrkp74Rt2IjmF1pFxnB92mh3vPfclO4wFfRZ4Tsi/YXDAGa87D5q7nve5e7xjUIKDuPy8OAdhmBXNKcj8nDmCs47zRl9d3E+komIfioFOZlzyWMvvNANuvPCYUtBq9+76Ba7H4DP1LRzOpp0pbN+0oH4R+198fvY9GITjvmAwlBce/5iFzEDwmV4Ly9PxXICBPQ4GsnwBs2wLK7+20hEwdG03C2pH+Rq4//kY533r7jW49gkdmBlcmPOYgs5BnDEY7viMcHCYwVue17mWLfAEx0AvS0o4D/qyjSvo/zGjmdnZLIPBbGvnRbW5H1nmgzOtGETn8zDz2TngTTxnyW+fSsnRHpdygdkunD7AAwsbNl54IOMBlJ0dZk9wBI0Nl/OBy4EjrjwIOaZlsGF0XQ3ZuVHg/3MshOHAv+d1jooVFhsInlS4q3HHmj4c7XNXhoCNkvNrYWPrXHOGtYH4d5xGw4MyD8KOlSBd8bUws8ex33jhFA5ObfQETt/gtrib/spsFu4zR7CB28gAr2Pab3E6Sa4jknwuZsa6228MnBR2pJD7iZ8h52ALSzLwc+I6Lcedor4vIlL+sHPGE39Xa9euNSf77ga2CoNZD8za57TP77//PneaW3GP8QUdm1nDj+0cg7g8ljELl8e3wgYqC4vZo9xGZoFwirtjKqNzR8oZt9eRHcRjMvepM2ZXsRPMzvqZOPYZs6cc+4xtPveF68It+TlT+8XsHefn4vkL2yjnRTS5DWxjnBdKcRf8dHdxDiyXBu5vlp7wdAeVnUMGFpwXU2E7zMGJ/EovMGPNdUEyDo47Hs+pniyB4Iz7ne+BcyYt3x8OopxpoFhEyha2R+x/uOtfceDUUcOWx1xm3zoHaVnbuqC+BQN7nKnhmGlRXGdqZwrTNz0TZlJyur67xROZ6crjKwN3LK3E/+3c1jDrluUOmNHKrGSWJmQZAZYJ4HGR/aezbfe5DcyKdW73+F7k1+Y52k7ul7OZoVqY85gznYMUFNBnm1ESwVrH62db5dwm8nPETOX82kTGSFheg6+ZnyfnYC2x7i+D1fw+OLabawG5lrtwlPaT0qWArZQLN910k8n64UgqG1JOCWXWIoO1bAw47YHTBHgbp0ryIOSc5s8gGg/8LOjNAzbrHDkOzI4pB/x73sfGko0qa+JwhJF1dvicHE1jkNh1FcaCMJDM0S5mP3FqCgOv7ITyudhBdtTnc8WsEo4cM4DIThK31dE5ZCPKDCv+rWOKBzM6HdNLHFlXDFhztJAnAWyIebBmh5yjzgx0F6cEAqfrcoSP+4Gjl5z6z9fFAvnuMoQY8GSNIDaKbGzYgLKxcKyqzX3ORia/acDuMCOMU235OeCoMk+y2Bg79htrN3EFU97PEWPn7XLdN844oszPEk9W+JwMDnC1UJ5wFaZ+05neFxGpGG0Vj9ms0cbjN4
/57JTxOMj7zqamOYO0PN6yLXCeulecY3xBx2b+HZ+Hx1Hexw4N/6/r9LyCsEPGtsLdhc/DDjSPzxzU5OwZtrUs6eA8SMrOJDsYPJ4y+5fT6zmFjzjNk6+THRPuY3ZWOUjGsjSFGQjk8Z4dXU4H5fGenUke+w8ePJinHFNB2H6xLXRXa92RHeu8UBzbH9bQY7vJfcuA5IMPPmimbjq/7tLAYIGjdERh8bU4BkcLwvfX8V5zkGHmzJm51/P7n5yJwnMeBlr5vnM2FTv5rE/o/JyOzyCnr3K/M/DBtp6lMNiuM8uM+JPbyzUTGJxxfD54LuaM7bTaaBHfw/4V+2k8hrKNcpTH43GEtUjZRjj6BQzSMSjHYwbbOg5ouuKxhDVFeRzh8Ydt7NmWNDtTO1OYvumZMCDLutw8vrFeLY9pbL85sMu+GUsuMHjNQU4eTzmrkwOz3B4mmXAQmf0Xtk/sO7PNZ3vE0jfsYxel3c8P222WJmAbzuM1zz24D/Jrq3kewvMR1wSdojjTeUxhzkE8ga+Tnzfni3PwOj9cSI6fWb4njnMFLiDm6D9z+51LVvC95meNC5sy+Oxoc7mWDnGwleeILI/B/cHPO9ta/h9nahO9QyURpFzggZcBWHbeeHDlVEBOCWXdGeJ1ZqyyEWDRdDaE7MSy8SI26uxE84DGgzWDcHwsG2U2zmw02QFgbRd2itloc6SRBc2ZJcngHht9HuwLM83TGRtqjnixNg0XsWKAlica7GhyWoU7bFAZnOZr5SgbA9GOkw++VtbL4Wvja2VAko0wGx7idvJ5Wd+PJRc4xYaZJjyBYSeeI2ts3FnjpqgYfCY2shwx5uvg8+YX0GQhdzYK3IdsNNjZY2aXI6uYDQU7rwzaOrb/TDjFhe8PO2gcMea+cEzp4OgiT9q4Sir3EwO5jsbK3b5xxqwbnlCwsWNQnqOnbMT5GXM31cfVmd4XESn/eDLNdoILZTJAy5Nqtkc81vH4dDa4qAWPvcyKdR7oY1tY1GN8QcdmbitP5Dktk/+PHTq2h+wwsANVmKn4DLI6Vid2xbaPHVQG7/icxGn2PN6yrXOUAOA0RmaUsLYea/ix/Xe0NTy2MijuOOZyCiDbbwb+Chts5TkF6/WxTWHnkM/N43VhpwMyC4kDrz169DCdKtesFL4edogZJHAEgdme8ByE7Tn/JwO4zKpim8Xp+meqT+cpPN/p1KlTkWo8MgurMAO9PNdy7QQ68JzG3f/k+8/3nucDHMDlitd8bx0Dro7n5GeHnwtuB+/nZ4DnVjwXYOeWwW/H95DfB35meG7IzzXfW+fMdEdWrmNNBRHxLWzvGGRjkJa/E9srtglsF4nHBCbJMNGDQU22S2wfWf7HGdtLHiM4mMX2hcfus3WmdqYwfdPC4LkGg7Ic2OSiYgzY8XWz3ILzsZh9WrblfK08tnJAjf+b28XBLW4nB0LZlrFdY5am60yX4mC7zP3Kvi3PiXgOw1mZ+ZVp4DkG+2o8PjMztjjOdB7DQe6CzkEK0+8rDCbu8OKMnwHnNQfy22dsC/nZZQyCn1vnzyQThLitPJfifnTM7HItjcXXxLaSn2mef/C7wSxa9ocZwHU+b2Hwl8/Dc0MpXRa7p+ewiYiIiIiUEAaU2akozmIeZQnrxTFLiz+lbGE2HoMo7EyfTea7iIjkj1mlDJg6lxBgsJoZyPmtJcNBUAYXmZUrpYNtIZOWmIWbX119KRkqiSAiIiIiUsoYqGUmlyemlYpnMROes3AUrBURKTnMZuUMGE6353R8zqTgzBMGbfPjWFiVZfCkdDBDm+csCtaWPgVsRURERERKGbNrr776ajNtX8oO1rVlrUKuoC4iIiWH0/pZxoYDZCwltGzZMlOioqAFnVnGgCUMWF5Kk8VLHtcG4qJ0LFskpU8lEURERERERERERETKCGXYioiIlAPr1q0zC+7kZ82aNWYBnXbt2pkFBpwzE/izQ4cOaNOmjVnsgRcusiAiIiIiIiKl79
9lhEVERMQnLVq0yKywyylL7iQnJ5vpvVxFlgs5HDhwAIMHD0ajRo3Mirx79+41q/NyBVhPrX4rIiIiIiIixaMMWxERER82ceJEfPTRR/mupkvh4eFmJd7evXub63FxccjJyUFkZKS5vnHjRjRr1kzBWhERERERkTJAGbYusrOzkZCQgKCgIFitimeLiJRHzETNyMhAREQE/P19uykcNmwYHnjgAaxatarAxzFoSyybwHaOiztceOGFuQFb7o/+/fvj4MGDaNmyJR577LECF31wR22oiEj5V57a0LJEbaiISPlnK0IbqhbWBTuxu3fvLsn3R0REyogGDRqgatWq8GVcXbcouALv/v37ceedd+Ltt9/GPffcYzJrW7Vqhfvvv98Edlnf9tZbb8VXX32F0NDQQj+32lARkYqjPLShZYnaUBGRiqNBIdpQBWxdMLPWsfNCQkKKteM5zXTr1q1o2rQp/Pz8ivUcImWFPs9SHj/P9erVM3VbHcf8ioSvmZmzt9xyC2bMmGECtmPGjMnzmAcffBCff/451q9fX+BCZu6em7h/g4ODPT4avX37djRp0kQzYER8kL7D5QdrnlfUNrSs90NLgvoCIvquieekpaWZJNHCtKEK2LpwlEFgI1mUrCLXRo349wrYiq/T51nK4+fZEUysKKVvNm3ahHHjxmHBggUICAgwt2VmZqJy5crm9/fffx+dO3dG69atc6dlcl8VtTPu2J9hYWHFbkPP9N4xA1htq4jv0Xe4/HAcgytKG+pL/dCSoL6AiL5r4nmFaUPVyoqIiJRzzKhlgJalDhiM3bx5Mz7++GMMHDjQ3L9z5068+OKLOHHihBn1nTBhgsmSZZkEERERERERKV0K2IqIiJRDzKZt27at+Z2ZspMmTcLff/9tFhpjCQTWsO3bt6+5/8knn0SjRo3MQmQXXXQRDh06ZLJulckqIiIiIiJS+lQSQUTER2sB2u12b29GmWaxWCrUdE3Wml27dm3u9d69e5uLc5btlClT3P4tywy88MILpbKdIiIiIiIiUjAFbEVEfEhGRgZ27NhhfkrhF9jSwigiIt4dDHTUwXT8lLKrog14+prSHrTXd1d8EY9hPJaJ+DIFbEVEfAiDtREREahevbpOQs6AnZkjR46YfdaiRYvSeYNERHxQaQ0GsvO8cePGEv0f4hka8Cx7vDlor++u+GLANioqCrVq1VKJL/FZCtiKiPhQRgVP0hms9ffX4bswuK+OHj1q9p2yhUREvDcYyEE0x7FYWU9lmwY8yyZvDdrruyu+hp9ZLrZ74MABbN++Hc2aNfP2JokUi3r8IiI+wjH9TR3dwnPsK9X7FRHx7mAgj8OOqfZqx8o+DXiWLd4ctNd3V3wRvycNGjTAhg0blLghPkvFiUREpNj279+Pbt26Ffnvtm7dih49euS57bPPPsO1116Lq6++GjNnzsy9ffXq1ejTpw+6d++O8ePHIzs7W++YiIiHaDBQ3NGAZ9mi76lI0Tlm1ylxQ3yVArYiIlKq5syZg1GjRiEtLS33tk2bNmHWrFmYPXu2uX/q1Klm6h+nMz388MN4/fXX8c033yA9PR1ffvml3jERERGRQrKlpyN2+nTsGjwY26++2vzkdd4uIiJlk0oiiIhUAHabDcjJhiUgsMT/Fxf6uu222067fdKkSWYRk59//hmvvfaaCcQ6/PTTTyaDNjQ01Fzn74sXL8aFF16IOnXqoGHDhub266+/Hm+++SYGDx5c4q9DRERExNclLFqEvaNHIyc+nimHrK9gfibMn48D48ah3uTJiHCZ9VRW5OTkmPNKLhwlIlLRKGDrYZnHjyNuwVxYbXagTRtPP72ISJHZY4/C9tt3QGYGLNXrwtLpClj8/EpsT7K+2vz58/O9/4033jClFJzxZLxVq1Z5nmPdunXmdv7uEBMTY24TEZGKiwvIsD7hsmXLzCrgzoYNG2ZK6fz4449mwK884SyTm2++GSNGjMA111xzxpqnY8aMQZ
s2bcysFnrvvffw/vvvn/acF110ET788MMS3XbxXrB216BB/97AYK3Tz5yEBOwaOBANZ81CRM+eHv+eBgcHm2npnJLOQfuOHTvigQceMLVFC+P+++/H+eefn/sZFhGpSBSw9aCMgwfw51VXIS0x1Vzf/defaPzWe578FyIiRWb/e7kJ1prfj+yDZf92oH4zr2TYOgdf3S2o5lo/jx1Od7eLiEjFVqlSJXz11VcmQOtw8OBB/PPPPyiP9uzZY2am/PXXX2d87LFjx/Dkk0+a2SsM2Drccccd5uKwZs0a3HXXXXlmvEj5wXIHzKw13Jxn5d5usZjHtdy1C9bgYI9uA0tcOQbk4+PjzaDBjTfeaAb2q1Wrdsa/j4uL8+j2iIj4EtWw9aCDzz+TG6xl43dg8Q+efHoRkWKx5+TkvV7Ci3Y5MmxdL/kFax1/c/To0dzr/L1GjRrmwo6nA3/nbSIiUrFdd911WLBgQZ7b2NZw4Upnhw8fxj333GNK7HCRzHfeecdMs6aMjAw899xzpgxP27Ztcfnll5t66s4Zgp9++imuuOIKdOjQAXfeeSeSkpLcbg+Dxbfeeqt5HJ9v5cqVZiHNefPmnfZYBq34/1wvrotxOmzZssWUAmJW7ZmmhjMo1rNnT7PtfM78pKammkzHBx98EE2bNi3wOcU3xc+Zc7IMQn7BWge73Twufu7cEt2eyMhIPPLII6bM1ZQpU874HXz22WfNoML//d//4fHHHze3TZs2Db179zbfswsuuABPP/20FpQSkVIbOOUxiGuslBZl2HqQNSDv7rQqCUxEygBrszaw/fnLyRP28AhY6jZGWdO1a1dzMn7LLbeY61xg7KWXXsI555yD3bt3mwXIGjdubBYlu+yyy7y9uSIi5R4Dk8nJyXlu4/TmKlWqIDs7O89gmkPNmjXNz+PHjyMrKyvPfREREWZKdEpKymlBz/DwcJMxWxQMzHKRSrYRjunVDNgy+MO2ghiYZUYpgzusn87tYtA1LCzMlBZg0Gj9+vWYOXMmKleubP6Of8/AKR9DS5cuNf+HpQOYGThjxgy3s0jGjh2L2rVr45dffjGDjsxc3blzJy6++OLTHuua6XomfN4ffvjBbBMDyAVhLXjWgGepCOfsY3dBY5aMYG14KZ8SOKDhqFl7Jqdq2kYNGVIq53zff/+9+b2g7+BTTz2Fbdu2mfM+lkRYu3atWYR2+vTp5vxw8+bNuOGGG0ywt3PnziW+3SJScW3ZssUsfM3zH5Zj4kBuaVDA1oNqP/UMjv6yHEknEmCxWlF/YB9PPr2ISLFY6jaBtUo1ID0ViIyGxT/Ao3uSWUXOWTwtWrQwo49FwelyAwcOxKBBg0xDyEyili1bmvteeeUVjBs3znSWW7dubTrMIiJSsv744w8TrHQ9Vvfv3x+JiYmmzI2r8ePH5wZOXWuV9+3bF+eddx42bdpkAoquAZyiDsYxeMkOE7Ns7733Xvz999/mNg7uOWzYsAG7du3CF198gYCAABOgZKCUWbYM2LKtYdvDQBEzcRmQZsZfQkJCbsB2+PDhJtjMC4OvfD537SCDSWyvGDBlAHnAgAGmLSzMtO8zYUC7sAIDA0+r6+uK7x+nqrt7D6X8yImNLVywlmw25JRS+QFm2vI7RoX5Djqce+655tjCLPPY2Fgz8MOBHq1tICIliW0mzyM4CMwZKV26dEFpUcDWkzuzajW0W/YrUpb/hD3JqajVd6Ann15EpNgs4REmu9bT2PllhkNx/m7JkiV5bmMmkLtsoE6dOrmdUioiIiWnffv2Zlq9MwZTiMEVd1mmDn369HGbYesY1Ktbt26xA5LOODX6hRdeMAFbthP9+vXLc/+BAwfM1EXn7DvWTHfUQmcGMaddM9jKLFZHaQDn+unOwU8udOZuKqQj29i59A9/b9KkidvtZqDUXbCUGcoLFy5ESWPt33r16pnMYym//PjZLUKGrV+VKqWxWThx4oTJ1C/sd/DfTbSa7w1nYTHoy2
MJH+fusSIinsJzHpY44kB0r169zLGotChg62F+4ZUQdmUPZBZiQQARERERkbKImWv5lSlg4NJR/sCd6Ojo025joJSBFWbNFTdA6+qSSy4xJRZY55JTrBm4dV7EkkFTBnZYT9aBmXuOkgzMCGaQaPny5SYDl9mzxRkgdARqDx06hPr165vf9+3bZ8o1uMNgd0EB75LGfcU6t1K+RfTubcocFIrNhog+pTM7lJn7rD9b1O+go3zCt99+mzsAxGOAiIin8VwiLS3NzJpxDGK3a9eu1Be/1qJjIiIiIiLic/z8/EytSy481KZNm9ysPQeW0alatSr+97//mWnWXJDrvvvuM1m5jmmOLCHA52HWH0sakGt28JlwMcyOHTvizTffNOV7OPOEpRq4QAn/Z1nrhP7111/Krq0AIvv3h19kJHCmAIPFYh4X6ZKh7mksY8DvHkuIsCRJYb6DvM8xwMLHMqjLASN+z95++22T3V7U76uISEFY+oDlVz766CMzKOxQ2sFaUsBWRERERER8EssvcGEi1sh1xeDO+++/bxYLYZ1cLk7EEgdc1JK42OVvv/1mMme4+BZrZDL4yucrqldffdVk77LOLTN9H3vsMbMw0ooVK1DSWEeeAeLCiIuLMx1Q5/INUj5Zg4NRb/Lkk1fyCzScup2P4+M9bejQoebzyQu/o/yOcNEwDqQU5jvIsidcaI/fqZEjR5oMfWbVsn41M9gvv/zyYn1fRUTc4QAQF0FkXXwOMrnW4y9tFrvzvCFBamoq/vnnH9NYONKfixOR58g1R/o5Wijiy/R5Llvvxbp160zGkI4txdtnjs8za6Rt3br1rI71UjJtaH50LBLx7bbFURKBtd+8kaUinvtclOSxviIraL+ezfc0YdEi7B09GjnM9nbUtD31k5m1DNZG9OiR79/ruyu+ytf6TjrX9a60tDTMmDHDlDRiJj8XRHTU1fakorShqmErIiIiIiIiUg5F9OyJlrt2IX7uXFPTNicuziwwxpq1LINQEpm1IiK+JDExEdOmTcPRo0fNAqtDhgwxi3N6mwK2IiIiIiIiIuUUg7JRQ4aYi4iI/Iv1sz/77DNTsoWLrd50001lpmyQArYiIiIiIiIiIiJSoQQFBZlyTaxxP2zYMERyscYyQgFbEREpNhZiHz58OJYsWVKox2dmZuKpp57Cxo0bTS1D1ga65ZZbzH1jx441t3MaCt1999246qqrTK3ZJ554wkxVYR0hLhajmnkiIiIiIiJyNsLDw02gNjAw0CxsWJYoYCsiIqWGK/1yBJOrWbPgOlcE7tixI1q2bIkNGzZg1qxZp41qPvjgg2YV4U6dOuF///sf3n33XYwbN07vmoiIB2kdYtHnQUREKoKNGzciOzsb559/vrlepUoVlEUK2IqIlHNmdd9tG4GkBFganANr1ZgS/X9HjhzBbbfddtrtkyZNMo3ilVdeaVYI5wgmi7kfOnQItWrVQmxsLB566CFzvXv37rjrrrtw+PBhJCUlmWAtMSOXI6AK2IqIeAYH0XjhDAiuiixC/Dw4PhsiIiLlxe+//46vv/7a9Eejo6NRu3ZtlFU6KxMRKedsq35GzoY/Tl5Z9zsC+gyFJSq6xP4fi7TPnz8/3/sc1qxZg3Xr1uHll1/GsWPHcNFFF+GZZ54xdYTuuOMO1KhRA02aNMnzNzExMSYgLCIinsEOC+u2HThwAA0aNCixAJ0ZPLTZzE/+Tym7+D7x88DPhd4rEREpD3j+sXTpUnOhdu3aoWbNmijLFLAVESnnbHu2/3slJxu2/bvgV4IB24IybB3B1xUrVphSB6+++ioiIiLM5Y033sh9LLNoGfRt1KjRac+jzqOIiGdxlsP27dtNaZqSpGCt72CteH4uREREysNA5DfffGOya6lr167mUtb7lQrYioiUc5aIKrAnJThdjyrR/1dQhi3NmzcPr7zyiqlH26FDB3Pb+vXrTZZtt27dchtVPz8/k2XL2x34O28TERHP4fG2Wb
NmuRmwJSEnJ8cc68877zzz/6TsYgdWpRBERKQ8yM7ONv1P1q2la6+9NrfcXlmngK2ISDnn1+VaYPn3sCclwtrkXFjrN/batqxcudJk1XLxscaN/92OrKwsTJgwwTSeXKHz888/x4ABA0x2T0hICFavXm3umz17thkNFRERzyuNIB2DtQrYioiUnWAW14ZgGZSUlBQ8++yzZkFgkfJi3Z9/mmCt1W5H561bUe2ffxDbuzci+/eHNTgYZZmqyIuIlHOW0DD4X9UXAf2Hw6+150/ADh48iLZt2+Zebrrppnwfy6xanhiOHTsWffr0MZfvv//e1BC68cYbzaJiPXv2NBlY/EkM8PJy3XXXmZq3Y8aM8fhrEBERERHP2bNnD0aPHo327dubwfaPP/7YY8+9bds2DBkyxJx3cqFanksW9NgRI0aYIOQll1yCF154wSyqR2lpaXjuuefMOgoXXnghxo8fb27zBG4Tt61NmzZmW7kd+eG6Dn379jWP5bkxrztwAd67774bF1xwgdnOhx9+GAkJ/86cc8XtHzx4sHnMqlWrzOwJ5/N0Xphh6JgNl5iYiC5duuDLL7/E1VdfjW+//fasX3tycjLuv/9+s8+5zW+++Wa+j42Li8MDDzxgXh/fn//7v/8zMzIcpk2bZhYs5ufo1ltvxb59+8ztnA1yyy23YPfu3We9vVJ+JSxaBP8+fdDo999xwaxZiJ47FwkLF2LvqFHY2LAhEr76CmWaXfJISUmxr1mzxvwsruzsbPMc/Cni6/R5LlvvxZ9//qljy1nsM8fnOTEx8ayP9VIybWh+dCwS8W36DpcfJXmsr8gK2q/ePAe02Wzm//JnUfTo0cP+1ltv2bOysuzbt2+3X3jhhfbffvvtrLcnIyPDfvnll9s/+OADe2Zmpn3p0qX2Nm3amP/hiv/7sssus7/55pvmsYcPH7YPGDDA/vLLL5v7n3nmGXvPnj3te/futaemptoffPBB+9ixYwv8/3wN/P8F4bZwm/hYbi/3Q7du3cw2uDpx4oS9ffv29kWLFpntnT17tr1Dhw722NhYc/8NN9xgf/LJJ832xcfH22+77Tb7mDFj8v3fEyZMsH/yySe528rtcN0nc+fOtTdv3ty+Y8eO3Nt/+ukne/fu3fPcVlzch3fffbc9OTnZvmvXLvuVV15pnzVrltvH8vUMHTrUfuzYMXtcXJx9xIgR9tdee83ct3jxYrP9K1euNNs9bdo0+xVXXGFPT083969du9Y+ePDgctN3UjvpOfHx8fZj8+fb14aEnLwEB59+OXVf/MKF9rLahirDVgoX2D+wCzmLpyPn66mw781/dFBERERERERKHjNF87twRlNhH8vSVGd6bFHExsaajNLrr78e/v7+pgwW1y1w1JB01aNHj9OyQHl57733Tnsss0aZRTpq1CgEBASY7FBm8LpbP4EL4fJ/33nnneaxXGeBGax//PGHuZ+LEN17772oW7euKcH10EMPmduSkpJwNlgv89JLLzVZoyz19Z///Afp6elm21199913qF+/vtkH3Ff9+/dHgwYNzO18neHh4bjnnnvM9nGR3htuuCFPBq6zo0ePYu7cueY58sP/wWzeypUrY+vWreY27mdepkyZ4nbB36eeesrt+8OMV1epqam5+zUsLMy8FmY4s6yZK76+pUuX4tFHH0V0dDQiIyPNTLpZs2aZDFo+D0ukMfuZ283ZeHwfWWKNmJHM5/j111/P8I5IRXL06FF8OHkyZs+dCxtLPeVXm//U7XtHj4YtPR1lkWrYyhnZMzNg++NnrgJkrtvWLoO1Wi1YQsK090RERERERLzgxRdfzPe+c845xwS4HCZOnHhaYNaBAUNOL3cuYcXAm2vQrrAcZQUYXGPgeO3atfjrr7/yLWv1VRGmJe/YsQNNmjTJs7o7g7KbNm067bG1a9fG5MmTc68zCPjjjz+iefPm5joXWmQg1LmON7eX0+5btGhR6G1yt43nnntu7nVuKwOh27
dvN9P+XR/L98oZXw8D3tw25+2nH374Ic9zuwaKGdxkkDc/GRkZpvwBg/AMeG7YsMEESGNiYvDggw+aIDNLMDhjXVteClsKg/vVOfDreD2u+DhyfQ8Y8GepBt4fGhqa529YA925DAJLpnH7XferVEz79u3D9OnTzQAJwsKQFRiIoILKnNjtyImPR/zcuYgaMgRljQK2cmbZmbnB2tyRiMx0QAFbERERERERccLAqAProjLjjVmm7rI3i4qB5GCXhYJ4/Uy1Zxn8Y9CRwb5XXnnF3HbVVVfhrbfeMgHgSpUqmTUTGBA0wR4XvXr1wqFDh0x9Vf4vZgzTbbfdZi6F2UbXIHhRX8/7779vauPOmDHD7WtkBi9rxro+P7eV7wmDtAweMyuZCwDXqFHDXJYsWQJP4cJlzCp2XlySAVl3r4cZuNxe7vfnn3/ebOPbb79t7uN7wPeHixKzhi2D1AxI79y5M8/707p1a3z00Uce237xXdu2bTPBew66VOPnfsYMBBamJrXVioT58xWwFR8VEo6cyGo4MGch7DY7avfqjsBKVby9VSIiIiIiIhUWp5Lnh5mKzriwU36cs1XJOROWQTRHJmRR8W8ZDDxw4ACefPJJPP74426zgh3BUFfugqHMuHQNqPK6ayamM2Zr8vVzO7iIVbVq1cztjzzyCP773/+a0g0MKo4cOdIsusVyAa4WLlyYGxTlfi8oyMnnYiZrYbaRj2WQ0/WxDCA7MNDKBdGWL19uAq0MMLvDBcocr82B/9NRQmHz5s2mvALLD7Rq1QqF9fTTT2PRokWn3c6FwBhEdv1/zOTmZ8bxGWSwNr/35+WXXzYLwXEhtKpVq+Lmm2/GTz/9ZN4Dfi5OnDhhFivmc/AxDPA67xu+Xi5cxn3mGviWiuPvv/82ZVF4zGHGertPP0W6mwESt2w25MTFoSxShq2ckd1mw18T3kHK1q3gWOmR9dvR6bLe8AsK0t4TERERERHxAmYylvRjzyZgy2Abg3AsTcDAK7Ns3QVsHcHQwuD0etcyASw1kF8Qk1OkWWuV2b0zZ87MUy6ANW7vu+++3On+LE/ADFrWXT0b3BZmgjrvQ16//fbb3T6WdWddX8/AgQPN7/Hx8WbfMfjJOrCuAVlnfExB7xVLQbz77rumLizfk9GjRxc6YMtLYXDfcQCAmcyOjOqC3p/jx4/jueeey31ffvnlF/McDGQzM7tbt2655Tr43lx++eW46667cv/e8XpdByik4vj999/x9ddf52Zc9+7dG3sXLEA6PxOFOXZZrfCrUjYTEvWpljNKWb8eyQzWMlprB9L27EXCL79oz4mIiIiIiIhb//d//2cCjsx+XLBgAVq2bHnWe4o1Vlkbl4tkMfOUAT4uXMUgzWn92JQUszhZx44d8c4775xW2/Xjjz82tXm5fQwcclr+oEGDzAJXBf3/M5UQYGYos0S5GBa3kUHSoKAgdOrU6bTHsmQEA5qc7s/M1Dlz5mDXrl2mHACDkXfccYdZbIyZtQUFa6lmzZomyFkQBk5Zq5Z1irds2QJPYyYtXxNrJicnJ5vALfczFzrLL8OWnxNH7WCWR7jppptys5m5YBlfEzNs+ThmB7P2rgPv46BAUQYkpHypVauWOSawfjM/ZyzHEcHjQWEHmmw2RPTpg7JIAVs5o4CgAFidpslYYEFgkJKzRQTYv3+/GfkuCq6ayxV6HRfHFLjFixebFXJ5ksd6Yg5cwZYnz9dcc41ZcdZd/S8RERERKVs4fb1fv364+OKLTTbrSy+9dNbPycAcM2xZHoABGk6nZ9DPkcHJwHDbtm3N75zGz0Ww+LNdu3bmdl5YAoFYJoHZoDw3ZcCXGaj5lY7gOarj750vDBy7atq0qamTy2xiBniXLVtmSgc4gooMEjPrl6KiojBp0iQTkGVAlz/5WN6+YsUKs2AbA5d8Hsf/5P7Mb3/z8WfCgCjLGbC0AwOlnvbMM8+Y+rSsPcuF7xhEGzx4sLnv4MGD5jU4yj
Qwu5bZx3ztQ4cONft5+PDh5r6ePXua6/x7vkcM/nJ/O5fw4Ot1rdsrFUvt2rVx5513mj6k47MR2b8//CIjWe+l4D+2WMzjIvv1Q1lksTtXBBcTCPjnn39MUeuC6uAUhKn6XAWTIz/OxbZ9FT8i+265CftW/mEybGu1a4H6076AtYCRRyk/ytvn2dffi3Xr1pmpHmXlvWDAlidVhV2sgMFZTgfjybSzY8eOmaAsp3qxLhWnaDEjwhHcZc0znsgxG4AnluPGjSvWPnN8nnkizUDw2RzrpWTa0PzoWCTi2/QdLj9K8lhfkRW0X715DugoicAp5661bgs6P7ziiiuwcuVKE3iU0sFsU2b3/vDDD3nqvJZnDLQ//PDDboPYZbHvVBC1k4WTlZVlyqhwEIPB2vwkfPUVdp0qLXJyuriLU8ezhl98gYgePVAW21Bl2MoZsWGuM/kTdHj3dXR4+zXU/+xzBWtFJF/MoHDOoHVcePv69etNwJWj7f3798d3331n/saRJcGTek5p4Ug6axExwJuUlJQ7hYz1vBw1ikRERERE5KSYmBhzDv3ll19WiF2yevVqU+Yiv4xjKX/S09MxdepU06ecNWtWgRniDMI2nDULfhERJ29w1Dk+9ZO3l3awtqiUIimFYg0IQNAV12pvifignOPHkDb3C9gS4hDQsjWCr+lZ6AyJ4qhevbpZpTO/EdGuXbuaDFkGYzn1iYtHMCOAJ5kO/J0BXl74fK63i4iIiIhIXmPGjDGLdDExgrVvyytmfr/55pum7rBUDElJSZg2bZrpC7ImNMutFFRvmiJ69kTLXbsQP3cuEubPR05cnFlgjDVrWQbBGhyMsqzcBmw/+eQTZGRkmBUVRUQqsvSvF8B27GSQM2vtGvjXq28CtyWFjai7Yy/rc7EOFS9Ut25ds6ACs2vdrWjLoHJ+t8vpOOWLpSRY58wd1gpj7TjWCeM0OWYrc5Vdx/5k3eAZM2aYRR24Ai9XA64o0+lERETEc+rUqVMiC1rJmXGKNTMPyzuev3722Wfe3gwpJbGxsSazNi4uztRHZtJPjRo1CvW3DMpGDRliLr7GvzyOtIwfP97Uy2FnVESkorOnJOe5bkvOe700M2y/+eYbNGjQwCzqYLbNbjc1pdjgclqTAzNueRsvrG/rwN8L2zhXJFxMg22fuwA3cZVeBtEZhGVtswMHDpjFHxo1aoTrrrsOM2fONLWg+JPZGFw9+NlnnzULZoiIiIiIiHjDoUOHTGZtSkoKqlSpgmHDhpmfFUG5q2HLzn+XLl3MKnEiIgIEtOuYuxssYeEIOLel13YLV+p99913zbGaQdkff/wRl112GTp37ozffvsNx48fN2UTuCgZb69VqxZCQkJyg7lclIwlFeRfEydOxEcffWSyZfPD+l6//PKLWZiBODrNhQ0iuXoqgDlz5pjF45gRw6xaBmwXL15sToxERESkbNB64SJF/75odp5vW7FihemTMGln5MiRFSZY6/MZtl988YW5ODA1+uOPP8aVV15pOp8iIgIEXXAR/GrWgi0hAf4NG8Ea7tlp7gcPHkTbtm1zr7do0cKMgrozYsQIPPnkk6YsAk+iHnroodzVPRkk5P2ZmZno1q2bKZdAr776qvkb1i1iQJHX5V8cZX7ggQfyLYXgHLQlrqiakJCAnj17moXeaMeOHWjSpEnuY5kFzYAuA+x8P0VERMR7rFarufAc6Uw1G0XkJAb5+H3hd0d8V+/evU0/hkk7wWW85qyn+fTRniUPVPZAROTM/Os1KJHdxADq5s2bC/34wMBA/Pe//3V737XXXmsurpo2bWqm6ot7zouyFcayZcuwf/9+MxPl7bffxj333IPU1FSTyezAMhV8r3h7cTDYy4snOZ7P088rIqVD3+HyQ8fh0scMwaioKFPSiIOqpRmA4gA7Sy7xpzIVxRfws8pgLRMPatas6e3NkWLge1evXj1zzAkICED37t0r5H706YCtiIiIFA1XVW3cuLFZQZ
iLjDFgy2Btenp6ns44s3g4c6U4tm7dWmJvy/r160vsuUWk5Ok7LFI8LBO1fft2bNiwodR3oYK14muYWctgbbVq1by9KVLEY82vv/6KJUuWmFKnXAi5IlPAVkREpJzbtGkTxo0bZ2oDc5SaGJCtXLmy+Z3lEHbu3IlOnTqZ67t27TLZO8ziKQ5mRXOVYk9iEJmBnlatWpkMYBHxLfoOlx+cfVGSA3PiHtu+Zs2a5Wa7lvZ397zzzlP7Kz6BWZkqg+B7eFz79ttvc8u82ZTZX34Dtv379/f2JogH5Rzej/QVy+AfXQ2BF3WFxf9kwEFERM6MGbUM0L711lsmo5YZOqz5fv/995v7+/bta65z8TdOueRCZpx65FwmoaidypIKqpbkc4tIydN32PfpGOxd3gpE6bsrIiU5MDR//vzcWTjsh1x4aq2NiqxMBWzXrVuH0aNH51k4Zdu2bXjqqadMjcSYmBizsIpjIZqSdDb191Sjy7OyjxzChr79kHDkBPz8rGg8sBdqTpjo4f8i+dHnuey9Fxx91CrBhePYT66fY47YlnfMph0/fjzWrl1ryiBMmjQJzz33nDn5qVq1qqlhy0At3XDDDYiNjcXw4cNNza9LL70Uzz77rLdfgoiIiIiIlGNMKvniiy9MQgkHpPr06YPWrVt7e7PKhDITsF20aJHpWDp3ovnG3X777bjxxhvx6aefYuXKlRgzZgy+/PJLky1UkjwxzUc1ujzD8uEkxB8+BtiBbJsNO2Z/hSODhnro2aWw9HkuO1N8eJzUog+F45hK4/r55QlBeXPBBReY4Kzziqq8OLDdnDJlitu/5cnRXXfdZS4iIiIiIiIljf20adOmYe/evaZs26BBg0ypNilDAVtOvVyxYoXpKL755pu5tzPTNi0tDaNGjTLBCRYd7tq1q0mVHjt2bIlu09nU31ONLs/aE1EZ8U5lmix2G9q0aePh/yL50ee5bL0XGzduNME11WUq2iIZjrprjs8zTwTKY9BWRERERETEF7Cf1rZtWxw7dgxDhgxB3bp1vb1JZUqZCNgOGzbMlDpwLoVAO3bsMJ1q50wyZghx8RRfqNGjOj+eUeeG63F4wddIT0oxn4VanduodpYX6PNcdvB7oAzbwu8rd59fBbxFRERERES8l1RDTMbjgorFXTujPPNOxXIX1atXz3cF0uDg4Dy38TqzbqXiCGjUFO2euAvn3tQb5989FI3uvs3bmyQip+zfvx/dunUrVtmZHj165Lnts88+w7XXXourr74aM2fOzL199erVppYRi8+zdE52dra5/ciRI2bAj3/D2qsnTpzQ+yIiIiIiIlKG+48ffvghkpOTc29TsLYMB2zzw5IE6enpeW7j9eKWKhDfZKlcBUFX9kX16wcg8rresLS/zNubJCJnYc6cOabUjfPgG2dOzJo1C7Nnzzb3T5061cyyYC3zhx9+GK+//jq++eYb0wawjjk988wz6N+/PxYvXmxqtb7wwgt6X0RERERERMoglqTj+lQHDhzAkiVLvL05ZV6ZDtiy/MGuXbtOe4NVhLjisVSrBWv7rrC27AhLQKC3N0fE56Rt3Yak31YhOz6+xP8XM1+ZEet64e3x8fH4+eef8dprr+X5m59++slk0HJALjw83PzOQOy6detQp04dNGzY0Eybuf766/H1118jKyvLlNHp2bOn+fu+ffua5+XtIiIiIiIiUnZwHZEZM2aY/hpjfddcc423N6nMKxM1bAta8Zorxb333nsYOXIkfvvtNyxdujQ3u0pERM4s4aeliPt6sfndGhqKWmPugX9UlRItc8PFIfPzxhtvmKkwzhjMbdWqVZ7nYLCWtzuXzYmJickN/IaFhZk2gvz9/U2gNzY2Nt8yOyIiIiIiIlK6mGjD2ZLExaCZbHO2a0ZVBGU6YBsYGIjJkyfj6aefxqRJk1CtWjW8/PLLyrAVESmCpFWrc3+3paYiZcMGRHS5tMT2IQOqt912eq1pHsfzC6ay8LwrZt
TabLZC305aTExERERERMT72MfjTMply5aZ6x07djTrj2gBbR8M2DKjdu3atXluY6o0F6IREZHi8YuojGynBbn8K1cu0V15pgzb/P7m6NGjudf5e40aNczl2LFjubfzd94WFRVlCtVzATJm1/JnSkoKIiMjPfpaREREREREpOi4HgnXKqHLLrsMXbp0UbC2vNSwFSlpmbFxWH/HXfi9Vx/smfShdriUS9EDByCwTh1Yw8JQ+dKLEXp+a5Q1Xbt2xbfffmuCrrxwygxvO//887F7926zABlxUTI29iyF0KlTJyxcuNDczp+87iiRICIiIiIiIt4TFBSEoUOHmvVM2LdTZq0PZ9iKlLYNw4chft0GwA4kr1+PoKgI1Lj+er0RUq4EREej1pi7S+z5Dx48iLZt2+Zeb9GiBaZNm1ak52D92oEDB2LQoEEmW3bw4MFo2bKlue+VV17BuHHjkJ6ejtatW+PGG280t48fPx6PPvqoKZ0TERGBiRMneviViYiIiIiISGFlZGRgz549aNq0qbnOGZBt2rTRDiwGBWylQkvdstUEa8meY0PC7JkK2IoUQZ06dbB58+Zi/d2SJUvy3DZs2DBzccXM2Xnz5p12e82aNTFlyhS9XyIiIiIiIl7GknVM3OGaJkzEad68ubc3yaepJIJUaOGVQ3N/twCIaFTPq9sjIiIiIiIiIuJL4uLi8PHHH+Pw4cMICQkxMyDl7ChgKxVa82efRI2akYiKDEXj9ueg2sNPeXuTRERERERERER8AjNqP/roI8TGxpoSCCNHjjSzIeXsqCSCVGjBPQag2cWXA8cPw1L/HFi1YJGIiIiIiIiIyBmxXu2MGTNM7dqYmBizyFilSpW05zxAAVup8PwiowBezoLt2CHYtm2CJTQM1vM6wOKvr5aIiIiIiIiIlE/Hjx/H1KlTzaLR9erVw5AhQxAcHOztzSo3FFUSOUv2+BPIXjQTyMk+ef3EMfhf0Uv7VURERERERETKpapVq6Jt27ZISEjA9ddfjwDNWPYoBWxFzpLt8AFkHz2KrEOHYQ0KQqDFT18sERERERERESlX7HY7bDYb/Pz8YLFYcO2115rbrFYtkeVpCtiKnKWcHBu2zf4WcccT4Ofvh0ZdU1B7lHariIiIiIiIiJQPDMx+//33OHr0qCl/4Aja8iKepxC4yFk69vOviI1Nhs0OZGXbsPuPTdqnUmHs378f3bp1K/TjMzMz8cgjj6BXr17o0aMHpkyZknvf2LFj0b17d/Tp08dceDJAW7duxaBBg3DNNdfg3nvvRWpqqrk9OTkZd955J6677joMGDAAu3fvLoFXKCIiIiIiUrHl5ORg/vz5WLlyJXbs2GEuUrKUYStylrJtFsBihcVqN9dzoNElkfx8+umnZrrMggULTOCVtY46duyIli1bYsOGDZg1axYiIyPz/M2DDz6Ixx9/HJ06dcL//vc/vPvuuxg3bpz5nX/H6zxxYCD4888/184XERERERHxkKysLHzxxRfYtm2byabt3bs3mjZtqv1bwpRhK3KWYq69CkGVQgGrBfCzovoFbbVPpUzJTEjA9kmTsenFl3Ho+x9K/P8dOXIkN0vW+cLbzz//fNx2222moQ8LCzOriR46dAhxcXGIjY3FQw89ZLJv33rrLTPlhvclJSWZYC0NHDgQX3/9tfn9p59+Qr9+/czvnTt3xrFjx3Dw4MESf30iIiIiIiIVQVpaGj777DMTrPX398fgwYPRpk0bb29WhaAMW5GzFOxvR5t7huL4ui0IighH1Q6ttU+lTNkzfSaStm4zvx/6+luE1KyJyPNaltj/q169upkuk999DmvWrMG6devw8ssvm2DrRRddhGeeeQZBQUG44447UKNGDTRp0iTP38TExJjAL/Gn632HDx9GrVq1Suy1iYiIiIiIVARMnJk6daqpWRscHGzq1jLhRkqHArYiZ6tyFQRVqYzaXTuaq5aIqtqnUqZknDiR9/rxvNc9jYFUZtG6mj
RpUm6AdcWKFabUwauvvoqIiAhzeeONN3IfO2zYMBP0bdSo0WnP4yhqzwxcV1qdVERERERE5OxxzZD4+HiEh4dj6NCheZJlpOQpYCtylixRMbC27wr77i1AcAgsrS7UPpUypUqb83Hkx5/M79agQESc27xE/19BGbY0b948vPLKK6YGbYcOHcxt69evN1m2jgXMbDabWXWUWba83YG/8zbH/+H1mjVrnnafiIiIiIiIFB/7WTfeeKNJrnFdZ0RKnmrYiniApW4TWC/tAWvHbrAEhxb67xKW/IC/LrsMay+9FLFfzNJ7ISWids/r0GDYjah13TVodt+9CK4e47U9zcXBmFXLxcccwVpHIfsJEyaYUdzMzEyzeNhVV11lyhuEhIRg9erV5nGzZ89G165dze+XXXaZuU6rVq1CaGioArYiIiIiIiLFtHPnTuzbty/3ev369RWs9RJl2Ip4SXZSEtbfeQ8y0zPM9cRHn8AF7dohuHETvSficVHtSm4xPC701bbtv8/fokULTJs2ze1jmVWbnZ2NsWPH5t529913m+AsR2+5qFhOTg66d++Onj17mvsZ4H3yySdNDaU6deqY6zRmzBg8/vjj5nGBgYGmFq6IiIiIiIgU3caNGzF37lwEBATg1ltvRdWqKvfoTQrYinhJ2uZ/coO1lJOVjaTVqxSwFZ/CAOrmzZsL/XhmzuZn5MiR5uKqadOmmDlz5mm3V65cGW+++WYRtlZERERERERc/f777/j666/N782aNTNlEMS7FLAV8ZLgFi0REBqCrNQ0c90vMADhF1+q90NEREREREREShwXcl66dKm5UPv27XHddddpMecyQDVsRbwkICwMrT+ejCrNz0HkOY1w3puvI6RePb0fIiIiIiIiIlKiuNAzs2odwdouXbqgR48eCtaWEcqwFfGiiAs7o+233+o9kCKPgor2lYiIiIiISHFxYec1a9aY36+99lp06tRJO7MMUcBWxMcCdTk7tsOWmYGAc5rCEhDo7U2SUmS1Ws0lMzMT/v46fBcG95Vjv4mIiIiIiMhJHTp0wLZt28wC0uedd552SxmjHr+ID4l9723s+WQabNk5qHHJhag98VVY/AO8vVlSSiwWC6KionDgwAE0aNBAQchCTPHhvuI+474TERERERGpyNLT0xEUFGT6R0wCGjp0qPpKZZQCtiJeZLfZgCP7GFkCatSDxc8v38cyq3bLW+8jPTnFXE9Z9A0q9+2PypddVopbLN5Wq1YtbN++HRs2bPD2pviE0NBQs89EREREREQqsvj4eEydOhXNmzfHlVdeaW5TYkvZpYCtiBfZf18C+6E95ndL1RrAxdfCks/U7ezUVKSnnAzWUk5WNlK2blHAtoLx8/NDs2bNTPaoatkWjCcfKoUgIiIiIiIV3dGjR02wNikpCevXr8fFF1+MkJAQb2+WFEABWxEvsael5AZrzfUTh2FJjAUio90+3r9SJYRWr4bUI8f5aPiHhiLiAhUFr6gUiBQREREREZEz2bdvH6ZPn27KIVSrVs2UQVCwtuxTwFbEa9++AMDqB9hy/r0tMCjfh1v9/HHeS89i52uvwZaRidr9+iD0/Lals60iIiIiIiIi4lO4qNisWbOQnZ2NOnXqYMiQIaZsnJR9CtiKeIklIBDW9l1hW7fC1LC1tuwIS2ilAv8mNMiOFsP7mcdbIqNgz8yAJZ8grz0zE7bYE7BGRsISrKkOIiIiIiIiIhXFunXrMG/ePFNKr0mTJhg4cCACAwO9vVlSSArYiniRpXZD+NVuWKjH2rOzYT96AJaAgJM3pKUA8ceAmDqnPTbnxAkkPP0wMg4cQEBUFUQ+9iz8GzXx9OaLiIiIiIiISDHZ0tMRP2cO4ufPh3XfPuypWxeRffogsn9/WIODz3pNDwZrW7dujd69e5v1UMR3KGAr4it4cA0OBdJTT163WIB8MnITP3gbm7/9FRmZmQjw90eT4JdQ463Jpbu9IiIiIiIiIuJWwqJF2D
t6NHLi47lICSw2GxL//huJCxbgwLhxqDd5MiJ69Cj23mvVqhUqV66MevXqmeCt+Bb3y9GLSNlc8b7z1bBUrQFLRBSsHS6HJTzC7WMPrlyD9IxM2O1AZlY2Dvy5qdS3V0RERERERETcB2t3DRqEnISEkzfYbHl+8vZdAweaxxWWzWbDjz/+iKSkpNzb6tevr2Ctj1LAVsSHWCKqwnppD1gv72fKKeT7uEaNYbGe+npbLLDUOr1sAtlOHEX2yiXI+es3U3JBRHy7RtUFF1xQ4IIDI0aMQMeOHXHJJZfghRdeQGZmprmPU6U6dOiANm3aoG3btuZy5ZVXluLWi4iIiIhUnDIIzKw1mGXlzqnb+Tg+/kyysrLM4mK//vorpk+fboK34tsUsBUph+rdcy8CmzWDNaoqAho0RP1xY097jD0xHtkLZ8C28U/krPkVOT995ZVtFZGzt2jRIhOMdQRgXXFV2Ntuuw3t27fHihUrMHv2bKxduxb/+9//zP179+5Feno6Vq9ebW7n5YcfftBbIyIiIiLiYaxZa8og5BesdbDbzePi584t8GE8j582bRq2bNli6tR26dIFVkcCl/gs1bAVKYfCGzVEh2mfInn3XoTWqomQmjVOe4z9yAEgOyv3uu3A7lLeShHxhIkTJ5og7F133YU333zT7WOOHDmCxo0b48477zQncdWrV0efPn3w1VcnB2o2btyIZs2aadVYEREREZESlrBggalZm1sGoSBWKxLmz0fUkCFu72b5AwZreb4fFBSEwYMHo0GDBp7faCl1CtiKlFNBVauaS76qRCPreCwy9uyFX0gwQjpfXJqbJyIeMmzYMDzwwANYtWpVvo+pXbs2Jk/+d+FBlkBgfavmzZvnBmwzMjLQv39/HDx4EC1btsRjjz1mgrzFkZOTYy6e5Hg+Tz+viJQOfYfLDx2HRUTO8jgaG1u4YC3ZbMiJi3N7V2xsLKZOnYq4uDiEhYVh6NChqFHj9GQt8U0K2IpUULasHGxbuBTxBw/D6u+PRpbKqDMw72PsNhvsB3YiNPYg7NnnAX5+3tpcEckHs2WLgvWsnn32WezevRuvvPKKuS0wMNCsInv//fcjPDwcb731Fm699VaTgRsaGlrkfb9169YSe7/Wr19fYs8tIiVP32EREano/KKiipRh61elitu7Fi5caIK1VapUMcHaKD6vlBsK2IpUUEdmzkLcwSOAHcjJysaur79DnVeRJwPPvup74PA+RB0/DgTYYO/S69/FzETE5yQmJpps3AMHDpipU9WqVTO3jxkzJs/jHnzwQXz++ecmsFLQQmb5adq0abECvWfK6OL2MLDMsg4i4lv0HS4/UlNTS3RgTkSkvIvo3duUOSgUmw0Rffq4vatv3774+uuv0atXL5N0IeVLuQrYcjonO6IcYaAJEyagXr163t4sEa9gwBWZ6UBAkNsgqy0r70qTNrvL6F56KuxH9v97Pe4YkBgLREaX2DaLSMnZt2+fyZpt1KgRZs6cmeek7v3330fnzp3RunXr3EXKGFxhHaziYEC1pIKqJfncIlLy9B32fToGi4icncj+/XFg3DjkJCQUvPCYxQK/iAhE9uuXe1N8fDwiIyPN7xERERiST21b8X3lKlVuzpw5pu4ea3jccccdeOedd7y9SSJeYc9Ih/3nebAtng7b97NgT0447THV+/dDcPVoICCA86FR6wqXGrb+AYCf05iOxQIEhZTC1ouIp6WkpGDUqFHo2LGjaRtdR+B37tyJF198ESdOnEBaWlrugCezWUVERERExHOswcGo51hfgv1sd07dzsfx8fTnn3+aRYa5/oSUf+UqYMs08OHDh5vfmRnEmnwiFZF9+3rYE2JPXklLgX3TmtMeE9i4Gdq9PRHNRwxA6/tHockLL+W53xIQCGvHy4GQcOQEBgFtLoUlJKy0XoKInKUFCxagbdu25vdFixZhz5495me7du3M7bxcf/315v4nn3zSZN727NkTF110EQ4dOm
SybpVFJSIiIiLieRE9eqDhrFkmg9ZwzIo99ZO3N/ziC/M4zp5dtmyZqVnL9Si4FoWUfz5bEuGLL74wFweuiPfxxx+b37nC9cSJE5VhKxWXzWUV9Zxstw8Lan8Jara/JN+nsdSoh7QGqTiQEoiYOo1Ou99+eC/ssUdhiYoxjxUR72Gt2bVr1+Ze7927t7nQDTfcYC75YcbtCy+8UCrbKSIiIiIiQETPnmi5axfi585F/Lx5SNi3DxF16yKyb19TBoGZtQzWfvvtt1i1apXZZZdccgm6deum3VcB+GzAduDAgebiavv27WaV6+eeew5169b1yraJeJulUQvY9+8EMtIAf39YmrYp1vNsGzsWB776Bjk52djy1SK0+PBDWE5NzbDv3wHbmp9P/s6BwPZdYanbxKOvQ0RERERERKS8YlA2asgQRAwahL/++gv127TJneXGmePz5883C+9S9+7dceGFF3p5i6W0+GzA1h1O4bzvvvvw2muvmRWqRSoqS1hlWK8YACTGAeGVYQku+mrtaXv3Yv+CRbBnZQN2G47+vAx1Vq5ExEUXmfvth/bmeTyvK2ArIiIiIiIicnYYrP38889NUqLVakWfPn1yFwiWiqFcBWxZb48LqzC7lrgA2SOPPOLtzRLxCgvrzkbXKPbf2xMTYc/+t5SCPScHObHH/31ApQjYE+NhT06GJTwclmaRTo/Nhn3LX0BKIiy1GsJSu2HxX4iIiIiIiIhIBcIgbXR0tKlXO2jQIJxzzjne3iQpZeUqYPv00097exNEyo2QJo1RtX5tnNhzALBbEFEjGpUv7Jx7vz0gFDmHDp8su5CUDIt/8L/3/b0C9r3bTv5+YBesgdfCUq2WV16HiIiIiIiIiC9hKcKrr74a7du3N4FbqXjKTMB23bp1GD16dG4hZdq2bRueeuopbN68GTExMXjggQdw1VVXlVr6OS/F/VvnnyI+KSAQzV6diBNTPkRifDzqjLoNlipR/36+d20FqsTkPjxnzzagUTPzu/34YcBu//e+44dhiaruhRchkpfj88vVVUVERERERMqK5ORkLFiwAL169YI/16KxWBSsrcDKRMB20aJFGD9+fJ4OdGZmJm6//XbceOON+PTTT7Fy5UqMGTMGX375JRo3blzi27R169azfg5HYWgRn2XxB4bfapYV2+rnD/z1V+5d4SfiELBxM9KOxCIkJgpZETFIPnV/leQMhJ04WT7BbrHg2JFYZKb++7ci3sZaUCIiIiIiImXBgQMHsHz5cmRlZSEsLMxk10rF5vWA7cSJE7FixQrcddddePPNN3NvZ6ZtWloaRo0aZUYVunTpgq5du5oV8saOHVvi28VFy0JDi75QkyODi8HaVq1a5a7uJ+Kr8vs8Hz90GJu/WwVbRgasQUFofmVfNGnTxtxnb3UesPVvIDUJqNUA1Wo28OIrEDn989ykSRMFbUVERERExOt27NiBmTNnmmBtrVq1cPHFF3t7k6QM8HrAdtiwYabUgXMpBMcHlh1qBmsdmFm7adOmUtkuBqbONtjqiecQKStcP8+HPvkUNk4v9/c3Pw9+8imq9+7leDBwXifvbaxIIYr4i4iIiIiIeNOGDRswd+5cM+OctWqHDh2KkJAQvSni/YBt9eru61qmpqYiOPjfRYyI15l1KyLe5xfgX+B1EREREREREXFv9erVWLx4sfm9RYsWaNCgAQIDA7W7xCizKUYsR5Cenp7nNl4vbpkCEfGsRvfejZCoKrD4+yMkKhKN7707z/22E0dh27UV9rRU7XoRERERERGRU1JSUrBkyRLze8eOHdGvXz/NApQ8ymxKHMsfTJ48+bRFYlgmQUS8L+zCzmg/4zNk7diOgEaNEdDo3+9mzpb1yFn2Hew2G6xh4fDvMxSW8Epe3V4RERERERGRsoALiw0ZMgR79+7FJZdcYkoiiPhEwPaCCy5AQEAA3nvvPYwcORK//fYbli5dii+//NLbmyYipwQ0bGwurlJ+WYKdi5
YgIzkVVerXQqNz28C/XWftNxEREREREamQsrOzceLEidzSoPXr1zcXEZ8qicC6HcywXb58OS688EK88MILePnll5VhK+IDdv+0EvHb9iDt4DEcXrMRx9b/4+1NEhEREREREfGKjIwMTJ8+HR9//DGOHDmid0F8J8OWGbVr1649rSzCZ5995rVtEpHiSUvg4oAW2G05sMGK1Nik3PvsmelAQiwQHgFLSJh2sYiIiIiIiJTrerXTpk3DoUOHTHJiaqrWeREfCtiKSPlRuVYNpG7bxvAsrDYbIpo0MrfbEk4g+6NXYY89Dgtr2974H1jrqS61iIiISEXG4MXu3bvNbMrMzEyEh4d7e5NERDwiPj7eJCLGxsYiNDQUN910E2rVqqW9K75bEkFEfFf1JvVQo24NREVXRe1z6iOsSoS5PWfJQtiPHwVsNtiTEpHzrWpSi4iIiFRUWVlZeO6559ChQwdcf/31Zprwo48+ijvvvNNkpImI+DIe0z788EMTrI2MjDTrMylYK4WlgK2IeFxQTDRqXXYx6l11CaI7nA+/4KCTd2RnI/lIPA6v342Efcdgy8rW3hcRERGpoN566y2sWrUKU6ZMQVDQyfPF4cOHY/v27Wb9El/3ySefYNKkSd7eDBHxgqNHj5pjW3JyMmJiYkywtmrVqnovpNAUsBURjwu68hpYw8LgFxQE/3r1EXB+W3N7ckgMNi5dj51/78A/yzfiWIqqsoiIiIhUVF999RWefPJJdOrUKfe2jh07mgWnf/zxR/gqu92Op556ClOnTvX2poiIl0RFRaFmzZqoV68ebrnlFlSqVEnvhRSJoiUi4nH+desj/N5xsKelwRJeCRaLxdx+6KtvkQML4B8Am8WKQz8tR50H9QaIiIiIVETHjh1zOz2YWWjMSvPlgG2XLl3Qpk0bHD9+3NubIyJe4O/vjxtuuAFWqxUBAQF6D6TIlGErIiXC4h8Aa6XKucFac8DhEYfX/fwAqwV+1n/vExEREZGKpXXr1pg/f/5pt3MacatWreCrGKC58sorvb0ZIlLKli9fjm+//dYM2hBLvShYK8WlDFsRKTX1bxmOhL/XIz0hEQEhwWhw803a+yIiIiIVFBcYGzFihKljywXIWLd2586dOHz4MD766CNvb56ISKEwQPv9999j5cqV5nrTpk3RsGFD7T05KwrYikipCel4Adq8+X9I27geQfUbIPjiLrn3pW3cgIwdOxHarh0C69TRuyIiIiJSzrVo0QLffPMNpk2bZuo9Zmdnm8zUm266ySzSIyJS1uXk5GDhwoX4+++/zXUewxSsFU9QwFZESlVQ2/bm4uzErFnY8vxLyMrMQEhEBFpNmYyQc1vonREREREpxw4dOmQW5bn77rvz3J6RkYF58+ahb9++Xts2EZEz4cyAL7/8Elu3bjWlAHv37m1qV4t4gmrYiojX7XrvA2SkJMOWmYmUE7HY+8Zb3t4kERERESlh3bp1w8MPP4zMzMw8tyclJZlyCb6uf//+uO2227y9GSJSAtLS0vDZZ5+ZYC0XGBs8eLCCteJRCtiKiNfZ0lNhz86GPScH9qxM5KSnenuTRERERKQU6j7+/vvvuPHGG3HkyJGzfr7jx4/jnnvuQYcOHXDhhRfihRdeMNOVz9a6detwwQUX5Llt27ZtGDJkCNq2bYvu3bub+pUiUnHs27fPXIKDgzFs2DBTt1bEkxSwFRGvq93tIvgF+JtpJAGhwajd42pvb5KIiIiIlDCe+3388ceIjo7GgAED8Oeff57V8911112IjIzEr7/+ivnz55uf06dPP+1xzIhjFq8Dgy4nTpxw+5yLFi0yC6M5ZwHz99tvvx1XXHEFVq9ejccffxwPPfQQduzYcVbbLyK+gwHaPn364JZbbkG9evW8vTlSDilgKyJeV/2Ky3Ben0vRtFsbtL7+CoS3au3R57enpcC24hvk/PAlbJvPriMgIqXLlp6O2OnTsefGG2G95x7zk9d5u4iI+H6GbX
h4ON59913069cPN998M2bOnAk/P79iZcHu3LkTTz75pMl4q169OiZPnmwWAHL1/vvvY+TIkSZou2fPHgwdOhQLFiw47XETJ07ERx99ZALBzlatWmWmQ48aNQoBAQHo0qULunbtaoLEIlK+624nJCTkXme9Wh5rREqCFh0TEa+zhAajUt0aqFStMiwRVWANPHmSnnX8OFLW/gW/8EoIv6AjLNbijTHZ1y6D/eiBk79vXgt75ShYajXw6GsQEc9LWLQIe0ePRk58PGC1wmKzIfHvv5G4YAEOjBuHepMnI6JHD+16EREfzrB1/Bw3bhyaNWuGJ554Ine19aJYv369yXh7++23MXfuXFNTklm7d95552mPffHFF03pBAaIjx07ZrLkmEXritOcH3jgAROgdcZM2iZNmuRuPzVu3BibNm0q8naLiG/ggBAHlCpXrmyOF6Ghod7eJCnnFLAVEa+zZKTCr0Gj3Ov2lCTkxMVj38uvImX7dvgFhyBmby9E3zCwWM9vT0k87fq/p9ciUlaDtbsGDfr3Bpstz8+chATsGjgQDWfNQkTPnl7aShEROdsMW2c9e/ZE/fr1T8toLQxmva1du9bUlGU92f3792P06NGIiIgwGbTOAgMDcf/995tFwVhC4dZbb3X7nPllzqWmpposXme8zqxbESl/Nm7caAaCWBO7UqVKxZoFIFJUKokgIt5X+99gLSxWWGrWR9LaP3Fg8Tc4vmEDjqz5HXs/nlLsp7c4P7+/PyzV657lBotISWK5A2bWGi6d+VynbufjVB5BRMQ3/fjjj4iKispzW6tWrTBnzhxMmDChSM/FIGxQUBDGjh1rfjLj9aabbsK333572mO5YBhLIjCg265dO1ODMi4urtD/i5l16S6leXhdGXci5Q8XRvzyyy9NsPbcc881iyTyGCNS0pRhKyJeZ23cEvawSrAnxsFSrRYsVaohZfNsZGf8eyKcfOiQ+Zlz7CgyVy6DJSwcQZdeDktg4Jmfv0UHUwbBnpZsgrWWylVK9PWIyNmJnzPnZBmEM7HbzePi585F1JAh2u0iIj7gu+++w+WXX25qvzJrjRd3wsLCivS8DNDabDZkZ2eb4C0xwOLOG2+8YYIud999N7KyskyQd9asWWYhscL+L9bHdbZ9+3ZTJkFEys8MgF9++QU///yzud6+fXtcd911sBazTJ9IUSlgKyJlgqVGPXNxCG3SBAERlZGTmmZqV4bVq4OchHgkv/A4bLHHzWOyN/yF8DEPF+756zRSGQQRH5HAhV94Muwog1AQqxUJ8+crYCsi4iPuvfdeLF++HFWrVjW/54f1Yf/5559CP+/FF19ssnVfffVVUw+XJRFmzJjhNgj72muvmYAx8ef//d//FWmK8wUXXGD+7r333jOZur/99huWLl1qsvBEpHxYsWJFbrCWCwtedtlleepWi5Q0BWxFpEyKuvpq1F7zO479tBQB4WFo/MgjyP77j9xgLWWtXW0yKTTKKVK+5MTGFi5YSzYbcoowjVVERLxr8+bNbn8/W8yq/eyzz/DCCy+Y4AoDsIMHD8YNN9xw2mMdwVoHLlBW1P/FDNunn34akyZNQrVq1fDyyy8rw1akHGndujX++OMPM0DDi0hpU8BWRMqsho89bi4O2ev/xOHdh3D8YCysflbUb9MUkRrlFB80aNAgdO7cGW3atMF5551nOpXMIuKq2CtXrjQdzorMj/UMi5Bh61dFZU5ERMrD9OPdu3ejSpUqZiGw4qhTpw7effddj28bgzVc0My1LEJFb69FyhvnZCAuLnbnnXeeNsAjUloUsBURn5GUmo0jx1NgZxDHbseeAwmoo4Ct+CAGa+fNm4f3338/z9QqdjT79OmDii6id29T5qBQbDZEaJ+JiPiUxYsXY9q0aZg4cSJq1KiBnTt3msDI3r17TbCEWbFPPPGEZlGJSKlJTU3F9OnTzQANFz8kBWvFm1QtWUTKLHt6GmzbN8F2YLe5nnHoMCwRVWCNrm
4u2XaLGQU1j81vJXlPbUsJP79ULM2aNUNISAjefvttUxuLC7C8+OKLZqGUevX+reVcUUX27w8/ZlcVckDGZOSKiIhP+Oabb8wiXzVr1sxdHOyhhx5CXFycqQnLrFXWhP3000+9vakiUkHEx8fjo48+woEDB8x5eWZmprc3SUQZtiJSdoO12fOnwp6UYK77nX8BqlxyMQLen4ysU6vHR3VoD4stB7bfvoP96AFYKleB5cKrYAmt5LntiDsG2+ofgfRUWOqdA0ubS1RsXs4aFzt56aWX0KFDh9zb6tata6aBPvvss+jdu3eF3svW4GDUmzwZuwYOPBm0dTdg4nT7ruuvR9233kLVm28u/Y0VEZEimTJlCu67777cxcBYx3bDhg2466670LVrV3Pb/fffj//973+45ZZbtHdFpEQdPXoUU6dORVJSEipXroxhw4blDiaJeJMybEWkTLLv35UbrKWcTX8hKLoq2n40CfVvHobG996Flq+9DPv2DSZYa/4mMQ72Db97djvWLgPSUkxgyL5nK3DwZLavyNmeGLIulitmGx0//u/CehVZRI8eaDhrFvwiIk7ecKqemOMnb28wfToiBw0CsrOx7447cGj8+JMlU0REpMzaunUrrr766jwrsbM8ULdu3XJva968uSmPICJSkvbt24ePP/7YBGujo6MxatQo81OkLFANWxEpm4JD81y1hJy8HlKzBhrcMTr3dltWJpIOHUPSwaMIqVIZUdVqeXQz7Fku02GyMjz6/FIxcbGx559/3mTZ1q5d29y2f/9+TJgwAW3btvX25pUZET17ouWuXYifOxfx8+YhYd8+RNSti8i+fRHZr5/JxI3o2xdBjRrhyEsv4cjLLyNj1y7UmzTJ3CciImUPy0w5FvWh1atXIyIiAi1btsy9LSUlxZQOEhEpKdu2bcOsWbNMSTKuIzFkyBCEhubtg4p4kzJsRaRMstZpAL/WnYCAQFgqRcLv8uvcPi4pw4LNc77H3u9XYNv8JTh6OMmj22Fpct6/V1hqoWYDjz6/VEzPPPMMjh07hiuvvBIXXnihuVx11VUmu5YlEYpj3bp1ZpGEgk5KR4wYgY4dO+KSSy7BCy+8kKc+11tvvYWLL74Y7dq1w7hx40ymQVnAwGvUkCGoP306bG++aX7yuiMgy6ysmuPHo+6kSYC/P+K/+AI7rr0W2cpUFhEpk5g9yxq1xLq1K1euzC2F4LBw4UI0bdrUS1soIhUBs/gZrG3SpIkpg6BgrZQ1yrAVkTLLr1MXcynI0V9WIiMpA8ixAOk2HPlpBWoMGOSxbbA2Pg/2qOonyyJE14QlMMhjzy0VV8OGDfH1119j2bJl2L17twk6NmrUyARM+XtRLVq0COPHj89dhM8VT0Zvu+02DBgwAJMmTUJsbKypFcj6gA8++CBmzpxpOsf8ySwn3sbA8SuvvAJfUZX1xurWxa7Bg5Hy22/Y2rUrGs2bh+BzzvH2pomIiJNbb73VLDq2Zs0abNy40bRdI0eONPft2rULc+fONXVuWe9dRKSksAwL1484//zz4efnpx0tZY4ybEXEp1mzsmBPS4MtKRG2lGRYsrI8/j8sVarBUquBgrXiUZwOyoyim2++GcOHDzdZr8UJ1k6cONGsassAbH6OHDmCxo0b484770RAQACqV6+OPn364I8//jD3z5kzx2wDp4Oxti4DtosXLzZTUn1JpcsuQ9Off0Zg/frI3LkT27p2RfKvv3p7s0RExMkVV1yBN998ExkZGTjnnHNM/chmzZqZ+6ZNm2YGDx9//HF0795d+01EPFqOhee+Waf6izzv5swyBWulrFKGrYj4tCrRlRGblYaMrCwE2HNQLSLYLDqUvXUzkJ0F/+YtYPEP8PZmipQYTuF64IEHsGrVqnwfwzq5kydPznPC+uOPP5ppqbRjxw4zHcyhQYMGyMnJwZ49e9CiRQufeveCmzfHOb/8gl3XX4/U33/HjuuuQ9333zdlFEREpGzo0qWLub
hi5i2DtcUZwBQRyQ8z+Tkjbe3atdi+fTsGDRqk44yUeQrYiohPC0qLR+0mtZGZnAb/4ECE+Wcj7uMPcOCrb0zgtsZFF6La2Adh8dfhTsonZssW9YSV5Q5YisFR8iA1NTXP4i7MNAgMDDS3FweDvbx4kuP5CvO81qpV0fDrr7Fv9GgkzpuHvSNHIn37dsQ8+qhOzkW8pCjfYSnbSvI9VA1JEfE0lgabPXs2Nm/ebM4DmdmvQSHxBYpgiIhP86/fECGR4eZC1mrVsPWjz5B2LJZ5hEjafwiV+/RDcLOTmYQiFVliYqLJxj1w4ICZdlqtWjVzO4O16enpeTrjXJAsLCysWP9n69atKCnr168v/IPHjoUlLAzWadNwdMIEHP7jD9gffhgIDCyx7RMRD36HRUREzgLPbz///HMza4wJCVzP4dxzz9U+FZ+ggK2I+LTA6/rBHn8C2f9sgrVWbeC665H23gwTrKXMxGSkHjykgK2UKY8++qiZ8hkefnKgwSEhIcHc/tZbb3n8f+7bt88s9MLFzVgf0Pl/sxzCzp070alTp9xFX1hjl6URioMre3s6S4pBZAZ6WrVqVbRaY++/j9gLL8SB++6D9dtvEZaSgnrTp8M/Ksqj2yciJfQdljKHsy9KcmBORMQTkpOTMXXqVLOWQ1BQEAYPHlzsc1sRb1DAVkR8msXPH8HD7si9np2SgqDq1ZFx9CgLdcI/MhLBdet5dRtF6M8//8TevXvN7/PmzUPLli1PC9gyaLp8+XKP7zAuHjZq1CgTkH3uuedOmwbWt29fs+hL586dERUVZRYy42IvzmUSioLBXl48iXV3eSnOc0ePGoWg+vWx+6abkPLrr9h5xRVoOHcugho18ug2ikjJfIelbPHk+8dASlFL+4iInAnbmxkzZphjDGeMDR06FDVq1NCOE5+igK2IlCv+YWE4Z8zdODBrtqlhG3PF5Qhp3Aipm/5BdmwsQpo3R0B0VW9vplRAAQEBmDBhQm7Q4vXXX88TOOXvzEq95557PPL/FixYgPHjx5vFFbjIAqeCHT16FF999VXuYxo3bowvv/wSN9xwA2JjYzF8+HAT3L300ktNndvi4v/h6/Uk1t6Nj4/HwYMHixcsOPdcVP78cyTeeisytm3DlksvReX33kNAu3Ye3U4RKaHvsJQZjhXWPWHgwIF4++23Tea1iIin8Lz6mmuuMefAPM9lQoKIr1HAVkTKnWoD+iOic2fYc7IRWKcOEpb8hPhvvjP3Wb/9HjXH3I2A6Ghvb6ZUMOyMrl692vzerVs3Eyj15MnjBRdcYIKzDr179zYX4okqL/lh8OSuu+4yF0/ggmWceubp6dT+/v7meYs9nbp1a4QsXIjjI0Yga/16JAwfjqjXXkPoqf0kIiXHI99hKRM8uVgPPxNa/EdEPIVrMPA8lOrWrYs77rhDxxjxWQrYiki5Y8/KhN/x/ewdAlWjEPvzLzj851/IzkhHWHQ0Kq/fiMjLu3p7M6UCW7JkCco7T3fA+XzOl+Lyr1EDMbNnI/aee5D27beIvesu5Ozdi0p3360TepES5KnvsJQvzIAbOXKkKcNTu3bt0wb7RowY4bVtExHf8s8//5iMWpY/qFmzprlN7Y34MgVsRaRc4VTz7MVfwn70oLlu27oBsdu3Iys11VxPPnIUKYcOIWzTBtjiYuF/TjP4xah2mpQulh945513TEYsp5byc+ts4cKFektKkDU0FFUnTUL8888j+YMPkPDf/yJ7925UeeklWDxcykFERPL3zTffmHru7uq3M9CigK2IFHatCAZreU79+++/584yE/FlCtiKSPmSlpobrCV7UjwCa9ZEemwcbBkZCKhaFbZjR5D68TLYMzNhqRKN8Lvuh1+1GK9utlQsjz76KP7++2/06dMHlSpV8vbmVEgWPz9UGT8e/vXrI/6pp5AycyayDxxA9PvvwxoR4e3NExGpECrCjBMRKTkM0P7666+5x5K2bduiZ8+e2uVSLviXt3
olDzzwAI4fP25WG/3vf/+bW79ERCqIoCBYgkNhT05gCw4Eh6LGtVcj2w6zCFlARGWE7tsG25EDXAEF9rjjyFqzEtbLrgRysoGwyl6fOmPPzgJSk4HQcFj8le1XHv3222/4+OOP0U4LXnldpVtugX/dujhx553I+PVXHOnXD9U++cTcJiIipRNwWbZsGXbu3In+/ftj9+7daNSokcm8FREp6Njx7bffYtWqVeb6JZdcYtaJ8HZfTsRTytUSrV9//bVp3KdPn25Wvp4/f763N0lESpnFzx/Wc5oD8ceAuKOw1q6Dqq1aopY1HdXiD6J+3WoIsKUDyYlAahKQkgT7/h2wffs5bD98CfvvS06bnl6a7CmJsP04G7Ylc2D7/gvYE+O8ti1SciIjI5VZW4aEXHEFYubOhV/16sjeuhVHevVChtMCbiIiUjKYaNO3b1/ce++9ePnll5GQkGBKBvXo0QO7du3SbheRfBeynDdvXm6wlnWwr7jiCgVrpVwpVwFbNvb33HOPCbYcPXoU0VoFXqTCsWdmAId2wa95S/i1aAVLagJS330NQcf2o5I9HbblP8JqtSMpKwfHE9KQHRIE/8yEf//+4G7g+CHvbf+2dUBayskrGWmwb/nLa9siJWfUqFFmFsihQ977rElegS1bImbhQgS0aAHb8eM4NnAgUr/+WrtJRKQETZgwwazkzpknjgXHGLg955xz8NJLL2nfi0i+UlJSYLVa0a9fP1x44YXaU1Lu+GxJhC+++MJcHMLCwsz0Uj8/PwwePBgnTpzA3Xff7dVtFJGygTVr/71iw7FjiTiezuioPxJjMxFss8D/yDHY0jMRVPfkiqJlhqb0lEtz5szB9u3bzbStgIAA+Pv7n7ZwgpQ+/1q1EDNnDk785z9IX7IEJ26/HdmPP45Kt9+ujA0RkRIsERQcHJx7W+XKlfHQQw/hxhtv1D4XEbcY9xk0aBAOHjyIBg0aaC9JueSzAduBAweaizuff/45fvjhB5O99Oqrr5b6tomI91gCg2Bp0QH2TWtOXm/QHIGt45G+4ldz3RoUhCRrZWRlHoYtMxsBIf44+NduWDZuBOxAaJOGqNnzZvh5a/vPOR/2owdO1rANDoWlWRsvbYmUpJtvvlk7uIyyhocj+qOPED9+PJI/+QQJzz+P7N27UeX552FxCayLiMjZycrKcjsglp6ebjLnREQcEhMTsW7dOlx88cXmuMH1ihSslfKsXPU8GKhlltL111+P0NBQM+oiIhWPten5sNc7B7DlwBJaCcFNWsEvMgI5x48joF1H2D+bh6zMkxm22bHJCDweh7BKlXkDUo/EIm35rwi/vJtXtt0SVgnWKwacLIsQHKYAUTnFqVtSdjEwG/n88/Bv2BDxzzyDlKlTkbN/P6q++y6slSp5e/NERMqNrl274s0338yTZHPkyBFTKoELCImIOOpdf/bZZyZoyzhP586dtWOk3DurYUt+WbiaJ1fyZP0Qb7vmmmvMKoHDhg3Dhx9+iPvvv9/bmyQiXmJhdmroycCKJbwyAgffipC7H4H/RVegSq1aCK0WjcBKlRDZoD6C/P2AjHTY09OBHBsQEOj1hdMs4REK1pZzbK84U6RDhw7Yt2+fqdU3ZcoUb2+WnMLMjUq33oqqH3wAS3Aw0n/+GUf79UP2wYPaRyIiHvL4448jLi7OtIXMqr3ppptMuSBm3j7yyCPazyKCAwcO4KOPPjLxp6pVq6JFixbaK1Ih+BfnyzJ16lT89NNP2LNnT577GjZsiMsvv9zUkGXxeG+suv3BBx+U+v8VEd8S2rwpqiWeWmiM02mqhiLp9z8Amx0htaIR2lZlCKRkLViwAM8995wpjbB161ZzW+3atTFx4kSzcOaIESP0FpQRoddcA//Zs3FsxAhkbd6MI716odqUKQhs1crbmyYi4vOqVKli+parV6/Gtm3bkJ2djSZNmuCiiy5S7XARwY4dOzBz5kwziFOrVi0zqMPZ1CIVQaEDtklJSa
Ym7Ndff23Sz2+55RazemdUVBRsNhtiY2Pxzz//4Pfff0fv3r1x7bXX4uGHH0ZEREShnp+1SEaPHo1Vq1bl3sZG+6mnnsLmzZsRExODBx54AFdddRVKQ05OjrkU92+df4r4svL4eY7s3ROWSuHIPhGL0PNawu/Xr5Cek4WcjExUatEYOccOwR7cyNubKSXA8Tlmu+VNkydPNu1br169zIwQ4uwQLrTCqaEK2JYtgeefj+oLFuD4zTcja8sWHO3fH1XfeQchpXROIiJSXg0fPhxvvfUWOnXqZC4O7FuOGjUKc+fO9er2iYj3bNiwwRwDeN7eqFEj3HDDDaZurUhFUeiALVfpHDBgAJYvX46QkJDT7m/cuDE6duxoGt2EhAQzCjJkyBAT4D2TRYsWYfz48Xk60JmZmbj99tvN//3000+xcuVKjBkzBl9++aX5XyXNkfF0NtavX++RbREpC8rd57la9MlLdhayf1mDzCPHzM1HDxxGSOvOsJ9I9PYWSgnavn27V/cvZ6i0aXN6Jne7du1w9OhRr2yTFMy/Th3EzJ2L43fcgYxffsHxUaMQOX48Ko0apV0nIlIES5cuzT2vZLLPe++9d1rGHNtJzuwUkYopPj4+N1jbsmVLs/6D1iiSiqbQAdvp06ejUiEX2mBW7W233WZKI5wJp3+uWLECd911l8kqcmCmbVpamhlZZR25Ll26mKL08+fPx9ixY1HSmjZtWuxUe2Zw8SSkVatWOqiIzyvvn2dbdjb+jq6FYJv15CJlUdFoUKsuIlqc6+1NkxL8PHO6pTeDtnXq1DHb4Vo+iOWGvFFSSArHWrkyqn3yCeIefxwp06cjfvx4ZO/ejcinn4alHB4fRURKQr169fD666+bEkC8LFu2LM85Jvt+7IcxoUdEyi9bejri58xBwoIFyImNhV9UFCJ690Zk//6m3GWPHj1w+PBhM3ubxwWRiqbQAduCgrUsl7B27Vo0a9YM1atXz72dUzvPhFNAWerAuRSCo1YJO9TOX0xm1m7atAmlgScNZxuc8sRziJQV5fXzzNcUWrce0gODzXWLvx/CatYol69V/mW1ntWam2eNg5RPPvmkaeuYOfDNN9+YhcfmzJmD559/3qvbJgWzBASgyn//C/8GDZAwYQKSP/4Y2fv2oerbb8MaFqbdVwFxwcrURYuQ9u23sMXHwxoZiZDu3RHas6dZsE5EcNq6J45SB+wLsiRCYcvoiUj5kLBoEfaOHo2c+HiemLNeGex+fjjy/fcIGzcO9SZPRrsePby9mSJeZS1uuQDW3VuzZo0J1rJUAjNqWV+WpQuKwjnA6yw1NRXBLie5vM6sWxERT2p82yhUaXM+KjdvhsajRiAoOlo7WErUddddZzqonArKLKJJkyaZtvWNN95A3759tffLOA4mV/7Pf1D1vfdgCQpC+g8/4OiAAcg5fNjbmyalLO2773CgfXvE3nefCdhmrFxpfvI6b0/7/nu9JyIF+OyzzxAWFmYWGyMOXs6YMaP8leISkTzB2l2DBiEn4dQi0DYbcvz8sKZXL/w6dChSs7Kwa+BA8ziRiqzQGbbOJkyYgPr165vCzxwdTU5ONlNZWLeWJQ5mz5591hvGDmx6enqe23hdKwKKiKcFRUWh4c1D3d6XefAQjk2bjuyERIR3bI+qfXrrDRCP4AKevIjvYgalX82aOD5yJLI2bMCRXr0Q/cknCGzRwtubJqUUrGUt41yOtRhO/bQnJprPRvSHHyLk6qv1noi4wYHLe+65x5RI4GxKLirEtUyYpPPKK6+YAU4RKV9lEJhZa9jt5kdWYCBW9++P4w0awJqdjYSYGMTs3m0e13LXLlg1W0UqqGIFbP/++2/MmzcPUVFR+Pnnn3H55ZejWrVqJivogw8+8MiGscHmKtrOWG+QZRJEREpS4vIVSPptFfwqVULW8RPIiYsztyf9ugLBjRohrNV5egPkrG3cuBF//fUXsrKyTA0/ZyNGjNAe9hFB7duj+oIFOH
bzzcjevh1H+/Uzmbchl1/u7U2TEi6DcOL++09dsefzIDvTsc3jav/xh8ojiLjBZJ9rrrkGrVu3Ntm2nFHJ/iX7mu+8844CtiLlDGvWmjIIp2SEhmIls2lr1oRfRgYumDMH1fbsMffxcfFz5yJqyBAvbrGIj5VECAoKMiOfKSkpZlT00ksvNbezIHRhFyY7kwsuuAABAQFm1VD+r19++cWsKNq7t7LbRKTkpO/chaNTPkXcjz8i7ptvkLxqdZ77bckp2v1y1ti2sZzQ//73P3z66aemk+q4TJ06VXvYx/jXr4/q8+Yh6KKLYE9JwfFbbkHyZ595e7OkBLFmrZ1TOfML1jpwUaWEBKR+9ZXeDxE3/vnnH4wePdrMomRfr1u3bggMDMTFF1+MvXv3ap+JlDNcYMzUrAWQEhGBZUOHmmBtYEoKLpk+PTdYa1itSJg/33sbK+KLGbaXXHKJWSyF9YZCQkLQtWtXU7v2mWeeMdm2nsCGmhm2Tz/9tKntxwzel19+WRm2IlKiUv/ZhEO//ooslmSxWFApOhohTc8x9/lFRiD0PE11lrPHwOyYMWNw5513aneWE1xoqtrUqYh95BGkzpqFuEcfRfbu3Yh4/HFYvLzInXge69Q6Fkk5I6sVad98g7ABA/RWSKkr64vicZHquLg406fkLM7bb7/d3L5z504zm1NEypec2FjTdiZWrYoVgwcjo1IlhMbHo/PMmQg/NasxF2vbut4mUoEUK2D77LPPmqwgFoXnVBU2sOvWrUPHjh3x6KOPFjujdu3ataeVRWCnVkSktKRt24JsLm54KmsqIzUZNe64DdkJCSZw6xcerjdDzhoX1uTinVK+WAIDEfXqqwho0AAJL7+MpPffR/aePYh6801YQ0K8vXniQQx8FSpYax5sMwHbQ926mWxs/wYNTv50XOrWhSUgQO+PlEidZZbkMNngjgEGDiAsXoy48eNR9fXXEXLVVV7d8yyHMG7cOFMKITo6GhdddBG+/vprvPDCC1qEU6Qc8uNAjNWKwLQ0+GdnI/DoUXSeNQshycmnP9hqhV+VKt7YTBHfDdhyyoprYNYxGioi4ssCo6MQWDkUtswsc5IQFFkJh1etRlZCIqra7Ijq0M7bmyjlAGem/Pjjj7j55pu9vSniYRaLBZXvvRd+9eohduxYE6g7dv31iP74Y/jFxGh/lxOWsLCi/YHdjuytW83FbYe0Tp28QVxHUJcLsISGemy7peLwlUXx2KesV6+eSQQaMmSIKYnHBa2HDRtmSiWISPkS0bu3KXMQnJqKi2bMgH9GBgIzMtw/2GZDRJ8+pb2JIr4XsB08eDDuv/9+kwlbGKw5+/bbb2PmzJlns30iIqWqao9eiPn+G8Ru2Qm/oEAENmqM2N//MPclbduOwKgqCG/UUO+KnJVGjRrh1VdfNUHbBg0amDJAzp544gntYR8X1rcv/GvVMgGTzL//xpHevVHtk08Q0KyZtzdNzgIXCEydPx8Zq1YV6e8inngCAc2bm4xrXnL4c/du8zunrOfs3WsuGcuWnfa31mrV3GfmNmwIa5UqZpBAxFcXxfPz88Pw4cPz3DZo0CCvbIuIlCwutms/5xz4R0YiJyEBoYmJ+T/YYoFfRAQi+/XT2yIVVqEDtqwly7q1GRkZ6N69u1lojCULWMeWEhMTsXnzZqxevRoLFy40t3Mqi4iIL7FWqYrGE/8P9TevhzWsEjbN+QZIST15p92OtIMHcwO29swM2A/t57QDWKvVdPt89rRU2I8ehKVSJCxR0aX5UqQM44Kd559/vgn+7Nq1K899Cr6UH0GdOiFm/nwcv/lmZO/ahSN9+yJ60iQEn1qsVXxL1s6diHviCWT88svJGwpTw9ZigaVyZVS65Ra3ATEeA2xHjuQGcrMdgdxTwVyWXrAdO4ZMXtasOf3pK1XKNzPXr2ZN1U+u6IvinYnTonjeqrF8xx13nHGRThHxfcuXL8cPP/xgznOH/O9/SLnlFtNGuh1UOj
UQWW/yZFjLQK1tkTIfsG3evDlmzZplsoFYV5a1a202G/z9/c3JZk5Ojhkh7dSpEx588EFceeWVJbvlIiIlhMHXwFMB2Mr/7MaJX5fDnpUFv8oRCG/cODdYm71gOuzxJ8x1v45d4Hd+pzzPY09ORPb8abCnpZgTD//LroO18bl636Tc12eP58I2TgttsTYhF5bJzs5GLBebcBFzqlQAF57JysrKcx//jn+flpaGpKQkHDt2LPe5mZkcGRlpzkeOHz9+2vNWrVrVnJskJCSYAWdn4eHhpsRTenq6GXR2xnMbx2I3/H88z3HG+/gY/h3/3hmfk8+dmZlp9gPCw2GZMgUpzzyDnPXrYR82DFVefBHpV11lttsZXwtfE6cDs86xs4L2ITs/XJyVeB8f424f8jn53M6CgoIQERFhzuNOnDh5PHPGmpLc33wtfE3OKlWqZNYxcLcPOa25yqm6c0ePHj3teQvahxz05yV3Hzrh+8n3lfieF2Ufclu5zfyM8bNW6H2YlQXLrFlIf+stpPv7I71uXYQNHgz/evWQ8MILCEhPRxgDq1YrEk89x6knNT8aT5hggrXuPt9mH9aogcyICKQ0/Hf2Bv8yNCAAEVYrsnbvxpFt25Bz6BByDh40P7M5eLh5M6xJSYjftw9Zx44BTgHd4ORkBGdlwda4MdKbNIF/zZomgMtLUJ06qHbuubAEBbn9fPN94/vH7xu/dwV+vp3wc8LPC/GzxM+UkZmJtGXLEPDTTwjbtw8H6teHvWtXhHDg4tTsAk8eI5z5xDEin314xs93cjLsaWmwpaaajNrArCyE5+QgKykJ+2fPRnaNGnkCIZFHjpifSVWrIsffP29Jju+/NwHbwh4jXLfrbDiOEw78DOzZswdbtmzBLQzoiIhP4/Hx+++/N4vU04UXXogmV12FxNBQ7B09Gjk8DjrV2OZPZtYyWBvRo4e3N1/Ed2rY8mSWX7BLLrnENKb//POPOZngyQVPcps1a5abcSsiUh7UaNoItqU/IDszHZHVaiO4SoS53b53R26wlnL+Xn1awNa2bdPJYK35Azty1q1RwFZyMSjBwO327dtNm9qkSRMzDbRu3bo+v5e+++67PAGOpk2b4uqrrzaBAA7+urr77rvNT2ZeHDkVVHC46qqrzPnFjh078Mcff5iLA/dVnz59zP5z97yjRo0yQbply5Zh9+7dee67+OKL0bZtW+zfvx/ffPNNnvt4TnPDDTeY37/44ovTghOss8hAz5o1a7Bp06Y897Vr184smsMg5bx58/69o1MnhLRti6tffRVxDz6I7x96CKlOQW3q27cv6tSpYxZy/fPPP/Pc16JFC3Tr1s0EllxfK8/D/vOf/5jf2SliAMl1UR9+vhgAYYaLM5bk6NmzpwkeuduHt912mwkQLV261NSYdNalSxe0bt3aBFf4f51Vr14dAwcONL+7e96hQ4ea4NNvv/2GrS51XbmILUtwHTp0yMzacsbAEWtbEvevazBswIABqFmzplnIlivOO2vVqpWpH82gn+s2MUDpWI+BnwfXoGGn+fNRMyMDB/v3x/r69U/eeOAAcMstqLVjBzp+8QUywsOxdMSI015riyuuMD9/+uknHDx4MM99l19+OVq2bImdO3ea+53VqlUL/fv3h/9552EBSyUwAFy79skLgOHTpyMkNhbfrliB3S5BtnOXLUPT5ctxJCsLq1u2PHkjA5K7d6PS77+jW69e8KtVCwuGDEG2c/AOwMCePVG9QQPzGVy/fn2e+zgzgLPsGLibPXt2nvsYNL311lvN71w0ip9VZ503b0bMzp3YHBmJLQz8OX0/PHGM2LZtmynJ5qwsHiPaNmmCjvXq4cChQ/jqn3/y3Mdqxf1TUmBPTsb8qlWR5vLeXPrjj4jeuRMb2rTBto4d89xX7++/0XbxYiRGR+Mnvg8XX5x7nzU7G70mTjS//9GrFxIYzHXSeeNG1AIKfYxgkJzff0948cUX3d7+7rvvmn0vIr6Lx8
YFCxbktsdM6uOxlSJ69kTLXbsQP3euqWmbExdnFhhjzVqWQVBmrQhgsbsOCeeDJ64PPfSQacTZMWDQ9rnnnssd8S4vOLLMQPS5555rRsCLgyPQrM/Spk0bM2ov4ssq+uc56c3XYE862enk4TKzak1kZ9vh729FJSTA6n9yn1gqRSDghryLY+Rs/BM5K5fkXrfWqgf/61SXrSx8nhkcYJDobI71Z4NBkJEjR5oOPQMCPKHldjHQNm3aNLNdvtyGMnDj6QxbBnK43xqybqcvZdg64XYHTJmCxNdfN5mYAZddhoixY02mIynDtuxk2GYeP46kyZOR/uOPJz8v/v6o/thjsF1xxWnPyxzRgGXLkPztt4hnNm6lSgi66CKEXHKJySDl8/L5882wDQlxmx3qyFLm5881CH+mz3doSAiC4+ORsmMH4llegdm5py72fftQ6VQgLCEmBnaXGrjhJ04gMCICmc2aIbtevZOZubVqmZrM4Q0aoHK9euZ1nCnDNnX5csQ/++zJO+12hMXFISAzE+lhYUgPD8/NQI586ilEXHZZ2cqwtdsRarUiJCcHafHxSDxxAra0NJPJyoxWv/R0RPD3lBScYJZrejrspzJd+bPS8eOwJCUhxW5HOssOMFP5VMZxUEqKWQ09KzAQKS7ZpVabDZVPvdc8RjBr25ljH6aFhyPDkSTj5wdrSAiCrFaEWSywh4cjLjkZNpeAeUEZtlXbt0et994rUoYtj30l2YYyWMtgu/MgXXnniX5oSajofQEpHh6rv/zyS3O+zTawV69e5pxX8qfvWsWQWoRjvX9RRj+ZFTBx4kRzQvb++++bRVEmTZrkiW0WESmTLEGBsJ/q/6UdPILUXYdhrRIFhmjsVcMRWc0flqAQ+HW59rS/tTZvDfuBPbDt3QlLpcrw63wyy0rk5ZdfNlkGrPXuCD6yA8x2lfd9/PHHPr2TGCBhAMUVg5wFDfS6To11F2xj8Mu1w8h9WNDzMtiQH26nu211cATx3GGgiBd3GChyu00PPGDqi+LBB5maB+zciaoffQS/U0FIYsCXF3fOtA8dgWZ3eFKY34kh92lBz8v3tLj7sKDnLdY+PMURHHSnoH3IQGh+z2u32RC4eDHSJkxAcEICgi0WhA8fjoiHHoL11OfI7fMOGGCmlFfPd4vO/PnmxR12dIv1+a5UCRF16yLCXd3c48dNjdyoU7VyHTVzTd3c7GzYTpyA/4oV5uLA0DiHNpLCwkw5iNyauU6Lodmzs2HhgEdYGNIefRSRHAxxGfAITkkxl1MvDpZHH0WlU0G54h4j7JmZCExPR1RmJmwsFcAs1ZQU83tKaqr5Gey47VQGKwOvsXzsqftz7+MlNRUM3+YN65/EIzZfkSNcHeDmMaeKQYBDMSeHY/7F8hiW6GgEh4UhNCwMlvBwWENDT/7k9VOXiFM/rSyrwlr5/Om4zenvEBR0Wu3ziC+/ROx997ndV5XclD6JuOqqIh0jGKw9fPgwStKKFSsKPK6ISNnGQXYGa3lcv/76681MCBEpmkIHbJlZy6kpnApFnFrXr18/k/3gurq1iEh5EXxtL6R9+blZPMwWEYXEHXuRvv8gAkNDEdTgcgSMuD3fRV0sfv7wv7qfCQDk9xipmDjlnYt5Omeh8ndOz+VJrZRvYddfbzIWj48ejcw//sCR3r1R7ZNPENCkibc3rULL3LwZcY8+iszffzfXA1q2NPWGg9q1Q3nC4J4fBz6qVUNQhw6n3W9LSkL23r1mobw8i6Ht2YOcAwdMQDPrn3/M5TQBAfCvU8dkjRdl0auEN95AUPv2uUFWt0HUU0FW55+O+1lnuERYrXkCqq7BUtcgq/VMQVZeSiFDMbRnT8SNHw+7m4C5u0XxQr1YJ5JZd65SUlJMSRRHqRcR8T3MpmVWPme11XeUEhKRkgnYctoTa2k5nHPOOeYnv4SsFSYiUh75162P8PseArKzkDR3PhJ+Xm5SazISEhEWl4BahQjEKlgr7rIg2X
42PrWInQOn7HL6qZR/wRddhOrz5+PY8OHI2bMHR/v0QdUPPjC3S+li8C/x//4PSR98wBWPTKAt4sEHET5ihMkWrWislSohsGVLc3Flz8hA9v79eTJyc39njWPev2tXkf9n0htvIG8xg+JhoNhypoBqQUFW58AqL8yGdcle9QXc7qqvv47jI0eecRV2Po6P9xbWLnbdx8yCZ5IQ104REd/BsjWchcLvML/XrDEuIsVX6LNQTtV0zgQifhFdVyIWESlvTEciIBA2vwCENGuGnMTEkx3ByPynWYsUpHv37ibDliURuGgTsT7cM888YzqvUjEwo7b6woU4PmIEMv/8E8duuglRr7xiMnCldKT98APinngCOadquoZccw0in33W1GsV9wHRgMaNzcUVZ5PkHD5sArhxjzyC7J07C78Lg4MR0LRpoYKsjuvOGazmNv5tgLsCBRVTyFVXIfrDD3Hi/vtPZju7rMLOzFoGa/k4b7rnnnu8+v9FxDOYFT916lTUrl3bLMqoesciZ6/ipQ2IiBRT5XOb4divy+F/qpZjVkIC/rr7XvhHVkGjO0YjtE4d7VsplHvvvRfbt283K4k7Tmg5MMpA7iOPPKK9WIGwdm21mTMRe//9SFu0yNSdZMCr8rhxPpnZ5yuyDx5E/PjxSFu82Fz3q10bVZ5/3uvBK1/G2SQMdPMSwAXLdu8+GSA8Ey7udfnliGaGs3hUyNVXo/YffyD1q6+Q9s03sMXHwxoZaQYmWAbBW5m1zz//fKEfy9ruIlK27dq1C59//rkpl8nFH7mIY1laOE+kQgRsZ8yYYVbtdV7Fjiv/uS5EMWLECM9toYhIGRHR4lw0Hj0SiZu3mBWhD3/6Kezpaea+7fGxaP3Wm97eRPERPImdPHmyCdpu27bNlEFgbfh69ep5e9PEC7jCe9V33kFC/fpIevttJL7++skFoSZONBmN4jlcFCt5yhQkvPKKqX8KPz9UGj0alceOPbmAk3hESPfuucHwM7LZTABRSgaDslwQj5eyYsuWLYV6nAatRMq+TZs2Yc6cOSY21KBBAwwePFjlvURKO2DL+rX8Irquzrtw4cLTGlYFbEWkPAdteTky8/NTwVrWhbMgY8d2b2+a+Ji0tDRzksuALUsOscQQa8Kz3JBUzOzEyEcfhX/9+mbhq9S5c5Fz8CCqTp4MvypVvL155ULGX3+ZqfpZGzaY64Ht25tFxQJbtPD2ppU7vrTolZS+zz77TLtdpBxYs2YNvvrqK/P7ueeei/79+8O/AtZ+Fykphf42LVmypMQ2QkTE11Ru3gy21DRkxCea6Zw1O7X19iaJD9mxY4cZ3OS0sYYNG5qshE8//RTVq1fHlClTUKNGDW9vonhJ+I03wr9OHRy//XZkrFqFo717I/rTTxHQsKHek2KyJSYi4b//RfKnn5rgoSUiwgTHw268UYtClhBfWvRKvINTp5kMdN1116Fy5cq5t7MNDAwMxMCBAzWAKVKGrVy5Et999535vV27dujRo8dpax6JyNnRN0pEpJDsdjvSl3yP5MnvIm35r6hapzYiq0YiOqYqQmvV1X6UQuOCY1xs7JdffjEd1vnz5+Onn34yU8l4n1RswV26IGbuXFNXNXvXLhO0zVi92tub5ZPH7NQFC3DossuQ/MknJmgY2r8/av78M8KHDlWwtpQWvWIGreHoyJ/6ydujP/pIdYMroOTkZAwfPtzUsmVpIGf79+/Hiy++iJEjRyI1NdVr2ygiBatfv74ZXOnSpQt69uypYK1ICVC+uohIIWX+/hvS584AkpOQmZoFP/8gRF586cksoUDVmZTCW79+PWbPno1KlSrl3sZ68OPGjcOgQYO0KwWBzZuj+sKFODZiBLL+/htHBw9G1GuvIaxvX+2dQuCCV3GPP470pUvNdf+GDU35g+BLLtH+89KiV6mLFyP54EGE16qF0Guv9eqiV+Jd77//PuLi4rB48WLUrVv3tEXGuCDn6NGj8cEHH2DMmDFe20
4RKbhk5l133ZUnQ15EPEsZtiIihZSz7g/YE+Jgz8lGgL8d1tREU7ebkzoDoiKwc8w92PvYw8jat0f7VM6YlfDPP/+cdjszi1jHVoT8YmIQ8+WXZgEnZGYi9u67kfjGGyZzVNyzZ2aafXT4iitOBmsDA82CYjW+/17BWi8vehX1/vtIefll85PXFaytuL755hs89thjpwVrHRo3bowHHngAX3/9dalvm4jkX8Zk1qxZOHDgQO5tCtaKlCxl2IqIFJJfdFTu71Y/K2KuvBjWq/ojOz4WW599DtkZmea+pL0H0HLqVO1XydfQoUPx3HPPYdeuXejQoYNZoGHjxo2YNGkSrr/++tyaYHT11VdrT1Zg1pAQVJ00CQkTJiDp/feR8PLLyNq9G1EvvQRLYKC3N69MSV+5EnGPPYbsbdvM9aBLLkGVCRMQ0KiRtzdNRJwcPXoUTZo0KXCftGrVCkeOHNF+EykDWJ5k2rRpOHjwoLncc8898PPz8/ZmiZR7CtiKiBRSQOfLELxzK3JiY2END0Nwj36wntMCx7/+OjdYS0k7dyH5k8mwVqqE4CuvgbVyhPax5PHkk0+an2+//fZpe4ZBWwdmcLvLxJWKxeLnh8gnn4R//fqIe+IJpM6ahZwDBxA9aRKsETq+8Jgc//zzZr+QNToakU89hdB+/cx3SETKlpiYGBP0qV27dr6POXz4MKKi/h0oFxHvSEhIwGeffYYTJ04gJCTEJBYoWCtSOhSwFREpJGv1Wgi+5W7YDu+DpUo1WGvWMbeHtmwJm82OrNg4TshFSGgwbAf2wQYgLSEeYSNu1z6WPDZv3uzxPbJu3TpT82/VqlUFPm7fvn0YMGCAmZLq6Axzin3Hjh2RnZ2dG+CqWrUqfvjhB71zZUj48OFmIbIT//kPMpYvx5G+fVHtk0/gX68eKiK7zYaUWbOQ8PzzsMXHm9vChg5F5COPwBoZ6e3NE5F8XHHFFXjvvffQvn17twsVsU3i4OXFF1+sfSjiRceOHTPB2qSkJFP+gDPEqlWrpvdEpJSohq2ISBFYoqLh16JtbrCWAkJDEdGgEQIqRyAwrBIqO2WE5Bw9qv0rp/n111/d7hWbzYZ33nmnyHts0aJFGDFihKkvVpDly5fjxhtvNNkSzvbu3Yv09HSsXr0aa9euNRcFa8umkCuuQMzcufCrUcNM/T/Sqxcy/vgDFU3Wli04NnAg4h54wARrA5o3R8z8+aZUhIK1ImXbHXfcYUoC3XzzzVi6dCni4+NN+xcbG4uffvoJw4YNw5YtW/Cf//zH25sqUmFxgP+jjz4ywdro6GiMHDlSwVqRUqaArYjIWco6egxh1WNQ6+KLUOPCC2C123MXBfJvfI72r5zmtttuw/PPP4+MjIzc29g55TSzDz/8sEh7bOLEieaEmiv1FmTGjBl45plnTN0xV6yf26xZMwSqJqpPCGzRAjELFyKA2f0nTuDYoEFIXbQIFYEtLQ3xL72Ew927I2PVKlhCQhDxxBOovngxgtq39/bmiUghREZGmjYpLCwMd955Jzp37oyWLVuajNq7774bVapUwfTp07UIp4gXccYWB/Pr1KljkgIiVIJJpNSpJIKIyFkKqlcX1rAw2FJSzCJA1rbtcSg+zWTc1r2yu/avnObjjz82K2Qz4/WFF17AihUrzPTQyy67DO+++26R9hgzkbia9plKIVx55ZW44YYbTN1AdwFbBo/79+9v7mfHmdvHlbqLg5lSOTk58CTH83n6eX2VJSYGVb/4AnF3342MJUtw4o47kPXoowi7/fZyW7c1/aefkPDkk8jZt89cD77qKlR+5hn4165tStBAn40yTd/h8oPH+LNVvXp10+5xyjVrtScmJppA7XnnnafAkEgZ0KdPHzO40qVLFw3oi3iJArYiImfJr1Il1LzrDiStXoOs1FTs+mI2Mg8fgcXfH+l2C1o+9bj2seRxwQUXYOHChXjkkUdw0003mcUbXn31VXTv3r1Ynd7CKKjmGD
NruSL3/fffj/DwcLz11lu49dZb8dVXXyE0NLTI28Spro4sc0/bunVriTyvzxo3DsHh4QhasACJL76IY3/9hXRmW/uXn1M8y4kTCH7vPQQuW2au26Kjkfaf/yDhootwJDER4EV8hr7Dvo+DQsyO9QS2TaqJKVI27Ny5Ew0bNjTf8YCAADPYLyLeU37O5kVESnBhG+RkwxIQmO9jAqpVQ1SPa3F44VfI2Lf/5N9lZSH2518ANwFbe0421yczQV2peLi4Fxdx+OWXX9CpUydTJ+yNN94wndZ27dqV+vaMGTMmz/UHH3wQn3/+OdavX2+Cy0XFk/2goCCPZ+cx0NO0aVOtTuzqzTeR3K4dEp95BkGLF6NySgqqvP02rJUrw5fZc3KQ8umnSJo4EfbkZMDPD2EjRqDS2LFmVoP4Fn2Hyw/OyDiqGv0i5QYH2ZcsWWLWWLj00ktx+eWXl9vZOiK+RJECEZEC2A7tR/YP84CMdFgbNIVft56wuFnR2CGoUjgsflbYc05OFwwKC4U9OztPYDZn8zrkLP+BZ0fwa9cZfu0u0ntQwfTt2xeHDx/G448/joEDByIlJQUvvfSSybbl9WeffbZUt+f99983NQRbt26dG1BmcKW4QVeu+s2s4ZLA5y2p5/ZlEbfeisB69XDirruQ8csvOHH99Yj+9FNTLsAXZa5bh9hHHkHWunXmemCbNqjy3/8isGVLb2+anCV9h30fj/EiUn5KnHDxWi44S8ysFZGyQa2tiEgBcn79zgRrybZ7K+w7txS4vyp36ohaF1+M8BrVUblObdRu3hhJLz+HpLf/DznHjsKemYGc5d8zbZfj2cj5cwXs8Sf0HlQwLGPAkggMzhKnlj733HOYPHmyyW7wxhS4F198ESdOnEBaWhomTJiAevXqmTIJ4jtCrr4aMbNnwxoTg6wtW3CkVy8T+PQltqQkxD31FI707GmCtZbKlVFlwgTEzJ+vYK2IiIgHcYD+iy++MMFaZtT26tXLZNgqu1akbFDAVkSkINlZea7aszMLfLhfaCgaPvUYmj72MBoN6odKVU5O27UnxCPhy5mIX7ceWalpeZ8zK+//kPLvww8/dLv6NVfIZiDXExYsWIC2bdsW6rFPPvkkGjVqhJ49e+Kiiy7CoUOHTNatMll9T2Dr1qi+cCECmjeH7ehRHB0wAGnffgtfmI6ZumgRDl92GZI/+ogpPwjt0wc1f/4Z4cOHw6KsapFyiYuO5WfNmjWlui0iFUl6ejqmTp2KzZs3m/O9QYMGeaUsl4jkTwFbEZGCDpJtO3N5DfO7JTIa1kbNz7i//MLCUKljBwTX+ncxqKRjJ7B90bfYNW0mtv6xHemJySefv15jWKILt2iU+LZHH30UyazD6eTvv/9GZua/gwCxsbHo3bt3sZ6ftWYd09mIz+N83aFOnTrYsmULoqKicm/jQmMvvPACVq5caf6GK3fXqlWrWNsh3scyCDFz5yK4a1fY09Jw/NZbkTR5coktBHe2svfuxfGbb8aJO+5AzpEj8K9fH9WmTUPVt9+GX0yMtzdPREoQM/q+++670wJJnHUyfPhw7XuREiqDwLUU9uzZY8pfDR06FM2bn7mPIyKlSzVsRUQK4Ne8Naw1asOekgxLTM0CFx477QDbsjV2fzQFiXv3IzUuAcFNm5nbbUFhOLrrKGqcG4Ggc1pr2lEFMW/ePDzwwAMmOOowYsQIzJ8/H3Xr1s09gT548CDKA08HB/l8zhcpmCU8HFU//hjxTz6JlGnTEP/008javRuR48eXmcUOObsgadIkJL3+Ouzp6Sych0r/+Q8q33UXLCEhep/LGX2HxR0GisaOHYsePXqY2R6bNm3CY489ZqZqczFOESmZOtRc9PaHH37AjTfe6HbWl4h4X9k4YxcRKcMskVXNpaji/tmC5NBIWOr5w2bfj8QduxASGw/En0Dkec1gi49H2pcz4HfnfbA6BfGkfHIXZCyvgUdmDXv6tTGYzQ48VyfXgjeFFz
x+POx16iD1v/9FypQpyNy9G5Vffx2WsJPlWrwla80aJD/1FHK2bTPXAy64AGHPPAP/xo2RwRsYwJVyRd/h8iPLg6Wc7r77bnTr1s3MQrnqqquQlJSEYcOG4Z577kFoaKjH/o+InDzvdNSnPf/8801WbXEXmBWRkqeArYhICclKTDSZbJbKkQgMOY6sQ4eQYwECUlMRGnRqBdasLNji4xSwlXIlJibG4x3tnJwcHD161JRqUG3dIho/HvHnn4+9I0ci6+efkTJ8OBrNno2A2rVR2rJPnMChJ55AwiefmOt+0dGo9eKLqHLjjZptUM7pO1x+pKammhI+nsLFLskx0MegLT8vIuI5u3fvNuVHmFHrmO2lYK1I2aaArYhICanSti2O/rwMOenpsGdmombbNgirUR05WzcjJ+Vk5yTleDyS58xHYK1aiOrVA1aNcks5wOwNT68w7HjOknjuiqBKv34IrF0buwYORPq6ddjWtSsazZ2LkNatS+X/MxATN20aDjz6KHKOHze3RY0YgVrPPQf/qkWfwSC+R9/h8sOTx2CWQZg9e7YpifDpp59i3759Jtv2mmuuMT+5GKaInB0uLPbll1+agZCff/5Z3ysRH6GArYhICQmOqYbmD96PpK3bkbl1KzLXbzC3W5o0RXi7VshOT0f60WRY9u1H5r79ZlX06EHX6/0QkRIR1qkTzlm6FDv79UPG5s3YdsUVaPDZZ6h8zTUlusfTt2zBvnvuQcqyZeZ6cMuWqPPGGwi/6KIS/b8iUvb99NNPplbtlVdeaa5HRERgzpw5eOutt/DII48osCRylv78808sWrTIDJyyBEL37t21T0V8hAK2IiIlKCgqCkEXdoL9go5IWr4CmQcPIaRZU4Sd3xqxXy2GZcvO3MdmHj6s96KcmzFjBsKcaocy04EZD5GRkeZ6SkqKF7dOKoKgBg1wzk8/YfeQIUj++WfsHDAAdV57DdG33+7x/2VLS8ORl1/G0VdfNQuMcSGxGo89hpgxY2AJOFUWRkQqtK+++soEaZ0FBATg/vvvx9VXX+217RLxdQzQLl++HD/++KO53rZtWzMAonUARHyHArYiIiUofc8epG3ajIBq0ah8ycV57mPgNnHpLzyjMtdDmzfTe1GOsfYqs4acRUdHY+HChXlu00q9UtL8IyPRaP587L/nHsR++in233cfMnbsMLVkLX5+HvkfiT/8gP1jxiBz58lBKWbx1v6//zMBYxERBwZrWQZh69atuXVrGWji4pUbN25Ey5YttbNEiojfIdar/e2338z1Sy65xCzup5JSIr5FAVsRkRKSsW8fDr87iWmU5nrWiROo0v3fbJGQJo1RffQopG3ejICYGIR36qj3ohxbsmSJtzdBJJc1MBB133sPQY0b49D48Tj25pvI2LUL9adMgZ9TFnhRZR0+jAMPPYT4L74w1wNq1kTtV19FRN++6iiKyGlYt/bFF1/MXb3esfAYf2dGoIgUXUZGBrZt22Z+Z6Z6586dtRtFfJDV2xsgIlJepf2zOTdYa65v/Oe0x/jbshBizUEgsnMzbTPX/YW0r+Yjc+2aUt1eEalYGBCp/tBDqP/JJ7AEBSFx0SJsv/pqE3QtKntODo6//z7+adPmZLDWakX0XXeh+V9/IbJfPwVrRcStKVOm4Pbbb8e6detQtWpVLF261NTbbNq0Ka666irtNZFiCA4OxrBhwzBgwAAFa0V8WLnMsP3jjz/w9P+3dx/gUZXZ48fPzKR3EpJAqAILKuICIigIKEVWEVQUUIoKCOiiooJt/f9EXcsusOvu4iqwLK4KSBOk2CsizUZTRFpoIQFCIL3OzP85LyQkISAlmfr9PM88M/fmzp03d2by5p573vM+++wpw0wBwJUCEhIqLAcmxJt7e0GBpK9eKyX790rozl8kIDjIrHfm5oo1Lk4K3l9ilos3/ihSUiJBV17FGwegxtQaMEAC69eX5AEDJP/HH2Vbly7SZPFiCW3ZUhwFBXJs0S
LJXLpU7BkZYouNlei+fSWmXz+xhoSY5+dt2GDKK+R9f/wiU2jbttJgyhQJa9uWdw3AGR06dMgElYKCgsyESBs3bjQZgU899ZQ8//zzMmzYMI4gcBby8/Nl9+7dcskll5SVG6lcHxqAd/G5gG1GRoa88cYbUlxc7O6mAPBzEa1/LyVHMiTvp59NDdvYW/qaoX47pv1HcnfvFfuB/VJ84IBE14mXkKgISUysK7acrAr7KNmzm4AtgJr/e9WxozRfsUJ23XqrFG7fLtu7dZP4hx6S9FdfFfuxYyZjVhwOc5+5ZImkjBsn9adMkbxvv5XD//63+Zk1MlLqPvec1B41qtpq4QLwbVFRUSbQpBo3bmxq2WrAtmHDhnLgwAF3Nw/wCllZWTJr1ixJT0+X/v37lwVtAXg3nyqJoIGQF154QZ544gl3NwUAjJju10nS2AckftAdYgsLk5LsbBOsVcXFdslMPShHU1Ll8M49cmh/mljrJFU4cra6dTmSAFxC69n+7ssvJfyaa8SRlSUHX3jheLBWabC23L2u3zN0qKl9q+tibrtNLtmwQeLvv59gLYCz1qlTJ3nppZdk79690rZtW1MOITk5Wd59910zMSeAM9Mg7cyZM+Xw4cMSERFhSosA8A1em2G7YMECcysVHh5u6rNoraMGDRq4tW0A/I/T4RDnkYNiCQwWiYoWZ/ohsQSHiCW6VoXtNGgbEB4mJbl5YheLWCOjJCCmltgiI6UoIvp4Nm1Jidj37RFr3SQJaNZMnMcyxBITe/x1crLFmZstEhYukpcjEhoukp8rlqhaYgkNO9megnxxZmaY17eEnFwPAGcSEBcnFy1cKD83aCDOsxmtZLVK43nzJOammziwAM6Zlj548sknZeXKlXLHHXfI/Pnz5YYbbpCAgACTiAPg9FJSUmT27NkmS10DtVq3ljIIgO/w2oCtpvrrrbx+/fqZwO2cOXMkNTVVXnzxRXn66afd1kYA/hOstX/0rjgO7DGPLbpShw+LRWwdu4nt0pOzHFsDAqTpqBGSsmS5maU9MDpKgmJizM8imjQxE/MEd+wsTkcnsX+8SOzL3hGdtsx2ZRexxMRJyedLxZmXI860FLEkJpl7a1IDkYgoCfjD7WLVdRnpUvLBfHEW5IkEh0jAjQPEGlexni4AnE7W+++fXbBWORziyM7mYAI4L7GxsTJ9+vSyZS1tt2XLFpNdm5iYyFEFTmPnzp0yb948UwoyKSlJBg0aZGIhAHyH1wZsq7Jo0aKyx3/4wx8I1gJwCWfKHhOsNXKzxZGyR6wtWmm8VuzfrawQsFXhDRtK8wf/aB5nfP+jZP68RULqJEidHt1P7vPAXnGk7C5btn+/Uiyx8SIOuziPHDJZtfq6UlwkzozDJpvXsWGtWHv1E/tP3x8P1qrCAnFs/Fas3ch+A3B2dIKxspq1v+VETdvYO+/k8AI4K79Vm7ZWrVpit9vNdhqIAlCRlj/QJDWHwyFNmjSRgQMHmon7APgWnwrYAoBbBJT7U2qxHL8dz7MVi+3Mf2Zj27U1t1NUnrDHahMp3ZfFWun++GuV/bzyc3+jDQBQnj0j4+yCtUpHGBw9ygEEcNa6detmRhSVn4dEVV6ny7/88gtHFqhEM9DbtWsnubm5cuutt4qNiT4Bn+SzZ/EfffSRu5sAwE9Y6zYQa/PLxLHtJ5HIGLFd0UCcOZkmUGrr1OP899milTh+3WyCtQGdrxeJqiUlH78rlvhEcVqtxzNu09PEEpcolvBIsbW7xjzX1voqk6HrzDwqFm1P26ur+TcG4MtssbHnlGFrq1WxVjcAnEndunUlLS3NTDKm9Wr13mpKSQE4Hb2IoZnnWt9ZL2boiOLKFzoA+BaPCdhu2rRJRo4cKevWrStbt337dnnmmWdk69atkpCQIOPHjzeTirmC/jHU2/k+t/w94M34PJ8dS6eeYr3iGpPdagkMEmd+nkhgoDgDAs/7b4GlYw+xtu1k9ukMPD7MyTpwlClz4N
QJzcru80VCwsShJzv6WiFhYrnlLrEU6PrQk+tR9l7oEDIAVYvu29eUOTgrDodE33wzhxLAWfvyyy/Nud+HH35oatZOmzZNevXqZYK3V1xxBUcSOKWrdcgHH3wgR48eNbVqNaOWQC3g+zwiYLt8+XKZMGFChRPooqIiGT16tPmD9NZbb8maNWtk7NixsnDhQmnatGmNt2nbtm0XvI/NmzdXS1sAT8Dnufo4U1JEVnwjTrtdLFe1F8vvmlXj3nE2duzYwYECTiOmXz9JGTdO7JmZmtJz+uNksYgtOlpibr2VYwngnFx++eXm9sQTT5jg7ccff2weFxYWmuCtZg/qkG/A35WUlMjixYvNZHxq9+7dLomHAHA/twdsJ0+eLKtXr5YxY8bIlClTytZrpm1+fr6MGDHCXD3q0qWLdO3aVZYsWSKPPvpojberefPmEhYWdt4ZXBrcatWqFfVk4PX4PFcvnXl9/+Kl4tThS1r7dv1GSerZUwJioqv5lXCmz3OzZs0I2gKnYQ0JkYYzZkhy//7Ha2RXFbQ9MQRTt9PtAeBCg7ePPfaYCUppabv77rtPQkNDZeXKlRxY+C29gDFv3jxJTk42cQWtV0uwFvAfbg/YDh061JQ6KF8KQe3cudOcUJdP9dc/TqVXlmqa/kG80OLd1bEPwFPwea4eJbm5+t/Xyb9tOrIgN0ecoSFy8IuvxJ6TI3k/rpf8PXsl4nfNpMmfnhRbWGjZ8x05OeLMzhRLRIQ4s3PEGhsrlpCTP8fZoVYecGbRvXvLRfPny96RI8V+7NjJmrYn7jWzVoO1uh0AVMcF1bVr18qnn34qn332mVlu06YNBxZ+SycUmz17tqSmpkpQUJAMHDhQmjRp4u5mAfCngG1iYmKV6/Py8iSkUsaGLmvWLQB4K1tkpIT8rpkUbD8+JD+wbh0JrFtXtv97quTu2SvZmzdLTvJuCa9dW7L27hNLeLg0e+pxs23Jju2St2ieCdiW7NsnAU2aijU6RsIG3yO22vFu/s0A+Jrom26SlsnJcmzxYlPT1n70qJlgTGvWahkEMmsBXGj2oGbQapBW69pqkPbaa681c5jo6MrK54KAv9BatbNmzZKMjAwz6nfw4MGSlJTk7mYB8LeA7enoH6aCgoIK63T5fMsUAIAn0MzaxOH3SM6P68VZUiIRbdqI0+EwwVpVlJUt4nCKvbhEAoKDJCc5uey5hSu/ECkpFvvBNHHm5ojjSLpYAgKkaN1qCe3NpD8Aqp8GZWPvvNPcAKA6LFu2zARpNVgbHBws3bp1k4kTJ0rHjh1NJiHg7/RihmbYRkdHmxHJcXFx7m4SADfw2ICtlj+YMWPGKZPEaJkEAPBmGmSNbH9lhXXBtWtLYXq6BCcmSNGRI2INsJkakTHlt7OeKLFSWk6h9F6HKAMAAHgBrVUbGBgoHTp0MLeAgABTo1NvlQ0bNswtbQTcqU6dOiarVgO2UVFRvBmAn/LYgK123tqRT506VYYPH25qGq1YsUIWLlzo7qYBQLVrdt+9cmD5hxJ5cXOp06O7FKWlSeSll0rDwXeUbRPcvZfkL5gjtjpJIharWGvHi7VWnAR36sI7AgAAvELp0O5du3aZ25lGJRGwhb/Ytm2bmWivQYMGZrn0HoD/8tiArQ6H0QzbZ599VqZPny7x8fFmqAwZtgB8UXBcnFx095AzbhNQv4FEPPioOPPyRMLCRfJyxRIRKRYybAEAgJf44osv3N0EwKNs2LBBli5dakqEjBw5UmJjY93dJAAeIMCTMmrXr19/SlmEt99+221tAgBPYwkIFEtU9PGF0nsAAAAAXmf16tWmprNq0aKFKYMAAB4VsAUAAAAAAPB1TqdTPvvsMxOwVVdffbX07NnTlAIBAEXAFgAAAAAAwAUcDocsW7bMlEJQPXr0kE6dOnHsAVRAwBYAPFTB4cNyZM06sYaGSEKXzmILDnZ3kwAAAABcgDVr1phgrWbT9unTR9q0acPxBHAKArYA4IGKs7
Jl2z9flZLcPLOcs2OX/O7+Ue5uFgAAAIALnL9n9+7d0q5dO1O3FgCqQsAWADxQ3r59ZcFah90uBz//QkISEyT+mk4SkhDv7uYBAAAAOEv5+fkSEhJismoDAgJk0KBB1KsFcEbWM/8YAOAOwfHxYrEd/xOdvfVXKUw/IodXrpJf//mqyb4FAAAA4PmOHDki06dPN5OMlWJyMQC/hYAtAHggzaKt06G9lOzdK8WHDklEo4ZmvT0vT/L27zePnQ6HHFm8RFIm/k0Oz35HHAUFbm41AAAAgFKpqanyxhtvyLFjx2Tr1q1ScOL/df2/PWPOHEm+4w7Zcf315l6X+X8eQClKIgCABypKS5PC776XhCYXScnRY1K4Z48ExsaKJcBWVhIh6+tvJHv1GvO4+PBhsYaESNxtt7q55QAAAACSk5Nl7ty5UlRUJHXq1JHBgwebsgiZy5fL3pEjxX7smIjVKuJwmPvMJUskZdw4aThjhkT37s0BBPwcGbYA4IFKMo6KOJ3mcULLSyQ0MlIimzWVpsPvkeDatU9sk1HhOcVHjrilrQAAAABO2rJli8yePdsEaxs3biz33HOPREREmGBt8oABYs/MPL6hBmvL3ev65P79zXYA/BsBWwDwQMGNG4ktJto8DggJkUa395PfjblPIpo1lZKsLHE6nRLW6rLjV+VPCG/9eze2GAAAAMAPP/wgCxYsELvdLpdcconJrA0ODjblDjSz1jiRmHGKE+t1O8ojAP6NkggA4IFsYWFS94E/Su76jWINCZaIK9tJwZ49cmjmm+LIyzMB3cR7h0udP46Wgh27JLh+koS2aOHuZgMAAOCEN998UwoLC2XUqFEcEz8SFBRk7tu2bSu9e/cW64kEi2OLFh0vg/BbnE6z3bHFiyX2zjtrurkAPBQBWwDwUAHR0RJ9bZey5aPL3jfBWlW4e49kr/tWort0lpBGjdzYSgAAAJSnI6EmTJgga9askf79+3Nw/EyrVq2kVq1aUq9ePbFYLGXrM5cuPVmz9recqGlLwBbwX5REAAAv4bRX+ueuxO6upgAAAOAMAdsuXbrI/fffzzHyA1r64OOPP5asrKyydfXr168QrDXb6fwTZxOsVQ6H2I8ere6mAvAiBGwBwEvE9Opp/iEsyMoSW61aEtG+nbubBA+yadMm6dChw29ut2/fPmnfvr1kVJq07tVXX5VOnTqZ4Xvjxo2T7OzsGmwtAAC+S4fA9+jRw93NgAvopGLvvPOOrF271tw7zhCQtcXGVph/4oysVvP/PgD/RcAWADxASWampC94Vw7PmiOFe/dW/FlWlqQvXCQHlyyVjGOZku1wyuFDhyTtq68le+cut7UZnmP58uUybNgwc9JwJqtWrZJBgwZJZunMxCfMmzdPli1bZu5XrFghubm58vzzz9dwqwEAALxXXl6evPXWW7Jz504JDAyU7t27l9WrrUp0377nlGEbffPN1ddYAF6HgC0AeICDM2ZKzrffSe7GTZI2/b8mgFvq0H//JznrvpW0z76QvG3bpCg7W9LXfivJb86S7f+eKpk/b3Fr2+FekydPlpkzZ8qYMWPOuJ1mfTz33HPy4IMPnvKzRYsWyV133WWG70VGRspjjz0mH374oQncAgAAoCK9+K3/f6WkpEhoaKj5P6pZs2ZnPEwx/fqJLSZGpFKphFNYLGa7mFtv5bADfoxJxwDAzRyFhVKcdrBs2XliWScdcxQXS9GBA2a9Ra/Y2x1SeGJbi81qZpHNWL9Bolte6rb2w72GDh0q48ePl3Xr1p1xOx2aOXDgQDlw4vNUnmaGlD/JaNy4sSm/sWfPHrn00nP/bOlz9VadSvdX3fsF4Bp8h30Hf4fh7w4fPiyzZs0yNWujoqJkyJAhEh8f/5vPs4aESMMZMyRZJ6LToK3TeepGJ4K5up1uD8B/EbAFADezBgdLUFJSWWBW/zkLrFvn+OPAQAmqX1+K9u+XWhc1lsO/bheJjBCLrq8dJ5lbfpG8/ful8O
AhaXLvMAmKjnbzbwNXS0xMPKvtznQioUP6NDuklM1mk6CgILP+fGzbtk1qyubNm2ts3wBqHt9h+JN+/fq5uwmogQnl3n//fROsrV27tgnWRp/D/9/RvXvLRfPny96RI8V+7NjxmrZaJuHEvS062gRrdTsA/o2ALQB4gMR7h8mxz74w2bZR13SSgKiokz8bcY/5WXhBgTR68nGxREZK6kcfS9pnX4o1OEhCk5Ikb3+KpCx9Xy4aOsitvwe8kwZrCwoKKmRPaT3c8PDw89pf8+bNJSwsrBpbeLxNGuhp1aqVCSgD8C58h32HXsyryQtz1dVGDZb2799fRowYUS0Te44cObLCaJbt27fLM888I1u3bpWEhAQz2qVnz54X/FrwbBaLxXy2Pv74Y+ndu/d5/b8TfdNN0jI5WY4tXiyZS5aI/ehRM8GY1qzVMghk1gJQBGwBwAPYIiMl7taqJxawRURI3C19K6xrPPhOsYWEyuFvVpWts59nNiSg5RB27dol7du3NwcjOTnZTJqhpRHOhwZUayqoWpP7BlDz+A57P2/4G/zCCy+Ysj6nowHnunXrmrrtat++fSbwFhcXV+XEnhMmTBBHucmi9KLm6NGjzUSeOunUmjVrZOzYsbJw4UJp2rRpDf1WcKeMjAyJjY01j7UMgl4MuBAalI29805zA4CqMOkYALhI8c+bpeCLT6Vk965q2V/cVe3FUlgg9gP7xZF2QKKDrZKz7D1Jn/WW5K5dXbad0+GQoh++lYKvPhP7wTSxpx82jwu/XSPOkpIzvoazuEgK166SghWfi0OHbcEn3XLLLfLGG2+Yk9vs7GwzkVmvXr0qlEkAAMAb6KSZu3fvlrZt2552m2nTpsnw4cNNn6d9nw5rX7p06VlP7KmZtvn5+SZ7NzAwULp06SJdu3aVJUuW1MjvBPdau3atvPrqq/LTTz/xVgBwGTJsAcAFCtetlsLPPzaPi9atkrA7hkrARReWgRGYnyMNa4dLblGWBBxKk5IvP5GfN/4ijuBQsUWES9Mx90tcv9ul4KPlUrzhh+PtWP21DuaS0rlp7Sn7JezW02cI5C2YK/bdO83j4g0/SvjIP4o17PyGycO19MRTM4LWr1//m9vqZGSaOaIzHOfm5krnzp3l+eefd0k7AQCoLjqx5qRJk0zW61NPPXXa7V5++WV58MEH5e677zYTSN18880ybNiws57Ys3SyTh0eX0oza7ds2cKb6WP1ar/44gv55ptvyj5fl112mbubBcBPkGELAC5Qsm3ryQWnU0p2XHjtt5Ltv0pgSLBEBNkkKDhQju7cLfYSuziLCsVhd0jahx+d8trOjKPiSE+vsI/TcZYUlwVrzXJujjhOTIwGz9OhQ4cKwdm+fftWGaytX7++/Prrr2XD+pSWP9DsoRUrVsj3338vr7zyikRERLis7QAAVEed5Mcee8yUJtC+7kx0Ys1HHnnE1J/V5917773nNLGn1sgNCQmpsE6XNesWvkFLYCxbtqwsWNutWzdqFANwKQK2AOCKP7a14ysux1VcPh+2E/u0nDhhKJ2gwHKitpwtutaprx0SXLb98Z/VPu3+LQGBYo05vo/jG9vEWi7IBwAA4Clef/11E2DVbNnfohOGaUkEnUhMSyfcc889cvTo0bN+La13W36yTqXL1T3hJtyjpKREFixYYC58axZ1nz59zOij8hnVAFDTKIkAAC4Q0r2XXqoXx6GDYmvSTALbXHHB+wxs10EcuTlStO1XSTlwRLKtdikICpHQmFgJqVdPGj3ysNku9ObbpOCTD8SZnS0hv28jlsBAKfrhO7GEhknI9Tee8TVCBwyWws8+EmdRkQR16CjW2FMn4wAAAHA3nRzs0KFD0q5du7Is2I0bN8q3335rataW969//ctMGPbAAw9IcXGxPProozJ//nwzkdjZ0PIHM2bMqLBux44dpkwCvD9YO3v2bFMHWSfYu+222+SSSy5xd7MA+CECtgDgApagIAntfXP17t
NikZBre8ixYosU1d0twXVFgkUkssXvpNnokWVZANbIKAm77Y4Kzw287PdnncWr9XYBAAA82UcfHS8FVb7+7LXXXmsmBqvs73//u5ksTOm9lgLS4Ny5lCHS502dOtVk6uqkVFpWaOHChdXwm8CdAgICpG7dupKamip33HGHNG7cmDcEgFsQsAUAL2evVC/NWWIXe3a2FKcdlMDEBAmIjnZb2wAAADxNabC2fJDuXGgNXM2wffbZZ2X69OkSHx8vEydOJMPWR/Ts2VOuvPJKqVWrXGkwAHAxArYA4OXiruog6eu+E3tenlhsVql16SVyYNLfxVFQIJbgYKkzaoQEN2zo7mYCAAC4zNtvv11jE3uWlkWozteA+6SlpcmqVatM/WMN3usoNYK1ANyNgC0AeLmQhHi59Ilxkrt3n4TEx0vOihUmWKuchYWS9fU3Ej9kkLubCQAAAHgUrVU7d+5cKSwslOjoaOnRo4e7mwQABgFbAPABgVFREnNZS/M4N0gr2VasnwsAAADgpK1bt5q6w3a7XRo1aiTXXHMNhweAx7C6uwEAgOoV0/06CUpKMo8D6yRKTK+eHGIAAADgBC1xMX/+fBOsbdGihQwePFhCQkI4PgA8Bhm2AOBjbJGRkvTIQ6YsgpV/PAEAAADD6XSaerWff/65WW7durX06dNHrFZy2QB4FgK2AOCjHA6H7HlzluQfOCBRl1ws9W7uYyZRAAAAAPxRTk6OCdiqTp06Sffu3fn/GIBHImALAD4qZelyObpho3lccOiwBMXFSkJnanMBAADAP0VGRsqdd94pBw4ckKuuusrdzQGA0yJgCwA+qjD9SIXlokrLAAAAgK8rKiqSI0eOSN26dc1yw4YNzQ0APBmFWgDAR9VqffnJBatFoi9v5c7mAAAAAC6Vn58vb7/9trz55puSlpbG0QfgNciwBQAfUJSWJgU7d0lQnToS0rSJWRd/TScJqlVL8rSGbfPfSXijRu5uJgAAAOASWVlZMmvWLDl8+LCEhIRIcXExRx6A1yBgCwBernDfPkl7fbo4T/wTGtf/Nolsf6V5HN3yUnMDAAAA/EV6eroJ1mZmZpq6tUOGDJGEhAR3NwsA/Ddge+ONN0pcXJx5fMMNN8igQYPc3SQAqFG5GzaWBWtVzg8/lgVsAQAAAH+SkpIis2fPNuUQNDagwdqYmBh3NwsA/Ddgq0MetJD4f//7X3c3BQBcJqDSP6AB0dFVbud0OiV7+w4Ru10iWzQXi5Uy5gAAAPAdqamppl6tlj9ISkoyCVzh4eHubhYA+HfA9tdffzX1aYYOHWquoE2YMEFq167t7mYBQI2K7NRRilLTJH/bNlPDNrbvTVVut2fOXMn4/kfzOOriFtJ05HCCtgAAAPAZ8fHx0qBBA/N4wIABEhwc7O4mAYB/BWwXLFhgbqX0qtnjjz8uo0aNkptuukk++OADmTRpkvz1r391azsBoKZppmztAbdXmVFrsVjM4+KsrLJgrcra+qvkH0iVsPr1eIMAAADg1Ur/7w0ICJCBAweK1Wo1jwHAW3ntX7D+/fubW3mFhYXStGlT87hbt27yn//8x02tAwD3KcnNlV0z35Sc5N0S3qiBNBk+TKxBQWKxWcVpdxzfyGIRW2gIbxMAAAC8OlD71VdfmVhAr169TNA2KCjI3c0CgAvmUwUM33nnHXn99dfN43Xr1snFF1/s7iYBgMulffKZ5OxK1v9gJXf3Xkn96GOxhYRIw4EDxBoUKJYAm9Tre5MEn5igEQAAAPA2DofDjKz9+uuvzfn/3r173d0kAKg2XpthWxUd+jBu3DgzC2RoaKi89NJL7m4SALhcSV5exeXc48txV14hse3amselpRIAAAAAb1NSUiKLFy+WLVu2mOUbb7xRGjVq5O5mAUC18amArQZpX3vtNXc3AwDcqnanjnJs02ZxFBWLJTBA4q/pWPYzArUAAADwZlr+YN68eZKcnGxq1fbr109atmzp7mYBgG8GbDdt2i
QjR440QxlKbd++XZ555hnZunWrJCQkyPjx46Vnz54uaY/dbje3831u+XvAm/F59j6hDepL83GPSP7+FAlNqivB8bX5e1Tp86xD6AAAAOBdcnNzZfbs2ZKammpq1eoo2yZNmri7WQDgmwHb5cuXy4QJEyqcQBcVFcno0aNl0KBB8tZbb8maNWtk7NixsnDhwrKJxWrStm3bLngfmzdvrpa2AJ6Az7OXStl//IYKduzYwREBAADwMikpKSZYGxYWJoMHD5akpCR3NwkAfDNgO3nyZFm9erWMGTNGpkyZUrZeM23z8/NlxIgRZghvly5dpGvXrrJkyRJ59NFHa7xdzZs3N53A+WZwaXCrVatWYrPZqr1tgCvxeYYvfp6bNWtG0BYAAMDL6Hn6rbfeKvXq1ZM4JtAF4MPcHrAdOnSoKXVQvhSC2rlzpzmhLl9vUTNrS4uK1zQNtF5osLU69gF4Cj7P8CVa7wwAAACeb9++fRIVFSXR0dFm+fLLL3d3kwCgxrn9jDUxMbHK9Xl5eRISElJhnS5r1i0AAAAAAPBtWqpQSyS+/fbbJkYAAP7C7Rm2p6PlCAoKCiqs0+XzLVMAAAAAAAC8w8aNG01JRKfTKbGxsRIYGOjuJgGA/2TYno6WP0hOTj5lkhgtkwAAAAAAAHyTznPz3nvvmWCtlkAYOHAgAVsAfsVjM2w7dOhg/iBPnTpVhg8fLmvXrpUVK1bIwoUL3d00AHC77O++l6K9+yT4osYS0bZN2frczT9JwbbtEpSUJOEdrpScteukODVNQi9uIWEtLz1lP46iIsla8bXYc3Ilol1bKUo7eHy/TZtIROvfn1ObnA6HZH2zSkoOp0vYZS0ltEXzU7YpOXpMslau1CKyEt21i9giI8/zCAAAAMDXaID2s88+MwFbddVVV8n1119fYW4bAPAHHhuwDQoKkhkzZsizzz4r06dPl/j4eJk4cSIZtgD8XtbqtZKx+D1zHLLXrhOn3S6RV7aTvJ9+lsNvzSo7PlmrVkvxwYNl2yUMu1vCLr2kwvFLf2eeeZ7KWLZcbOHhYg0JMduL3S4RV7Q96+N9dPn7krVy1fHXW/et1LlvlIQ0uajs547CQkl9barYjx0zy/lbtkrSuIfFwuSMAAAAEJGvv/66LFjbo0cP6dSpE8cFgF8K8KSM2vXr159SFkGLiwMATirYvr3C4SjYsdMEbPN37KywPnfTZglKTCi33Y5TArb55fZVkn5ExOE0AdvjP9txTgFb3b6M0ykFO3dWCNgWpx8pC9aa5cOHpeRYpgTGxfL2AgAAQNq2bSubNm2Sa665Rtq0OTmKDAD8jcfWsAUAVC2ofr2Ky0l1K9yXCrmoUaXtkk7dV72T+7JGRoi13MSOlff3Wyrvv/y+VUCtWmINDS1btkVFmhsAAAD8l91uL3scGRkp999/P8FaAH7PYzJsAQBnJ/q6a8VZUiKFe/aaDNaoLp3N+sj2V4ojL1/yt20zwdPo7tdJ5udfSJHWsG3RXCLaXXHKvhKGDJKjH3wk9uxsqT1kkKl3W7h3r4Q0bSJRna85p7ckrt8tJju3OD1dwltddko2ry0sVBJHjpBjn30uFqtVYnr1FCuz/QIAAPit7OxsmT17tnTs2NFMLqYCAghTAAB/CQHAy2iws1av66v8WfS1XcytVOxNvc+4L530q/bA/idX/P74P8rnwxocLHG33nzGbYIb1JfEYXef92sAAADANxw5ckRmzZolx44dk88//1wuueQSM/E4AMCDatgCAAAAAADfl5qaajJrc3NzJTY2VoYMGUKwFgDKIWALAAAAAABcIjk5WebOnStFRUVSp04dGTx4sERERHD0AaAcArYAAAAAAKDG/fLLL/Luu++aicYaN24sd9xxhwQHB3PkAaASa+UVAADA+2zatEk6dOhw2p8fPHhQ7r33Xmnbtq1ce+218s4775T9zOl0Srt27aR169ZmVma99ejRw0UtBwAA/uLAgQMmWHvxxRebzF
qCtQBQNTJsAQDwcsuXL5cJEyaIw+E47TZjx46Vyy67TF577TXZunWrjBw50mS2XH311bJ3714pKCiQH3/8UYKCglzadgAA4D+6desm8fHx5n8Sq5X8MQA4HQK2AODjirNzJGXpMinOzJK4DldK7BVtTUZl2iefyrGftkhB2kGJaNxIal/TUWr9/nJ3NxfnaPLkybJ69WoZM2aMTJkypcptdu3aZTJwZ8yYYQKyl19+udx+++2ycOFCE7D9+eefpUWLFgRrAQBAtdL/Ob/99lszwicwMFAsFov5PwQAcGYEbAHAx+2eNUeyt203j7N37JSg2FjJ3bNXUj/SgO3PUpKdLTnJuyV71y4JfuQhCatf391NxjkYOnSojB8/XtatW3fabTRgq5N6lJ/Qo2nTprJy5UrzWAO2hYWF0q9fPzNUsWXLlvKnP/3JbHM+dKij3qpT6f6qe78AXIPvsO/g7zDO5bPy3nvvyU8//SS7d++WAQMGmIAtAOC3EbAFAB+Xf+DAyQWnU/JTUyU/JcUs2nNzT947nJJ/II2ArZdJTEz8zW1yc3MlJCSkwjpdzs/PN48167ZVq1byyCOPmKDuq6++aurdvv/++xIWFnbObdq2bZvUlM2bN9fYvgHUPL7DgH8oKiqS+fPny86dO03pg0svvZRgLQCcAwK2AODjolo0l4wf1pvHlsAAiWjSRGxBwZLx/Y8SGBMjRRkZEhgTLdbgIAm/qJG7m4saoEFXrVFbni6XBmO1vm15jz32mMydO9cEVs40kdnpNG/e/LwCvb+VpaPt0cCyzWar1n0DqHl8h31HXl5ejV6Yg298RubMmSMpKSmmDIJm1jZr1szdzQIAr0LAFgB8XMM7BkhI3bpSfCxTYtu1kdA6ieZmsVkla9sOKTxyxCzHXnmFhMTHu7u5qAFa2uDgwYMm0zY8PNys27FjR9nJ07Rp00wt29KaciUlJSa4cr4zN2tAtaaCqjW5bwA1j++w9+NvMM4kMzNTZs2aJenp6RIaGiqDBg2S+pTbAoBzRsAWAHycNSBA6nS/7pT1tdq0Njf4viZNmpi6tJMmTZInn3zSZEa9++67ZsKy0hq3X331lSmFoJmxEydOlIYNG5psVgAAgLOdYExH6GiwNioqSoYMGSLxJAMAwHmxnt/TAACAJ1u6dKm0adOmbHnKlCmSlpYmnTt3loceekgefvhh81j93//9nwnq3nTTTdKxY0dJTU01WbdkUQEA4J8cBQWSMWeO7Bk0SKwPPmjudVnXn45OKNa7d29JSkqS4cOHE6wFgAtAhi0A+JmjGzfJ4ZWrJCAiQurf0keCYmIkc8svcuirr8UWGiL1+t4kwXFxFZ5z8IuvJPPnLRJSt47U69NbbOc5VB41R2vNrl9/vFax6tu3r7mVn5xs6tSpVT5XJxp78cUXeXsAAIBkLl8ue0eOFPuxYyJWq1gcDsnauFGyli6VlHHjpOGMGRLdu3eFuvilk5tq+QOduFSDtwCA80fAFgD8SN7+FEl+a5aIw2mWi44elcZD7pRdb7wpzhK7WVeQdlAuferxsufo5GQpy943j3N2JYs4HNJwwO1u+g0AAABQk8Ha5AEDTq5wOCrc2zMzJbl/f7lo/nyJvukmMyHohx9+aMofaGatIlgLABeOkggA4EcKDh4sC9aq/NRUKTx0uCxYa7Y5dFgcJSUntzlwoMI+9DkAAADwLVruQDNrDefJ/xcrOLFet1v7zTeyaNEiyc/Plw0bNriwpQDg+wjYAoAfCb+osVjLlTOIuriFhDVoILawsLJ1kb9rZiYqK1tu0VxTJSo8BwAAAL7l2KJFx8sgnC5YW25ysZ9atZKPP//cLLdv315uuOEGF7USAPwDJREAwI8Ex8ZK8wf/KBnffW9q2CZ07SzWwEBpMfYBSV+z1tSwTejapcJzolo0l2ajRkjWL1slpE6ixF3VwW3tBwAAQM3IXLrU1KwtK4NQBafFIht79ZI9rVub5euuu85MYkoZBA
CoXgRsAcDPhNVLkrB6JyejUiEJ8VL/5j6nfY5m1ZJZCwAA4LvsGRlnDNbabTb5oW9fSW3RwmzXYdcu6TJhgkvbCAD+gpIIAAAAAAD4OVts7PEM29OwOJ1iDwgQa0mJXLlkibQoLnZp+wDAnxCwBQAAAADAz0X37XvGDFurwyFXvveedJozR5J+/VWib77Zpe0DAH9CwBYAAAAAAD8X06+f2GJiKkw2mxsdLb927Cil05AFFBdLbGqq2S7m1lvd1lYA8HUEbAEAAAAA8HPWkBBpOGPG8QWLRTITEmTl0KGytUsX2dWuXdl6pdvp9gCAmkHAFgAAAAAASHTv3nLR/Ply9OKL5ZtBg6QwIkKiDh2Ser/+ao6OLTpaLlqwwGwHAKg5ATW4bwAAAAAA4EVSmzWTVf36id1ul4S8PLli82apfd11EnPLLaYMApm1AFDzCNgCAAAAAABZv369LFu2TJxOp7Ro0UJuueUW+fnnn6VR69Zis9k4QgDgIgRsAQAAAADwcxkZGWXB2tatW0ufPn3MYwCA6xGwBQAAAADAz8XGxpogbXp6uvTo0UMsFospiwAAcD0CtgAAAAAA+CENyObl5UlkZKRZbtOmjbubBAAQEStHAQAAAAAA/1JcXCzz5s2T//3vf5Kbm+vu5gAAyiHDFgAAAAAAP5Kfny9z5syR/fv3S0BAgBw+fFjCw8Pd3SwAwAkEbAEAAAAA8BNZWVkya9YsE6QNCQmRQYMGSYMGDdzdLABAOQRsAQAAAADwAzqhmAZrMzMzTd3aIUOGSEJCgrubBQCohIAtAAAAAAA+Li0tTd5++20zyVhcXJwJ1sbExLi7WQCAKhCwBQAAAADAx2lGrZZA0CCtlkGgZi0AeC4CtgAAAAAA+DgN0N51110maBscHOzu5gAAzsAqPuaVV14xVwtvv/122bRpk7ubAwAAAACAW3z33Xeyfv36suXo6GiCtQDgBXwqw3bVqlVy6NAhmTNnjuzatUt++uknufzyy93dLAAAAACAn3nzzTelsLBQRo0a5fLXdjqdsmLFCnOzWCxSt25dqVOnjsvbAQA4Pz4VsF2zZo3phEaOHClBQUHy3HPPubtJAAAAAAA/osHSCRMmmPPT/v37u/z1HQ6HfPjhh/L999+b5S5dukhiYqLL2wEA8MOA7YIFC8ytfD2epKQkycnJkWnTpsmnn34qf/vb3+Tll192azsBwNtOMOy5eWILDzPL5nFYqFis1iq30YwNVyvJzRVrcLBYA7y2CwMAAD5M/1fSIGnr1q0lPT3dpa9dUlIiixcvli1btpjlG264Qdq3b+/SNgAALpzXnu3qlcrKVysnTpxoOkWr1SrXXnutvP76625rHwB4m8KMDNnx+n+kMD1dgmJrmYBowaHDEhgdJc1Gj5TQunWkKDNTdrw+XQoOHpLg2rWl2f0jJTg21mUnP7vfniNH128Qa3CQXHTXEIm+9BKXvDYAAMDZ0vPRHj16yKJFi1x60LT8wrx58yQ5Odm0oV+/ftKyZUuXtgEAUD18atKxtm3bmjq2SgurN23a1N1NAgCvkfrhxyZYq45t3Czp674zj4szsyRl2XLz+OBnX5hgrdJt0z7+1GXty/zpZxOsVY7CItk7f6HLXhsAAMDTbd682QRrAwMDZfDgwQRrAcCLeW2GbVW6d+8uq1evlgEDBojNZpNJkya5u0kA4DWcJfaTjx0OcTpOLjuKS47fFxVVeI6jqNhl7XMUV3yt0jYBAABA5IorrpDMzEy5+OKLpV69ehwSAPBiPhWw1VqKzzzzjLubAQBeKaFbV8na+qvYCwokokljEYtVnCUlYg0KlDo9ux/fpmsXOfbTFrHn5YktNNQ8x1ViLmspYQ0bSN7effoHX5Ju7OWy1wYAAPCG82FNYgIAeD+PCdhu2rRJRo4cKevWrStbt337dhOA3bp1qyQkJMj48eOlZ8+eLmmP3W43t/N9bvl7wJvxefYfIUlJ0uLJ8VJ48JCE1KkjFq
tF8g+kSlDtOAmKjjafhaDEBLn48XFScPCgBCcmSGBEhOv+1tls0vSPoyV/336xhYdLSEL8Ob926fY6ezIAAEBN0hqyAAB4bcB2+fLlMmHChAon0EVFRTJ69GgZNGiQvPXWW7JmzRoZO3asLFy40CW1abdt21YtNYQAX8Hn2c9kZ1X9+HTbuFrmMZEDKef99B07dlRrcwAAgPt9+umn8s9//lNSUlKkdu3aMmzYMHM+6WvJRQAA3+f2gO3kyZNN3dkxY8bIlClTytZrZ5ifny8jRowwQzu6dOkiXbt2lSVLlsijjz5a4+1q3ry5hIWFnddzNYNLg1utWrUytXQBb8bnGb74eW7WrBlBWwAAfMiePXtk3LhxMn36dLnqqqvkl19+kYEDB8pll10ml19++SnJOXXr1pXIyEizvG/fPnPuFxcX5xXJRQAA3+f2gO3QoUPN1cjyVyvVzp07zQm1BmtLaee3ZcsWl7RLA60XGmytjn0AnoLPM3yJ1Wp1dxMAAEA1atSokUkEioiIMMHVo0ePmv9fw8PDT9l22rRpsnfvXpk5c6ZkZGTIXXfdJffcc4/JyPWG5CIAgO9z+xlrYmJilevz8vIkJCSkwjpd1o4RAAAAAIDyNFh77Ngxk1WrwdchQ4ZUmfH68ssvS2xsrNx9991mmz59+pwSrC1NLlq0aJG0bNnyrJKLtEwCAAA+EbA9HR2SUlBQUGGdLp9vmQIAAAAAgG/TMgcbNmyQBQsWyPz5803AtbKgoCB55JFHTP1ZLZd07733VrkvkosAAO7isQFbvUKZnJx8yiQxeiUTAAAAAIDKtAyCBmS1bu3tt98un3zyySnbaCbs8OHDzURibdu2NeUQtITC2SK5CADgtwHbDh06SGBgoEydOtUUdf/6669lxYoV0rdvX3c3DQAAAADgQb788ktT3qA8PY+Mioo6Zdt//etfZsIwzbJ95ZVXpEGDBiYb92yRXAQA8NuArV4VnTFjhqxatcrM8vniiy/KxIkTybAFAAAAAFTQqlUr2bZtm8ydO9dMOvbdd9+ZcgiaZVvZ3//+d3nggQfMY00S0qDtqFGjzvqIklwEAKhpAeIhtNNbv379KVcu3377bbe1CQAAAADg+WrXri3Tpk0zE4pNmjRJkpKS5C9/+Yu0b9/+lG01SFteQEDAeSUXPfvsszJ9+nSJj48nuQgA4JsBWwAAcP42bdpkavGtW7euyp8fPHhQnn76afnxxx/N8NDRo0fLnXfeWfbzV199Vd555x3Jz8+X6667zpyE6sQtAAB4izZt2pxTaYOzRXIRAMDVPLYkAgAAODvLly+XYcOGmVp9pzN27Fhp3LixrF271tTu+8c//iFr1qwxP5s3b54sW7bM3Gu9+NzcXHn++ec5/AAAAADgBgRsAQDwYpMnT5aZM2fKmDFjTrvNrl27TAbuww8/XGHm7IULF5qfa42/u+66S+rXr2+yah977DH58MMPTeAWAAAAAOBalESoRAvUKx0Ser7sdru5z8vLE5vNdiHvD+B2fJ7hi5/ngoKCCn/zvdnQoUNl/Pjxpy2FUBqwrVOnjkRERFSoE79y5UrzeOfOnRUm9dRMXD1We/bskUsvvfSs21J6PDXQW3qsq0vpvnNycsRq5Xoz4G34DvsOX+pDfe08tCZwLgDwXUP1Kf0bfzZ9KAHbSgoLC8397t27L/iN0FlKAV/B5xm+ZO/evWV/88sHMb1RYmLib26jAdSQkJAK63S59B8GvcAYGhpa9jO92KiZuLr+fPrQ0uNbE3bs2FFj+wZQ8/gO+w5f6EN99Ty0JnAuAPBdg2v7UAK2lURHR5vMouDgYDJ4AMBH6RVN7ST1b74/CAsLK8uIKqXLul5psLb8zzWbRuvhhoeHn9Pr0IcCgO/ztz7UVehDAcD3Oc6hDyVgW/mABARIXFxcTb03AAAP4U9ZQVr+4ODBgybTtjQIq1lupWUQ9F7LJrRv394sJycnm4uWegHzXNCHAoB/8Kc+1F
XoQwHAP0ScZR9KETgAAHxckyZNpGXLljJp0iSTSasTkL377rtyyy23mJ/r/RtvvGFq1mZnZ5uJzHr16lWhTAIAAAAAwDUI2AIA4IOWLl0qbdq0KVueMmWKpKWlSefOneWhhx6Shx9+2DxWAwcOlL59+8pdd90l1113nQnUPv/8825sPQAAAAD4L4vT6XS6uxEAAAAAAAAAADJsAQAAAAAAAMBjUBIBAAAAAAAAADwEAVsAAAAAAAAA8BAEbAEAAAAAAADAQxCwBQAAAAAAAAAPEeDuBviTkpISeeqpp2T//v0SFxcnEydOlLCwMHc3C7hgb775phQWFsqoUaM4mvBK+vkdP368HD161Cy/9NJL0rBhQ3c3C2eB9w7wbkVFRebvb3p6uiQmJspf//pXCQoKcnezAPwG+l/Adegr/RMZti70ySefmEDtO++8Ix06dJD58+e78uWBaud0OuWZZ56RWbNmcXTh1RYtWiQtW7Y0n+X77rtPXnvtNXc3CWeJ9w7wbh988IE0adJE5syZI02bNpUlS5a4u0kAzgL9L+A69JX+iQxbF9qwYYN07tzZPO7UqZP87W9/k3vuuceVTQCqPWDbpUsXad26tcmMAbxVnz59xGo9fg3TbreT3eVFeO8A73bLLbeYv7v6P8WhQ4fMxTMAno/+F3Ad+kr/RIatC+Xk5EhERIR5HB4eLrm5ua58eaDaaYCrR48eHFl4Pf3brCVqDhw4IJMnT5aRI0e6u0k4S7x3gPez2Wxy5513ypo1awjYAl6C/hdwLfpK/0PA1sWdWmmQVu8jIyNd+fIAgDPYsWOHjB49Wv785z9LgwYNOFZehPcO8H5z586VJ554wtSwBeAd6H8B16Kv9C8EbF1Ih42vWrXKPNZ7XQYAuF9qaqo8/PDDplQNf5u9C+8d4P0nnwsXLjSPdaSDZhAB8Hz0v4Dr0Ff6J4tTC0bBJbQ+15/+9CfZs2ePREVFySuvvGJKIwC+MOmA1rAdNWqUu5sCnJdnn31WVqxYIfXr1zfLWkPxySef5Gh6Ad47wLsdO3ZMHnvsMSkoKDD1w1944QWpW7euu5sF4DfQ/wKuQ1/pnwjYAgAAAAAAAICHoCTCBdi0aZN06NChwrrt27ebSRPatGkjvXr1kk8//fRC3yPAZfhMw1fwWfZevHeAd+M7DHgnvrsA3zd4FgK252n58uUybNgwKSoqKlunj3XCmu7du8u3334rTz/9tDz++OOyc+fO6nq/gBrDZxq+gs+y9+K9A7wb32HAO/HdBfi+wfMQsD0PkydPlpkzZ8qYMWMqrF+3bp3k5+fLiBEjJDAwULp06SJdu3aVJUuWVNf7BdQIPtPwFXyWvRfvHeDd+A4D3onvLsD3DZ6JgO15GDp0qJlkSSelKU8zaZs1ayYWi6VsXdOmTU2ZBMCT8ZmGr+Cz7L147wDvxncY8E58dwG+b/BMBGzPQ2JiYpXr8/LyJCQkpMI6XdasW8CT8ZmGr+Cz7L147wDvxncY8E58dwG+b/BMBGyrUVhYmBQUFFRYp8u6HvBGfKbhK/gsey/eO8C78R0GvBPfXYDvG9yLgG010vIHycnJFdbt2LHDlEkAvBGfafgKPsvei/cO8G58hwHvxHcX4PsG9yJgW406dOhgJhubOnWqFBUVyddffy0rVqyQvn37VufLAC7DZxq+gs+y9+K9A7wb32HAO/HdBfi+wb0I2FajoKAgmTFjhqxatUquuuoqefHFF2XixIlk2MJr8ZmGr+Cz7L147wDvxncY8E58dwG+b3Avi9PpdLq5DQAAAAAAAAAAMmwBAAAAAAAAwHNQEgEAAAAAAAAAPAQBWwAAAAAAAADwEARsAQAAAAAAAMBDELAFAAAAAAAAAA9BwBYAAAAAAAAAPAQBWwAAAAAAAADwEARsAQAAAAAAAMBDELAFAAAAAAAAAA9BwBZwsfz8fHn11VflhhtukN///vdy3XXXyTPPPCOHDx8u2+bJJ5+U0aNHV8vrHT
x4UD744AOpbj/88IMMGzZM2rZta36PW2+9VebMmVNhm27dusl///vfan9tAIBvstvtMmPGDLnxxhulVatW0qFDB7n//vtly5Yt1bL/wsLCU/qqmlZQUCCvvPKK9OjRQy677DLp1KmTjBs3Tvbu3Vu2zaJFi6RNmzY18vrr1q2TFi1aSEZGRo3sHwDgGehDqx99KNyJgC3gQjk5OXLHHXfIl19+KU888YQsX75cXnrpJfnll19kyJAhNXIy9fzzz8vXX39drfvcunWrCda2a9dOFixYIEuXLpWBAwfKX/7yF5k+fXrZdgsXLpRBgwZV62sDAHzXP//5TxNQ1YCmXmycOXOmhIaGyuDBgysEOM/X3LlzTUDYlZ5++mn56quvTH/80UcfyZQpU0x/r/1jVlaW2UYD1J999plL2wUA8C30oYBvIWALuJBm2GimzVtvvSXXXnutNGjQQK6++mqThXr06NEayUZ1Op3Vvs/33ntPLr30UhkzZow0bdpUGjVqZALRGsSdPXt22XaxsbHmRBsAgLMxb948ue+++6R79+6mj2zZsqVMmjRJYmJiTBaqJ/aJv3Wh9v3335fHH39cOnbsKPXr1zcjUzRoq8Hajz/+2GwXEhIicXFxLm0bAMC30IcCvoWALeAiRUVFsmTJEhk6dKiEh4dX+FlUVJTJ+Ln77rtPeV5VwyT1RO+mm24qW37ttddMAFiHWvbp00c++eSTstIKn3/+uSxevNiUJygtyfDcc8/JVVddJVdeeaWMGjVKdu/eXbYvbd+ECRPM/nUo6qZNm05pk8ViMc/Zt29fhfUjRowwweiqSiLocMyqbt9++635ue5LT9L1d+3cubMpE6EnugAA/6H9iw4/LC4uLltns9lM33LXXXeZ5d69e8tf//rXCs/7+9//bkaqKM1U1b5QSypo36hliDRQq/3pyy+/LCkpKab/2b9/v9le960B4tatW8ttt90mK1eurNDf/vGPfzT37du3N/3itGnT5Oeff5Z+/fqZkkCaKav7PNPvtGrVKnE4HGXrIiIizOiUP/zhD6f09fpaVfWX2j+XOlObAQD+iT6UPhS+hYAt4CIakMzOzjYnd1W5/PLLJSEh4Zz3qwHZN99805Qj0KGWevL36KOPmtq1OgxTg59aL1fLEygNxmpJAz3hfOedd6ROnTrmJLd8cPTdd9812UD/+c9/TCZtZQMGDJCSkhLzWhpk1oDx+vXrzQmoZttW5Ztvvim7rVixwgSX9eRXM400mK3BXv39tZ16srpt2zYzJBYA4D+GDx9uygVp3zV+/HhTdic1NdVk2+qoDXXzzTebcgml2bJ6r8/R9Vpq4OGHHzbBTe0T/9//+3+mVI9msmrZgYceesj0e9oX1a1b1/Q52tdpf6kB1FtuucUEaMtfrNSyQocOHTJt0fZpcFjbpn2UjirR/vZf//pXlb+P9ovaZ+rFS61Zr6+jF2+1nY0bN5bIyMgqj0H5PlOD0xq01gus6mzaDADwP/Sh9KHwLQRsARfJzMwsy6at7kBwYGCg1KtXzwy11MlZXn/9dXOSqCeCQUFBZqilnuhqNpGe3E2cONEEjps1a2aybYODg80JZCmtTdulSxcTRA4ICDjlNS+66CKTtatZPTt37jT1krQkggaGNXBblfj4+LKbnjxrCQh9nu5fh4tqNpW2RUssaMbQ5MmTTc2/HTt2VOvxAgB4Lg1K/vvf/zalEDTIqgFXDXQ+8sgjkpuba7bp27evCaB+//33ZZNgpqenm4uIaWlppj/RoKz2izrR1xtvvGGyV7Uv1BEuGvzUvkjv9YLj2LFjzYiQhg0bmkCvZvCWL1Gk2/3f//2fuSBZmuWrE23q5GF68VFfd/v27af9nZ599lmT2at9tPadekFUA9J//vOfzcXPyrSNpf2l/s5a614vxOpz1Nm0GQDgf+hD6UPhW06NxACoEbVq1aoQuK0uOuxT6xX17NnTZMPqCZ0O06xcdkFp8FMzkfRkt/Ks2b
t27Spb1pPc36LZTjqBit40G1azZjXTV/9R0Kzf0wWmta2awavZvaXZUvp8PcnWbNvKNCCsgWUAgH/QIKvetOa7BmP1op6WDNCLizqaRIOxWppAs2q1tM+yZctM8FIvUl5yySXSq1cvGTlypOnLtE/UfjIxMfGU19FgqJYy0H7sxRdfLFuvAV+9MFlKX08vfqrSuuwafC2lgWAdKXKmIaraL+tNR7NoyQetBT9r1ixTm/fBBx+s8nk6KkcvwurvcO+9955TmwEA/ok+9Dj6UPgCAraAi2hmjp6YbdiwwWSuVjZ16lQTzH3iiSd+c1/lM3J0khI9adVasDpsU4eAam07zbSpHADV51mtVjOcsnLmrGbkltKT4jPR4ZlaO08zcVXz5s3NTbOgNMtHf0fN0K3su+++MxlFesJdvtSCtkuXdZhpZUzCAgD+Qcv1zJ0719Qw175KA6Gaxao37T/LjwTRMgDal/zpT38y/Z4+Lg2OankC3ZeO0tCLiVr256mnnjqlTrzdbjf3GvysXCu+fB+po1gq0/adDQ3OalmD0hI/2tdq/6k3zZLV2rNVBWy13q1m1eoxeOGFF865zQAA/0IfehJ9KHwFJREAV33ZrFZzgvn222+bib/KO3LkiMlOrYqeKGqWUfkg7Z49e8oe64mqnuBeffXVJtj74YcfmmwgnXSl9OS1lJYb0A5MA8MaQNabZiD97W9/k40bN57177J69WpTt6+y0lp8VQVZtRyD1g7UE+byE6YpzaDV0g61a9cua5e2U4eQap0/AIDv0xEgOvpCA5yV6aiN0lEZ6vrrrzejQ7SWq/avpeUCfvnlF9N3XHzxxWYiS92f1pDVLNzKfaLuU8sO6AiP0r5Hb5rNq1m91UEzfLSNOlrkt36n8iZNmiQ//fSTKQ9RmtXrqjYDALwPfehJ9KHwFVyKB1xozJgxJgtW681p8FKHL+pJnAZMo6OjzcllZVprVjtgrVmnwyk1WKr7SEpKMj/XYZhak1ZLLui2OnO1DpfU2bFVWFiYWdZJUfT1tHSCZiTp5GMa2NXMXj05fuyxx87699AJXXSCEz2JHDhwoDnhTE5ONm3Uk2atPVheXl6e2V6Hqmox/MOHD5f9TNunw1X1uZqBpPvWoZ1a80/vyw87BQD4Lu0jtB6s9kfaR2pmrfYDWhZBg56aVVq+79Bhn7q+f//+ZdmlmsGqFxS1f9I663pBVLNcO3bsWPY8vWi5e/du079oGZ9//OMfpj+84oorTEkfrbNe1YiP86EjT7Quu06sqf2bvoYGcVetWmWCyDNmzDjlOVouQS/uvvrqq2bES/k+U4O159vmtWvXVhhNo7Rt1V1bHwDgevShx9GHwpcQsAVcSE+K5syZY4KkOsGWnoRpVmnXrl3lgQceMEHbynRCEZ3sRE/GtMzBNddcY05ktayB0nq0mmmjQV8NymqdPh1GqROAqdtvv91M1qLbrVmzxmQeaYBX12nmrpYimDlzpqlJey4noPocPdHUE0etqacnjloOoaqg8+bNm+XXX381t9KT5lL6e+twUN2fDmm98847TVaxZgxrYPlsh50CALyfZsX873//MyNH9LHSbFntt7SMQHk333yzmUizfF127cs00KmTWupkYxq41Zq2pRclr732WlM2SC8UamBXL6Bqpq6WUdCJzPT5OslXaR96oXTCMu0rp02bZiYETU1NNcFlDZTq+tLSQuVpnXcNVI8ePfqUn2k/er5t1n6/Mj0GVbUBAOB96EPpQ+FbLE5N3QMAAAC8yOLFi02G7QcffODupgAA4FXoQwHPR4YtAAAAvMaOHTtMpumUKVNMmR0AAEAfCvgaxhoDAADAa2zbts2UzNFa7XfccYe7mwMAgNegDwW8ByURAAAAAAAAAMBDkGELAAAAAAAAAB6CgC0AAAAAAAAAeAgCtgAAAAAAAADgIQjYAgAAAAAAAICHIGALAAAAAAAAAB6CgC0AAAAAAAAAeAgCtgAAAAAAAADgIQ
jYAgAAAAAAAICHIGALAAAAAAAAAOIZ/j8U+iDWNrW8bgAAAABJRU5ErkJggg==", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "α = 1.17 ± 0.03\n", - "β = 0.29 ± 0.02, R² = 0.992\n" - ] - } - ], - "source": [ - "# === Predator Finite-Size Scaling: 3 Panels ===\n", - "\n", - "from scipy import stats as scipy_stats\n", - "\n", - "# Color palette for different L values\n", - "colors_L = plt.cm.Reds(np.linspace(0.35, 0.85, len(grid_sizes)))\n", - "\n", - "# Collect data\n", - "alphas = []\n", - "max_sizes = []\n", - "Ls = []\n", - "Rs = []\n", - "cluster_data = []\n", - "\n", - "for i, L in enumerate(grid_sizes):\n", - " subset = df_phase3[df_phase3[\"grid_size\"] == L]\n", - "\n", - " clusters = []\n", - " for sizes in subset[\"pred_cluster_sizes\"]:\n", - " if isinstance(sizes, list) and len(sizes) > 0:\n", - " clusters.extend(sizes)\n", - "\n", - " if len(clusters) < 50:\n", - " continue\n", - "\n", - " clusters = np.array(clusters)\n", - " fit = powerlaw.Fit(clusters, discrete=True, xmin=1, verbose=False)\n", - " R_tpl_ln, _ = fit.distribution_compare(\n", - " \"truncated_power_law\", \"lognormal\", normalized_ratio=True\n", - " )\n", - "\n", - " # Compute PDF\n", - " unique, counts = np.unique(clusters, return_counts=True)\n", - " probs = counts / len(clusters)\n", - "\n", - " alphas.append(fit.truncated_power_law.alpha)\n", - " max_sizes.append(clusters.max())\n", - " Ls.append(L)\n", - " Rs.append(R_tpl_ln)\n", - " cluster_data.append(\n", - " {\"L\": L, \"unique\": unique, \"probs\": probs, \"color\": colors_L[i]}\n", - " )\n", - "\n", - "# Convert to arrays\n", - "Ls = np.array(Ls)\n", - "alphas = np.array(alphas)\n", - "max_sizes = np.array(max_sizes)\n", - "\n", - "# Fit scaling exponent β\n", - "slope, intercept, r, p, se = scipy_stats.linregress(np.log(Ls), np.log(max_sizes))\n", - "\n", - "# === Plot (3 panels) ===\n", - "fig, axes = plt.subplots(1, 3, figsize=(14, 4.5))\n", - "\n", - "# Left: Cluster distributions\n", - "ax1 = axes[0]\n", - "for data 
in cluster_data:\n", - " ax1.scatter(\n", - " data[\"unique\"],\n", - " data[\"probs\"],\n", - " color=data[\"color\"],\n", - " s=10,\n", - " alpha=0.6,\n", - " edgecolors=\"none\",\n", - " label=f'L={data[\"L\"]}',\n", - " )\n", - "\n", - "ax1.set_xscale(\"log\")\n", - "ax1.set_yscale(\"log\")\n", - "ax1.set_xlabel(\"Cluster Size\")\n", - "ax1.set_ylabel(\"P(s)\")\n", - "ax1.set_title(\"Predator Cluster Distributions\")\n", - "ax1.legend(loc=\"upper right\", fontsize=8, framealpha=0.95)\n", - "\n", - "# Middle: α vs L\n", - "ax2 = axes[1]\n", - "ax2.plot(Ls, alphas, \"o-\", color=COLORS[\"predator\"], markersize=8, linewidth=1.5)\n", - "ax2.axhline(\n", - " np.mean(alphas),\n", - " color=\"0.5\",\n", - " linestyle=\"--\",\n", - " linewidth=1,\n", - " label=f\"Mean α = {np.mean(alphas):.2f}\",\n", - ")\n", - "ax2.fill_between(\n", - " [min(Ls) * 0.8, max(Ls) * 1.2],\n", - " np.mean(alphas) - np.std(alphas),\n", - " np.mean(alphas) + np.std(alphas),\n", - " color=\"0.5\",\n", - " alpha=0.15,\n", - ")\n", - "\n", - "ax2.set_xscale(\"log\")\n", - "ax2.set_xlabel(\"System Size L\")\n", - "ax2.set_ylabel(\"Exponent α\")\n", - "ax2.set_title(f\"Universal Exponent (α = {np.mean(alphas):.2f} ± {np.std(alphas):.2f})\")\n", - "ax2.set_xlim(min(Ls) * 0.8, max(Ls) * 1.2)\n", - "ax2.set_ylim(1.0, 1.35)\n", - "ax2.legend(loc=\"upper right\", framealpha=0.95)\n", - "\n", - "# Right: max_size scaling\n", - "ax3 = axes[2]\n", - "ax3.loglog(Ls, max_sizes, \"o\", color=COLORS[\"predator\"], markersize=8, label=\"Data\")\n", - "\n", - "# Fit line\n", - "L_fit = np.logspace(np.log10(min(Ls) * 0.8), np.log10(max(Ls) * 1.2), 100)\n", - "max_fit = np.exp(intercept) * L_fit**slope\n", - "ax3.loglog(\n", - " L_fit,\n", - " max_fit,\n", - " \"--\",\n", - " color=\"0.5\",\n", - " linewidth=1.5,\n", - " label=f\"β = {slope:.2f} ± {se:.2f} (R² = {r**2:.2f})\",\n", - ")\n", - "\n", - "ax3.set_xlabel(\"System Size L\")\n", - "ax3.set_ylabel(\"Max Cluster Size\")\n", - "ax3.set_title(f\"Sublinear 
Scaling (max ~ L^{slope:.2f})\")\n", - "ax3.legend(loc=\"upper left\", framealpha=0.95)\n", - "\n", - "plt.tight_layout()\n", - "plt.savefig(\"plot_predator_fss.png\", dpi=150, bbox_inches=\"tight\")\n", - "plt.show()\n", - "\n", - "print(f\"α = {np.mean(alphas):.2f} ± {np.std(alphas):.2f}\")\n", - "print(f\"β = {slope:.2f} ± {se:.2f}, R² = {r**2:.3f}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 61, + "execution_count": 26, "metadata": {}, "outputs": [ { @@ -1800,17 +1646,6 @@ "plt.show()" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "NOTE:\n", - "\n", - "The Hydra effect does not appear to be associated with spatial criticality. Despite the sharp population transition, prey clustesr follow lognormal distribution across all parameters. Finit-size scaling analysis confirms this is not a finite-size result. The lognormal signature strenghthens with increasing system size L. This is the opposite behavior near a critical point.\n", - "\n", - "Conclusion (?): The Hydra effect represents a population-level transition driven by local birth-death dynamics without scale free structure characteristics of SOC." 
- ] - }, { "cell_type": "markdown", "metadata": {}, @@ -1820,7 +1655,7 @@ }, { "cell_type": "code", - "execution_count": 62, + "execution_count": 27, "metadata": {}, "outputs": [], "source": [ @@ -1831,7 +1666,7 @@ }, { "cell_type": "code", - "execution_count": 63, + "execution_count": 28, "metadata": {}, "outputs": [ { @@ -1860,7 +1695,7 @@ }, { "cell_type": "code", - "execution_count": 64, + "execution_count": 29, "metadata": {}, "outputs": [ { @@ -1891,7 +1726,7 @@ }, { "cell_type": "code", - "execution_count": 65, + "execution_count": 30, "metadata": {}, "outputs": [ { @@ -1929,7 +1764,7 @@ }, { "cell_type": "code", - "execution_count": 66, + "execution_count": 31, "metadata": {}, "outputs": [ { @@ -1978,7 +1813,7 @@ }, { "cell_type": "code", - "execution_count": 67, + "execution_count": 32, "metadata": {}, "outputs": [ { @@ -2022,7 +1857,7 @@ }, { "cell_type": "code", - "execution_count": 68, + "execution_count": 33, "metadata": {}, "outputs": [ { @@ -2077,7 +1912,7 @@ }, { "cell_type": "code", - "execution_count": 69, + "execution_count": 34, "metadata": {}, "outputs": [ { @@ -2092,7 +1927,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "/var/folders/fm/9tf79c_d1691c4jwk2qqr42m0000gn/T/ipykernel_6177/2511870083.py:122: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.\n", + "/var/folders/fm/9tf79c_d1691c4jwk2qqr42m0000gn/T/ipykernel_35436/2511870083.py:122: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.\n", " plt.tight_layout(rect=[0, 0, 0.88, 1])\n" ] }, @@ -2236,7 +2071,7 @@ }, { "cell_type": "code", - "execution_count": 70, + "execution_count": 35, "metadata": {}, "outputs": [ { @@ -2406,7 +2241,7 @@ }, { "cell_type": "code", - "execution_count": 71, + "execution_count": 36, "metadata": {}, "outputs": [ { @@ -2519,7 +2354,7 @@ }, { "cell_type": "code", - "execution_count": 72, + "execution_count": 37, "metadata": {}, 
"outputs": [ { @@ -2689,12 +2524,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Phase 6" + "### Phase 5" ] }, { "cell_type": "code", - "execution_count": 73, + "execution_count": 38, "metadata": {}, "outputs": [], "source": [ @@ -2705,7 +2540,7 @@ }, { "cell_type": "code", - "execution_count": 74, + "execution_count": 39, "metadata": {}, "outputs": [ { @@ -2734,7 +2569,7 @@ }, { "cell_type": "code", - "execution_count": 75, + "execution_count": 40, "metadata": {}, "outputs": [ { @@ -2765,7 +2600,7 @@ }, { "cell_type": "code", - "execution_count": 76, + "execution_count": 41, "metadata": {}, "outputs": [ { @@ -2803,7 +2638,7 @@ }, { "cell_type": "code", - "execution_count": 77, + "execution_count": 42, "metadata": {}, "outputs": [ { @@ -2852,7 +2687,7 @@ }, { "cell_type": "code", - "execution_count": 78, + "execution_count": 43, "metadata": {}, "outputs": [ { @@ -2896,7 +2731,7 @@ }, { "cell_type": "code", - "execution_count": 79, + "execution_count": 44, "metadata": {}, "outputs": [ { @@ -2951,7 +2786,7 @@ }, { "cell_type": "code", - "execution_count": 80, + "execution_count": 45, "metadata": {}, "outputs": [ { @@ -3121,7 +2956,7 @@ }, { "cell_type": "code", - "execution_count": 81, + "execution_count": 46, "metadata": {}, "outputs": [ { @@ -3234,7 +3069,7 @@ }, { "cell_type": "code", - "execution_count": 83, + "execution_count": 47, "metadata": {}, "outputs": [ { diff --git a/requirements.txt b/requirements.txt index 6a14b5a..5dcd13e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,4 +6,5 @@ seaborn black tqdm numba -powerlaw \ No newline at end of file +powerlaw +pdoc \ No newline at end of file diff --git a/scripts/__init__.py b/scripts/__init__.py deleted file mode 100644 index 9531307..0000000 --- a/scripts/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from ..models.numba_optimized import * diff --git a/bash_scripts/run_phase1.sh b/scripts/run_phase1.sh similarity index 86% rename from bash_scripts/run_phase1.sh rename to 
scripts/run_phase1.sh index 07b5996..760ba56 100644 --- a/bash_scripts/run_phase1.sh +++ b/scripts/run_phase1.sh @@ -14,9 +14,6 @@ # ============================================================================= # # PHASE 1: Find critical point via 2D sweep of prey_birth × prey_death -# - 15 × 15 × 15 reps × 2 (evo/non-evo) = 6,750 simulations -# - Estimated runtime: ~2 min on 32 cores -# - Memory: ~8 GB peak # # SUBMIT: sbatch run_phase1.sh # MONITOR: squeue -u $USER @@ -88,10 +85,4 @@ echo "Results in: $OUTPUT_DIR/" echo "" echo "Output files:" ls -lh $OUTPUT_DIR/ -echo "" -echo "Next steps:" -echo " 1. Download phase1_results.jsonl" -echo " 2. Run analysis.py to find critical point" -echo " 3. Update config.py with critical_prey_birth/death" -echo " 4. Run phase 2-5" -echo "========================================" \ No newline at end of file +echo "" \ No newline at end of file diff --git a/bash_scripts/run_phase2.sh b/scripts/run_phase2.sh similarity index 81% rename from bash_scripts/run_phase2.sh rename to scripts/run_phase2.sh index 05da45e..8ee9708 100644 --- a/bash_scripts/run_phase2.sh +++ b/scripts/run_phase2.sh @@ -13,11 +13,6 @@ # PP Hydra Effect - Phase 2: Self-Organization (SOC Test) # ============================================================================= # -# PHASE 2: Test if prey_death evolves toward critical point -# - 6 initial prey_death values × 30 reps = 180 simulations -# - Longer runs (5000 steps) for evolution to equilibrate -# - Tracks evolved_prey_death_timeseries -# # SUBMIT: sbatch run_phase2.sh # MONITOR: squeue -u $USER # CANCEL: scancel @@ -88,10 +83,4 @@ echo "Results in: $OUTPUT_DIR/" echo "" echo "Output files:" ls -lh $OUTPUT_DIR/ -echo "" -echo "Next steps:" -echo " 1. Download phase2_results.jsonl" -echo " 2. Plot evolved_prey_death_final vs initial prey_death" -echo " 3. Check if all runs converge to ~0.095-0.105 (critical point)" -echo " 4. 
If SOC confirmed, proceed to Phase 3 (finite-size scaling)" -echo "========================================" \ No newline at end of file +echo "" \ No newline at end of file diff --git a/bash_scripts/run_phase3.sh b/scripts/run_phase3.sh similarity index 83% rename from bash_scripts/run_phase3.sh rename to scripts/run_phase3.sh index 59b9e86..54a9076 100644 --- a/bash_scripts/run_phase3.sh +++ b/scripts/run_phase3.sh @@ -13,11 +13,6 @@ # PP Hydra Effect - Phase 3: Finite-Size Scaling # ============================================================================= # -# PHASE 3: Test finite-size scaling at critical point -# - Grid sizes: 50, 100, 250, 500, 1000 -# - 20 replicates per size = 100 simulations -# - Cluster size distributions for power-law analysis -# # SUBMIT: sbatch run_phase3.sh # MONITOR: squeue -u $USER # CANCEL: scancel @@ -86,10 +81,4 @@ echo "Results in: $OUTPUT_DIR/" echo "" echo "Output files:" ls -lh $OUTPUT_DIR/ -echo "" -echo "Next steps:" -echo " 1. Download phase3_results.jsonl" -echo " 2. Analyze cluster size distributions P(s) for each grid size" -echo " 3. Fit power-law exponent tau from P(s) ~ s^(-tau)" -echo " 4. 
Check finite-size cutoff s_max ~ L^D (fractal dimension)" -echo "========================================" \ No newline at end of file +echo "" \ No newline at end of file diff --git a/bash_scripts/run_phase4.sh b/scripts/run_phase4.sh similarity index 81% rename from bash_scripts/run_phase4.sh rename to scripts/run_phase4.sh index cd242aa..cf8258c 100644 --- a/bash_scripts/run_phase4.sh +++ b/scripts/run_phase4.sh @@ -13,13 +13,6 @@ # PP Hydra Effect - Phase 4: Global Sensitivity Analysis # ============================================================================= # -# PHASE 4: Full 4D Parameter Sweep (Global Sensitivity) -# - Parameters: prey_birth, prey_death, pred_birth, pred_death -# - Sweep: 0.0 to 1.0 (11 values each) = 14,641 combinations -# - Replicates: 10 per combination -# - Total Simulations: ~146,410 -# - Grid Size: 250x250 -# # SUBMIT: sbatch run_phase4.sh # MONITOR: squeue -u $USER # CANCEL: scancel @@ -89,10 +82,4 @@ echo "Results in: $OUTPUT_DIR/" echo "" echo "Output files:" ls -lh $OUTPUT_DIR/ -echo "" -echo "Next steps:" -echo " 1. Download phase4_results.jsonl" -echo " 2. Perform Global Sensitivity Analysis (Sobol Indices)" -echo " 3. Identify parameter dominance for extinction events" -echo " 4. 
Plot parameter heatmaps for predator/prey survival" -echo "========================================" \ No newline at end of file +echo "" \ No newline at end of file diff --git a/bash_scripts/run_phase6.sh b/scripts/run_phase5.sh similarity index 75% rename from bash_scripts/run_phase6.sh rename to scripts/run_phase5.sh index f925586..9d84a9f 100644 --- a/bash_scripts/run_phase6.sh +++ b/scripts/run_phase5.sh @@ -10,17 +10,9 @@ #SBATCH --error=pp_phase6_%j.err # ============================================================================= -# PP Hydra Effect - Phase 6: Directed Hunting 4D Sweep +# PP Hydra Effect - Phase 5: Directed Hunting 4D Sweep # ============================================================================= # -# PHASE 6: Full 4D parameter sweep with directed hunting enabled -# - Same structure as Phase 4 but with directed_hunting=True -# - 11^4 × 10 reps = 146,410 simulations -# - Grid size: 250 -# - Collects time series for comparison with Phase 4 -# - Estimated runtime: ~4-6 hours on 128 cores -# - Memory: mem=0 (use all available node memory) -# # PURPOSE: Test if Hydra effect and SOC persist under directed hunting # # SUBMIT: sbatch run_phase6.sh @@ -93,11 +85,4 @@ echo "Results in: $OUTPUT_DIR/" echo "" echo "Output files:" ls -lh $OUTPUT_DIR/ -echo "" -echo "Next steps:" -echo " 1. Download phase6_results.jsonl" -echo " 2. Compare with Phase 4 results (random hunting baseline)" -echo " 3. Analyze if Hydra effect persists under directed hunting" -echo " 4. Compare critical point locations between Phase 4 and Phase 6" -echo " 5. Check for differences in SOC signatures" -echo "========================================" \ No newline at end of file +echo "" \ No newline at end of file diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..1a3922a --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,270 @@ +""" +Shared pytest fixtures for Predator-Prey CA test suite. 
+""" + +import pytest +import numpy as np +from dataclasses import dataclass +from typing import Tuple + + +# ============================================================================= +# Minimal Config for Testing (avoids importing full config module) +# ============================================================================= + + +@dataclass +class MinimalConfig: + """Minimal configuration for fast test simulations.""" + + grid_size: int = 10 + densities: Tuple[float, float] = (0.3, 0.15) + grid_sizes: Tuple[int, ...] = (5, 10) + prey_birth: float = 0.2 + prey_death: float = 0.05 + predator_birth: float = 0.8 + predator_death: float = 0.05 + critical_prey_birth: float = 0.2 + critical_prey_death: float = 0.097 + prey_death_range: Tuple[float, float] = (0.05, 0.15) + n_prey_death: int = 3 + n_replicates: int = 2 + warmup_steps: int = 5 + measurement_steps: int = 10 + evolve_sd: float = 0.05 + evolve_min: float = 0.01 + evolve_max: float = 0.15 + directed_hunting: bool = False + save_timeseries: bool = False + timeseries_subsample: int = 2 + collect_pcf: bool = False + pcf_sample_rate: float = 0.0 + pcf_max_distance: float = 5.0 + pcf_n_bins: int = 10 + min_density_for_analysis: float = 0.01 + n_jobs: int = 1 + + def get_prey_deaths(self) -> np.ndarray: + return np.linspace( + self.prey_death_range[0], self.prey_death_range[1], self.n_prey_death + ) + + def get_warmup_steps(self, L: int) -> int: + return self.warmup_steps + + def get_measurement_steps(self, L: int) -> int: + return self.measurement_steps + + +# ============================================================================= +# Grid Fixtures +# ============================================================================= + + +@pytest.fixture +def empty_grid_10x10(): + """10x10 grid with no species.""" + return np.zeros((10, 10), dtype=np.int32) + + +@pytest.fixture +def prey_only_grid_10x10(): + """10x10 grid with only prey (species 1) in a known pattern.""" + grid = np.zeros((10, 10), 
dtype=np.int32) + grid[2:5, 2:5] = 1 # 3x3 block of prey = 9 cells + return grid + + +@pytest.fixture +def predator_only_grid_10x10(): + """10x10 grid with only predators (species 2).""" + grid = np.zeros((10, 10), dtype=np.int32) + grid[0, 0] = 2 + grid[0, 9] = 2 + grid[9, 0] = 2 + grid[9, 9] = 2 # 4 predators in corners + return grid + + +@pytest.fixture +def mixed_grid_10x10(): + """10x10 grid with both prey and predators.""" + grid = np.zeros((10, 10), dtype=np.int32) + # Prey cluster + grid[1:4, 1:4] = 1 # 9 prey + # Predator cluster + grid[6:8, 6:8] = 2 # 4 predators + return grid + + +@pytest.fixture +def single_cluster_grid(): + """Grid with exactly one connected cluster of prey.""" + grid = np.zeros((5, 5), dtype=np.int32) + grid[1, 1] = 1 + grid[1, 2] = 1 + grid[2, 1] = 1 + grid[2, 2] = 1 # 2x2 block = 4 connected cells + return grid + + +@pytest.fixture +def two_cluster_grid(): + """Grid with two separate prey clusters (no periodic connection).""" + grid = np.zeros((10, 10), dtype=np.int32) + # Cluster 1: top-left corner + grid[0, 0] = 1 + grid[0, 1] = 1 + grid[1, 0] = 1 # 3 cells + # Cluster 2: center (far enough to avoid periodic Moore connection) + grid[4, 4] = 1 + grid[4, 5] = 1 + grid[5, 4] = 1 + grid[5, 5] = 1 # 4 cells + return grid + + +@pytest.fixture +def periodic_cluster_grid(): + """Grid where prey connect via periodic boundary.""" + grid = np.zeros((5, 5), dtype=np.int32) + grid[0, 0] = 1 # Top-left + grid[4, 0] = 1 # Bottom-left (connects to top-left via periodic) + grid[0, 4] = 1 # Top-right (connects to top-left via periodic) + return grid + + +@pytest.fixture +def checkerboard_grid(): + """Alternating pattern - many small clusters.""" + grid = np.zeros((6, 6), dtype=np.int32) + for i in range(6): + for j in range(6): + if (i + j) % 2 == 0: + grid[i, j] = 1 + return grid + + +# ============================================================================= +# Config Fixtures +# 
============================================================================= + + +@pytest.fixture +def minimal_config(): + """Minimal config for fast test runs.""" + return MinimalConfig() + + +@pytest.fixture +def minimal_config_with_pcf(): + """Config with PCF collection enabled.""" + return MinimalConfig(collect_pcf=True, pcf_sample_rate=1.0) + + +@pytest.fixture +def minimal_config_with_timeseries(): + """Config with time series collection enabled.""" + return MinimalConfig(save_timeseries=True) + + +@pytest.fixture +def minimal_config_directed(): + """Config with directed hunting enabled.""" + return MinimalConfig(directed_hunting=True) + + +# ============================================================================= +# Model Fixtures +# ============================================================================= + + +@pytest.fixture +def pp_model_small(): + """Small PP model for quick tests.""" + import sys + from pathlib import Path + + sys.path.insert(0, str(Path(__file__).parent.parent)) + from models.CA import PP + + return PP( + rows=10, + cols=10, + densities=(0.3, 0.15), + neighborhood="moore", + seed=42, + directed_hunting=False, + ) + + +@pytest.fixture +def pp_model_with_evolution(): + """PP model with evolution enabled.""" + import sys + from pathlib import Path + + sys.path.insert(0, str(Path(__file__).parent.parent)) + from models.CA import PP + + model = PP( + rows=10, + cols=10, + densities=(0.3, 0.15), + neighborhood="moore", + seed=42, + ) + model.evolve("prey_death", sd=0.05, min_val=0.01, max_val=0.15) + return model + + +# ============================================================================= +# Utility Fixtures +# ============================================================================= + + +@pytest.fixture +def temp_output_dir(tmp_path): + """Temporary directory for test outputs.""" + output_dir = tmp_path / "test_results" + output_dir.mkdir() + return output_dir + + +@pytest.fixture +def sample_results(): + """Sample 
simulation results for I/O testing.""" + return [ + { + "prey_birth": 0.2, + "prey_death": 0.05, + "predator_birth": 0.8, + "predator_death": 0.1, + "grid_size": 10, + "seed": 42, + "prey_mean": 25.5, + "prey_std": 3.2, + "pred_mean": 12.1, + "pred_std": 2.5, + "prey_survived": True, + "pred_survived": True, + "prey_cluster_sizes": [10, 5, 3], + "pred_cluster_sizes": [8, 4], + }, + { + "prey_birth": 0.2, + "prey_death": 0.10, + "predator_birth": 0.8, + "predator_death": 0.1, + "grid_size": 10, + "seed": 43, + "prey_mean": 20.0, + "prey_std": 4.0, + "pred_mean": 15.0, + "pred_std": 3.0, + "prey_survived": True, + "pred_survived": True, + "prey_cluster_sizes": [12, 8], + "pred_cluster_sizes": [10, 5], + }, + ] \ No newline at end of file diff --git a/tests/smoke_test.py b/tests/smoke_test.py deleted file mode 100644 index 8ec5f9b..0000000 --- a/tests/smoke_test.py +++ /dev/null @@ -1,698 +0,0 @@ -#!/usr/bin/env python3 -""" -Smoke Test for Predator-Prey Simulation Pipeline - -Run this before HPC submission to verify everything works correctly. - -Usage: - python smoke_test.py # Run all tests - python smoke_test.py --quick # Run minimal tests only - python smoke_test.py --verbose # Extra output - -Tests: - 1. Module imports - 2. Numba kernel (random movement) - 3. Numba kernel (directed hunting) - 4. Full simulation (random, no evolution) - 5. Full simulation (random, with evolution) - 6. Full simulation (directed, no evolution) - 7. Full simulation (directed, with evolution) - 8. PCF computation - 9. Cluster measurement - 10. Reproducibility (seeding) - 11. 
Binary save/load roundtrip -""" - -import sys -import time -import argparse -import tempfile -from pathlib import Path - -# Setup path -project_root = str(Path(__file__).resolve().parents[1]) -if project_root not in sys.path: - sys.path.insert(0, project_root) - -import numpy as np - -# Track results -RESULTS = [] -VERBOSE = False - - -def log(msg: str, level: str = "INFO"): - """Print formatted log message.""" - symbols = {"INFO": "ℹ", "PASS": "✓", "FAIL": "✗", "WARN": "⚠", "RUN": "→"} - print(f" {symbols.get(level, '•')} {msg}") - - -def run_test(name: str, func, *args, **kwargs): - """Run a test function and track results.""" - print(f"\n{'='*60}") - print(f"TEST: {name}") - print("=" * 60) - - start = time.perf_counter() - try: - result = func(*args, **kwargs) - elapsed = time.perf_counter() - start - - if result: - log(f"PASSED in {elapsed:.2f}s", "PASS") - RESULTS.append((name, True, elapsed, None)) - return True - else: - log(f"FAILED in {elapsed:.2f}s", "FAIL") - RESULTS.append((name, False, elapsed, "Test returned False")) - return False - except Exception as e: - elapsed = time.perf_counter() - start - log(f"FAILED with exception: {e}", "FAIL") - RESULTS.append((name, False, elapsed, str(e))) - if VERBOSE: - import traceback - - traceback.print_exc() - return False - - -def test_imports(): - """Test that all required modules import correctly.""" - log("Importing numba_optimized...", "RUN") - from models.numba_optimized import ( - PPKernel, - compute_all_pcfs_fast, - measure_cluster_sizes_fast, - set_numba_seed, - NUMBA_AVAILABLE, - ) - - log(f"NUMBA_AVAILABLE = {NUMBA_AVAILABLE}") - - if not NUMBA_AVAILABLE: - log("Numba not available - performance will be degraded", "WARN") - - log("Importing CA module...", "RUN") - from models.CA import PP, set_numba_seed as ca_seed - - log("Importing pp_analysis...", "RUN") - from scripts.experiments import ( - Config, - run_single_simulation, - count_populations, - ) - - log("All imports successful") - return True - - 
-def test_numba_kernel_random(): - """Test Numba kernel with random movement.""" - from models.numba_optimized import PPKernel, set_numba_seed - - log("Creating kernel (directed_hunting=False)...", "RUN") - kernel = PPKernel(50, 50, "moore", directed_hunting=False) - assert kernel.directed_hunting == False - - log("Setting up test grid...", "RUN") - np.random.seed(42) - set_numba_seed(42) - grid = np.random.choice([0, 1, 2], (50, 50), p=[0.55, 0.30, 0.15]).astype(np.int32) - prey_death = np.full((50, 50), 0.05, dtype=np.float64) - prey_death[grid != 1] = np.nan - - initial_prey = np.sum(grid == 1) - initial_pred = np.sum(grid == 2) - log(f"Initial: prey={initial_prey}, pred={initial_pred}") - - log("Running 100 update steps...", "RUN") - for _ in range(100): - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1, evolution_stopped=False) - - final_prey = np.sum(grid == 1) - final_pred = np.sum(grid == 2) - log(f"Final: prey={final_prey}, pred={final_pred}") - - # Verify grid is valid - assert grid.min() >= 0, "Grid has negative values" - assert grid.max() <= 2, "Grid has values > 2" - assert not np.any(np.isnan(grid)), "Grid has NaN values" - - # Verify prey_death consistency - prey_mask = grid == 1 - if np.any(prey_mask): - assert np.all( - ~np.isnan(prey_death[prey_mask]) - ), "Prey cells missing death rates" - assert np.all(np.isnan(prey_death[~prey_mask])), "Non-prey cells have death rates" - - log("Grid and prey_death arrays are consistent") - return True - - -def test_numba_kernel_directed(): - """Test Numba kernel with directed hunting.""" - from models.numba_optimized import PPKernel, set_numba_seed - - log("Creating kernel (directed_hunting=True)...", "RUN") - kernel = PPKernel(50, 50, "moore", directed_hunting=True) - assert kernel.directed_hunting == True - - log("Setting up test grid...", "RUN") - np.random.seed(42) - set_numba_seed(42) - grid = np.random.choice([0, 1, 2], (50, 50), p=[0.55, 0.30, 0.15]).astype(np.int32) - prey_death = np.full((50, 50), 
0.05, dtype=np.float64) - prey_death[grid != 1] = np.nan - - initial_prey = np.sum(grid == 1) - initial_pred = np.sum(grid == 2) - log(f"Initial: prey={initial_prey}, pred={initial_pred}") - - log("Running 100 update steps...", "RUN") - for _ in range(100): - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1, evolution_stopped=False) - - final_prey = np.sum(grid == 1) - final_pred = np.sum(grid == 2) - log(f"Final: prey={final_prey}, pred={final_pred}") - - # Verify grid is valid - assert grid.min() >= 0, "Grid has negative values" - assert grid.max() <= 2, "Grid has values > 2" - - log("Directed hunting kernel working correctly") - return True - - -def test_ca_model_random(): - """Test CA PP model with random movement.""" - from models.CA import PP - from models.numba_optimized import set_numba_seed - - log("Creating PP model (directed_hunting=False)...", "RUN") - np.random.seed(42) - set_numba_seed(42) - - model = PP( - rows=50, - cols=50, - densities=(0.30, 0.15), - neighborhood="moore", - params={ - "prey_birth": 0.2, - "prey_death": 0.05, - "predator_birth": 0.2, - "predator_death": 0.1, - }, - seed=42, - synchronous=False, - directed_hunting=False, - ) - - assert model.directed_hunting == False - - initial_prey = np.sum(model.grid == 1) - initial_pred = np.sum(model.grid == 2) - log(f"Initial: prey={initial_prey}, pred={initial_pred}") - - log("Running 100 steps...", "RUN") - model.run(100) - - final_prey = np.sum(model.grid == 1) - final_pred = np.sum(model.grid == 2) - log(f"Final: prey={final_prey}, pred={final_pred}") - - assert model.grid.min() >= 0 - assert model.grid.max() <= 2 - - return True - - -def test_ca_model_directed(): - """Test CA PP model with directed hunting.""" - from models.CA import PP - from models.numba_optimized import set_numba_seed - - log("Creating PP model (directed_hunting=True)...", "RUN") - np.random.seed(42) - set_numba_seed(42) - - model = PP( - rows=50, - cols=50, - densities=(0.30, 0.15), - neighborhood="moore", - 
params={ - "prey_birth": 0.2, - "prey_death": 0.05, - "predator_birth": 0.2, - "predator_death": 0.1, - }, - seed=42, - synchronous=False, - directed_hunting=True, - ) - - assert model.directed_hunting == True - - initial_prey = np.sum(model.grid == 1) - initial_pred = np.sum(model.grid == 2) - log(f"Initial: prey={initial_prey}, pred={initial_pred}") - - log("Running 100 steps...", "RUN") - model.run(100) - - final_prey = np.sum(model.grid == 1) - final_pred = np.sum(model.grid == 2) - log(f"Final: prey={final_prey}, pred={final_pred}") - - assert model.grid.min() >= 0 - assert model.grid.max() <= 2 - - return True - - -def test_ca_model_with_evolution(): - """Test CA PP model with evolution enabled.""" - from models.CA import PP - from models.numba_optimized import set_numba_seed - - log("Creating PP model with evolution...", "RUN") - np.random.seed(42) - set_numba_seed(42) - - model = PP( - rows=50, - cols=50, - densities=(0.30, 0.15), - neighborhood="moore", - params={ - "prey_birth": 0.2, - "prey_death": 0.05, - "predator_birth": 0.2, - "predator_death": 0.1, - }, - seed=42, - synchronous=False, - directed_hunting=True, - ) - - log("Enabling prey_death evolution...", "RUN") - model.evolve("prey_death", sd=0.05, min_val=0.01, max_val=0.15) - - initial_mean = np.nanmean(model.cell_params["prey_death"]) - log(f"Initial prey_death mean: {initial_mean:.4f}") - - log("Running 200 steps...", "RUN") - model.run(200) - - final_values = model.cell_params["prey_death"] - valid_values = final_values[~np.isnan(final_values)] - - if len(valid_values) > 0: - final_mean = np.mean(valid_values) - final_std = np.std(valid_values) - log(f"Final prey_death: mean={final_mean:.4f}, std={final_std:.4f}") - - # Check bounds - assert valid_values.min() >= 0.01 - 1e-9, "Values below minimum" - assert valid_values.max() <= 0.15 + 1e-9, "Values above maximum" - log("Evolution bounds respected") - else: - log("No prey survived - cannot check evolution", "WARN") - - return True - - -def 
test_full_simulation_pipeline(): - """Test the full simulation pipeline via run_single_simulation.""" - from scripts.experiments import Config, run_single_simulation - from models.numba_optimized import set_numba_seed - - log("Creating fast config...", "RUN") - cfg = Config() - cfg.default_grid = 40 - cfg.warmup_steps = 50 - cfg.measurement_steps = 100 - cfg.cluster_samples = 1 - cfg.collect_pcf = True - cfg.pcf_sample_rate = 1.0 # Always compute PCF for this test - - # Test random movement - log("Running simulation (random movement, no evolution)...", "RUN") - cfg.directed_hunting = False - np.random.seed(42) - set_numba_seed(42) - - result_random = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=40, - seed=42, - with_evolution=False, - cfg=cfg, - compute_pcf=True, - ) - - assert "prey_mean" in result_random - assert "pred_mean" in result_random - log( - f"Random: prey_mean={result_random['prey_mean']:.1f}, pred_mean={result_random['pred_mean']:.1f}" - ) - - # Test directed hunting - log("Running simulation (directed hunting, no evolution)...", "RUN") - cfg.directed_hunting = True - np.random.seed(42) - set_numba_seed(42) - - result_directed = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=40, - seed=42, - with_evolution=False, - cfg=cfg, - compute_pcf=True, - ) - - assert "prey_mean" in result_directed - log( - f"Directed: prey_mean={result_directed['prey_mean']:.1f}, pred_mean={result_directed['pred_mean']:.1f}" - ) - - # Test with evolution - log("Running simulation (directed hunting, with evolution)...", "RUN") - np.random.seed(42) - set_numba_seed(42) - - result_evo = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=40, - seed=42, - with_evolution=True, - cfg=cfg, - compute_pcf=True, - ) - - assert result_evo["with_evolution"] == True - log(f"Evolution: prey_mean={result_evo['prey_mean']:.1f}") - - return True - - -def test_pcf_computation(): - """Test PCF computation.""" - from 
models.numba_optimized import compute_all_pcfs_fast, set_numba_seed - - log("Creating test grid...", "RUN") - np.random.seed(42) - set_numba_seed(42) - grid = np.random.choice([0, 1, 2], (100, 100), p=[0.55, 0.30, 0.15]).astype( - np.int32 - ) - - n_prey = np.sum(grid == 1) - n_pred = np.sum(grid == 2) - log(f"Grid: prey={n_prey}, pred={n_pred}") - - log("Computing PCFs...", "RUN") - t0 = time.perf_counter() - pcfs = compute_all_pcfs_fast(grid, max_distance=20.0, n_bins=20) - elapsed = time.perf_counter() - t0 - log(f"PCF computation took {elapsed*1000:.1f}ms") - - # Check all three PCFs - for key in ["prey_prey", "pred_pred", "prey_pred"]: - assert key in pcfs, f"Missing PCF: {key}" - dist, pcf, n_pairs = pcfs[key] - - assert len(dist) == 20, f"{key}: wrong number of bins" - assert len(pcf) == 20, f"{key}: wrong PCF length" - assert not np.any(np.isnan(pcf)), f"{key}: PCF contains NaN" - - log(f"{key}: n_pairs={n_pairs}, mean_pcf={np.mean(pcf):.3f}") - - return True - - -def test_cluster_measurement(): - """Test cluster size measurement.""" - from models.numba_optimized import measure_cluster_sizes_fast - - log("Creating grid with known clusters...", "RUN") - grid = np.zeros((30, 30), dtype=np.int32) - - # Cluster 1: 3x3 = 9 cells - grid[2:5, 2:5] = 1 - # Cluster 2: 2x4 = 8 cells - grid[10:12, 10:14] = 1 - # Cluster 3: single cell - grid[20, 20] = 1 - # Cluster 4: L-shape = 5 cells - grid[25, 25:28] = 1 - grid[26:28, 25] = 1 - - expected_sizes = sorted([9, 8, 1, 5], reverse=True) - log(f"Expected cluster sizes: {expected_sizes}") - - log("Measuring clusters...", "RUN") - sizes = measure_cluster_sizes_fast(grid, 1) - actual_sizes = sorted(sizes, reverse=True) - log(f"Actual cluster sizes: {list(actual_sizes)}") - - assert len(sizes) == 4, f"Expected 4 clusters, found {len(sizes)}" - assert list(actual_sizes) == expected_sizes, "Cluster sizes don't match" - - # Verify total cells - assert sum(sizes) == np.sum(grid == 1), "Cluster total doesn't match grid total" - - 
log("Cluster measurement correct") - return True - - -def test_reproducibility(): - """Test that seeding produces reproducible results.""" - from models.numba_optimized import PPKernel, set_numba_seed - - log("Running simulation twice with same seed...", "RUN") - - def run_sim(seed): - np.random.seed(seed) - set_numba_seed(seed) - grid = np.random.choice([0, 1, 2], (30, 30), p=[0.55, 0.30, 0.15]).astype( - np.int32 - ) - prey_death = np.full((30, 30), 0.05, dtype=np.float64) - prey_death[grid != 1] = np.nan - - kernel = PPKernel(30, 30, "moore", directed_hunting=True) - for _ in range(50): - kernel.update( - grid, prey_death, 0.2, 0.05, 0.2, 0.1, evolution_stopped=False - ) - - return grid.copy(), prey_death.copy() - - grid1, pd1 = run_sim(12345) - grid2, pd2 = run_sim(12345) - - prey1, prey2 = np.sum(grid1 == 1), np.sum(grid2 == 1) - pred1, pred2 = np.sum(grid1 == 2), np.sum(grid2 == 2) - - log(f"Run 1: prey={prey1}, pred={pred1}") - log(f"Run 2: prey={prey2}, pred={pred2}") - - if np.array_equal(grid1, grid2): - log("Grids are IDENTICAL - perfect reproducibility", "PASS") - else: - diff_count = np.sum(grid1 != grid2) - log(f"Grids differ in {diff_count} cells - may indicate seeding issue", "WARN") - # Still pass if populations match (some internal ordering may differ) - if prey1 == prey2 and pred1 == pred2: - log("Populations match - acceptable", "PASS") - else: - return False - - return True - - -def test_binary_save_load(): - """Test binary save/load roundtrip.""" - from scripts.experiments import save_sweep_binary, load_sweep_binary - - log("Creating test results...", "RUN") - results = [ - { - "prey_birth": 0.2, - "prey_death": 0.05, - "prey_mean": 150.5, - "pred_mean": 75.2, - "seed": 42, - "grid_size": 50, - "with_evolution": False, - }, - { - "prey_birth": 0.3, - "prey_death": 0.08, - "prey_mean": 120.3, - "pred_mean": 90.1, - "seed": 43, - "grid_size": 50, - "with_evolution": True, - }, - ] - - with tempfile.TemporaryDirectory() as tmpdir: - filepath = 
Path(tmpdir) / "test_results.npz" - - log(f"Saving to {filepath}...", "RUN") - save_sweep_binary(results, filepath) - - assert filepath.exists(), "File not created" - log(f"File size: {filepath.stat().st_size} bytes") - - log("Loading back...", "RUN") - loaded = load_sweep_binary(filepath) - - assert len(loaded) == len(results), "Wrong number of results loaded" - - for i, (orig, load) in enumerate(zip(results, loaded)): - for key in orig: - if isinstance(orig[key], float): - assert np.isclose( - orig[key], load[key] - ), f"Result {i}, key {key} mismatch" - else: - assert orig[key] == load[key], f"Result {i}, key {key} mismatch" - - log("Roundtrip successful") - - return True - - -def test_hunting_dynamics_comparison(): - """Compare dynamics between random and directed hunting.""" - from models.numba_optimized import PPKernel, set_numba_seed - - log("Setting up comparison...", "RUN") - - # Use same initial grid - np.random.seed(999) - template = np.random.choice([0, 1, 2], (60, 60), p=[0.50, 0.35, 0.15]).astype( - np.int32 - ) - - def run_mode(directed: bool, seed: int = 999): - grid = template.copy() - prey_death = np.full((60, 60), 0.05, dtype=np.float64) - prey_death[grid != 1] = np.nan - - set_numba_seed(seed) - kernel = PPKernel(60, 60, "moore", directed_hunting=directed) - - history = [] - for step in range(100): - kernel.update(grid, prey_death, 0.2, 0.05, 0.5, 0.1) # High pred birth - if step % 10 == 0: - history.append((np.sum(grid == 1), np.sum(grid == 2))) - - return history - - log("Running random movement...", "RUN") - hist_random = run_mode(directed=False) - - log("Running directed hunting...", "RUN") - hist_directed = run_mode(directed=True) - - log("\nPopulation dynamics comparison:") - log(f"{'Step':<6} {'Random':<20} {'Directed':<20}") - log("-" * 46) - for i, ((pr, pdr), (pd, pdd)) in enumerate(zip(hist_random, hist_directed)): - step = i * 10 - log(f"{step:<6} prey={pr:<4} pred={pdr:<4} prey={pd:<4} pred={pdd:<4}") - - # Final comparison - 
final_random_prey = hist_random[-1][0] - final_directed_prey = hist_directed[-1][0] - - log(f"\nFinal prey - Random: {final_random_prey}, Directed: {final_directed_prey}") - - # Directed hunting with high predator birth typically depletes prey faster - # But we don't assert this strictly due to stochastic nature - log("Dynamics comparison complete") - - return True - - -def print_summary(): - """Print test summary.""" - print("\n" + "=" * 60) - print("SMOKE TEST SUMMARY") - print("=" * 60) - - passed = sum(1 for _, success, _, _ in RESULTS if success) - failed = sum(1 for _, success, _, _ in RESULTS if not success) - total_time = sum(t for _, _, t, _ in RESULTS) - - for name, success, elapsed, error in RESULTS: - status = "PASS" if success else "FAIL" - print(f" {status} {name} ({elapsed:.2f}s)") - if error and not success: - print(f" Error: {error[:60]}...") - - print("-" * 60) - print(f" Total: {passed} passed, {failed} failed in {total_time:.2f}s") - print("=" * 60) - - if failed == 0: - print("\ALL TESTS PASSED - Ready for HPC submission!\n") - else: - print(f"\n⚠️ {failed} TEST(S) FAILED - Please fix before HPC submission.\n") - - return failed == 0 - - -def main(): - global VERBOSE - - parser = argparse.ArgumentParser(description="Pre-HPC Smoke Test") - parser.add_argument("--quick", action="store_true", help="Run minimal tests only") - parser.add_argument("--verbose", action="store_true", help="Extra output") - args = parser.parse_args() - - VERBOSE = args.verbose - - print("\n" + "=" * 60) - print(" PREDATOR-PREY SIMULATION - PRE-HPC SMOKE TEST") - print("=" * 60) - print(f" Time: {time.strftime('%Y-%m-%d %H:%M:%S')}") - print(f" Python: {sys.version.split()[0]}") - print("=" * 60) - - # Core tests (always run) - run_test("Module Imports", test_imports) - run_test("Numba Kernel (Random)", test_numba_kernel_random) - run_test("Numba Kernel (Directed)", test_numba_kernel_directed) - run_test("CA Model (Random)", test_ca_model_random) - run_test("CA Model 
(Directed)", test_ca_model_directed) - - if not args.quick: - # Extended tests - run_test("CA Model (Evolution)", test_ca_model_with_evolution) - run_test("Full Simulation Pipeline", test_full_simulation_pipeline) - run_test("PCF Computation", test_pcf_computation) - run_test("Cluster Measurement", test_cluster_measurement) - run_test("Reproducibility (Seeding)", test_reproducibility) - run_test("Binary Save/Load", test_binary_save_load) - run_test("Hunting Dynamics Comparison", test_hunting_dynamics_comparison) - - success = print_summary() - sys.exit(0 if success else 1) - - -if __name__ == "__main__": - main() diff --git a/tests/test_ca.py b/tests/test_ca.py index 4f2b73b..903e4a4 100644 --- a/tests/test_ca.py +++ b/tests/test_ca.py @@ -1,152 +1,501 @@ -"""Cellular Automaton tests.""" +""" +Tests for CA base class and PP (Predator-Prey) model. + +Covers: +- CA initialization and validation +- PP model initialization, parameters, and update logic +- Evolution mechanism +- Seed reproducibility +- Edge cases (empty grids, extinction) +""" import pytest import numpy as np +import sys +from pathlib import Path + +# Ensure imports work +sys.path.insert(0, str(Path(__file__).parent.parent)) + from models.CA import CA, PP -def test_initialization_and_grid_filling(): - rows, cols = 10, 10 - densities = (0.2, 0.1) - ca = CA( - rows, cols, densities, neighborhood="moore", params={}, cell_params={}, seed=42 - ) - assert ca.grid.shape == (rows, cols) - assert ca.n_species == len(densities) - total_cells = rows * cols - # expected counts use the same rounding as CA.__init__ - expected_counts = [int(round(total_cells * d)) for d in densities] - # verify actual counts equal expected - for i, exp in enumerate(expected_counts, start=1): - assert int(np.count_nonzero(ca.grid == i)) == exp - - -def test_invalid_parameters_raise(): - # invalid rows/cols - with pytest.raises(AssertionError): - CA(0, 5, (0.1,), "moore", {}, {}, seed=1) - with pytest.raises(AssertionError): - CA(5, -1, 
(0.1,), "moore", {}, {}, seed=1) - # densities must be non-empty tuple - with pytest.raises(AssertionError): - CA(5, 5, (), "moore", {}, {}, seed=1) - # densities sum > 1 - with pytest.raises(AssertionError): - CA(5, 5, (0.8, 0.8), "moore", {}, {}, seed=1) - # invalid neighborhood - with pytest.raises(AssertionError): - CA(5, 5, (0.1,), "invalid", {}, {}, seed=1) - - # PP: params must be a dict or None - with pytest.raises(TypeError): - PP( - rows=5, - cols=5, - densities=(0.2, 0.1), +# ============================================================================= +# CA Base Class Tests +# ============================================================================= + + +class TestCAInitialization: + """Tests for CA base class initialization.""" + + def test_ca_requires_positive_dimensions(self): + """CA should reject non-positive dimensions.""" + with pytest.raises(AssertionError, match="rows must be positive"): + CA(rows=0, cols=10, densities=(0.5,), neighborhood="moore", params={}, cell_params={}) + + with pytest.raises(AssertionError, match="cols must be positive"): + CA(rows=10, cols=-1, densities=(0.5,), neighborhood="moore", params={}, cell_params={}) + + def test_ca_requires_valid_neighborhood(self): + """CA should only accept 'moore' or 'neumann' neighborhoods.""" + with pytest.raises(AssertionError, match="neighborhood must be"): + CA(rows=5, cols=5, densities=(0.3,), neighborhood="invalid", params={}, cell_params={}) + + def test_ca_densities_must_not_exceed_one(self): + """Sum of densities must not exceed 1.0.""" + with pytest.raises(AssertionError, match="sum of densities"): + CA(rows=5, cols=5, densities=(0.6, 0.6), neighborhood="moore", params={}, cell_params={}) + + def test_ca_densities_must_be_non_negative(self): + """Each density must be non-negative.""" + with pytest.raises(AssertionError, match="non-negative"): + CA(rows=5, cols=5, densities=(-0.1, 0.5), neighborhood="moore", params={}, cell_params={}) + + def 
test_ca_grid_shape_matches_dimensions(self): + """Grid should have the specified shape.""" + + class ConcreteCA(CA): + def update(self): + pass + + ca = ConcreteCA(rows=7, cols=13, densities=(0.2,), neighborhood="moore", params={}, cell_params={}) + assert ca.grid.shape == (7, 13) + assert ca.rows == 7 + assert ca.cols == 13 + + def test_ca_species_count_from_densities(self): + """n_species should equal length of densities tuple.""" + + class ConcreteCA(CA): + def update(self): + pass + + ca = ConcreteCA(rows=5, cols=5, densities=(0.2, 0.1, 0.05), neighborhood="moore", params={}, cell_params={}) + assert ca.n_species == 3 + + def test_ca_grid_population_approximately_matches_density(self): + """Initial grid population should approximately match requested densities.""" + + class ConcreteCA(CA): + def update(self): + pass + + np.random.seed(42) + ca = ConcreteCA(rows=100, cols=100, densities=(0.3, 0.15), neighborhood="moore", params={}, cell_params={}, seed=42) + + total_cells = 100 * 100 + expected_species1 = int(total_cells * 0.3) + expected_species2 = int(total_cells * 0.15) + + actual_species1 = np.sum(ca.grid == 1) + actual_species2 = np.sum(ca.grid == 2) + + # Allow 1% tolerance due to rounding + assert abs(actual_species1 - expected_species1) <= total_cells * 0.01 + assert abs(actual_species2 - expected_species2) <= total_cells * 0.01 + + def test_ca_seed_reproducibility(self): + """Same seed should produce identical grids.""" + + class ConcreteCA(CA): + def update(self): + pass + + ca1 = ConcreteCA(rows=20, cols=20, densities=(0.3, 0.1), neighborhood="moore", params={}, cell_params={}, seed=123) + ca2 = ConcreteCA(rows=20, cols=20, densities=(0.3, 0.1), neighborhood="moore", params={}, cell_params={}, seed=123) + + assert np.array_equal(ca1.grid, ca2.grid) + + def test_ca_different_seeds_produce_different_grids(self): + """Different seeds should (almost certainly) produce different grids.""" + + class ConcreteCA(CA): + def update(self): + pass + + ca1 = 
ConcreteCA(rows=20, cols=20, densities=(0.3, 0.1), neighborhood="moore", params={}, cell_params={}, seed=111) + ca2 = ConcreteCA(rows=20, cols=20, densities=(0.3, 0.1), neighborhood="moore", params={}, cell_params={}, seed=222) + + assert not np.array_equal(ca1.grid, ca2.grid) + + +class TestCAValidation: + """Tests for CA validation method.""" + + def test_validate_passes_for_valid_ca(self): + """Validation should pass for properly initialized CA.""" + + class ConcreteCA(CA): + def update(self): + pass + + ca = ConcreteCA(rows=10, cols=10, densities=(0.2,), neighborhood="moore", params={}, cell_params={}) + ca.validate() # Should not raise + + def test_validate_fails_for_mismatched_grid_shape(self): + """Validation should fail if grid shape is modified incorrectly.""" + + class ConcreteCA(CA): + def update(self): + pass + + ca = ConcreteCA(rows=10, cols=10, densities=(0.2,), neighborhood="moore", params={}, cell_params={}) + ca.grid = np.zeros((5, 5)) # Wrong shape + + with pytest.raises(ValueError, match="grid shape"): + ca.validate() + + +class TestCAEvolution: + """Tests for CA parameter evolution mechanism.""" + + def test_evolve_creates_cell_params_array(self): + """evolve() should create a per-cell parameter array.""" + + class ConcreteCA(CA): + species_names = ("prey",) + + def update(self): + pass + + ca = ConcreteCA( + rows=10, + cols=10, + densities=(0.3,), neighborhood="moore", - params="bad", - cell_params=None, - seed=1, + params={"prey_death": 0.05}, + cell_params={}, ) + ca.evolve("prey_death", species=1, sd=0.02, min_val=0.01, max_val=0.1) + assert "prey_death" in ca.cell_params + assert ca.cell_params["prey_death"].shape == (10, 10) -def test_neighborhood_counting(): - # set up a small grid with a single prey in the center and check neighbor counts - ca = CA(3, 3, (0.0,), neighborhood="moore", params={}, cell_params={}, seed=1) - ca.grid[:] = 0 - ca.grid[1, 1] = 1 - counts = ca.count_neighbors() - # counts is a tuple with one array (state 1) - 
neigh = counts[0] - # all 8 neighbors of center should have count 1 - expected_positions = [ - (0, 0), - (0, 1), - (0, 2), - (1, 0), - (1, 2), - (2, 0), - (2, 1), - (2, 2), - ] - for r in range(3): - for c in range(3): - if (r, c) in expected_positions: - assert neigh[r, c] == 1 - else: - # center has 0 neighbors of same state - assert neigh[r, c] == 0 - - -def test_validate_detects_cell_params_shape_and_nonnan_mismatch(): - # create a PP and enable evolution for a parameter - pp = PP( - rows=5, - cols=5, - densities=(0.2, 0.1), - neighborhood="moore", - params=None, - cell_params=None, - seed=2, - ) - pp.evolve("prey_death", sd=0.01, min_val=0.0, max_val=1.0) - - # wrong shape should raise informative ValueError via validate() - pp.cell_params["prey_death"] = np.zeros((1, 1)) - with pytest.raises(ValueError) as excinfo: - pp.validate() - assert "shape equal to grid" in str(excinfo.value) - - # now create a same-shaped array but with non-NaN positions that don't match prey positions - arr = np.zeros(pp.grid.shape, dtype=float) # filled with non-NaN everywhere - pp.cell_params["prey_death"] = arr - with pytest.raises(ValueError) as excinfo2: - pp.validate() - assert "non-NaN entries must match positions" in str(excinfo2.value) - - -def test_extinction_when_death_one(): - # when both death rates are 1 all individuals should die in one step - params = { - "prey_death": 1.0, - "predator_death": 1.0, - "prey_birth": 0.0, - "predator_birth": 0.0, - } - pp = PP( - rows=10, - cols=10, - densities=(0.2, 0.1), - neighborhood="moore", - params=params, - cell_params=None, - seed=3, - ) - pp.run(1) - # no prey or predators should remain - assert np.count_nonzero(pp.grid != 0) == 0 - - -def test_predators_dominate_with_high_birth_and_zero_predator_death(): - params = { - "prey_death": 0.0, - "predator_death": 0.0, - "prey_birth": 1.0, - "predator_birth": 1.0, - } - pp = PP( + def test_evolve_sets_values_only_for_target_species(self): + """evolved parameter should be NaN for 
non-target species cells.""" + + class ConcreteCA(CA): + species_names = ("prey", "predator") + + def update(self): + pass + + ca = ConcreteCA( + rows=10, + cols=10, + densities=(0.3, 0.1), + neighborhood="moore", + params={"prey_death": 0.05}, + cell_params={}, + ) + ca.evolve("prey_death", species=1, sd=0.02) + + arr = ca.cell_params["prey_death"] + # Species 1 cells should have the value + assert np.allclose(arr[ca.grid == 1], 0.05) + # Other cells should be NaN + assert np.all(np.isnan(arr[ca.grid != 1])) + + def test_evolve_rejects_unknown_parameter(self): + """evolve() should raise for parameters not in self.params.""" + + class ConcreteCA(CA): + def update(self): + pass + + ca = ConcreteCA(rows=5, cols=5, densities=(0.3,), neighborhood="moore", params={}, cell_params={}) + + with pytest.raises(ValueError, match="Unknown parameter"): + ca.evolve("nonexistent_param") + + def test_evolve_infers_species_from_param_name(self): + """evolve() should infer species from parameter name prefix.""" + + class ConcreteCA(CA): + def update(self): + pass + + ca = ConcreteCA( rows=10, cols=10, - densities=(0.1, 0.05), + densities=(0.3, 0.1), neighborhood="moore", - params=params, - cell_params=None, - seed=4, + params={"prey_death": 0.05}, + cell_params={}, ) - # run longer to allow predators to consume prey; expect prey extinction - pp.run(200) - after_prey = int(np.count_nonzero(pp.grid == 1)) - after_pred = int(np.count_nonzero(pp.grid == 2)) - # after sufficient time, prey should go extinct and predators remain - assert after_prey == 0 - assert after_pred > 0 + ca.species_names = ("prey", "predator") + + # Should infer species=1 from "prey_death" + ca.evolve("prey_death", sd=0.02) + + assert "prey_death" in ca._evolve_info + assert ca._evolve_info["prey_death"]["species"] == 1 + + +# ============================================================================= +# PP Model Tests +# ============================================================================= + + +class 
TestPPInitialization: + """Tests for PP model initialization.""" + + def test_pp_default_initialization(self): + """PP should initialize with sensible defaults.""" + model = PP() + assert model.rows == 10 + assert model.cols == 10 + assert model.n_species == 2 + assert model.species_names == ("prey", "predator") + + def test_pp_custom_dimensions(self): + """PP should accept custom grid dimensions.""" + model = PP(rows=25, cols=30) + assert model.rows == 25 + assert model.cols == 30 + assert model.grid.shape == (25, 30) + + def test_pp_default_parameters(self): + """PP should have correct default parameters.""" + model = PP() + assert model.params["prey_death"] == 0.05 + assert model.params["predator_death"] == 0.1 + assert model.params["prey_birth"] == 0.25 + assert model.params["predator_birth"] == 0.2 + + def test_pp_custom_parameters(self): + """PP should accept custom parameters.""" + model = PP(params={"prey_death": 0.1, "prey_birth": 0.3}) + assert model.params["prey_death"] == 0.1 + assert model.params["prey_birth"] == 0.3 + # Defaults should still apply for unspecified params + assert model.params["predator_death"] == 0.1 + + def test_pp_rejects_invalid_parameter_keys(self): + """PP should reject unknown parameter keys.""" + with pytest.raises(ValueError, match="Unexpected parameter keys"): + PP(params={"invalid_key": 0.5}) + + def test_pp_rejects_out_of_range_parameters(self): + """PP parameters must be in [0, 1].""" + with pytest.raises(ValueError, match="must be between 0 and 1"): + PP(params={"prey_death": 1.5}) + + with pytest.raises(ValueError, match="must be between 0 and 1"): + PP(params={"prey_birth": -0.1}) + + def test_pp_accepts_both_neighborhoods(self): + """PP should accept both moore and neumann neighborhoods.""" + model_moore = PP(neighborhood="moore") + assert model_moore.neighborhood == "moore" + + model_neumann = PP(neighborhood="neumann") + assert model_neumann.neighborhood == "neumann" + + def test_pp_seed_reproducibility(self): + 
"""Same seed should produce identical initial states.""" + model1 = PP(rows=15, cols=15, seed=999) + model2 = PP(rows=15, cols=15, seed=999) + assert np.array_equal(model1.grid, model2.grid) + + def test_pp_directed_hunting_option(self): + """PP should accept directed_hunting flag.""" + model = PP(directed_hunting=True) + assert model.directed_hunting is True + + model = PP(directed_hunting=False) + assert model.directed_hunting is False + + +class TestPPUpdate: + """Tests for PP model update mechanics.""" + + def test_pp_update_runs_without_error(self, pp_model_small): + """update() should execute without raising.""" + pp_model_small.update() # Should not raise + + def test_pp_update_modifies_grid(self, pp_model_small): + """update() should modify the grid state.""" + initial_grid = pp_model_small.grid.copy() + # Run several updates to ensure some change happens + for _ in range(10): + pp_model_small.update() + + # Grid should have changed (with high probability) + assert not np.array_equal(pp_model_small.grid, initial_grid) + + def test_pp_update_preserves_grid_shape(self, pp_model_small): + """update() should not change grid dimensions.""" + original_shape = pp_model_small.grid.shape + for _ in range(5): + pp_model_small.update() + assert pp_model_small.grid.shape == original_shape + + def test_pp_update_only_valid_states(self, pp_model_small): + """Grid should only contain states 0, 1, or 2.""" + for _ in range(10): + pp_model_small.update() + unique_values = np.unique(pp_model_small.grid) + assert all(v in [0, 1, 2] for v in unique_values) + + def test_pp_update_with_evolution(self, pp_model_with_evolution): + """update() should work with evolution enabled.""" + for _ in range(5): + pp_model_with_evolution.update() + # Should not raise and grid should still be valid + unique_values = np.unique(pp_model_with_evolution.grid) + assert all(v in [0, 1, 2] for v in unique_values) + + def test_pp_directed_vs_random_produces_different_dynamics(self): + """Directed and 
random hunting should produce different outcomes.""" + # Use same seed for initial state + model_random = PP(rows=20, cols=20, seed=42, directed_hunting=False) + model_directed = PP(rows=20, cols=20, seed=42, directed_hunting=True) + + # Run both for same number of steps + for _ in range(20): + model_random.update() + model_directed.update() + + # Grids should differ (with very high probability) + assert not np.array_equal(model_random.grid, model_directed.grid) + + +class TestPPValidation: + """Tests for PP validation method.""" + + def test_pp_validate_passes_for_valid_model(self): + """Validation should pass for properly initialized PP.""" + model = PP(rows=10, cols=10, seed=42) + model.validate() # Should not raise + + def test_pp_validate_with_evolution(self, pp_model_with_evolution): + """Validation should pass with properly configured evolution.""" + pp_model_with_evolution.validate() # Should not raise + + def test_pp_validate_fails_for_invalid_evolved_values(self): + """Validation should fail if evolved values are out of range.""" + model = PP(rows=10, cols=10, seed=42) + model.evolve("prey_death", sd=0.02, min_val=0.01, max_val=0.1) + + # Manually corrupt the evolved values + model.cell_params["prey_death"][model.grid == 1] = 0.5 # Outside max + + with pytest.raises(ValueError, match="contains values outside"): + model.validate() + + +class TestPPRun: + """Tests for PP run() method.""" + + def test_pp_run_executes_correct_steps(self): + """run() should execute the specified number of steps.""" + model = PP(rows=8, cols=8, seed=42) + initial_grid = model.grid.copy() + + model.run(steps=3) + + # After 3 steps, grid should have changed + assert not np.array_equal(model.grid, initial_grid) + + def test_pp_run_zero_steps(self): + """run(0) should not modify the grid.""" + model = PP(rows=8, cols=8, seed=42) + initial_grid = model.grid.copy() + + model.run(steps=0) + + assert np.array_equal(model.grid, initial_grid) + + def test_pp_run_stop_evolution(self): + 
"""run() with stop_evolution_at should freeze mutation.""" + model = PP(rows=10, cols=10, seed=42) + model.evolve("prey_death", sd=0.1, min_val=0.01, max_val=0.2) + + assert model._evolution_stopped is False + model.run(steps=5, stop_evolution_at=3) + assert model._evolution_stopped is True + + +# ============================================================================= +# Edge Cases +# ============================================================================= + + +class TestPPEdgeCases: + """Edge case tests for PP model.""" + + def test_pp_survives_empty_start(self): + """PP should handle starting with zero density gracefully.""" + model = PP(rows=5, cols=5, densities=(0.0, 0.0), seed=42) + assert np.sum(model.grid) == 0 + + # Should not raise even with empty grid + model.update() + assert np.sum(model.grid) == 0 # Still empty + + def test_pp_prey_only_population(self): + """PP should handle prey-only population.""" + model = PP(rows=10, cols=10, densities=(0.5, 0.0), seed=42) + assert np.sum(model.grid == 2) == 0 # No predators + + for _ in range(5): + model.update() + + # Still no predators (can't spawn from nothing) + assert np.sum(model.grid == 2) == 0 + + def test_pp_predator_only_extinction(self): + """Predators without prey should eventually die.""" + model = PP( + rows=10, + cols=10, + densities=(0.0, 0.3), + params={"predator_death": 0.5}, # High death rate + seed=42, + ) + + # Run until extinction + for _ in range(50): + model.update() + if np.sum(model.grid == 2) == 0: + break + + # Predators should be extinct (or severely reduced) + assert np.sum(model.grid == 2) <= 5 + + def test_pp_very_small_grid(self): + """PP should work on minimal 2x2 grid.""" + model = PP(rows=2, cols=2, densities=(0.5, 0.25), seed=42) + assert model.grid.shape == (2, 2) + + for _ in range(3): + model.update() + + # Should still be valid + assert model.grid.shape == (2, 2) + assert all(v in [0, 1, 2] for v in np.unique(model.grid)) + + def 
test_pp_high_density_initialization(self): + """PP should handle near-full grid initialization.""" + model = PP(rows=10, cols=10, densities=(0.5, 0.45), seed=42) + total_occupied = np.sum(model.grid > 0) + assert total_occupied >= 90 # At least 90% filled + + model.update() # Should not raise + + +class TestPPNeighborhoods: + """Tests for different neighborhood types.""" + + def test_moore_has_8_neighbors(self): + """Moore neighborhood should use 8 directions.""" + model = PP(rows=10, cols=10, neighborhood="moore", seed=42) + assert len(model._kernel._dr) == 8 + assert len(model._kernel._dc) == 8 + + def test_neumann_has_4_neighbors(self): + """Von Neumann neighborhood should use 4 directions.""" + model = PP(rows=10, cols=10, neighborhood="neumann", seed=42) + assert len(model._kernel._dr) == 4 + assert len(model._kernel._dc) == 4 \ No newline at end of file diff --git a/tests/test_ca_viz.py b/tests/test_ca_viz.py deleted file mode 100644 index 4f3a1f4..0000000 --- a/tests/test_ca_viz.py +++ /dev/null @@ -1,21 +0,0 @@ -import matplotlib - -matplotlib.use("Agg") - -import matplotlib.pyplot as plt -from models.CA import PP - - -def test_visualize_headless_runs(): - pp = PP(rows=30, cols=30, densities=(0.2, 0.05), neighborhood="moore", seed=1) - pp.evolve("prey_death", sd=0.01, min_val=0.001, max_val=0.1) - # should not raise - pp.visualize( - interval=1, - figsize=(4, 4), - pause=0.001, - show_cell_params=True, - show_neighbors=False, - ) - pp.run(3) - plt.close("all") diff --git a/tests/test_config.py b/tests/test_config.py new file mode 100644 index 0000000..b519203 --- /dev/null +++ b/tests/test_config.py @@ -0,0 +1,238 @@ +""" +Tests for configuration module. 
+ +Covers: +- Config dataclass defaults and validation +- Phase config retrieval +- Helper methods +""" + +import pytest +import numpy as np +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from models.config import ( + Config, + get_phase_config, + PHASE_CONFIGS, + PHASE1_CONFIG, + PHASE2_CONFIG, + PHASE3_CONFIG, + PHASE4_CONFIG, + PHASE5_CONFIG, +) + + +# ============================================================================= +# Config Defaults Tests +# ============================================================================= + + +class TestConfigDefaults: + """Tests for Config default values.""" + + def test_default_grid_size(self): + """Default grid size should be 1000.""" + cfg = Config() + assert cfg.grid_size == 1000 + + def test_default_densities(self): + """Default densities should be (0.30, 0.15).""" + cfg = Config() + assert cfg.densities == (0.30, 0.15) + + def test_default_species_parameters(self): + """Default species parameters should be set.""" + cfg = Config() + assert cfg.prey_birth == 0.2 + assert cfg.prey_death == 0.05 + assert cfg.predator_birth == 0.8 + assert cfg.predator_death == 0.05 + + def test_default_replicates(self): + """Default replicates should be 15.""" + cfg = Config() + assert cfg.n_replicates == 15 + + def test_default_parallelization(self): + """Default n_jobs should be -1 (all cores).""" + cfg = Config() + assert cfg.n_jobs == -1 + + +class TestConfigCustomization: + """Tests for Config customization.""" + + def test_override_grid_size(self): + """Should accept custom grid size.""" + cfg = Config(grid_size=500) + assert cfg.grid_size == 500 + + def test_override_multiple_params(self): + """Should accept multiple overrides.""" + cfg = Config( + grid_size=200, + n_replicates=5, + warmup_steps=100, + directed_hunting=True, + ) + assert cfg.grid_size == 200 + assert cfg.n_replicates == 5 + assert cfg.warmup_steps == 100 + assert cfg.directed_hunting is True + + def 
test_override_preserves_other_defaults(self): + """Overriding one param should not affect others.""" + cfg = Config(grid_size=500) + assert cfg.prey_birth == 0.2 # Still default + assert cfg.n_replicates == 15 # Still default + + +# ============================================================================= +# Config Helper Methods Tests +# ============================================================================= + + +class TestConfigHelpers: + """Tests for Config helper methods.""" + + def test_get_prey_deaths_returns_array(self): + """get_prey_deaths should return numpy array.""" + cfg = Config(prey_death_range=(0.0, 0.1), n_prey_death=5) + deaths = cfg.get_prey_deaths() + + assert isinstance(deaths, np.ndarray) + assert len(deaths) == 5 + + def test_get_prey_deaths_correct_range(self): + """get_prey_deaths should cover specified range.""" + cfg = Config(prey_death_range=(0.05, 0.15), n_prey_death=11) + deaths = cfg.get_prey_deaths() + + assert deaths[0] == pytest.approx(0.05) + assert deaths[-1] == pytest.approx(0.15) + + def test_get_warmup_steps_returns_configured_value(self): + """get_warmup_steps should return warmup_steps.""" + cfg = Config(warmup_steps=500) + assert cfg.get_warmup_steps(L=100) == 500 + + def test_get_measurement_steps_returns_configured_value(self): + """get_measurement_steps should return measurement_steps.""" + cfg = Config(measurement_steps=1000) + assert cfg.get_measurement_steps(L=100) == 1000 + + def test_estimate_runtime_returns_string(self): + """estimate_runtime should return formatted string.""" + cfg = Config(grid_size=100, n_prey_death=5, n_replicates=2) + estimate = cfg.estimate_runtime(n_cores=4) + + assert isinstance(estimate, str) + assert "sims" in estimate + assert "cores" in estimate + + +# ============================================================================= +# Phase Config Tests +# ============================================================================= + + +class TestPhaseConfigs: + """Tests for 
pre-defined phase configurations.""" + + def test_all_phases_exist(self): + """All 5 phases should have configs.""" + assert 1 in PHASE_CONFIGS + assert 2 in PHASE_CONFIGS + assert 3 in PHASE_CONFIGS + assert 4 in PHASE_CONFIGS + assert 5 in PHASE_CONFIGS + + def test_get_phase_config_returns_correct_config(self): + """get_phase_config should return correct instance.""" + assert get_phase_config(1) is PHASE1_CONFIG + assert get_phase_config(2) is PHASE2_CONFIG + assert get_phase_config(3) is PHASE3_CONFIG + assert get_phase_config(4) is PHASE4_CONFIG + assert get_phase_config(5) is PHASE5_CONFIG + + def test_get_phase_config_invalid_raises(self): + """get_phase_config should raise for invalid phase.""" + with pytest.raises(ValueError, match="Unknown phase"): + get_phase_config(99) + + with pytest.raises(ValueError, match="Unknown phase"): + get_phase_config(0) + + def test_phase1_config_values(self): + """Phase 1 config should have expected values.""" + cfg = PHASE1_CONFIG + assert cfg.grid_size == 1000 + assert cfg.collect_pcf is False + assert cfg.directed_hunting is False + + def test_phase2_config_evolution_settings(self): + """Phase 2 config should have evolution settings.""" + cfg = PHASE2_CONFIG + assert cfg.evolve_sd > 0 + assert cfg.evolve_min >= 0 + assert cfg.evolve_max > cfg.evolve_min + + def test_phase3_config_has_grid_sizes(self): + """Phase 3 config should have multiple grid sizes.""" + cfg = PHASE3_CONFIG + assert len(cfg.grid_sizes) > 1 + assert cfg.collect_pcf is True + + def test_phase4_no_directed_hunting(self): + """Phase 4 should have directed_hunting=False.""" + cfg = PHASE4_CONFIG + assert cfg.directed_hunting is False + + def test_phase5_directed_hunting_enabled(self): + """Phase 5 should have directed_hunting=True.""" + cfg = PHASE5_CONFIG + assert cfg.directed_hunting is True + + +# ============================================================================= +# Config Consistency Tests +# 
============================================================================= + + +class TestConfigConsistency: + """Tests for configuration consistency and validity.""" + + def test_densities_sum_valid(self): + """Default densities should sum to <= 1.0.""" + cfg = Config() + assert sum(cfg.densities) <= 1.0 + + def test_prey_death_range_valid(self): + """prey_death_range should have min < max.""" + cfg = Config() + assert cfg.prey_death_range[0] < cfg.prey_death_range[1] + + def test_evolve_bounds_valid(self): + """evolve_min should be less than evolve_max.""" + cfg = Config() + assert cfg.evolve_min < cfg.evolve_max + + def test_pcf_bins_positive(self): + """PCF bins should be positive.""" + cfg = Config() + assert cfg.pcf_n_bins > 0 + + def test_all_phase_configs_valid_densities(self): + """All phase configs should have valid densities.""" + for phase, cfg in PHASE_CONFIGS.items(): + assert sum(cfg.densities) <= 1.0, f"Phase {phase} has invalid densities" + + def test_all_phase_configs_positive_steps(self): + """All phase configs should have positive step counts.""" + for phase, cfg in PHASE_CONFIGS.items(): + assert cfg.warmup_steps > 0, f"Phase {phase} has non-positive warmup" + assert cfg.measurement_steps > 0, f"Phase {phase} has non-positive measurement" \ No newline at end of file diff --git a/tests/test_experiments.py b/tests/test_experiments.py new file mode 100644 index 0000000..83ca26d --- /dev/null +++ b/tests/test_experiments.py @@ -0,0 +1,534 @@ +""" +Tests for experiments.py utility functions and simulation runner. + +Covers: +- Utility functions (generate_unique_seed, count_populations, etc.) 
+- I/O functions (save_results_jsonl, load_results_jsonl, save_results_npz) +- run_single_simulation with real (tiny) configs +- Basic phase runner validation +""" + +import pytest +import json +import numpy as np +import logging +import sys +from pathlib import Path +from unittest.mock import MagicMock + +sys.path.insert(0, str(Path(__file__).parent.parent)) + +# Import functions to test +from experiments import ( + generate_unique_seed, + count_populations, + get_evolved_stats, + average_pcfs, + save_results_jsonl, + load_results_jsonl, + save_results_npz, + run_single_simulation, + PHASE_RUNNERS, +) + +# Import the real Config for integration tests +from models.config import Config + + +# ============================================================================= +# Utility Function Tests +# ============================================================================= + + +class TestGenerateUniqueSeed: + """Tests for generate_unique_seed function.""" + + def test_same_params_same_rep_same_seed(self): + """Identical inputs should produce identical seeds.""" + params = {"a": 1, "b": 2.5} + seed1 = generate_unique_seed(params, rep=0) + seed2 = generate_unique_seed(params, rep=0) + assert seed1 == seed2 + + def test_different_rep_different_seed(self): + """Different rep values should produce different seeds.""" + params = {"a": 1, "b": 2.5} + seed1 = generate_unique_seed(params, rep=0) + seed2 = generate_unique_seed(params, rep=1) + assert seed1 != seed2 + + def test_different_params_different_seed(self): + """Different parameters should produce different seeds.""" + params1 = {"a": 1} + params2 = {"a": 2} + seed1 = generate_unique_seed(params1, rep=0) + seed2 = generate_unique_seed(params2, rep=0) + assert seed1 != seed2 + + def test_key_order_does_not_matter(self): + """Dict key order should not affect seed (sorted keys).""" + params1 = {"b": 2, "a": 1} + params2 = {"a": 1, "b": 2} + assert generate_unique_seed(params1, 0) == generate_unique_seed(params2, 0) + + 
def test_returns_positive_integer(self): + """Seed should be a positive integer.""" + seed = generate_unique_seed({"x": 100}, rep=5) + assert isinstance(seed, int) + assert seed >= 0 + + +class TestCountPopulations: + """Tests for count_populations function.""" + + def test_empty_grid(self): + """Empty grid should return (total_cells, 0, 0).""" + grid = np.zeros((5, 5), dtype=int) + empty, prey, pred = count_populations(grid) + assert empty == 25 + assert prey == 0 + assert pred == 0 + + def test_full_prey_grid(self): + """Grid full of prey should return (0, total, 0).""" + grid = np.ones((4, 4), dtype=int) + empty, prey, pred = count_populations(grid) + assert empty == 0 + assert prey == 16 + assert pred == 0 + + def test_mixed_population(self): + """Mixed grid should return correct counts.""" + grid = np.array([[0, 1, 2], [1, 0, 1], [2, 2, 0]]) + empty, prey, pred = count_populations(grid) + assert empty == 3 + assert prey == 3 + assert pred == 3 + + def test_returns_integers(self): + """Counts should be Python ints, not numpy types.""" + grid = np.array([[0, 1], [2, 1]]) + empty, prey, pred = count_populations(grid) + assert type(empty) is int + assert type(prey) is int + assert type(pred) is int + + +class TestGetEvolvedStats: + """Tests for get_evolved_stats function.""" + + def test_missing_param_returns_nan(self): + """Missing parameter should return NaN stats.""" + mock_model = MagicMock() + mock_model.cell_params.get.return_value = None + + stats = get_evolved_stats(mock_model, "nonexistent") + + assert np.isnan(stats["mean"]) + assert np.isnan(stats["std"]) + assert stats["n"] == 0 + + def test_all_nan_returns_nan(self): + """Array of all NaN should return NaN stats.""" + mock_model = MagicMock() + mock_model.cell_params.get.return_value = np.array([np.nan, np.nan, np.nan]) + + stats = get_evolved_stats(mock_model, "param") + + assert np.isnan(stats["mean"]) + assert stats["n"] == 0 + + def test_valid_values_return_correct_stats(self): + """Valid values 
should return correct statistics.""" + mock_model = MagicMock() + mock_model.cell_params.get.return_value = np.array([1.0, 2.0, 3.0, np.nan]) + + stats = get_evolved_stats(mock_model, "param") + + assert stats["mean"] == 2.0 + assert stats["min"] == 1.0 + assert stats["max"] == 3.0 + assert stats["n"] == 3 + + +class TestAveragePcfs: + """Tests for average_pcfs function.""" + + def test_empty_list_returns_empty(self): + """Empty input should return empty arrays.""" + dist, mean, se = average_pcfs([]) + assert len(dist) == 0 + assert len(mean) == 0 + assert len(se) == 0 + + def test_single_pcf_returns_itself(self): + """Single PCF should return itself as mean.""" + distances = np.array([1.0, 2.0, 3.0]) + values = np.array([0.5, 1.0, 1.5]) + pcf_list = [(distances, values, 100)] + + dist, mean, se = average_pcfs(pcf_list) + + assert np.array_equal(dist, distances) + assert np.array_equal(mean, values) + assert np.allclose(se, 0.0) # No variance with single sample + + def test_multiple_pcfs_averaged(self): + """Multiple PCFs should be averaged correctly.""" + d = np.array([1.0, 2.0]) + pcf_list = [ + (d, np.array([1.0, 2.0]), 10), + (d, np.array([1.2, 1.8]), 12), + ] + + dist, mean, se = average_pcfs(pcf_list) + + expected_mean = np.array([1.1, 1.9]) + assert np.allclose(mean, expected_mean) + assert len(se) == 2 + + +# ============================================================================= +# I/O Function Tests +# ============================================================================= + + +class TestSaveLoadJsonl: + """Tests for JSONL save/load functions.""" + + def test_save_and_load_roundtrip(self, temp_output_dir, sample_results): + """Data should survive save/load roundtrip.""" + output_path = temp_output_dir / "test.jsonl" + + save_results_jsonl(sample_results, output_path) + loaded = load_results_jsonl(output_path) + + assert len(loaded) == len(sample_results) + for orig, load in zip(sample_results, loaded): + assert orig["prey_birth"] == 
load["prey_birth"] + assert orig["prey_mean"] == load["prey_mean"] + + def test_each_line_is_valid_json(self, temp_output_dir): + """Each line should be independently valid JSON.""" + results = [{"a": 1}, {"b": 2}] + output_path = temp_output_dir / "test.jsonl" + + save_results_jsonl(results, output_path) + + with open(output_path, "r") as f: + lines = f.readlines() + + assert len(lines) == 2 + for line in lines: + json.loads(line) # Should not raise + + def test_handles_numpy_arrays(self, temp_output_dir): + """Should handle numpy arrays via default=str.""" + results = [{"array": np.array([1, 2, 3])}] + output_path = temp_output_dir / "test.jsonl" + + save_results_jsonl(results, output_path) # Should not raise + + def test_load_nonexistent_file_raises(self, temp_output_dir): + """Loading nonexistent file should raise.""" + with pytest.raises(FileNotFoundError): + load_results_jsonl(temp_output_dir / "nonexistent.jsonl") + + +class TestSaveResultsNpz: + """Tests for save_results_npz function.""" + + def test_creates_npz_file(self, temp_output_dir): + """Should create a valid NPZ file.""" + results = [{"energy": [1, 2, 3]}, {"energy": [4, 5, 6]}] + output_path = temp_output_dir / "test.npz" + + save_results_npz(results, output_path) + + assert output_path.exists() + + def test_npz_contains_prefixed_keys(self, temp_output_dir): + """Keys should be prefixed with run index.""" + results = [{"x": [1]}, {"x": [2]}] + output_path = temp_output_dir / "test.npz" + + save_results_npz(results, output_path) + + data = np.load(output_path) + assert "run_0_x" in data.files + assert "run_1_x" in data.files + + +# ============================================================================= +# run_single_simulation Tests (using real Config) +# ============================================================================= + + +@pytest.fixture +def tiny_config(): + """Tiny config for fast integration tests.""" + return Config( + grid_size=10, + n_prey_death=2, + n_replicates=1, + 
warmup_steps=3, + measurement_steps=5, + collect_pcf=False, + save_timeseries=False, + directed_hunting=False, + ) + + +@pytest.fixture +def tiny_config_with_pcf(): + """Tiny config with PCF enabled.""" + return Config( + grid_size=15, + n_prey_death=2, + n_replicates=1, + warmup_steps=3, + measurement_steps=5, + collect_pcf=True, + pcf_sample_rate=1.0, + save_timeseries=False, + ) + + +@pytest.fixture +def tiny_config_with_timeseries(): + """Tiny config with timeseries enabled.""" + return Config( + grid_size=10, + n_prey_death=2, + n_replicates=1, + warmup_steps=3, + measurement_steps=5, + collect_pcf=False, + save_timeseries=True, + timeseries_subsample=1, + ) + + +@pytest.fixture +def tiny_config_directed(): + """Tiny config with directed hunting.""" + return Config( + grid_size=10, + n_prey_death=2, + n_replicates=1, + warmup_steps=3, + measurement_steps=5, + collect_pcf=False, + save_timeseries=False, + directed_hunting=True, + ) + + +class TestRunSingleSimulation: + """Tests for run_single_simulation with real tiny configs.""" + + def test_returns_required_keys(self, tiny_config): + """Result should contain all required keys.""" + result = run_single_simulation( + prey_birth=0.2, + prey_death=0.05, + predator_birth=0.8, + predator_death=0.1, + grid_size=10, + seed=42, + cfg=tiny_config, + with_evolution=False, + compute_pcf=False, + ) + + required_keys = [ + "prey_birth", "prey_death", "predator_birth", "predator_death", + "grid_size", "seed", "prey_mean", "prey_std", "pred_mean", "pred_std", + "prey_survived", "pred_survived", "prey_n_clusters", "pred_n_clusters", + ] + for key in required_keys: + assert key in result, f"Missing key: {key}" + + def test_with_evolution_returns_evolution_stats(self, tiny_config): + """Evolution mode should return evolution statistics.""" + result = run_single_simulation( + prey_birth=0.2, + prey_death=0.05, + predator_birth=0.8, + predator_death=0.1, + grid_size=10, + seed=42, + cfg=tiny_config, + with_evolution=True, + 
compute_pcf=False, + ) + + assert "evolved_prey_death_mean" in result + assert "evolve_sd" in result + + def test_with_pcf_returns_pcf_data(self, tiny_config_with_pcf): + """PCF mode should return PCF statistics.""" + result = run_single_simulation( + prey_birth=0.2, + prey_death=0.05, + predator_birth=0.8, + predator_death=0.1, + grid_size=15, + seed=42, + cfg=tiny_config_with_pcf, + with_evolution=False, + compute_pcf=True, + ) + + # PCF data should be present if both species survived + if result["prey_survived"] and result["pred_survived"]: + assert "pcf_distances" in result + + def test_with_timeseries_returns_population_history(self, tiny_config_with_timeseries): + """Timeseries mode should return population history.""" + result = run_single_simulation( + prey_birth=0.2, + prey_death=0.05, + predator_birth=0.8, + predator_death=0.1, + grid_size=10, + seed=42, + cfg=tiny_config_with_timeseries, + with_evolution=False, + compute_pcf=False, + ) + + assert "prey_timeseries" in result + assert "pred_timeseries" in result + assert len(result["prey_timeseries"]) > 0 + + def test_seed_reproducibility(self, tiny_config): + """Same seed should produce same results.""" + kwargs = dict( + prey_birth=0.2, + prey_death=0.05, + predator_birth=0.8, + predator_death=0.1, + grid_size=10, + seed=12345, + cfg=tiny_config, + with_evolution=False, + compute_pcf=False, + ) + + result1 = run_single_simulation(**kwargs) + result2 = run_single_simulation(**kwargs) + + assert result1["prey_mean"] == result2["prey_mean"] + assert result1["pred_mean"] == result2["pred_mean"] + + def test_directed_hunting_mode(self, tiny_config_directed): + """Should work with directed hunting enabled.""" + result = run_single_simulation( + prey_birth=0.2, + prey_death=0.05, + predator_birth=0.8, + predator_death=0.1, + grid_size=10, + seed=42, + cfg=tiny_config_directed, + with_evolution=False, + compute_pcf=False, + ) + + assert "prey_mean" in result # Completed successfully + + +# 
============================================================================= +# Phase Runner Tests +# ============================================================================= + + +class TestPhaseRunners: + """Basic tests for phase runner registration.""" + + def test_all_phases_registered(self): + """All phases should be in PHASE_RUNNERS dict.""" + assert 1 in PHASE_RUNNERS + assert 2 in PHASE_RUNNERS + assert 3 in PHASE_RUNNERS + assert 4 in PHASE_RUNNERS + assert 5 in PHASE_RUNNERS + + def test_phase_runners_are_callable(self): + """Each phase runner should be callable.""" + for phase, runner in PHASE_RUNNERS.items(): + assert callable(runner), f"Phase {phase} runner is not callable" + + +# ============================================================================= +# Integration Tests +# ============================================================================= + + +class TestIntegration: + """End-to-end integration tests with actual tiny simulations.""" + + def test_full_simulation_tiny_grid(self, tiny_config): + """Run actual simulation on tiny grid.""" + result = run_single_simulation( + prey_birth=0.3, + prey_death=0.05, + predator_birth=0.5, + predator_death=0.1, + grid_size=8, + seed=42, + cfg=tiny_config, + with_evolution=False, + compute_pcf=False, + ) + + # Basic sanity checks + assert result["prey_mean"] >= 0 + assert result["pred_mean"] >= 0 + assert isinstance(result["prey_survived"], bool) + assert isinstance(result["pred_survived"], bool) + + def test_evolution_changes_death_rates(self, tiny_config): + """Evolution should cause prey death rates to vary.""" + result = run_single_simulation( + prey_birth=0.3, + prey_death=0.05, + predator_birth=0.5, + predator_death=0.1, + grid_size=15, + seed=42, + cfg=tiny_config, + with_evolution=True, + compute_pcf=False, + ) + + if result["prey_survived"]: + assert "evolved_prey_death_mean" in result + + def test_save_load_integration(self, tiny_config, temp_output_dir): + """Full save/load cycle with 
real simulation results.""" + results = [] + for seed in [1, 2, 3]: + result = run_single_simulation( + prey_birth=0.2, + prey_death=0.05, + predator_birth=0.6, + predator_death=0.1, + grid_size=8, + seed=seed, + cfg=tiny_config, + with_evolution=False, + compute_pcf=False, + ) + results.append(result) + + output_path = temp_output_dir / "integration_test.jsonl" + save_results_jsonl(results, output_path) + + loaded = load_results_jsonl(output_path) + + assert len(loaded) == 3 + for orig, load in zip(results, loaded): + assert orig["seed"] == load["seed"] + assert abs(orig["prey_mean"] - load["prey_mean"]) < 1e-10 \ No newline at end of file diff --git a/tests/test_mf.py b/tests/test_mf.py deleted file mode 100644 index 6ca8893..0000000 --- a/tests/test_mf.py +++ /dev/null @@ -1,42 +0,0 @@ -import pytest -import numpy as np -from misc.mean_field import MeanFieldModel - - -@pytest.fixture -def model(): - """Model instance for testing.""" - return MeanFieldModel() - - -def test_initialization(model): - """Test model initialization with default parameters.""" - assert model.birth == 0.2 - assert model.consumption == 0.8 - assert model.predator_death == 0.045 - assert model.conversion == 1.0 - assert model.prey_competition == 0.1 - assert model.predator_competition == 0.05 - assert model.pred_benifit == model.consumption * model.conversion - - -def test_prey_extinction(model): - """Verify prey extinction logic.""" - R_eq, C_eq = model.equilibrium(prey_death=0.3) - assert R_eq == 0.0 - assert C_eq == 0.0 - - -def test_monotonicity(model): - """Test monotonicity of equilibrium populations with respect to prey death rate.""" - d_r_range = np.linspace(0.01, 0.08, 10) - sweep = model.sweep_death_rate(d_r_range) - assert np.all(np.diff(sweep["R_eq"]) <= 0) - - -def test_convergence(model): - ana_R, _ = model.equilibrium(0.05) - num_R, _ = model.equilibrium_numerical(0.05) - - # Use approx for floating point comparisons in numerical analysis - assert num_R == 
pytest.approx(ana_R, rel=1e-2) diff --git a/tests/test_numba_optimized.py b/tests/test_numba_optimized.py index 096bea7..77ccf33 100644 --- a/tests/test_numba_optimized.py +++ b/tests/test_numba_optimized.py @@ -1,970 +1,490 @@ -#!/usr/bin/env python3 """ -Unit Tests for numba_optimized.py +Tests for Numba-optimized kernels and spatial analysis functions. -Run with: - pytest test_numba_optimized.py -v - pytest test_numba_optimized.py -v --tb=short # shorter traceback - python test_numba_optimized.py # without pytest +Covers: +- Cluster detection (measure_cluster_sizes_fast, detect_clusters_fast, get_cluster_stats_fast) +- Pair Correlation Function (PCF) computation +- PPKernel class +- Warmup and seeding functions """ -import sys -import numpy as np import pytest +import numpy as np +import sys from pathlib import Path -# Setup path -project_root = str(Path(__file__).resolve().parents[1]) -scripts_dir = str(Path(__file__).resolve().parent) -for p in [project_root, scripts_dir]: - if p not in sys.path: - sys.path.insert(0, p) - -# Import module under test -try: - from models.numba_optimized import ( - NUMBA_AVAILABLE, - PPKernel, - compute_pcf_periodic_fast, - compute_all_pcfs_fast, - measure_cluster_sizes_fast, - warmup_numba_kernels, - set_numba_seed, - ) -except ImportError: - from models.numba_optimized import ( - NUMBA_AVAILABLE, - PPKernel, - compute_pcf_periodic_fast, - compute_all_pcfs_fast, - measure_cluster_sizes_fast, - set_numba_seed, - warmup_numba_kernels, - ) - - -# ============================================================================ -# FIXTURES -# ============================================================================ - - -@pytest.fixture -def small_grid(): - """Small 20x20 grid for quick tests.""" - np.random.seed(42) - grid = np.random.choice([0, 1, 2], size=(20, 20), p=[0.5, 0.3, 0.2]).astype( - np.int32 - ) - return grid - - -@pytest.fixture -def medium_grid(): - """Medium 50x50 grid for correctness tests.""" - np.random.seed(42) - 
grid = np.random.choice([0, 1, 2], size=(50, 50), p=[0.55, 0.30, 0.15]).astype( - np.int32 - ) - return grid - - -@pytest.fixture -def large_grid(): - """Large 100x100 grid for performance tests.""" - np.random.seed(42) - grid = np.random.choice([0, 1, 2], size=(100, 100), p=[0.55, 0.30, 0.15]).astype( - np.int32 - ) - return grid - - -@pytest.fixture -def clustered_grid(): - """Grid with known clusters for testing cluster detection.""" - grid = np.zeros((30, 30), dtype=np.int32) - # Cluster 1: 3x3 = 9 cells at (2,2) - grid[2:5, 2:5] = 1 - # Cluster 2: 2x4 = 8 cells at (10,10) - grid[10:12, 10:14] = 1 - # Cluster 3: single cell at (20,20) - grid[20, 20] = 1 - # Cluster 4: L-shape = 5 cells - grid[25, 25:28] = 1 # 3 horizontal - grid[26:28, 25] = 1 # 2 vertical - return grid - - -@pytest.fixture -def prey_death_array(medium_grid): - """Prey death rate array matching medium_grid.""" - arr = np.full(medium_grid.shape, np.nan, dtype=np.float64) - arr[medium_grid == 1] = 0.05 - return arr - - -# ============================================================================ -# TEST: NUMBA AVAILABILITY -# ============================================================================ - - -class TestNumbaAvailability: - """Tests for Numba availability and basic imports.""" - - def test_numba_available(self): - """Numba should be available.""" - assert ( - NUMBA_AVAILABLE - ), "Numba is not available - install with: pip install numba" - - def test_ppkernel_importable(self): - """PPKernel class should be importable.""" - assert PPKernel is not None - - def test_pcf_functions_importable(self): - """PCF functions should be importable.""" - assert compute_pcf_periodic_fast is not None - assert compute_all_pcfs_fast is not None - - def test_cluster_function_importable(self): - """Cluster measurement function should be importable.""" - assert measure_cluster_sizes_fast is not None - - -# ============================================================================ -# TEST: PPKernel -# 
============================================================================ +sys.path.insert(0, str(Path(__file__).parent.parent)) +from models.numba_optimized import ( + set_numba_seed, + PPKernel, + measure_cluster_sizes_fast, + detect_clusters_fast, + get_cluster_stats_fast, + compute_pcf_periodic_fast, + compute_all_pcfs_fast, + warmup_numba_kernels, + NUMBA_AVAILABLE, +) -class TestPPKernel: - """Tests for the PPKernel class.""" - def test_kernel_initialization_moore(self): - """Kernel should initialize with Moore neighborhood.""" - kernel = PPKernel(50, 50, "moore") - assert kernel.rows == 50 - assert kernel.cols == 50 - assert len(kernel._dr) == 8 # Moore has 8 neighbors +# ============================================================================= +# Seed and Warmup Tests +# ============================================================================= - def test_kernel_initialization_neumann(self): - """Kernel should initialize with von Neumann neighborhood.""" - kernel = PPKernel(50, 50, "neumann") - assert len(kernel._dr) == 4 # von Neumann has 4 neighbors - - def test_kernel_buffer_allocation(self): - """Kernel should pre-allocate work buffer.""" - kernel = PPKernel(100, 100, "moore") - assert kernel._occupied_buffer.shape == (10000, 2) - assert kernel._occupied_buffer.dtype == np.int32 - - def test_kernel_update_preserves_grid_shape(self, medium_grid, prey_death_array): - """Update should not change grid shape.""" - kernel = PPKernel(50, 50, "moore") - original_shape = medium_grid.shape - - kernel.update(medium_grid, prey_death_array, 0.2, 0.05, 0.2, 0.1) - - assert medium_grid.shape == original_shape - - def test_kernel_update_valid_states(self, medium_grid, prey_death_array): - """Grid should only contain valid states (0, 1, 2) after update.""" - kernel = PPKernel(50, 50, "moore") - - for _ in range(10): - kernel.update(medium_grid, prey_death_array, 0.2, 0.05, 0.2, 0.1) - - assert medium_grid.min() >= 0 - assert medium_grid.max() <= 2 - - def 
test_kernel_update_no_nan_in_grid(self, medium_grid, prey_death_array): - """Grid should not contain NaN values.""" - kernel = PPKernel(50, 50, "moore") - - for _ in range(10): - kernel.update(medium_grid, prey_death_array, 0.2, 0.05, 0.2, 0.1) - - assert not np.any(np.isnan(medium_grid)) - - def test_kernel_prey_death_consistency(self, medium_grid, prey_death_array): - """Prey death array should have values only where prey exist.""" - kernel = PPKernel(50, 50, "moore") - - for _ in range(10): - kernel.update( - medium_grid, - prey_death_array, - 0.2, - 0.05, - 0.2, - 0.1, - evolution_stopped=False, - ) - - prey_mask = medium_grid == 1 - non_prey_mask = medium_grid != 1 - - # Prey cells should have non-NaN death rates - assert np.all( - ~np.isnan(prey_death_array[prey_mask]) - ), "Prey cells missing death rates" - # Non-prey cells should have NaN death rates - assert np.all( - np.isnan(prey_death_array[non_prey_mask]) - ), "Non-prey cells have death rates" - - def test_kernel_evolution_changes_values(self, medium_grid, prey_death_array): - """Evolution should change prey death values over time.""" - kernel = PPKernel(50, 50, "moore") - - initial_mean = np.nanmean(prey_death_array) - - for _ in range(50): - kernel.update( - medium_grid, - prey_death_array, - 0.2, - 0.05, - 0.2, - 0.1, - evolve_sd=0.1, - evolve_min=0.001, - evolve_max=0.2, - evolution_stopped=False, - ) - - # Values should have changed (with high probability) - final_values = prey_death_array[~np.isnan(prey_death_array)] - if len(final_values) > 0: - # Check that not all values are exactly 0.05 - assert not np.allclose( - final_values, 0.05 - ), "Evolution did not change values" - - def test_kernel_evolution_respects_bounds(self, medium_grid, prey_death_array): - """Evolved values should stay within bounds.""" - kernel = PPKernel(50, 50, "moore") - evolve_min, evolve_max = 0.01, 0.15 - - for _ in range(100): - kernel.update( - medium_grid, - prey_death_array, - 0.2, - 0.05, - 0.2, - 0.1, - 
evolve_sd=0.1, - evolve_min=evolve_min, - evolve_max=evolve_max, - evolution_stopped=False, - ) - - valid_values = prey_death_array[~np.isnan(prey_death_array)] - if len(valid_values) > 0: - assert valid_values.min() >= evolve_min - 1e-10 - assert valid_values.max() <= evolve_max + 1e-10 - - def test_kernel_evolution_stopped(self, medium_grid, prey_death_array): - """When evolution stopped, values should only change by inheritance.""" - kernel = PPKernel(50, 50, "moore") - - # Set all prey to same value - prey_death_array[medium_grid == 1] = 0.05 - - for _ in range(20): - kernel.update( - medium_grid, - prey_death_array, - 0.2, - 0.05, - 0.2, - 0.1, - evolve_sd=0.1, - evolve_min=0.001, - evolve_max=0.2, - evolution_stopped=True, - ) - - # All values should still be exactly 0.05 (inherited without mutation) - valid_values = prey_death_array[~np.isnan(prey_death_array)] - if len(valid_values) > 0: - assert np.allclose(valid_values, 0.05), "Evolution should be stopped" - - def test_kernel_deterministic_with_seed(self): - """Same seed should produce same results.""" - results = [] - - for _ in range(2): - np.random.seed(12345) - set_numba_seed(12345) - grid = np.random.choice([0, 1, 2], (30, 30), p=[0.5, 0.3, 0.2]).astype( - np.int32 - ) - prey_death = np.full((30, 30), 0.05, dtype=np.float64) - prey_death[grid != 1] = np.nan - - kernel = PPKernel(30, 30, "moore") - for _ in range(10): - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1) - - results.append(grid.copy()) - - assert np.array_equal(results[0], results[1]), "Results should be deterministic" - - -class TestPPKernelDirectedHunting: - """Tests for PPKernel with directed hunting behavior.""" - - def test_kernel_initialization_directed_false(self): - """Kernel should default to directed_hunting=False.""" - kernel = PPKernel(50, 50, "moore") - assert kernel.directed_hunting == False - - def test_kernel_initialization_directed_true(self): - """Kernel should accept directed_hunting=True.""" - kernel = 
PPKernel(50, 50, "moore", directed_hunting=True) - assert kernel.directed_hunting == True - - def test_kernel_directed_runs_without_error(self, medium_grid, prey_death_array): - """Directed hunting kernel should run without errors.""" - set_numba_seed(42) - kernel = PPKernel(50, 50, "moore", directed_hunting=True) - grid = medium_grid.copy() - prey_death = prey_death_array.copy() +class TestSeedingAndWarmup: + """Tests for RNG seeding and kernel warmup.""" - # Run multiple steps - for _ in range(20): - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1) + def test_set_numba_seed_does_not_raise(self): + """set_numba_seed should execute without error.""" + set_numba_seed(42) # Should not raise - # Grid should only have valid states - assert grid.min() >= 0 - assert grid.max() <= 2 + def test_warmup_numba_kernels_does_not_raise(self): + """warmup_numba_kernels should execute without error.""" + warmup_numba_kernels(grid_size=20, directed_hunting=False) + warmup_numba_kernels(grid_size=20, directed_hunting=True) - def test_kernel_directed_valid_states(self, medium_grid, prey_death_array): - """Directed kernel should produce only valid states.""" - set_numba_seed(42) - kernel = PPKernel(50, 50, "moore", directed_hunting=True) + def test_numba_available_flag(self): + """NUMBA_AVAILABLE should be True when numba is installed.""" + assert NUMBA_AVAILABLE is True - grid = medium_grid.copy() - prey_death = prey_death_array.copy() - for _ in range(50): - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1) +# ============================================================================= +# Cluster Detection Tests +# ============================================================================= - unique = np.unique(grid) - assert all(v in [0, 1, 2] for v in unique) - def test_kernel_directed_prey_death_consistency( - self, medium_grid, prey_death_array - ): - """Directed kernel should maintain prey_death array consistency.""" - set_numba_seed(42) - kernel = PPKernel(50, 50, 
"moore", directed_hunting=True) +class TestMeasureClusterSizesFast: + """Tests for measure_cluster_sizes_fast function.""" - grid = medium_grid.copy() - prey_death = prey_death_array.copy() + def test_empty_grid_returns_empty_array(self, empty_grid_10x10): + """Empty grid should return no clusters.""" + sizes = measure_cluster_sizes_fast(empty_grid_10x10, species=1) + assert len(sizes) == 0 - for _ in range(20): - kernel.update( - grid, prey_death, 0.2, 0.05, 0.2, 0.1, evolution_stopped=False - ) + def test_single_cluster_correct_size(self, single_cluster_grid): + """Single connected cluster should return correct size.""" + sizes = measure_cluster_sizes_fast(single_cluster_grid, species=1) + assert len(sizes) == 1 + assert sizes[0] == 4 # 2x2 block + + def test_two_clusters_correct_sizes(self, two_cluster_grid): + """Two separate clusters should return two sizes.""" + sizes = measure_cluster_sizes_fast(two_cluster_grid, species=1) + assert len(sizes) == 2 + assert sorted(sizes) == [3, 4] # Clusters of size 3 and 4 + + def test_periodic_boundary_connects_clusters(self, periodic_cluster_grid): + """Clusters should connect via periodic boundaries (Moore).""" + sizes = measure_cluster_sizes_fast(periodic_cluster_grid, species=1, neighborhood="moore") + # All 3 cells should be one cluster due to periodic connections + assert len(sizes) == 1 + assert sizes[0] == 3 - # Prey cells should have non-NaN death rates - prey_mask = grid == 1 - non_prey_mask = grid != 1 + def test_neumann_neighborhood_fewer_connections(self): + """Von Neumann should produce more clusters than Moore for diagonal patterns.""" + grid = np.zeros((5, 5), dtype=np.int32) + # Diagonal line - connected in Moore, not in Neumann + grid[0, 0] = 1 + grid[1, 1] = 1 + grid[2, 2] = 1 - if np.any(prey_mask): - assert np.all(~np.isnan(prey_death[prey_mask])) - assert np.all(np.isnan(prey_death[non_prey_mask])) + sizes_moore = measure_cluster_sizes_fast(grid, species=1, neighborhood="moore") + sizes_neumann = 
measure_cluster_sizes_fast(grid, species=1, neighborhood="neumann") - def test_kernel_directed_evolution_respects_bounds( - self, medium_grid, prey_death_array - ): - """Directed kernel evolution should stay within bounds.""" - set_numba_seed(42) - kernel = PPKernel(50, 50, "moore", directed_hunting=True) - evolve_min, evolve_max = 0.01, 0.15 - - grid = medium_grid.copy() - prey_death = prey_death_array.copy() - - for _ in range(100): - kernel.update( - grid, - prey_death, - 0.2, - 0.05, - 0.2, - 0.1, - evolve_sd=0.1, - evolve_min=evolve_min, - evolve_max=evolve_max, - evolution_stopped=False, - ) - - valid_values = prey_death[~np.isnan(prey_death)] - if len(valid_values) > 0: - assert valid_values.min() >= evolve_min - 1e-10 - assert valid_values.max() <= evolve_max + 1e-10 - - def test_kernel_directed_neumann_neighborhood(self): - """Directed hunting should work with von Neumann neighborhood.""" - np.random.seed(42) - set_numba_seed(42) + assert len(sizes_moore) == 1 # One connected cluster + assert len(sizes_neumann) == 3 # Three separate cells - grid = np.random.choice([0, 1, 2], (30, 30), p=[0.5, 0.3, 0.2]).astype(np.int32) - prey_death = np.full((30, 30), 0.05, dtype=np.float64) - prey_death[grid != 1] = np.nan + def test_species_filtering(self, mixed_grid_10x10): + """Should only count clusters for specified species.""" + prey_sizes = measure_cluster_sizes_fast(mixed_grid_10x10, species=1) + pred_sizes = measure_cluster_sizes_fast(mixed_grid_10x10, species=2) - kernel = PPKernel(30, 30, "neumann", directed_hunting=True) + assert sum(prey_sizes) == 9 # Total prey count + assert sum(pred_sizes) == 4 # Total predator count - for _ in range(20): - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1) + def test_checkerboard_many_clusters(self, checkerboard_grid): + """Checkerboard pattern should produce many small clusters in Neumann.""" + sizes = measure_cluster_sizes_fast(checkerboard_grid, species=1, neighborhood="neumann") + # Each cell is isolated in Neumann 
neighborhood + assert len(sizes) == 18 # Half of 6x6 = 18 cells + assert all(s == 1 for s in sizes) - assert grid.min() >= 0 - assert grid.max() <= 2 - def test_random_vs_directed_different_behavior(self): - """Random and directed kernels should produce different results.""" - np.random.seed(123) +class TestDetectClustersFast: + """Tests for detect_clusters_fast function.""" - # Create identical starting grids - grid_template = np.random.choice( - [0, 1, 2], (40, 40), p=[0.50, 0.35, 0.15] - ).astype(np.int32) + def test_returns_labels_and_size_dict(self, single_cluster_grid): + """Should return both label array and size dictionary.""" + labels, sizes = detect_clusters_fast(single_cluster_grid, species=1) - grid_random = grid_template.copy() - grid_directed = grid_template.copy() + assert isinstance(labels, np.ndarray) + assert labels.shape == single_cluster_grid.shape + assert isinstance(sizes, dict) - prey_death_random = np.full((40, 40), 0.05, dtype=np.float64) - prey_death_random[grid_random != 1] = np.nan - prey_death_directed = prey_death_random.copy() + def test_labels_match_cluster_membership(self, two_cluster_grid): + """Labels should correctly identify cluster membership.""" + labels, sizes = detect_clusters_fast(two_cluster_grid, species=1) - kernel_random = PPKernel(40, 40, "moore", directed_hunting=False) - kernel_directed = PPKernel(40, 40, "moore", directed_hunting=True) + # All cells in a cluster should have same label + assert labels[0, 0] == labels[0, 1] == labels[1, 0] # Cluster 1 + assert labels[4, 4] == labels[4, 5] == labels[5, 4] == labels[5, 5] # Cluster 2 - # Run with same seed - set_numba_seed(999) - for _ in range(50): - kernel_random.update(grid_random, prey_death_random, 0.2, 0.05, 0.6, 0.1) + # Different clusters should have different labels + assert labels[0, 0] != labels[4, 4] - set_numba_seed(999) - for _ in range(50): - kernel_directed.update( - grid_directed, prey_death_directed, 0.2, 0.05, 0.6, 0.1 - ) + def 
test_non_species_cells_have_zero_label(self, mixed_grid_10x10): + """Cells not belonging to target species should have label 0.""" + labels, _ = detect_clusters_fast(mixed_grid_10x10, species=1) - # Grids should differ (directed hunting changes dynamics) - # Note: not guaranteed for every seed, but highly likely - prey_random = np.sum(grid_random == 1) - prey_directed = np.sum(grid_directed == 1) - pred_random = np.sum(grid_random == 2) - pred_directed = np.sum(grid_directed == 2) + # Predator cells and empty cells should be 0 + assert labels[6, 6] == 0 # Predator cell + assert labels[5, 5] == 0 # Empty cell - # At minimum, both should have valid grids - assert grid_random.min() >= 0 and grid_random.max() <= 2 - assert grid_directed.min() >= 0 and grid_directed.max() <= 2 + def test_size_dict_matches_cluster_count(self, two_cluster_grid): + """Size dictionary should have entry for each cluster.""" + labels, sizes = detect_clusters_fast(two_cluster_grid, species=1) - # The populations should likely differ - # (we don't assert this strictly as it depends on random dynamics) - print(f"Random: prey={prey_random}, pred={pred_random}") - print(f"Directed: prey={prey_directed}, pred={pred_directed}") + assert len(sizes) == 2 + assert set(sizes.values()) == {3, 4} - def test_directed_predator_hunts_adjacent_prey(self): - """Directed predator should successfully hunt adjacent prey.""" - # Create controlled scenario: predator surrounded by prey - grid = np.zeros((10, 10), dtype=np.int32) - grid[5, 5] = 2 # Predator in center - grid[4, 5] = 1 # Prey above - grid[6, 5] = 1 # Prey below - grid[5, 4] = 1 # Prey left - grid[5, 6] = 1 # Prey right - prey_death = np.full((10, 10), 0.05, dtype=np.float64) - prey_death[grid != 1] = np.nan +class TestGetClusterStatsFast: + """Tests for get_cluster_stats_fast function.""" - kernel = PPKernel(10, 10, "neumann", directed_hunting=True) + def test_returns_comprehensive_stats(self, single_cluster_grid): + """Should return dictionary with 
all expected keys.""" + stats = get_cluster_stats_fast(single_cluster_grid, species=1) - initial_prey = np.sum(grid == 1) - initial_pred = np.sum(grid == 2) + expected_keys = [ + "n_clusters", + "sizes", + "largest", + "largest_fraction", + "mean_size", + "size_distribution", + "labels", + "size_dict", + ] + for key in expected_keys: + assert key in stats - # Run with high predator birth, zero predator death - set_numba_seed(42) - for _ in range(5): - kernel.update(grid, prey_death, 0.0, 0.05, 1.0, 0.0) + def test_empty_grid_stats(self, empty_grid_10x10): + """Empty grid should return zero-valued stats.""" + stats = get_cluster_stats_fast(empty_grid_10x10, species=1) - final_prey = np.sum(grid == 1) - final_pred = np.sum(grid == 2) + assert stats["n_clusters"] == 0 + assert stats["largest"] == 0 + assert stats["largest_fraction"] == 0.0 + assert stats["mean_size"] == 0.0 - # Predators should have converted some prey - # (with 100% birth rate and 0% death rate) - assert final_pred >= initial_pred, "Predator population should not decrease" - print(f"Prey: {initial_prey} -> {final_prey}") - print(f"Pred: {initial_pred} -> {final_pred}") + def test_largest_fraction_calculation(self, two_cluster_grid): + """largest_fraction should be largest cluster / total population.""" + stats = get_cluster_stats_fast(two_cluster_grid, species=1) + total_prey = 3 + 4 # Two clusters + expected_fraction = 4 / total_prey -# ============================================================================ -# TEST: PCF COMPUTATION -# ============================================================================ + assert stats["largest"] == 4 + assert abs(stats["largest_fraction"] - expected_fraction) < 1e-10 + def test_size_distribution_counts(self, checkerboard_grid): + """size_distribution should count clusters of each size.""" + stats = get_cluster_stats_fast(checkerboard_grid, species=1, neighborhood="neumann") -class TestPCFComputation: - """Tests for pair correlation function 
computation.""" + # All 18 clusters are size 1 + assert stats["size_distribution"] == {1: 18} - def test_pcf_returns_correct_shapes(self, medium_grid): - """PCF should return arrays of correct shapes.""" - prey_pos = np.argwhere(medium_grid == 1) - pred_pos = np.argwhere(medium_grid == 2) + def test_sizes_sorted_descending(self, two_cluster_grid): + """sizes array should be sorted in descending order.""" + stats = get_cluster_stats_fast(two_cluster_grid, species=1) - n_bins = 20 - dist, pcf, n_pairs = compute_pcf_periodic_fast( - prey_pos, pred_pos, medium_grid.shape, 15.0, n_bins, False - ) + sizes = stats["sizes"] + assert list(sizes) == sorted(sizes, reverse=True) - assert len(dist) == n_bins - assert len(pcf) == n_bins - assert isinstance(n_pairs, int) - def test_pcf_empty_positions(self): - """PCF should handle empty position arrays.""" - empty = np.array([]).reshape(0, 2) - positions = np.array([[5, 5], [10, 10]]) +# ============================================================================= +# PCF Tests +# ============================================================================= - dist, pcf, n_pairs = compute_pcf_periodic_fast( - empty, positions, (50, 50), 15.0, 20, False - ) - assert len(pcf) == 20 - assert np.allclose(pcf, 1.0) # Default value for empty - assert n_pairs == 0 +class TestComputePcfPeriodicFast: + """Tests for compute_pcf_periodic_fast function.""" - def test_pcf_values_reasonable(self, large_grid): - """PCF values should be positive and reasonable.""" - prey_pos = np.argwhere(large_grid == 1) + def test_empty_positions_returns_ones(self): + """PCF of empty positions should return 1.0 (no correlation).""" + empty_pos = np.array([]).reshape(0, 2) + grid_shape = (50, 50) dist, pcf, n_pairs = compute_pcf_periodic_fast( - prey_pos, prey_pos, large_grid.shape, 20.0, 20, True + empty_pos, empty_pos, grid_shape, max_distance=10.0, n_bins=10 ) - assert np.all(pcf >= 0), "PCF should be non-negative" - assert np.all(np.isfinite(pcf)), "PCF 
should be finite" - # For random distribution, PCF should be around 1.0 on average - assert 0.5 < np.mean(pcf) < 2.0, f"Mean PCF {np.mean(pcf)} seems unreasonable" - - def test_pcf_clustered_higher_than_random(self): - """Clustered points should have higher short-range PCF than random.""" - grid_size = 100 - - # Create clustered distribution - clustered_grid = np.zeros((grid_size, grid_size), dtype=np.int32) - for _ in range(10): - cx, cy = np.random.randint(10, 90, 2) - for dx in range(-5, 6): - for dy in range(-5, 6): - if np.random.random() < 0.8: - clustered_grid[(cx + dx) % grid_size, (cy + dy) % grid_size] = 1 - - # Create random distribution with same density - n_clustered = np.sum(clustered_grid == 1) - random_grid = np.zeros((grid_size, grid_size), dtype=np.int32) - positions = np.random.permutation(grid_size * grid_size)[:n_clustered] - for pos in positions: - random_grid[pos // grid_size, pos % grid_size] = 1 - - # Compute PCFs - clustered_pos = np.argwhere(clustered_grid == 1) - random_pos = np.argwhere(random_grid == 1) - - _, pcf_clustered, _ = compute_pcf_periodic_fast( - clustered_pos, clustered_pos, (grid_size, grid_size), 20.0, 20, True - ) - _, pcf_random, _ = compute_pcf_periodic_fast( - random_pos, random_pos, (grid_size, grid_size), 20.0, 20, True - ) + assert len(dist) == 10 + assert np.allclose(pcf, 1.0) + assert n_pairs == 0 - # Short-range PCF should be higher for clustered - short_range_clustered = np.mean(pcf_clustered[:5]) - short_range_random = np.mean(pcf_random[:5]) + def test_bin_centers_correct_spacing(self): + """Bin centers should be evenly spaced.""" + pos = np.array([[10.0, 10.0], [15.0, 15.0]]) + grid_shape = (50, 50) - assert ( - short_range_clustered > short_range_random - ), f"Clustered PCF ({short_range_clustered:.2f}) should be > random ({short_range_random:.2f})" + dist, _, _ = compute_pcf_periodic_fast( + pos, pos, grid_shape, max_distance=20.0, n_bins=10, self_correlation=True + ) - def test_compute_all_pcfs_keys(self, 
medium_grid): - """compute_all_pcfs_fast should return dict with correct keys.""" - results = compute_all_pcfs_fast(medium_grid, 15.0, 20) + expected_spacing = 20.0 / 10 + actual_spacing = dist[1] - dist[0] + assert abs(actual_spacing - expected_spacing) < 1e-10 - assert "prey_prey" in results - assert "pred_pred" in results - assert "prey_pred" in results + def test_self_correlation_excludes_self_pairs(self): + """Self-correlation should not count i==j pairs.""" + # Single point - self correlation should find 0 pairs + pos = np.array([[25.0, 25.0]]) + grid_shape = (50, 50) - def test_compute_all_pcfs_structure(self, medium_grid): - """Each PCF result should be a tuple of (distances, pcf, n_pairs).""" - results = compute_all_pcfs_fast(medium_grid, 15.0, 20) + _, _, n_pairs = compute_pcf_periodic_fast( + pos, pos, grid_shape, max_distance=10.0, self_correlation=True + ) - for key in ["prey_prey", "pred_pred", "prey_pred"]: - assert len(results[key]) == 3, f"{key} should have 3 elements" - dist, pcf, n_pairs = results[key] - assert len(dist) == 20 - assert len(pcf) == 20 - assert isinstance(n_pairs, int) + assert n_pairs == 0 + def test_cross_correlation_counts_all_pairs(self): + """Cross-correlation should count all i-j pairs.""" + pos_i = np.array([[10.0, 10.0]]) + pos_j = np.array([[12.0, 10.0]]) # Distance = 2 + grid_shape = (50, 50) -# ============================================================================ -# TEST: CLUSTER MEASUREMENT -# ============================================================================ + _, _, n_pairs = compute_pcf_periodic_fast( + pos_i, pos_j, grid_shape, max_distance=10.0, self_correlation=False + ) + assert n_pairs == 1 -class TestClusterMeasurement: - """Tests for cluster size measurement.""" + def test_periodic_distance_calculation(self): + """Distances should respect periodic boundaries.""" + # Two points on opposite edges - should be close via periodicity + pos_i = np.array([[0.5, 25.0]]) + pos_j = np.array([[49.5, 
25.0]]) # Periodic distance = 1.0 + grid_shape = (50, 50) - def test_cluster_known_sizes(self, clustered_grid): - """Should correctly identify known cluster sizes.""" - sizes = measure_cluster_sizes_fast(clustered_grid, 1) - sizes_sorted = sorted(sizes, reverse=True) + _, pcf, n_pairs = compute_pcf_periodic_fast( + pos_i, pos_j, grid_shape, max_distance=5.0, n_bins=5, self_correlation=False + ) - # Expected: 9 (3x3), 8 (2x4), 5 (L-shape), 1 (single) - expected = [9, 8, 5, 1] + assert n_pairs == 1 # Should find the pair - assert len(sizes) == 4, f"Expected 4 clusters, got {len(sizes)}" - assert ( - list(sizes_sorted) == expected - ), f"Expected {expected}, got {list(sizes_sorted)}" - def test_cluster_empty_grid(self): - """Should return empty array for grid with no target species.""" - grid = np.zeros((20, 20), dtype=np.int32) - sizes = measure_cluster_sizes_fast(grid, 1) +class TestComputeAllPcfsFast: + """Tests for compute_all_pcfs_fast function.""" - assert len(sizes) == 0 + def test_returns_all_three_pcfs(self, mixed_grid_10x10): + """Should return prey-prey, pred-pred, and prey-pred PCFs.""" + results = compute_all_pcfs_fast(mixed_grid_10x10, max_distance=3.0, n_bins=5) - def test_cluster_full_grid(self): - """Single cluster when grid is full of one species.""" - grid = np.ones((10, 10), dtype=np.int32) - sizes = measure_cluster_sizes_fast(grid, 1) + assert "prey_prey" in results + assert "pred_pred" in results + assert "prey_pred" in results - assert len(sizes) == 1 - assert sizes[0] == 100 + def test_each_pcf_has_correct_structure(self, mixed_grid_10x10): + """Each PCF result should be (distances, values, count) tuple.""" + results = compute_all_pcfs_fast(mixed_grid_10x10, max_distance=3.0, n_bins=5) - def test_cluster_diagonal_not_connected(self): - """Diagonally adjacent cells should NOT be connected (4-connectivity).""" - grid = np.zeros((5, 5), dtype=np.int32) - grid[0, 0] = 1 - grid[1, 1] = 1 # Diagonal from (0,0) - grid[2, 2] = 1 # Diagonal from (1,1) + 
for key in ["prey_prey", "pred_pred", "prey_pred"]: + dist, pcf, n = results[key] + assert isinstance(dist, np.ndarray) + assert isinstance(pcf, np.ndarray) + assert isinstance(n, int) + assert len(dist) == len(pcf) == 5 - sizes = measure_cluster_sizes_fast(grid, 1) + def test_default_max_distance(self, mixed_grid_10x10): + """Default max_distance should be grid_size / 4.""" + results = compute_all_pcfs_fast(mixed_grid_10x10, n_bins=5) - # Each cell should be its own cluster (4-connectivity) - assert len(sizes) == 3, f"Expected 3 separate clusters, got {len(sizes)}" - assert all(s == 1 for s in sizes) + # For 10x10 grid, default max_distance = 2.5 + dist, _, _ = results["prey_prey"] + assert dist[-1] < 2.5 # Last bin center should be less than max - def test_cluster_orthogonal_connected(self): - """Orthogonally adjacent cells should be connected.""" - grid = np.zeros((5, 5), dtype=np.int32) - grid[2, 1:4] = 1 # Horizontal line of 3 - grid[1, 2] = 1 # One above middle - grid[3, 2] = 1 # One below middle + def test_empty_species_returns_ones(self, prey_only_grid_10x10): + """PCF for missing species should return 1.0.""" + results = compute_all_pcfs_fast(prey_only_grid_10x10, max_distance=3.0, n_bins=5) - sizes = measure_cluster_sizes_fast(grid, 1) + _, pred_pred_pcf, _ = results["pred_pred"] + assert np.allclose(pred_pred_pcf, 1.0) - # Should be one connected cluster of 5 - assert len(sizes) == 1 - assert sizes[0] == 5 - def test_cluster_species_separation(self): - """Clusters of different species should be separate.""" - grid = np.zeros((10, 10), dtype=np.int32) - grid[0:3, 0:3] = 1 # 9 prey - grid[5:8, 5:8] = 2 # 9 predators +# ============================================================================= +# PPKernel Tests +# ============================================================================= - prey_sizes = measure_cluster_sizes_fast(grid, 1) - pred_sizes = measure_cluster_sizes_fast(grid, 2) - assert len(prey_sizes) == 1 - assert prey_sizes[0] == 9 - 
assert len(pred_sizes) == 1 - assert pred_sizes[0] == 9 +class TestPPKernel: + """Tests for PPKernel class.""" - def test_cluster_total_cells(self, medium_grid): - """Total cells in clusters should equal total cells of that species.""" - for species in [1, 2]: - sizes = measure_cluster_sizes_fast(medium_grid, species) - total_in_clusters = sum(sizes) - total_in_grid = np.sum(medium_grid == species) + def test_kernel_initialization_moore(self): + """Moore kernel should have 8-direction offsets.""" + kernel = PPKernel(10, 10, neighborhood="moore") + assert len(kernel._dr) == 8 + assert len(kernel._dc) == 8 - assert ( - total_in_clusters == total_in_grid - ), f"Species {species}: cluster total {total_in_clusters} != grid total {total_in_grid}" + def test_kernel_initialization_neumann(self): + """Von Neumann kernel should have 4-direction offsets.""" + kernel = PPKernel(10, 10, neighborhood="von_neumann") + assert len(kernel._dr) == 4 + assert len(kernel._dc) == 4 + + def test_kernel_preallocates_buffer(self): + """Kernel should preallocate occupied_buffer.""" + kernel = PPKernel(15, 20) + assert kernel._occupied_buffer.shape == (15 * 20, 2) + + def test_kernel_update_modifies_grid(self): + """update() should modify the grid in place.""" + set_numba_seed(42) + kernel = PPKernel(10, 10, neighborhood="moore", directed_hunting=False) + grid = np.zeros((10, 10), dtype=np.int32) + grid[3:6, 3:6] = 1 # Prey block + grid[7, 7] = 2 # One predator -# ============================================================================ -# TEST: WARMUP FUNCTION -# ============================================================================ + prey_death_arr = np.full((10, 10), 0.05, dtype=np.float64) + prey_death_arr[grid != 1] = np.nan + initial_grid = grid.copy() -class TestWarmup: - """Tests for JIT warmup function.""" + kernel.update( + grid, prey_death_arr, + prey_birth=0.3, prey_death=0.05, + pred_birth=0.5, pred_death=0.1, + ) - def test_warmup_runs_without_error(self): - """Warmup 
should complete without errors.""" - try: - warmup_numba_kernels(50) - except Exception as e: - pytest.fail(f"Warmup failed with error: {e}") + # Grid should have changed + assert not np.array_equal(grid, initial_grid) - def test_warmup_compiles_kernel(self): - """After warmup, kernel should run faster.""" - import time + def test_kernel_update_preserves_dtype(self): + """update() should preserve grid dtype.""" + kernel = PPKernel(10, 10) - # First call (might trigger compilation) - warmup_numba_kernels(30) + grid = np.zeros((10, 10), dtype=np.int32) + grid[5, 5] = 1 + prey_death_arr = np.full((10, 10), 0.05, dtype=np.float64) - # Timed call (should be fast) - np.random.seed(42) - grid = np.random.choice([0, 1, 2], (30, 30), p=[0.5, 0.3, 0.2]).astype(np.int32) - prey_death = np.full((30, 30), 0.05, dtype=np.float64) - prey_death[grid != 1] = np.nan + kernel.update(grid, prey_death_arr, 0.2, 0.05, 0.2, 0.1) - kernel = PPKernel(30, 30, "moore") + assert grid.dtype == np.int32 - t0 = time.perf_counter() - for _ in range(10): - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1) - elapsed = time.perf_counter() - t0 + def test_kernel_directed_hunting_option(self): + """directed_hunting flag should be stored correctly.""" + kernel_random = PPKernel(10, 10, directed_hunting=False) + kernel_directed = PPKernel(10, 10, directed_hunting=True) - # Should complete quickly (less than 1 second for 10 iterations) - assert elapsed < 1.0, f"Kernel too slow after warmup: {elapsed:.2f}s" + assert kernel_random.directed_hunting is False + assert kernel_directed.directed_hunting is True - def test_warmup_directed_hunting(self): - """Warmup should work with directed_hunting=True.""" - try: - warmup_numba_kernels(30, directed_hunting=True) - except Exception as e: - pytest.fail(f"Warmup with directed_hunting failed: {e}") + def test_kernel_update_with_evolution(self): + """update() should handle evolution parameters.""" + set_numba_seed(42) + kernel = PPKernel(10, 10) + grid = 
np.zeros((10, 10), dtype=np.int32) + grid[2:5, 2:5] = 1 # Prey + prey_death_arr = np.full((10, 10), 0.05, dtype=np.float64) + prey_death_arr[grid != 1] = np.nan + + # Run with evolution active + kernel.update( + grid, prey_death_arr, + prey_birth=0.3, prey_death=0.05, + pred_birth=0.5, pred_death=0.1, + evolve_sd=0.02, evolve_min=0.01, evolve_max=0.15, + evolution_stopped=False, + ) -# ============================================================================ -# TEST: EDGE CASES -# ============================================================================ + # Check that new prey have evolved values + new_prey_mask = (grid == 1) & ~np.isnan(prey_death_arr) + if np.any(new_prey_mask): + values = prey_death_arr[new_prey_mask] + assert np.all(values >= 0.01) + assert np.all(values <= 0.15) -class TestEdgeCases: - """Tests for edge cases and boundary conditions.""" +# ============================================================================= +# Edge Cases +# ============================================================================= - def test_single_cell_grid(self): - """Should handle 1x1 grid.""" - grid = np.array([[1]], dtype=np.int32) - prey_death = np.array([[0.05]], dtype=np.float64) - kernel = PPKernel(1, 1, "moore") - # Should not crash - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1) +class TestNumbaEdgeCases: + """Edge case tests for Numba functions.""" - def test_very_small_grid(self): - """Should handle very small grids.""" - grid = np.array([[1, 0], [2, 1]], dtype=np.int32) - prey_death = np.full((2, 2), np.nan, dtype=np.float64) - prey_death[grid == 1] = 0.05 + def test_cluster_detection_1x1_grid(self): + """Should handle minimal 1x1 grid.""" + grid = np.array([[1]], dtype=np.int32) + sizes = measure_cluster_sizes_fast(grid, species=1) + assert len(sizes) == 1 + assert sizes[0] == 1 - kernel = PPKernel(2, 2, "moore") - for _ in range(10): - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1) + def test_cluster_detection_full_grid(self): + 
"""Should handle grid completely filled with one species.""" + grid = np.ones((10, 10), dtype=np.int32) + stats = get_cluster_stats_fast(grid, species=1) - assert grid.min() >= 0 - assert grid.max() <= 2 + assert stats["n_clusters"] == 1 + assert stats["largest"] == 100 + assert stats["largest_fraction"] == 1.0 - def test_all_empty_grid(self): - """Should handle grid with no organisms.""" + def test_pcf_single_point(self): + """PCF should handle single-point case.""" grid = np.zeros((20, 20), dtype=np.int32) - prey_death = np.full((20, 20), np.nan, dtype=np.float64) - - kernel = PPKernel(20, 20, "moore") - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1) - - # Grid should still be all zeros - assert np.all(grid == 0) - - def test_all_prey_grid(self): - """Should handle grid with only prey.""" - grid = np.ones((20, 20), dtype=np.int32) - prey_death = np.full((20, 20), 0.05, dtype=np.float64) - - kernel = PPKernel(20, 20, "moore") - for _ in range(10): - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1) - - # Some prey should have died - assert np.sum(grid == 0) > 0 - - def test_all_predator_grid(self): - """Should handle grid with only predators.""" - grid = np.full((20, 20), 2, dtype=np.int32) - prey_death = np.full((20, 20), np.nan, dtype=np.float64) - - kernel = PPKernel(20, 20, "moore") - for _ in range(50): - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1) - - # All predators should have died (no prey to eat) - assert np.sum(grid == 2) < 400 # Most should be dead + grid[10, 10] = 1 - def test_extreme_parameters(self): - """Should handle extreme parameter values.""" - np.random.seed(42) - grid = np.random.choice([0, 1, 2], (30, 30), p=[0.5, 0.3, 0.2]).astype(np.int32) - prey_death = np.full((30, 30), 0.5, dtype=np.float64) - prey_death[grid != 1] = np.nan + results = compute_all_pcfs_fast(grid, max_distance=5.0, n_bins=5) + _, pcf, n = results["prey_prey"] - kernel = PPKernel(30, 30, "moore") + assert n == 0 # No pairs with single point - # High 
death rates - kernel.update(grid, prey_death, 0.99, 0.99, 0.99, 0.99) + def test_kernel_empty_grid(self): + """Kernel should handle completely empty grid.""" + kernel = PPKernel(10, 10) + grid = np.zeros((10, 10), dtype=np.int32) + prey_death_arr = np.full((10, 10), np.nan, dtype=np.float64) - # Low death rates - grid = np.random.choice([0, 1, 2], (30, 30), p=[0.5, 0.3, 0.2]).astype(np.int32) - prey_death = np.full((30, 30), 0.001, dtype=np.float64) - prey_death[grid != 1] = np.nan - kernel.update(grid, prey_death, 0.01, 0.01, 0.01, 0.01) + # Should not raise + kernel.update(grid, prey_death_arr, 0.2, 0.05, 0.2, 0.1) - # Should not crash - assert True + # Grid should still be empty + assert np.sum(grid) == 0 - def test_directed_single_predator_surrounded_by_prey(self): - """Directed hunting: single predator surrounded by prey.""" - grid = np.ones((5, 5), dtype=np.int32) # All prey - grid[2, 2] = 2 # One predator in center + def test_kernel_high_death_rates(self): + """Kernel should handle extreme death rates.""" + set_numba_seed(42) + kernel = PPKernel(10, 10) - prey_death = np.full((5, 5), 0.05, dtype=np.float64) - prey_death[grid != 1] = np.nan + grid = np.zeros((10, 10), dtype=np.int32) + grid[::2, ::2] = 1 # Sparse prey + prey_death_arr = np.full((10, 10), 0.99, dtype=np.float64) # Very high death + prey_death_arr[grid != 1] = np.nan - kernel = PPKernel(5, 5, "moore", directed_hunting=True) - set_numba_seed(42) + initial_prey = np.sum(grid == 1) - # Run a few steps - for _ in range(3): - kernel.update(grid, prey_death, 0.0, 0.05, 0.9, 0.0) + kernel.update(grid, prey_death_arr, 0.2, 0.99, 0.2, 0.1) - # Should not crash, grid should be valid - assert grid.min() >= 0 - assert grid.max() <= 2 + # Most prey should die + final_prey = np.sum(grid == 1) + assert final_prey < initial_prey - def test_directed_no_prey_nearby(self): - """Directed hunting: predator with no prey neighbors should explore.""" - grid = np.zeros((10, 10), dtype=np.int32) - grid[0, 0] = 2 # 
Predator in corner - grid[9, 9] = 1 # Prey far away + def test_cluster_large_grid_performance(self): + """Cluster detection should complete quickly on moderate grid.""" + import time - prey_death = np.full((10, 10), 0.05, dtype=np.float64) - prey_death[grid != 1] = np.nan + grid = np.zeros((200, 200), dtype=np.int32) + # Random scattered prey + np.random.seed(42) + grid[np.random.random((200, 200)) < 0.3] = 1 - kernel = PPKernel(10, 10, "moore", directed_hunting=True) - set_numba_seed(42) + start = time.perf_counter() + stats = get_cluster_stats_fast(grid, species=1) + elapsed = time.perf_counter() - start - # Run - predator should explore randomly (no prey adjacent) - for _ in range(5): - kernel.update(grid, prey_death, 0.0, 0.05, 0.5, 0.0) - - assert grid.min() >= 0 - assert grid.max() <= 2 - - -# ============================================================================ -# MAIN -# ============================================================================ - - -def run_tests_without_pytest(): - """Run tests without pytest for basic verification.""" - print("=" * 60) - print("Running tests without pytest...") - print("=" * 60) - - test_classes = [ - TestNumbaAvailability, - TestPPKernel, - TestPCFComputation, - TestClusterMeasurement, - TestWarmup, - TestEdgeCases, - ] - - # Create fixtures - np.random.seed(42) - small_grid = np.random.choice([0, 1, 2], (20, 20), p=[0.5, 0.3, 0.2]).astype( - np.int32 - ) - medium_grid = np.random.choice([0, 1, 2], (50, 50), p=[0.55, 0.30, 0.15]).astype( - np.int32 - ) - large_grid = np.random.choice([0, 1, 2], (100, 100), p=[0.55, 0.30, 0.15]).astype( - np.int32 - ) - - clustered_grid = np.zeros((30, 30), dtype=np.int32) - clustered_grid[2:5, 2:5] = 1 - clustered_grid[10:12, 10:14] = 1 - clustered_grid[20, 20] = 1 - clustered_grid[25, 25:28] = 1 - clustered_grid[26:28, 25] = 1 - - prey_death_array = np.full(medium_grid.shape, np.nan, dtype=np.float64) - prey_death_array[medium_grid == 1] = 0.05 - - fixtures = { - "small_grid": 
small_grid, - "medium_grid": medium_grid, - "large_grid": large_grid, - "clustered_grid": clustered_grid, - "prey_death_array": prey_death_array, - } - - passed = 0 - failed = 0 - - for test_class in test_classes: - print(f"\n{test_class.__name__}:") - instance = test_class() - - for method_name in dir(instance): - if method_name.startswith("test_"): - method = getattr(instance, method_name) - - # Get fixture arguments - import inspect - - sig = inspect.signature(method) - kwargs = {} - for param in sig.parameters: - if param in fixtures: - # Create fresh copy for each test - kwargs[param] = fixtures[param].copy() - - try: - method(**kwargs) - print(f" ✓ {method_name}") - passed += 1 - except Exception as e: - print(f" ✗ {method_name}: {e}") - failed += 1 - - print("\n" + "=" * 60) - print(f"Results: {passed} passed, {failed} failed") - print("=" * 60) - - return failed == 0 - - -if __name__ == "__main__": - if len(sys.argv) > 1 and sys.argv[1] == "--no-pytest": - success = run_tests_without_pytest() - sys.exit(0 if success else 1) - else: - try: - import pytest - - sys.exit(pytest.main([__file__, "-v"])) - except ImportError: - print("pytest not installed, running basic tests...") - success = run_tests_without_pytest() - sys.exit(0 if success else 1) + assert elapsed < 1.0 # Should complete in under 1 second + assert stats["n_clusters"] > 0 \ No newline at end of file diff --git a/tests/test_optimizations.py b/tests/test_optimizations.py deleted file mode 100644 index d0d4cb5..0000000 --- a/tests/test_optimizations.py +++ /dev/null @@ -1,320 +0,0 @@ -#!/usr/bin/env python3 -""" -Test and Benchmark Script for Optimized PP Analysis - -Run from your project root: - python scripts/test_optimizations.py - python scripts/test_optimizations.py --full -""" - -import sys -import time -import argparse -import numpy as np -from pathlib import Path - -# Handle imports from different locations -project_root = str(Path(__file__).resolve().parents[1]) -scripts_dir = 
str(Path(__file__).resolve().parent) -for p in [project_root, scripts_dir]: - if p not in sys.path: - sys.path.insert(0, p) - - -# Flexible import -def get_modules(): - try: - from models.numba_optimized import ( - NUMBA_AVAILABLE, - PPKernel, - compute_all_pcfs_fast, - measure_cluster_sizes_fast, - ) - except ImportError: - from models.numba_optimized import ( - NUMBA_AVAILABLE, - PPKernel, - compute_all_pcfs_fast, - measure_cluster_sizes_fast, - ) - return NUMBA_AVAILABLE, PPKernel, compute_all_pcfs_fast, measure_cluster_sizes_fast - - -def test_numba(): - """Test Numba availability.""" - print("=" * 60) - print("TEST: Numba Availability") - print("=" * 60) - try: - NUMBA_AVAILABLE, PPKernel, _, _ = get_modules() - print(f" Numba available: {NUMBA_AVAILABLE}") - kernel = PPKernel(10, 10, "moore") - print(" PPKernel: OK") - return True - except Exception as e: - print(f" FAILED: {e}") - return False - - -def test_kernel(): - """Test kernel correctness.""" - print("\n" + "=" * 60) - print("TEST: Kernel Correctness") - print("=" * 60) - try: - _, PPKernel, _, _ = get_modules() - np.random.seed(42) - - grid = np.random.choice([0, 1, 2], (50, 50), p=[0.55, 0.30, 0.15]).astype( - np.int32 - ) - prey_death = np.full((50, 50), 0.05, dtype=np.float64) - prey_death[grid != 1] = np.nan - - print(f" Initial: {np.sum(grid==1)} prey, {np.sum(grid==2)} pred") - - kernel = PPKernel(50, 50, "moore") - for _ in range(100): - kernel.update( - grid, prey_death, 0.2, 0.05, 0.2, 0.1, evolution_stopped=False - ) - - print(f" After 100: {np.sum(grid==1)} prey, {np.sum(grid==2)} pred") - - # Sanity checks - assert 0 <= grid.min() <= grid.max() <= 2 - assert np.all(~np.isnan(prey_death[grid == 1])) - print(" PASSED") - return True - except Exception as e: - print(f" FAILED: {e}") - return False - - -def test_pcf(): - """Test PCF computation.""" - print("\n" + "=" * 60) - print("TEST: PCF Computation") - print("=" * 60) - try: - _, _, compute_all_pcfs_fast, _ = get_modules() - - grid = 
np.zeros((100, 100), dtype=np.int32) - # Create clustered prey - for _ in range(10): - cx, cy = np.random.randint(10, 90, 2) - for dx in range(-5, 6): - for dy in range(-5, 6): - if np.random.random() < 0.7: - grid[(cx + dx) % 100, (cy + dy) % 100] = 1 - # Scatter predators - empty = np.argwhere(grid == 0) - for idx in np.random.choice(len(empty), min(500, len(empty)), replace=False): - grid[empty[idx, 0], empty[idx, 1]] = 2 - - print(f" Grid: {np.sum(grid==1)} prey, {np.sum(grid==2)} pred") - - results = compute_all_pcfs_fast(grid, 20.0, 20) - pcf_rr = results["prey_prey"][1] - - print(f" Prey clustering (short range): {np.mean(pcf_rr[:5]):.2f}") - assert np.mean(pcf_rr[:5]) > 1.0, "Clustered prey should have PCF > 1" - print(" PASSED") - return True - except Exception as e: - print(f" FAILED: {e}") - return False - - -def test_clusters(): - """Test cluster measurement.""" - print("\n" + "=" * 60) - print("TEST: Cluster Measurement") - print("=" * 60) - try: - _, _, _, measure_cluster_sizes_fast = get_modules() - - grid = np.zeros((20, 20), dtype=np.int32) - grid[2:5, 2:5] = 1 # 9 cells - grid[10:12, 10:12] = 1 # 4 cells - grid[15, 15] = 1 # 1 cell - - sizes = sorted(measure_cluster_sizes_fast(grid, 1), reverse=True) - print(f" Expected: [9, 4, 1], Got: {sizes}") - - assert sizes == [9, 4, 1] - print(" PASSED") - return True - except Exception as e: - print(f" FAILED: {e}") - return False - - -def benchmark_kernel(): - """Benchmark kernel performance.""" - print("\n" + "=" * 60) - print("BENCHMARK: Kernel (500 steps, 100x100)") - print("=" * 60) - - _, PPKernel, _, _ = get_modules() - - np.random.seed(42) - grid = np.random.choice([0, 1, 2], (100, 100), p=[0.55, 0.30, 0.15]).astype( - np.int32 - ) - prey_death = np.full((100, 100), 0.05, dtype=np.float64) - prey_death[grid != 1] = np.nan - - kernel = PPKernel(100, 100, "moore") - - # Warmup - g, p = grid.copy(), prey_death.copy() - kernel.update(g, p, 0.2, 0.05, 0.2, 0.1) - - # Benchmark - g, p = grid.copy(), 
prey_death.copy() - t0 = time.perf_counter() - for _ in range(500): - kernel.update(g, p, 0.2, 0.05, 0.2, 0.1, evolution_stopped=False) - elapsed = (time.perf_counter() - t0) * 1000 - - print(f" Total: {elapsed:.1f}ms") - print(f" Per step: {elapsed/500:.3f}ms") - return elapsed / 500 - - -def benchmark_pcf(): - """Benchmark PCF performance.""" - print("\n" + "=" * 60) - print("BENCHMARK: PCF (100x100, 10 runs)") - print("=" * 60) - - _, _, compute_all_pcfs_fast, _ = get_modules() - - np.random.seed(42) - grid = np.zeros((100, 100), dtype=np.int32) - positions = np.random.permutation(10000) - for p in positions[:3000]: - grid[p // 100, p % 100] = 1 - for p in positions[3000:4500]: - grid[p // 100, p % 100] = 2 - - print(f" Grid: {np.sum(grid==1)} prey, {np.sum(grid==2)} pred") - - # Warmup - _ = compute_all_pcfs_fast(grid, 20.0, 20) - - # Benchmark - t0 = time.perf_counter() - for _ in range(10): - _ = compute_all_pcfs_fast(grid, 20.0, 20) - elapsed = (time.perf_counter() - t0) / 10 * 1000 - - print(f" Per call: {elapsed:.1f}ms") - return elapsed - - -def benchmark_full_sim(): - """Benchmark complete simulation.""" - print("\n" + "=" * 60) - print("BENCHMARK: Full Simulation") - print("=" * 60) - - _, PPKernel, compute_all_pcfs_fast, measure_cluster_sizes_fast = get_modules() - - np.random.seed(42) - grid = np.random.choice([0, 1, 2], (100, 100), p=[0.55, 0.30, 0.15]).astype( - np.int32 - ) - prey_death = np.full((100, 100), 0.05, dtype=np.float64) - prey_death[grid != 1] = np.nan - - kernel = PPKernel(100, 100, "moore") - - t0 = time.perf_counter() - - # Warmup (200 steps) - for _ in range(200): - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1) - t_warmup = (time.perf_counter() - t0) * 1000 - - # Measurement (300 steps) - for _ in range(300): - kernel.update(grid, prey_death, 0.2, 0.05, 0.2, 0.1) - t_measure = (time.perf_counter() - t0) * 1000 - t_warmup - - # Clusters - _ = measure_cluster_sizes_fast(grid, 1) - _ = measure_cluster_sizes_fast(grid, 2) - 
t_cluster = (time.perf_counter() - t0) * 1000 - t_warmup - t_measure - - # PCF - _ = compute_all_pcfs_fast(grid, 20.0, 20) - t_pcf = (time.perf_counter() - t0) * 1000 - t_warmup - t_measure - t_cluster - - total = (time.perf_counter() - t0) * 1000 - - print(f" Warmup (200): {t_warmup:.1f}ms") - print(f" Measure (300): {t_measure:.1f}ms") - print(f" Clusters: {t_cluster:.1f}ms") - print(f" PCF: {t_pcf:.1f}ms") - print(f" ─────────────────────────") - print(f" TOTAL: {total:.1f}ms") - return total - - -def estimate_sweep(): - """Estimate sweep time.""" - print("\n" + "=" * 60) - print("ESTIMATE: Full Sweep Runtime") - print("=" * 60) - - sim_time = benchmark_full_sim() - - n_sims = 15 * 15 * 50 * 2 # 22,500 - total_ms = n_sims * sim_time - - print(f"\n Single sim: {sim_time:.1f}ms") - print(f" Total sims: {n_sims:,}") - print(f"\n Estimated time:") - print(f" 1 core: {total_ms/3600000:.1f} hours") - print(f" 8 cores: {total_ms/3600000/8:.1f} hours") - print( - f" 32 cores: {total_ms/3600000/32:.2f} hours ({total_ms/60000/32:.1f} min)" - ) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--full", action="store_true", help="Run full benchmark") - args = parser.parse_args() - - print("\n" + "=" * 60) - print("PP ANALYSIS OPTIMIZATION TESTS") - print("=" * 60) - - # Run tests - results = [ - ("Numba", test_numba()), - ("Kernel", test_kernel()), - ("PCF", test_pcf()), - ("Clusters", test_clusters()), - ] - - # Benchmarks - kernel_time = benchmark_kernel() - pcf_time = benchmark_pcf() - - if args.full: - estimate_sweep() - - print("\n" + "=" * 60) - print("SUMMARY") - print("=" * 60) - print(f" Kernel: {kernel_time:.3f}ms/step") - print(f" PCF: {pcf_time:.1f}ms/call") - print("\nAll tests passed!") - - -if __name__ == "__main__": - main() diff --git a/tests/test_pp.py b/tests/test_pp.py deleted file mode 100644 index 2c9955c..0000000 --- a/tests/test_pp.py +++ /dev/null @@ -1,187 +0,0 @@ -import pytest -import numpy as np -import sys -import os 
- -# Ensure we can import the model from the current directory -sys.path.append(os.getcwd()) - -# Try importing the classes; fail gracefully if file is missing -try: - from models.CA import PP -except ImportError: - pytest.fail("Could not import 'PP' from 'ca_model.py'. Make sure the file exists.") - -# --- FIXTURES --- - - -@pytest.fixture -def base_params(): - """Standard robust parameters for testing.""" - return { - "prey_death": 0.05, - "predator_death": 0.1, - "prey_birth": 0.25, - "predator_birth": 0.2, - } - - -@pytest.fixture -def seed(): - """Fixed seed for reproducibility.""" - return 42 - - -# --- TESTS --- - - -def test_initialization(base_params, seed): - """Test grid setup, shapes, and density distribution.""" - rows, cols = 50, 50 - densities = (0.2, 0.1) # 20% prey, 10% predator - - pp = PP(rows, cols, densities, params=base_params, seed=seed) - - # Check grid dimensions - assert pp.grid.shape == (rows, cols) - - # Check population counts (approximate) - total_cells = rows * cols - prey_count = np.sum(pp.grid == 1) - pred_count = np.sum(pp.grid == 2) - - # Allow small variance due to randomness - tolerance = total_cells * 0.05 - assert abs(prey_count - total_cells * 0.2) < tolerance - assert abs(pred_count - total_cells * 0.1) < tolerance - - -def test_async_update_changes_grid(base_params, seed): - """Test if Asynchronous update actually modifies the grid.""" - pp = PP(20, 20, (0.5, 0.2), synchronous=False, params=base_params, seed=seed) - initial_grid = pp.grid.copy() - - pp.update() - - # In a generic CA step with these densities, the grid MUST change - assert not np.array_equal( - pp.grid, initial_grid - ), "Grid did not change after Async update" - - -def test_sync_update_changes_grid(base_params, seed): - """Test if Synchronous update actually modifies the grid.""" - pp = PP(20, 20, (0.5, 0.2), synchronous=True, params=base_params, seed=seed) - initial_grid = pp.grid.copy() - - pp.update() - - assert not np.array_equal( - pp.grid, 
initial_grid - ), "Grid did not change after Sync update" - - -def test_prey_growth_in_isolation(seed): - """Prey should grow if there are no predators and high birth rate.""" - growth_params = { - "prey_death": 0.0, - "predator_death": 1.0, # Kill any accidental predators - "prey_birth": 1.0, # Max birth rate - "predator_birth": 0.0, - } - # Start with only prey (10%) - pp = PP(20, 20, (0.1, 0.0), params=growth_params, synchronous=True, seed=seed) - - start_count = np.sum(pp.grid == 1) - pp.update() - end_count = np.sum(pp.grid == 1) - - assert end_count > start_count, "Prey did not grow in isolation" - - -def test_predator_starvation(seed): - """Predators should die if there is no prey.""" - starve_params = { - "prey_death": 0.0, - "predator_death": 0.5, # High death rate - "prey_birth": 0.0, - "predator_birth": 1.0, - } - # Start with only predators (50%) - pp = PP(20, 20, (0.0, 0.5), params=starve_params, synchronous=True, seed=seed) - - start_count = np.sum(pp.grid == 2) - pp.update() - end_count = np.sum(pp.grid == 2) - - assert end_count < start_count, "Predators did not die from starvation" - - -def test_parameter_evolution(base_params, seed): - """Test if per-cell parameters initialize and mutate correctly.""" - pp = PP(30, 30, (0.3, 0.1), params=base_params, seed=seed) - - # Enable evolution for 'prey_death' - pp.evolve("prey_death", sd=0.05) - - # Check key existence - assert "prey_death" in pp.cell_params - - # Check initialization logic - param_grid = pp.cell_params["prey_death"] - prey_mask = pp.grid == 1 - - # Values should exist where prey exists - assert np.all(~np.isnan(param_grid[prey_mask])) - # Values should be NaN where prey does NOT exist - assert np.all(np.isnan(param_grid[~prey_mask])) - - # Run updates to force reproduction and mutation - for _ in range(5): - pp.update() - - # Check for parameter drift (variance) - current_vals = pp.cell_params["prey_death"] - valid_vals = current_vals[~np.isnan(current_vals)] - - # If mutation is working, 
we expect the values to diverge from the initial scalar - if len(valid_vals) > 5: - assert ( - np.std(valid_vals) > 0.0 - ), "Parameters did not mutate/drift (variance is 0)" - - -def test_stability_long_run(base_params, seed): - """Run for 100 steps to ensure no immediate crash/extinction with default params.""" - pp = PP(50, 50, (0.2, 0.1), synchronous=True, params=base_params, seed=seed) - - extinct = False - for _ in range(100): - pp.update() - n_prey = np.sum(pp.grid == 1) - n_pred = np.sum(pp.grid == 2) - - # We consider 'extinct' if either species drops to 0 - if n_prey == 0 or n_pred == 0: - extinct = True - break - - assert ( - not extinct - ), "Populations went extinct within 100 steps with default parameters" - - -def test_viz_smoke_test(): - """Ensure visualize() can be called without error (requires matplotlib).""" - try: - import matplotlib.pyplot as plt - except ImportError: - pytest.skip("Matplotlib not installed") - - try: - pp = PP(10, 10, (0.2, 0.1)) - # Just initialize visualization, don't keep window open - pp.visualize(interval=1, pause=0.001) - plt.close("all") # Cleanup figures - except Exception as e: - pytest.fail(f"visualize() raised an exception: {e}") diff --git a/tests/test_pp_analysis.py b/tests/test_pp_analysis.py deleted file mode 100644 index 5aea5f5..0000000 --- a/tests/test_pp_analysis.py +++ /dev/null @@ -1,1046 +0,0 @@ -#!/usr/bin/env python3 -""" -Unit Tests for pp_analysis.py - -Run with: - pytest test_pp_analysis.py -v - pytest test_pp_analysis.py -v -x # stop on first failure -""" - -import sys -import tempfile -import numpy as np -import pytest -from pathlib import Path - -# Setup path -project_root = str(Path(__file__).resolve().parents[1]) -scripts_dir = str(Path(__file__).resolve().parent) -for p in [project_root, scripts_dir]: - if p not in sys.path: - sys.path.insert(0, p) - -# Import module under test -try: - from scripts.experiments import ( - Config, - count_populations, - get_evolved_stats, - truncated_power_law, 
- fit_truncated_power_law, - average_pcfs, - save_sweep_binary, - load_sweep_binary, - run_single_simulation, - run_single_simulation_fss, - ) -except ImportError: - from scripts.experiments import ( - Config, - count_populations, - get_evolved_stats, - truncated_power_law, - fit_truncated_power_law, - average_pcfs, - save_sweep_binary, - load_sweep_binary, - run_single_simulation, - run_single_simulation_fss, - ) - -# Check if CA module is available -try: - from models.CA import PP - - CA_AVAILABLE = True -except ImportError: - try: - from CA import PP - - CA_AVAILABLE = True - except ImportError: - CA_AVAILABLE = False - - -# ============================================================================ -# FIXTURES -# ============================================================================ - - -@pytest.fixture -def default_config(): - """Default configuration.""" - return Config() - - -@pytest.fixture -def fast_config(): - """Fast configuration for quick tests.""" - cfg = Config() - cfg.default_grid = 30 - cfg.warmup_steps = 20 - cfg.measurement_steps = 30 - cfg.cluster_samples = 1 - cfg.collect_pcf = False - return cfg - - -@pytest.fixture -def fast_config_directed(): - """Fast configuration with directed hunting enabled.""" - cfg = Config() - cfg.default_grid = 30 - cfg.warmup_steps = 20 - cfg.measurement_steps = 30 - cfg.cluster_samples = 1 - cfg.collect_pcf = False - cfg.directed_hunting = True - return cfg - - -@pytest.fixture -def sample_grid(): - """Sample grid for population counting tests.""" - grid = np.array( - [ - [0, 1, 1, 0, 2], - [1, 0, 0, 2, 1], - [0, 2, 1, 0, 0], - [1, 0, 0, 1, 2], - [2, 1, 0, 0, 0], - ], - dtype=np.int32, - ) - return grid - - -@pytest.fixture -def temp_dir(): - """Temporary directory for file tests.""" - with tempfile.TemporaryDirectory() as tmpdir: - yield Path(tmpdir) - - -# ============================================================================ -# TEST: CONFIG CLASS -# 
============================================================================ - - -class TestConfig: - """Tests for the Config dataclass.""" - - def test_config_defaults_exist(self, default_config): - """Config should have all expected default attributes.""" - assert hasattr(default_config, "default_grid") - assert hasattr(default_config, "n_prey_birth") - assert hasattr(default_config, "n_prey_death") - assert hasattr(default_config, "n_replicates") - assert hasattr(default_config, "warmup_steps") - assert hasattr(default_config, "measurement_steps") - - def test_config_default_values(self, default_config): - """Config should have sensible defaults.""" - assert default_config.default_grid == 100 - assert default_config.n_prey_birth == 15 - assert default_config.n_prey_death == 15 - assert default_config.n_replicates == 50 - assert default_config.warmup_steps > 0 - assert default_config.measurement_steps > 0 - - def test_config_parameter_ranges_valid(self, default_config): - """Parameter ranges should be valid.""" - assert default_config.prey_birth_min < default_config.prey_birth_max - assert default_config.prey_death_min < default_config.prey_death_max - assert 0 < default_config.prey_birth_min < 1 - assert 0 < default_config.prey_death_max < 1 - - def test_config_get_prey_births(self, default_config): - """get_prey_births should return correct array.""" - births = default_config.get_prey_births() - - assert len(births) == default_config.n_prey_birth - assert np.isclose(births[0], default_config.prey_birth_min) - assert np.isclose(births[-1], default_config.prey_birth_max) - assert np.all(np.diff(births) > 0) - - def test_config_get_prey_deaths(self, default_config): - """get_prey_deaths should return correct array.""" - deaths = default_config.get_prey_deaths() - - assert len(deaths) == default_config.n_prey_death - assert np.isclose(deaths[0], default_config.prey_death_min) - assert np.isclose(deaths[-1], default_config.prey_death_max) - assert 
np.all(np.diff(deaths) > 0) - - def test_config_get_prey_births_custom(self, default_config): - """get_prey_births should respect custom config.""" - default_config.n_prey_birth = 5 - default_config.prey_birth_min = 0.1 - default_config.prey_birth_max = 0.5 - - births = default_config.get_prey_births() - - assert len(births) == 5 - assert np.isclose(births[0], 0.1) - assert np.isclose(births[-1], 0.5) - - def test_config_estimate_runtime(self, default_config): - """estimate_runtime should return a string.""" - estimate = default_config.estimate_runtime(32) - - assert isinstance(estimate, str) - assert "sims" in estimate.lower() - - def test_config_evolution_bounds(self, default_config): - """Evolution bounds should be valid.""" - assert default_config.evolve_min < default_config.evolve_max - assert default_config.evolve_min > 0 - assert default_config.evolve_sd > 0 - - def test_config_fss_grid_sizes(self, default_config): - """FSS grid sizes should be in ascending order.""" - sizes = default_config.fss_grid_sizes - assert list(sizes) == sorted(sizes) - assert len(sizes) > 0 - - def test_config_pcf_sample_rate(self, default_config): - """PCF sample rate should be between 0 and 1.""" - assert 0 <= default_config.pcf_sample_rate <= 1 - - def test_config_directed_hunting_default(self, default_config): - """Config should have directed_hunting attribute defaulting to False.""" - assert hasattr(default_config, "directed_hunting") - assert default_config.directed_hunting == False - - def test_config_directed_hunting_settable(self, default_config): - """directed_hunting should be settable.""" - default_config.directed_hunting = True - assert default_config.directed_hunting == True - - -# ============================================================================ -# TEST: HELPER FUNCTIONS -# ============================================================================ - - -class TestCountPopulations: - """Tests for count_populations function.""" - - def 
test_count_populations_basic(self, sample_grid): - """count_populations should correctly count each state.""" - empty, prey, pred = count_populations(sample_grid) - - # Verify by manual count using numpy - expected_empty = int(np.sum(sample_grid == 0)) - expected_prey = int(np.sum(sample_grid == 1)) - expected_pred = int(np.sum(sample_grid == 2)) - - assert empty == expected_empty - assert prey == expected_prey - assert pred == expected_pred - assert empty + prey + pred == sample_grid.size - - def test_count_populations_empty_grid(self): - """count_populations should handle empty grid.""" - grid = np.zeros((10, 10), dtype=np.int32) - empty, prey, pred = count_populations(grid) - - assert empty == 100 - assert prey == 0 - assert pred == 0 - - def test_count_populations_all_prey(self): - """count_populations should handle grid full of prey.""" - grid = np.ones((10, 10), dtype=np.int32) - empty, prey, pred = count_populations(grid) - - assert empty == 0 - assert prey == 100 - assert pred == 0 - - def test_count_populations_all_pred(self): - """count_populations should handle grid full of predators.""" - grid = np.full((10, 10), 2, dtype=np.int32) - empty, prey, pred = count_populations(grid) - - assert empty == 0 - assert prey == 0 - assert pred == 100 - - -class TestGetEvolvedStats: - """Tests for get_evolved_stats function.""" - - def test_get_evolved_stats_with_values(self): - """get_evolved_stats should compute statistics correctly.""" - - class MockModel: - cell_params = {"prey_death": np.array([[0.05, 0.06], [np.nan, 0.04]])} - - stats = get_evolved_stats(MockModel(), "prey_death") - - assert "mean" in stats - assert "std" in stats - assert "n" in stats - assert stats["n"] == 3 - assert np.isclose(stats["mean"], 0.05, atol=0.01) - - def test_get_evolved_stats_missing_param(self): - """get_evolved_stats should handle missing parameter.""" - - class MockModel: - cell_params = {} - - stats = get_evolved_stats(MockModel(), "prey_death") - - assert 
np.isnan(stats["mean"]) - assert stats["n"] == 0 - - def test_get_evolved_stats_all_nan(self): - """get_evolved_stats should handle all-NaN array.""" - - class MockModel: - cell_params = {"prey_death": np.array([[np.nan, np.nan], [np.nan, np.nan]])} - - stats = get_evolved_stats(MockModel(), "prey_death") - - assert np.isnan(stats["mean"]) - assert stats["n"] == 0 - - def test_get_evolved_stats_single_value(self): - """get_evolved_stats should handle single non-NaN value.""" - - class MockModel: - cell_params = {"prey_death": np.array([[np.nan, 0.07], [np.nan, np.nan]])} - - stats = get_evolved_stats(MockModel(), "prey_death") - - assert np.isclose(stats["mean"], 0.07) - assert stats["n"] == 1 - - -# ============================================================================ -# TEST: POWER LAW FITTING -# ============================================================================ - - -class TestTruncatedPowerLaw: - """Tests for truncated_power_law function.""" - - def test_truncated_power_law_shape(self): - """truncated_power_law should return correct shape.""" - s = np.array([1, 2, 3, 4, 5]) - result = truncated_power_law(s, tau=2.0, s_c=100.0, A=1.0) - - assert result.shape == s.shape - - def test_truncated_power_law_decreasing(self): - """truncated_power_law should be decreasing.""" - s = np.linspace(1, 100, 50) - result = truncated_power_law(s, tau=2.0, s_c=1000.0, A=1.0) - - assert np.all(np.diff(result) < 0) - - def test_truncated_power_law_positive(self): - """truncated_power_law should always return positive values.""" - s = np.linspace(1, 1000, 100) - result = truncated_power_law(s, tau=2.5, s_c=500.0, A=1.0) - - assert np.all(result > 0) - - def test_truncated_power_law_cutoff_effect(self): - """Smaller cutoff should cause faster decay.""" - s = np.linspace(1, 100, 50) - result_large = truncated_power_law(s, tau=2.0, s_c=10000.0, A=1.0) - result_small = truncated_power_law(s, tau=2.0, s_c=50.0, A=1.0) - - assert result_small[-1] < result_large[-1] - - 
-class TestFitTruncatedPowerLaw: - """Tests for fit_truncated_power_law function.""" - - def test_fit_insufficient_data(self): - """fit_truncated_power_law should handle insufficient data.""" - sizes = np.array([1, 2, 3]) - result = fit_truncated_power_law(sizes) - - assert result["valid"] == False - assert np.isnan(result["tau"]) - - def test_fit_empty_data(self): - """fit_truncated_power_law should handle empty data.""" - sizes = np.array([]) - result = fit_truncated_power_law(sizes) - - assert result["valid"] == False - - def test_fit_returns_required_keys(self): - """fit_truncated_power_law should return required keys.""" - np.random.seed(42) - sizes = (np.random.pareto(1.5, 500) + 1).astype(int) - sizes = sizes[sizes >= 2] - - result = fit_truncated_power_law(sizes) - - # Check only the keys that are actually returned - assert "tau" in result - assert "s_c" in result - assert "valid" in result - assert "n" in result - - -# ============================================================================ -# TEST: PCF AVERAGING -# ============================================================================ - - -class TestAveragePCFs: - """Tests for average_pcfs function.""" - - def test_average_pcfs_empty(self): - """average_pcfs should handle empty list.""" - distances, mean, se = average_pcfs([]) - - assert len(distances) == 0 - assert len(mean) == 0 - assert len(se) == 0 - - def test_average_pcfs_single(self): - """average_pcfs should handle single PCF.""" - dist = np.array([1.0, 2.0, 3.0]) - pcf = np.array([1.5, 1.2, 1.0]) - - distances, mean, se = average_pcfs([(dist, pcf, 100)]) - - np.testing.assert_array_equal(distances, dist) - np.testing.assert_array_equal(mean, pcf) - np.testing.assert_array_equal(se, np.zeros(3)) - - def test_average_pcfs_multiple(self): - """average_pcfs should correctly average multiple PCFs.""" - dist = np.array([1.0, 2.0, 3.0]) - pcf1 = np.array([1.0, 1.0, 1.0]) - pcf2 = np.array([2.0, 2.0, 2.0]) - - distances, mean, se = 
average_pcfs( - [ - (dist, pcf1, 100), - (dist, pcf2, 100), - ] - ) - - np.testing.assert_array_almost_equal(mean, [1.5, 1.5, 1.5]) - assert np.all(se > 0) - - def test_average_pcfs_preserves_length(self): - """average_pcfs should preserve bin count.""" - n_bins = 20 - dist = np.linspace(0.5, 19.5, n_bins) - pcf = np.ones(n_bins) - - distances, mean, se = average_pcfs([(dist, pcf, 100)] * 5) - - assert len(distances) == n_bins - assert len(mean) == n_bins - assert len(se) == n_bins - - -# ============================================================================ -# TEST: BINARY SAVE/LOAD -# ============================================================================ - - -class TestBinarySaveLoad: - """Tests for binary save/load functions.""" - - def test_save_creates_file(self, temp_dir): - """save_sweep_binary should create a file.""" - results = [{"prey_birth": 0.2, "prey_mean": 100.0}] - filepath = temp_dir / "test.npz" - - assert not filepath.exists() - save_sweep_binary(results, filepath) - assert filepath.exists() - - def test_save_load_roundtrip(self, temp_dir): - """save and load should roundtrip correctly.""" - results = [ - { - "prey_birth": 0.2, - "prey_death": 0.05, - "prey_mean": 100.0, - "with_evolution": False, - "seed": 1, - }, - { - "prey_birth": 0.3, - "prey_death": 0.06, - "prey_mean": 150.0, - "with_evolution": True, - "seed": 2, - }, - ] - - filepath = temp_dir / "test.npz" - save_sweep_binary(results, filepath) - loaded = load_sweep_binary(filepath) - - assert len(loaded) == len(results) - - for orig, load in zip(results, loaded): - for key in orig: - assert key in load - if isinstance(orig[key], float): - assert np.isclose(orig[key], load[key]) - else: - assert orig[key] == load[key] - - def test_save_empty_results(self, temp_dir): - """save_sweep_binary should handle empty results.""" - filepath = temp_dir / "empty.npz" - - save_sweep_binary([], filepath) - loaded = load_sweep_binary(filepath) - - assert len(loaded) == 0 - - def 
test_save_complex_results(self, temp_dir): - """save_sweep_binary should handle complex result dicts.""" - results = [ - { - "prey_birth": 0.2, - "prey_death": 0.05, - "prey_mean": 100.5, - "prey_std": 10.2, - "pred_mean": 50.3, - "pred_std": 5.1, - "with_evolution": True, - "seed": 42, - "grid_size": 100, - "prey_survived": True, - "pred_survived": True, - } - ] - - filepath = temp_dir / "complex.npz" - save_sweep_binary(results, filepath) - loaded = load_sweep_binary(filepath) - - assert len(loaded) == 1 - assert np.isclose(loaded[0]["prey_mean"], 100.5) - assert loaded[0]["seed"] == 42 - - -# ============================================================================ -# TEST: SIMULATION FUNCTIONS (require CA module) -# ============================================================================ - - -@pytest.mark.skipif(not CA_AVAILABLE, reason="CA module not available") -class TestRunSingleSimulation: - """Tests for run_single_simulation function.""" - - @pytest.fixture(autouse=True) - def setup(self, fast_config): - """Setup fast config for all tests.""" - self.cfg = fast_config - - def test_returns_dict(self): - """run_single_simulation should return a dictionary.""" - result = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - with_evolution=False, - cfg=self.cfg, - ) - - assert isinstance(result, dict) - - def test_required_keys_present(self): - """run_single_simulation should return all required keys.""" - result = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - with_evolution=False, - cfg=self.cfg, - ) - - required_keys = [ - "prey_birth", - "prey_death", - "grid_size", - "with_evolution", - "seed", - "prey_mean", - "prey_std", - "pred_mean", - "pred_std", - "prey_survived", - "pred_survived", - ] - - for key in required_keys: - assert key in result, f"Missing key: {key}" - - def test_parameters_recorded(self): - """Input parameters should be recorded in output.""" - result = 
run_single_simulation( - prey_birth=0.25, - prey_death=0.08, - grid_size=30, - seed=123, - with_evolution=False, - cfg=self.cfg, - ) - - assert np.isclose(result["prey_birth"], 0.25) - assert np.isclose(result["prey_death"], 0.08) - assert result["grid_size"] == 30 - assert result["seed"] == 123 - assert result["with_evolution"] == False - - def test_values_reasonable(self): - """Output values should be reasonable.""" - result = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - with_evolution=False, - cfg=self.cfg, - ) - - assert result["prey_mean"] >= 0 - assert result["pred_mean"] >= 0 - assert result["prey_std"] >= 0 - assert result["pred_std"] >= 0 - - max_pop = 30 * 30 - assert result["prey_mean"] <= max_pop - assert result["pred_mean"] <= max_pop - - def test_with_evolution_flag(self): - """with_evolution flag should be recorded.""" - result_no = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - with_evolution=False, - cfg=self.cfg, - ) - result_yes = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - with_evolution=True, - cfg=self.cfg, - ) - - assert result_no["with_evolution"] == False - assert result_yes["with_evolution"] == True - - def test_survival_flags(self): - """Survival flags should be boolean.""" - result = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - with_evolution=False, - cfg=self.cfg, - ) - - assert isinstance(result["prey_survived"], bool) - assert isinstance(result["pred_survived"], bool) - - -@pytest.mark.skipif(not CA_AVAILABLE, reason="CA module not available") -class TestDirectedHunting: - """Tests for directed hunting functionality in simulations.""" - - @pytest.fixture(autouse=True) - def setup(self, fast_config): - """Setup fast config for all tests.""" - self.cfg = fast_config - self.cfg.directed_hunting = False # Default to False for comparison - - def 
test_simulation_with_directed_hunting_false(self): - """Simulation should work with directed_hunting=False.""" - self.cfg.directed_hunting = False - - result = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - with_evolution=False, - cfg=self.cfg, - ) - - assert isinstance(result, dict) - assert "prey_mean" in result - assert result["prey_mean"] >= 0 - - def test_simulation_with_directed_hunting_true(self): - """Simulation should work with directed_hunting=True.""" - self.cfg.directed_hunting = True - - result = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - with_evolution=False, - cfg=self.cfg, - ) - - assert isinstance(result, dict) - assert "prey_mean" in result - assert result["prey_mean"] >= 0 - - def test_directed_hunting_changes_dynamics(self): - """Directed hunting should produce different population dynamics.""" - # Run with random movement - self.cfg.directed_hunting = False - result_random = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=40, - seed=42, - with_evolution=False, - cfg=self.cfg, - ) - - # Run with directed hunting - self.cfg.directed_hunting = True - result_directed = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=40, - seed=42, - with_evolution=False, - cfg=self.cfg, - ) - - # Both should produce valid results - assert result_random["prey_mean"] >= 0 - assert result_directed["prey_mean"] >= 0 - - # Note: We don't assert they're different because stochastic dynamics - # means they could occasionally be similar. Just verify both run. 
- print(f"Random: prey_mean={result_random['prey_mean']:.1f}") - print(f"Directed: prey_mean={result_directed['prey_mean']:.1f}") - - def test_directed_hunting_with_evolution(self): - """Directed hunting should work with evolution enabled.""" - self.cfg.directed_hunting = True - - result = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - with_evolution=True, - cfg=self.cfg, - ) - - assert isinstance(result, dict) - assert result["with_evolution"] == True - - # Should have evolved death rate stats - if result.get("prey_survived", False): - # If prey survived, we should have evolution stats - assert "evolved_death_mean" in result or "prey_mean" in result - - def test_directed_hunting_multiple_seeds(self): - """Directed hunting should work with multiple seeds.""" - self.cfg.directed_hunting = True - - results = [] - for seed in [1, 2, 3, 4, 5]: - result = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=seed, - with_evolution=False, - cfg=self.cfg, - ) - results.append(result) - - assert len(results) == 5 - for r in results: - assert "prey_mean" in r - assert r["prey_mean"] >= 0 - - def test_directed_hunting_high_predator_birth(self): - """Directed hunting with high predator birth should deplete prey faster.""" - self.cfg.directed_hunting = True - self.cfg.predator_birth = 0.8 # High predator birth rate - - result = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - with_evolution=False, - cfg=self.cfg, - ) - - # With high predator birth and directed hunting, prey often go extinct - assert isinstance(result, dict) - # Don't assert extinction - just that it ran successfully - - -@pytest.mark.skipif(not CA_AVAILABLE, reason="CA module not available") -class TestRunSingleSimulationFSS: - """Tests for run_single_simulation_fss function.""" - - @pytest.fixture(autouse=True) - def setup(self, fast_config): - """Setup fast config for all tests.""" - self.cfg = 
fast_config - - def test_returns_dict(self): - """run_single_simulation_fss should return a dictionary.""" - result = run_single_simulation_fss( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - cfg=self.cfg, - warmup_steps=20, - measurement_steps=30, - ) - - assert isinstance(result, dict) - - def test_required_keys_present(self): - """run_single_simulation_fss should return required keys.""" - result = run_single_simulation_fss( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - cfg=self.cfg, - warmup_steps=20, - measurement_steps=30, - ) - - required_keys = [ - "prey_birth", - "prey_death", - "grid_size", - "seed", - "warmup_steps", - "measurement_steps", - "prey_mean", - "prey_std", - "pred_mean", - "pred_std", - ] - - for key in required_keys: - assert key in result, f"Missing key: {key}" - - def test_steps_recorded(self): - """warmup and measurement steps should be recorded.""" - result = run_single_simulation_fss( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - cfg=self.cfg, - warmup_steps=50, - measurement_steps=100, - ) - - assert result["warmup_steps"] == 50 - assert result["measurement_steps"] == 100 - - def test_different_grid_sizes(self): - """Should work with different grid sizes.""" - for size in [20, 30, 40]: - result = run_single_simulation_fss( - prey_birth=0.2, - prey_death=0.05, - grid_size=size, - seed=42, - cfg=self.cfg, - warmup_steps=20, - measurement_steps=30, - ) - - assert result["grid_size"] == size - assert result["prey_mean"] >= 0 - - def test_fss_with_directed_hunting(self): - """FSS simulation should work with directed hunting.""" - self.cfg.directed_hunting = True - - result = run_single_simulation_fss( - prey_birth=0.2, - prey_death=0.05, - grid_size=30, - seed=42, - cfg=self.cfg, - warmup_steps=20, - measurement_steps=30, - ) - - assert isinstance(result, dict) - assert "prey_mean" in result - - -# ============================================================================ -# TEST: 
PARAMETER SWEEP LOGIC -# ============================================================================ - - -class TestParameterSweepLogic: - """Tests for parameter sweep generation logic.""" - - def test_parameter_grid_coverage(self, default_config): - """Parameter sweep should cover entire grid.""" - births = default_config.get_prey_births() - deaths = default_config.get_prey_deaths() - - assert np.isclose(births[0], default_config.prey_birth_min) - assert np.isclose(births[-1], default_config.prey_birth_max) - assert np.isclose(deaths[0], default_config.prey_death_min) - assert np.isclose(deaths[-1], default_config.prey_death_max) - - def test_total_simulations_formula(self, default_config): - """Verify total simulation count formula.""" - n_params = default_config.n_prey_birth * default_config.n_prey_death - n_replicates = default_config.n_replicates - n_evolution = 2 - - expected_total = n_params * n_replicates * n_evolution - - # Default: 15 * 15 * 50 * 2 = 22,500 - assert expected_total == 15 * 15 * 50 * 2 - - def test_custom_config_grid(self, default_config): - """Custom config should produce correct parameter counts.""" - default_config.n_prey_birth = 5 - default_config.n_prey_death = 7 - - births = default_config.get_prey_births() - deaths = default_config.get_prey_deaths() - - assert len(births) == 5 - assert len(deaths) == 7 - - -# ============================================================================ -# TEST: INTEGRATION -# ============================================================================ - - -@pytest.mark.skipif(not CA_AVAILABLE, reason="CA module not available") -class TestIntegration: - """Integration tests verifying components work together.""" - - @pytest.fixture(autouse=True) - def setup(self, fast_config, temp_dir): - """Setup for all tests.""" - self.cfg = fast_config - self.temp_dir = temp_dir - - def test_simulation_to_binary_roundtrip(self): - """Simulation results should roundtrip through binary format.""" - result = 
run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=25, - seed=42, - with_evolution=True, - cfg=self.cfg, - ) - - filepath = self.temp_dir / "roundtrip.npz" - save_sweep_binary([result], filepath) - loaded = load_sweep_binary(filepath) - - assert len(loaded) == 1 - assert np.isclose(loaded[0]["prey_birth"], result["prey_birth"]) - assert np.isclose(loaded[0]["prey_mean"], result["prey_mean"]) - - def test_multiple_simulations(self): - """Multiple simulations should run without interference.""" - results = [] - - for seed in [1, 2, 3]: - result = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=25, - seed=seed, - with_evolution=False, - cfg=self.cfg, - ) - results.append(result) - - assert len(results) == 3 - for r in results: - assert "prey_mean" in r - assert r["prey_mean"] >= 0 - - def test_evolution_vs_no_evolution(self): - """Evolution flag should be recorded correctly.""" - result_no = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=25, - seed=42, - with_evolution=False, - cfg=self.cfg, - ) - result_yes = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=25, - seed=42, - with_evolution=True, - cfg=self.cfg, - ) - - assert result_no["with_evolution"] == False - assert result_yes["with_evolution"] == True - - def test_directed_hunting_binary_roundtrip(self): - """Directed hunting results should roundtrip through binary format.""" - self.cfg.directed_hunting = True - - result = run_single_simulation( - prey_birth=0.2, - prey_death=0.05, - grid_size=25, - seed=42, - with_evolution=False, - cfg=self.cfg, - ) - - filepath = self.temp_dir / "directed_roundtrip.npz" - save_sweep_binary([result], filepath) - loaded = load_sweep_binary(filepath) - - assert len(loaded) == 1 - assert np.isclose(loaded[0]["prey_birth"], result["prey_birth"]) - assert np.isclose(loaded[0]["prey_mean"], result["prey_mean"]) - - -# 
============================================================================ -# MAIN -# ============================================================================ - -if __name__ == "__main__": - pytest.main([__file__, "-v"])