Fix stacking for large image counts (98+): single-pass, auto-scale, 10min timeou...

File: backend/stacking.py
   - median : per-pixel median (best noise rejection)
   - sum    : simple accumulation (highlights faint stars)
-All modes process images in horizontal strips to limit peak memory usage,
-making stacking feasible on memory-constrained devices like Raspberry Pi.
+Memory-efficient design for Raspberry Pi:
+  - Mean/sum: single pass through images, strip-based uint32 accumulators.
+    Each image is opened once and sliced into all strip accumulators.
+  - Median: strip height auto-scales based on image count to stay within
+    a configurable memory budget (~200 MiB default).
 """
 import logging
 StackMode = Literal["mean", "median", "sum"]
-# Number of pixel rows to process at a time.  Each strip holds at most
-# N * _STRIP_HEIGHT * W * 3 * 2 bytes (uint16 accumulator + uint8 frame).
-# At 512 rows and 6048 pixels wide that's ~18 MiB per strip for median
-# with 12 images (float32) or ~35 KiB per strip for mean/sum (uint32 acc).
-_STRIP_HEIGHT = 512
+# Default strip height for mean/sum accumulation.  In single-pass mode all
+# strip accumulators are live at once, so this only sets progress-logging
+# granularity; in multi-pass mode it also determines how many strips fit
+# within one pass's accumulator memory budget.
+_ACC_STRIP_HEIGHT = 512
+
+# Peak memory budget (bytes) for the median strip array.
+# strip_array = N * strip_h * W * 3 bytes (uint8).
+_MEDIAN_MEMORY_BUDGET = 200 * 1024 * 1024  # 200 MiB
 def stack_images(
 def stack_images(
 ) -> Image.Image:
     """
     Stack a list of images using the specified mode and return a PIL Image.
-
-    All modes process images in horizontal strips so that peak memory stays
-    well below 100 MiB even for large (24 MP) images on a Raspberry Pi.
     """
     try:
         import numpy as np
 def stack_images(
     width, height = reference_size
     first.close()
-    result_strips: list = []
-
-    for y_start in range(0, height, _STRIP_HEIGHT):
-        y_end = min(y_start + _STRIP_HEIGHT, height)
+    if mode in ("mean", "sum"):
+        return _stack_accumulate(image_paths, mode, reference_size, width, height, np)
+    else:
+        return _stack_median(image_paths, reference_size, width, height, np)
-        if mode == "median":
-            strip = _median_strip(image_paths, reference_size, width, y_start, y_end, np)
-        else:
-            strip = _accumulate_strip(image_paths, reference_size, width, y_start, y_end, mode, np)
-        result_strips.append(strip)
-
-    result = np.concatenate(result_strips, axis=0)
-    return Image.fromarray(result, mode="RGB")
-
-
-def _open_strip(path: Path, reference_size, width, y_start, y_end):
-    """Open an image, crop to the strip, return as PIL Image."""
+def _open_image(path: Path, reference_size):
+    """Open an image and convert to RGB, resizing if needed."""
     img = Image.open(path).convert("RGB")
     if img.size != reference_size:
         logger.warning(
 def _open_strip(path: Path, reference_size, width, y_start, y_end):
             reference_size,
         )
         img = img.resize(reference_size, Image.LANCZOS)
-    return img.crop((0, y_start, width, y_end))
+    return img
+
+def _stack_accumulate(image_paths, mode, reference_size, width, height, np):
+    """Mean/sum stacking – single pass through all images.
-def _accumulate_strip(image_paths, reference_size, width, y_start, y_end, mode, np):
-    """Mean/sum for one horizontal strip using a uint32 accumulator.
+    Opens each image exactly once, converts to a numpy uint8 array, and
+    slices it into per-strip uint32 accumulators.  Total memory:
+      accumulators ≈ H * W * 3 * 4 bytes  (one full-frame uint32)
+      + one uint8 frame ≈ H * W * 3 bytes
+    For 6048x4024 that's ~279 MiB + ~70 MiB ≈ 349 MiB peak.
-    uint32 can hold 16,843,009 frames at max pixel value 255 before overflow,
-    so it's safe for any realistic number of images.  Peak memory per strip:
-    strip_height * width * 3 * 4 bytes (one uint32 accumulator) + one uint8
-    frame being added.
+    If that's too large, we fall back to a multi-pass approach that
+    processes batches of strips per pass to trade speed for memory.
     """
-    strip_h = y_end - y_start
-    accumulator = np.zeros((strip_h, width, 3), dtype=np.uint32)
     n = len(image_paths)
+    strip_height = _ACC_STRIP_HEIGHT
+
+    # Build strip boundaries covering the full image height; the last
+    # strip may be shorter than strip_height.
+    strip_ranges = []
+    for y in range(0, height, strip_height):
+        strip_ranges.append((y, min(y + strip_height, height)))
+
+    # Estimate peak memory: all accumulators + one uint8 frame.
+    acc_bytes = height * width * 3 * 4  # uint32
+    frame_bytes = height * width * 3    # uint8
+    total_bytes = acc_bytes + frame_bytes
+
+    # If total exceeds 400 MiB, use multi-pass to limit resident memory.
+    # NOTE(review): 400 MiB assumes that much RAM is actually free on the
+    # target device – confirm against the smallest supported Pi model.
+    max_single_pass = 400 * 1024 * 1024
+    if total_bytes <= max_single_pass:
+        return _accumulate_single_pass(
+            image_paths, mode, reference_size, width, strip_ranges, n, np
+        )
+    else:
+        return _accumulate_multi_pass(
+            image_paths, mode, reference_size, width, height, strip_ranges, n, np
+        )
+
+
+def _accumulate_single_pass(image_paths, mode, reference_size, width, strip_ranges, n, np):
+    """Open each image once, accumulate all strips in memory.
+
+    All strip accumulators stay live for the whole run, so the strip split
+    saves no memory on this path; it keeps the accumulation loop shaped
+    like the multi-pass variant and bounds each in-place add.
+    """
+    # Pre-allocate all strip accumulators.
+    accumulators = []
+    for y_start, y_end in strip_ranges:
+        accumulators.append(np.zeros((y_end - y_start, width, 3), dtype=np.uint32))
+
+    for idx, path in enumerate(image_paths):
+        if idx % 10 == 0:
+            logger.info("  accumulate: image %d/%d", idx + 1, n)
+        img = _open_image(path, reference_size)
+        arr = np.asarray(img, dtype=np.uint8)
+        for i, (y_start, y_end) in enumerate(strip_ranges):
+            accumulators[i] += arr[y_start:y_end]
+        # Free the PIL image and numpy view promptly so only one decoded
+        # frame is resident at a time.
+        img.close()
+        del arr
+
+    return _finalize_accumulator(accumulators, mode, n, np)
+
+
+def _accumulate_multi_pass(image_paths, mode, reference_size, width, height, strip_ranges, n, np):
+    """Process strips in batches, trading extra image opens for lower memory.
+
+    Each pass processes enough strips to stay under ~200 MiB of accumulator
+    memory, plus one full image decode (~70 MiB).
+    """
+    # How many strips can we fit in 200 MiB of accumulators?
+    bytes_per_strip_row = width * 3 * 4  # uint32
+    max_acc_bytes = 200 * 1024 * 1024
+    rows_budget = max_acc_bytes // bytes_per_strip_row
+    strips_per_pass = max(1, rows_budget // _ACC_STRIP_HEIGHT)
-    for path in image_paths:
-        strip_img = _open_strip(path, reference_size, width, y_start, y_end)
-        accumulator += np.array(strip_img, dtype=np.uint8)
+    logger.info(
+        "  multi-pass: %d strips total, %d per pass",
+        len(strip_ranges),
+        strips_per_pass,
+    )
+    all_results = []
+    for batch_start in range(0, len(strip_ranges), strips_per_pass):
+        batch = strip_ranges[batch_start : batch_start + strips_per_pass]
+        accumulators = []
+        for y_start, y_end in batch:
+            accumulators.append(np.zeros((y_end - y_start, width, 3), dtype=np.uint32))
+
+        for idx, path in enumerate(image_paths):
+            if idx % 20 == 0:
+                logger.info(
+                    "  multi-pass batch %d: image %d/%d",
+                    batch_start // strips_per_pass + 1,
+                    idx + 1,
+                    n,
+                )
+            img = _open_image(path, reference_size)
+            arr = np.asarray(img, dtype=np.uint8)
+            for i, (y_start, y_end) in enumerate(batch):
+                accumulators[i] += arr[y_start:y_end]
+            img.close()
+            del arr
+
+        all_results.extend(
+            _finalize_strips(accumulators, mode, n, np)
+        )
+
+    result = np.concatenate(all_results, axis=0)
+    return Image.fromarray(result, mode="RGB")
+
+
+def _finalize_strips(accumulators, mode, n, np):
+    """Convert a list of uint32 accumulator strips to uint8 results.
+
+    mode == "mean": divide by the image count n (float divide, then a
+    truncating cast – same rounding as the previous implementation).
+    mode == "sum":  normalise so the brightest accumulated pixel maps to
+    255, using the max across *all* supplied strips.
+    """
+    results = []
     if mode == "mean":
-        result = (accumulator / n).astype(np.uint8)
-    else:  # sum – normalise so brightest pixel maps to 255
-        max_val = accumulator.max()
-        if max_val > 0:
-            result = (accumulator * 255 // max_val).astype(np.uint8)
-        else:
-            result = accumulator.astype(np.uint8)
+        for acc in accumulators:
+            results.append((acc / n).astype(np.uint8))
+    else:  # sum
+        # Find global max across all strips for normalisation.
+        global_max = max(acc.max() for acc in accumulators)
+        for acc in accumulators:
+            if global_max > 0:
+                # acc * 255 stays inside uint32 while n * 255 * 255 < 2**32,
+                # i.e. up to ~66,000 images – safe for any realistic stack.
+                results.append((acc * 255 // global_max).astype(np.uint8))
+            else:
+                results.append(acc.astype(np.uint8))
+    return results
+
+
+def _finalize_accumulator(accumulators, mode, n, np):
+    """Finalize and concatenate all accumulator strips into a PIL Image."""
+    if mode == "sum":
+        # Need global max across all strips for normalisation.
+        global_max = max(acc.max() for acc in accumulators)
-    return result
+    results = []
+    for acc in accumulators:
+        if mode == "mean":
+            results.append((acc / n).astype(np.uint8))
+        else:  # sum
+            if global_max > 0:
+                results.append((acc * 255 // global_max).astype(np.uint8))
+            else:
+                results.append(acc.astype(np.uint8))
+
+    result = np.concatenate(results, axis=0)
+    return Image.fromarray(result, mode="RGB")
-def _median_strip(image_paths, reference_size, width, y_start, y_end, np):
-    """Median for one horizontal strip.
+def _stack_median(image_paths, reference_size, width, height, np):
+    """Median stacking with auto-scaled strip height.
-    Loads one strip per image into a (N, H, W, 3) uint8 array, computes
-    the per-pixel median, and returns the result as uint8.  Using uint8
-    instead of float32 halves memory (N * strip_h * W * 3 bytes).
+    The strip height is chosen so that the per-strip array
+    (N * strip_h * W * 3 bytes) stays within _MEDIAN_MEMORY_BUDGET.
     """
-    strip_h = y_end - y_start
-    # Use uint8 for storage, convert to float only for the median computation.
-    strips = np.empty((len(image_paths), strip_h, width, 3), dtype=np.uint8)
+    n = len(image_paths)
+    bytes_per_row = n * width * 3  # one row of all images, uint8
+
+    # Compute strip height that fits in memory budget.
+    strip_height = max(1, _MEDIAN_MEMORY_BUDGET // bytes_per_row)
+    # Clamp to something reasonable.
+    strip_height = min(strip_height, 512)
+
+    logger.info(
+        "  median: strip_height=%d (%.1f MiB per strip for %d images)",
+        strip_height,
+        n * strip_height * width * 3 / (1024 * 1024),
+        n,
+    )
+
+    result_strips = []
+
+    for y_start in range(0, height, strip_height):
+        y_end = min(y_start + strip_height, height)
+        strip_h = y_end - y_start
-    for i, path in enumerate(image_paths):
-        strip_img = _open_strip(path, reference_size, width, y_start, y_end)
-        strips[i] = np.array(strip_img, dtype=np.uint8)
+        logger.info("  median: rows %d–%d of %d", y_start, y_end, height)
-    median_strip = np.median(strips, axis=0)
-    return np.clip(median_strip, 0, 255).astype(np.uint8)
+        strips = np.empty((n, strip_h, width, 3), dtype=np.uint8)
+
+        for i, path in enumerate(image_paths):
+            img = _open_image(path, reference_size)
+            strip_img = img.crop((0, y_start, width, y_end))
+            strips[i] = np.asarray(strip_img, dtype=np.uint8)
+            img.close()
+
+        median_strip = np.median(strips, axis=0)
+        result_strips.append(np.clip(median_strip, 0, 255).astype(np.uint8))
+        del strips
+
+    result = np.concatenate(result_strips, axis=0)
+    return Image.fromarray(result, mode="RGB")
File: frontend/src/api/client.js
 export const stackImages = (gallery, images, mode, outputName) =>
     method: "POST",
     headers: { "Content-Type": "application/json" },
     body: JSON.stringify({ images, mode, output_name: outputName }),
+    timeout: 600000, // 10 minutes – large stacks are slow on RPi. NOTE(review): native fetch() ignores a `timeout` option – confirm the client wrapper honors it (e.g. via AbortSignal.timeout), otherwise this line is a no-op.
   });
 export const imageUrl = (gallery, filename) =>
File: frontend/src/components/StackingPanel.jsx
-import { useState } from "react";
+import { useState, useEffect, useRef } from "react";
 import * as api from "../api/client";
 import { imageUrl } from "../api/client";
+// Whole seconds elapsed since `active` last became true; 0 while inactive.
+function useElapsed(active) {
+  const [elapsed, setElapsed] = useState(0);
+  const startRef = useRef(null);
+  useEffect(() => {
+    if (!active) {
+      startRef.current = null;
+      return;
+    }
+    startRef.current = Date.now();
+    // Tick once per second; anchoring to Date.now() keeps the value
+    // accurate even when interval callbacks fire late.
+    const id = setInterval(() => {
+      setElapsed(Math.floor((Date.now() - startRef.current) / 1000));
+    }, 1000);
+    // Cleanup runs when `active` flips or on unmount; resetting to 0
+    // here makes the next activation start from "0s" again.
+    return () => {
+      clearInterval(id);
+      setElapsed(0);
+    };
+  }, [active]);
+  return elapsed;
+}
+
+// Format a whole-second count as "Mm Ss", or just "Ss" under one minute.
+function formatElapsed(s) {
+  const m = Math.floor(s / 60);
+  const sec = s % 60;
+  return m > 0 ? `${m}m ${sec}s` : `${sec}s`;
+}
+
 export default function StackingPanel({ gallery, images, onStackComplete }) {
   const [selected, setSelected] = useState(new Set());
   const [mode, setMode] = useState("mean");
   const [outputName, setOutputName] = useState("");
   const [stacking, setStacking] = useState(false);
   const [result, setResult] = useState(null);
   const [error, setError] = useState(null);
+  const elapsed = useElapsed(stacking);
   // Filter out already-stacked images from selection candidates
   const stackableImages = images.filter((img) => !img.filename.startsWith("stacked-"));
 export default function StackingPanel({ gallery, images, onStackComplete }) {
             className="w-full rounded-lg bg-purple-600 hover:bg-purple-500 disabled:opacity-50 px-4 py-2.5 text-sm font-semibold text-white transition-colors"
           >
             {stacking
-              ? "Stacking…"
+              ? `Stacking… ${formatElapsed(elapsed)}`
               : `Stack ${selected.size} Image${selected.size !== 1 ? "s" : ""}`}
           </button>
Read more...