Skip to content
Merged

UI fixes #23127

Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
c235ea6
hide camera overrides badge from system sections
hawkeye217 May 6, 2026
bdcdbe7
show empty card on camera metrics page when no cameras are defined
hawkeye217 May 6, 2026
b735c76
fix enabled camera state switch after adding via wizard
hawkeye217 May 6, 2026
4147403
guard camera enabled access
hawkeye217 May 6, 2026
27017d3
fix useOptimisticState dropping debounced setState under StrictMode
hawkeye217 May 6, 2026
25c24ab
use openvino on cpu as default model
hawkeye217 May 6, 2026
f02f729
use an enum for model_size
hawkeye217 May 6, 2026
9e3f424
i18n
hawkeye217 May 6, 2026
b317f6b
sync object filter entries with tracked labels in camera config form
hawkeye217 May 6, 2026
587c1cb
revalidate raw paths cache after config save so CameraPathWidget show…
hawkeye217 May 6, 2026
f860faf
fix test
hawkeye217 May 6, 2026
edfc73f
restore masked ffmpeg credentials when persisting camera config
hawkeye217 May 6, 2026
f11f575
formatting
hawkeye217 May 6, 2026
1bf4bc8
rebuild ffmpeg commands when enabling recording for the first time
hawkeye217 May 7, 2026
e7a5f76
keep record toggle switch in single camera view disabled until enable…
hawkeye217 May 7, 2026
8875551
fix override detection for sections unset in the global config
hawkeye217 May 7, 2026
feff214
add support for config-aware patterns in section hiddenFields
hawkeye217 May 7, 2026
bbc79f9
simplify object filters handling
hawkeye217 May 7, 2026
2d0598c
tweaks
hawkeye217 May 7, 2026
6ca60dd
update docs for new detector default
hawkeye217 May 7, 2026
ce87b91
make genai provider required and add special case for UI
hawkeye217 May 7, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docs/docs/configuration/object_detectors.md
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ This does not affect using hardware for accelerating other tasks such as [semant

# Officially Supported Detectors

Frigate provides a number of builtin detector types. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
Frigate provides a number of builtin detector types. By default, Frigate will use a single OpenVINO detector running on the CPU. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.

## Edge TPU Detector

Expand Down
2 changes: 1 addition & 1 deletion docs/docs/guides/getting_started.md
Original file line number Diff line number Diff line change
Expand Up @@ -192,7 +192,7 @@ cameras:

### Step 4: Configure detectors

By default, Frigate will use a single CPU detector.
By default, Frigate will use a single OpenVINO detector running on the CPU.

In many cases, the integrated graphics on Intel CPUs provides sufficient performance for typical Frigate setups. If you have an Intel processor, you can follow the configuration below.

Expand Down
38 changes: 38 additions & 0 deletions frigate/api/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -499,6 +499,40 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
)


def _restore_masked_camera_paths(config_data: dict, config: FrigateConfig) -> None:
"""Substitute incoming `*:*` masked credentials with the in-memory ones.

The /config response masks ffmpeg input credentials, so the settings UI
sends the masked path back when sibling fields (e.g. hwaccel_args) are
edited. Without this we'd write `rtsp://*:*@host` into YAML and lose
the real credentials. Mutates `config_data` in place.
"""
cameras = config_data.get("cameras")
if not isinstance(cameras, dict):
return

for camera_name, camera_data in cameras.items():
if not isinstance(camera_data, dict):
continue
inputs = camera_data.get("ffmpeg", {}).get("inputs")
if not isinstance(inputs, list):
continue
existing = config.cameras.get(camera_name)
if existing is None:
continue
existing_paths = [inp.path for inp in existing.ffmpeg.inputs]
for index, input_obj in enumerate(inputs):
if not isinstance(input_obj, dict):
continue
path = input_obj.get("path")
if not isinstance(path, str):
continue
if ("://*:*@" in path or "user=*&password=*" in path) and index < len(
existing_paths
):
input_obj["path"] = existing_paths[index]


def _config_set_in_memory(request: Request, body: AppConfigSetBody) -> JSONResponse:
"""Apply config changes in-memory only, without writing to YAML.

Expand All @@ -509,6 +543,7 @@ def _config_set_in_memory(request: Request, body: AppConfigSetBody) -> JSONRespo
try:
updates = {}
if body.config_data:
_restore_masked_camera_paths(body.config_data, request.app.frigate_config)
updates = flatten_config_data(body.config_data)
updates = {k: ("" if v is None else v) for k, v in updates.items()}

Expand Down Expand Up @@ -615,6 +650,9 @@ def config_set(request: Request, body: AppConfigSetBody):
if query_string:
updates = process_config_query_string(query_string)
elif body.config_data:
_restore_masked_camera_paths(
body.config_data, request.app.frigate_config
)
updates = flatten_config_data(body.config_data)
# Convert None values to empty strings for deletion (e.g., when deleting masks)
updates = {k: ("" if v is None else v) for k, v in updates.items()}
Expand Down
3 changes: 1 addition & 2 deletions frigate/config/camera/genai.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,7 @@ class GenAIConfig(FrigateBaseModel):
title="Model",
description="The model to use from the provider for generating descriptions or summaries.",
)
provider: GenAIProviderEnum | None = Field(
default=None,
provider: GenAIProviderEnum = Field(
title="Provider",
description="The GenAI provider to use (for example: ollama, gemini, openai).",
)
Expand Down
3 changes: 3 additions & 0 deletions frigate/config/camera/updater.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,10 @@ def __update_config(
elif update_type == CameraConfigUpdateEnum.objects:
config.objects = updated_config
elif update_type == CameraConfigUpdateEnum.record:
old_enabled_in_config = config.record.enabled_in_config
config.record = updated_config
if old_enabled_in_config != updated_config.enabled_in_config:
config.recreate_ffmpeg_cmds()
elif update_type == CameraConfigUpdateEnum.review:
config.review = updated_config
elif update_type == CameraConfigUpdateEnum.review_genai:
Expand Down
23 changes: 14 additions & 9 deletions frigate/config/classification.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,11 @@ class EnrichmentsDeviceEnum(str, Enum):
CPU = "CPU"


class ModelSizeEnum(str, Enum):
small = "small"
large = "large"


class TriggerType(str, Enum):
THUMBNAIL = "thumbnail"
DESCRIPTION = "description"
Expand Down Expand Up @@ -53,13 +58,13 @@ class AudioTranscriptionConfig(FrigateBaseModel):
title="Transcription language",
description="Language code used for transcription/translation (for example 'en' for English). See https://whisper-api.com/docs/languages/ for supported language codes.",
)
device: Optional[EnrichmentsDeviceEnum] = Field(
device: EnrichmentsDeviceEnum = Field(
default=EnrichmentsDeviceEnum.CPU,
title="Transcription device",
description="Device key (CPU/GPU) to run the transcription model on. Only NVIDIA CUDA GPUs are currently supported for transcription.",
)
model_size: str = Field(
default="small",
model_size: ModelSizeEnum = Field(
default=ModelSizeEnum.small,
title="Model size",
description="Model size to use for offline audio event transcription.",
)
Expand Down Expand Up @@ -189,8 +194,8 @@ def coerce_model_enum(cls, v):
return v
return v

model_size: str = Field(
default="small",
model_size: ModelSizeEnum = Field(
default=ModelSizeEnum.small,
title="Model size",
description="Select model size; 'small' runs on CPU and 'large' typically requires GPU.",
)
Expand Down Expand Up @@ -253,8 +258,8 @@ class FaceRecognitionConfig(FrigateBaseModel):
title="Enable face recognition",
description="Enable or disable face recognition for all cameras; can be overridden per-camera.",
)
model_size: str = Field(
default="small",
model_size: ModelSizeEnum = Field(
default=ModelSizeEnum.small,
title="Model size",
description="Model size to use for face embeddings (small/large); larger may require GPU.",
)
Expand Down Expand Up @@ -335,8 +340,8 @@ class LicensePlateRecognitionConfig(FrigateBaseModel):
title="Enable LPR",
description="Enable or disable license plate recognition for all cameras; can be overridden per-camera.",
)
model_size: str = Field(
default="small",
model_size: ModelSizeEnum = Field(
default=ModelSizeEnum.small,
title="Model size",
description="Model size used for text detection/recognition. Most users should use 'small'.",
)
Expand Down
33 changes: 30 additions & 3 deletions frigate/config/config.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from __future__ import annotations

import io
import json
import logging
import os
Expand Down Expand Up @@ -80,17 +81,40 @@

yaml = YAML()

DEFAULT_DETECTORS = {
"ov": {
"type": "openvino",
"device": "CPU",
}
}
DEFAULT_MODEL = {
"width": 300,
"height": 300,
"input_tensor": "nhwc",
"input_pixel_format": "bgr",
"path": "/openvino-model/ssdlite_mobilenet_v2.xml",
"labelmap_path": "/openvino-model/coco_91cl_bkgr.txt",
}
DEFAULT_DETECT_DIMENSIONS = {"width": 1280, "height": 720}


def _render_default_yaml(data: dict) -> str:
    """Serialize `data` to a YAML string using Frigate's indentation style.

    A throwaway writer instance is created so the indentation settings
    (mapping=2, sequence=4, offset=2) don't touch the module-level `yaml`
    object used elsewhere.
    """
    writer = YAML()
    writer.indent(mapping=2, sequence=4, offset=2)
    rendered = io.StringIO()
    writer.dump(data, rendered)
    return rendered.getvalue()


DEFAULT_CONFIG = f"""
mqtt:
enabled: False

{_render_default_yaml({"detectors": DEFAULT_DETECTORS, "model": DEFAULT_MODEL})}
cameras: {{}} # No cameras defined, UI wizard should be used
version: {CURRENT_CONFIG_VERSION}
"""

DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
DEFAULT_DETECT_DIMENSIONS = {"width": 1280, "height": 720}

# stream info handler
stream_info_retriever = StreamInfoRetriever()

Expand Down Expand Up @@ -679,6 +703,9 @@ def post_validation(self, info: ValidationInfo) -> Self:
model_config["path"] = "/cpu_model.tflite"
elif detector_config.type == "edgetpu":
model_config["path"] = "/edgetpu_model.tflite"
elif detector_config.type == "openvino":
for default_key, default_value in DEFAULT_MODEL.items():
model_config.setdefault(default_key, default_value)

model = ModelConfig.model_validate(model_config)
model.check_and_load_plus_model(self.plus_api, detector_config.type)
Expand Down
7 changes: 4 additions & 3 deletions frigate/test/test_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,9 +64,9 @@ def setUp(self):

def test_config_class(self):
frigate_config = FrigateConfig(**self.minimal)
assert "cpu" in frigate_config.detectors.keys()
assert frigate_config.detectors["cpu"].type == DetectorTypeEnum.cpu
assert frigate_config.detectors["cpu"].model.width == 320
assert "ov" in frigate_config.detectors.keys()
assert frigate_config.detectors["ov"].type == DetectorTypeEnum.openvino
assert frigate_config.detectors["ov"].model.width == 300

@patch("frigate.detectors.detector_config.load_labels")
def test_detector_custom_model_path(self, mock_labels):
Expand Down Expand Up @@ -1005,6 +1005,7 @@ def test_plus_labelmap(self):

config = {
"mqtt": {"host": "mqtt"},
"detectors": {"cpu": {"type": "cpu"}},
"model": {"path": "plus://test"},
"cameras": {
"back": {
Expand Down
17 changes: 17 additions & 0 deletions frigate/video/ffmpeg.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,6 +174,7 @@ def __init__(
)
self.requestor = InterProcessRequestor()
self.was_enabled = self.config.enabled
self.was_record_enabled_in_config = self.config.record.enabled_in_config

self.segment_subscriber = RecordingsDataSubscriber(RecordingsDataTypeEnum.all)
self.latest_valid_segment_time: float = 0
Expand Down Expand Up @@ -323,6 +324,22 @@ def run(self) -> None:
self.was_enabled = enabled
continue

record_enabled_in_config = self.config.record.enabled_in_config
if record_enabled_in_config != self.was_record_enabled_in_config:
if record_enabled_in_config and enabled:
self.logger.debug(
f"Record enabled in config for {self.config.name}, restarting ffmpeg"
)
self.stop_all_ffmpeg()
self.start_all_ffmpeg()
self.latest_valid_segment_time = 0
self.latest_invalid_segment_time = 0
self.latest_cache_segment_time = 0
self.record_enable_time = datetime.now().astimezone(timezone.utc)
last_restart_time = datetime.now().timestamp()
self.was_record_enabled_in_config = record_enabled_in_config
continue

if not enabled:
continue

Expand Down
3 changes: 2 additions & 1 deletion web/public/locales/en/views/live.json
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,8 @@
},
"recording": {
"enable": "Enable Recording",
"disable": "Disable Recording"
"disable": "Disable Recording",
"disabledInConfig": "Recording must first be enabled in Settings for this camera."
},
"snapshots": {
"enable": "Enable Snapshots",
Expand Down
8 changes: 4 additions & 4 deletions web/public/locales/en/views/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -1663,12 +1663,12 @@
"fpsGreaterThanFive": "Setting the detect FPS higher than 5 is not recommended. Higher values may cause performance issues and will not provide any benefit."
},
"faceRecognition": {
"globalDisabled": "Face recognition is not enabled at the global level. Enable it in Enrichments for camera-level face recognition to function.",
"personNotTracked": "Face recognition requires the 'person' object to be tracked. Ensure 'person' is in the object tracking list."
"globalDisabled": "The face recognition enrichment must be enabled for face recognition features to function on this camera.",
"personNotTracked": "Face recognition requires the 'person' object to be tracked. Enable 'person' in Objects for this camera."
},
"lpr": {
"globalDisabled": "License plate recognition is not enabled at the global level. Enable it in Enrichments for camera-level LPR to function.",
"vehicleNotTracked": "License plate recognition requires 'car' or 'motorcycle' to be tracked."
"globalDisabled": "The license plate recognition enrichment must be enabled for LPR features to function on this camera.",
"vehicleNotTracked": "License plate recognition requires 'car' or 'motorcycle' to be tracked. Enable 'car' or 'motorcycle' in Objects for this camera."
},
"record": {
"noRecordRole": "No streams have the record role defined. Recording will not function."
Expand Down
3 changes: 3 additions & 0 deletions web/public/locales/en/views/system.json
Original file line number Diff line number Diff line change
Expand Up @@ -177,6 +177,9 @@
}
},
"framesAndDetections": "Frames / Detections",
"noCameras": {
"title": "No Cameras Found"
},
"label": {
"camera": "camera",
"detect": "detect",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,11 @@ const audioTranscription: SectionConfigOverrides = {
severity: "warning",
condition: (ctx) => {
if (ctx.level === "camera" && ctx.fullCameraConfig) {
return ctx.fullCameraConfig.audio.enabled === false;
return (
!ctx.fullCameraConfig.ffmpeg?.inputs?.some((input) =>
input.roles?.includes("audio"),
) || ctx.fullCameraConfig.audio.enabled === false
);
}
return false;
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,11 @@ const faceRecognition: SectionConfigOverrides = {
"device",
],
restartRequired: ["enabled", "model_size", "device"],
uiSchema: {
model_size: {
"ui:options": { size: "xs" },
},
},
},
};

Expand Down
3 changes: 3 additions & 0 deletions web/src/components/config-form/section-configs/lpr.ts
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,9 @@ const lpr: SectionConfigOverrides = {
suppressDescription: true,
},
},
model_size: {
"ui:options": { size: "xs" },
},
},
},
};
Expand Down
11 changes: 11 additions & 0 deletions web/src/components/config-form/section-configs/objects.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,13 @@
import type { FrigateConfig } from "@/types/frigateConfig";
import type { SectionConfigOverrides } from "./types";

// The backend injects attribute labels (face, license_plate, Frigate+
// courier labels like DHL/Amazon, etc.) into objects.filters even when
// the model can't actually detect them. They aren't user-settable, so
// produce `filters.<attr>` patterns to hide them from forms and
// override comparisons.
const hideAttributeFilters = (config: FrigateConfig): string[] => {
  const attributes = config.model?.all_attributes ?? [];
  return attributes.map((label) => `filters.${label}`);
};

const objects: SectionConfigOverrides = {
base: {
sectionDocs: "/configuration/object_filters",
Expand All @@ -26,6 +34,7 @@ const objects: SectionConfigOverrides = {
"filters.*.raw_mask",
"filters.mask",
"filters.raw_mask",
hideAttributeFilters,
],
advancedFields: ["genai"],
uiSchema: {
Expand Down Expand Up @@ -99,6 +108,7 @@ const objects: SectionConfigOverrides = {
"filters.mask",
"filters.raw_mask",
"genai.required_zones",
hideAttributeFilters,
],
},
camera: {
Expand All @@ -123,6 +133,7 @@ const objects: SectionConfigOverrides = {
"filters.*.raw_mask",
"filters.mask",
"filters.raw_mask",
hideAttributeFilters,
],
advancedFields: [],
},
Expand Down
Loading
Loading