Edge AI Manager settings
Persistent AI Manager settings are stored locally in the
/opt/sclbl/etc/settings
file. If it does not exist yet, the uimanager, the one-line install script, or the Advantech ICR router will autogenerate the settings file by copying
/opt/sclbl/etc/defaults.json
to /opt/sclbl/etc/settings.json
.
{
  "settings": {
    "AdminLocation": "https://admin.sclbl.net",
    "AuthenticationUrl": "https://api.sclbl.net/auth",
    "DeamonSocketName": "../sockets/sclbld_%d.sock",
    "DeviceApiUrl": "https://api.sclbl.net/dev",
    "DeviceId": "",
    "DeviceName": "",
    "DeviceSerial": "",
    "DeviceType": "CPU",
    "LicenseKey": "",
    "LogLevel": 3,
    "ModuleSocketName": "../sockets/sclblmod.sock",
    "OutputDistributorPort": 8012,
    "RefreshToken": "",
    "SettingsSection": 1,
    "WebPort": 8081,
    "WebPortSecure": 8443
  },
  "module": {
    "AlwaysOffline": 0,
    "AssignedModels": [],
    "Clients": "",
    "EmailOnAlarm": 0,
    "EnableLocalPassword": 0,
    "ExternalTrigger": 0,
    "GeneralFrameRate": 0,
    "ImageEndpoint": 1,
    "InputCamera1AspectRatio": 1,
    "InputCamera1AssignedModels": [
      0
    ],
    "InputCamera1Format": "",
    "InputCamera1IP": "",
    "InputCamera1LineCrossingDistanceThreshold": 100,
    "InputCamera1LineCrossingKeepTrackOfCentroidWhenDisappearedStep": 2,
    "InputCamera1LineCrossingMaxFollowPathOfOneCentroidStep": 4,
    "InputCamera1LineCrossingMaxNoCentroidsFollowingAtSameTime": 5,
    "InputCamera1LineCrossingTopBottomMarginsOfThreshold": 10,
    "InputCamera1LineCrossingX1": 0,
    "InputCamera1LineCrossingX2": 0,
    "InputCamera1LineCrossingY1": 0,
    "InputCamera1LineCrossingY2": 0,
    "InputCamera1Location": "",
    "InputCamera1Mask": "",
    "InputCamera1MaskInvert": 0,
    "InputCamera1Name": "",
    "InputCamera1Pass": "",
    "InputCamera1RoiH": 0,
    "InputCamera1RoiW": 0,
    "InputCamera1RoiX": 0,
    "InputCamera1RoiY": 0,
    "InputCamera1ShmF": 25,
    "InputCamera1ShmH": 360,
    "InputCamera1ShmW": 640,
    "InputCamera1Username": "",
    "InputCamera2AspectRatio": 1,
    "InputCamera2AssignedModels": [
      0
    ],
    "InputCamera2Format": "",
    "InputCamera2IP": "",
    "InputCamera2LineCrossingDistanceThreshold": 100,
    "InputCamera2LineCrossingKeepTrackOfCentroidWhenDisappearedStep": 2,
    "InputCamera2LineCrossingMaxFollowPathOfOneCentroidStep": 4,
    "InputCamera2LineCrossingMaxNoCentroidsFollowingAtSameTime": 5,
    "InputCamera2LineCrossingTopBottomMarginsOfThreshold": 10,
    "InputCamera2LineCrossingX1": 0,
    "InputCamera2LineCrossingX2": 0,
    "InputCamera2LineCrossingY1": 0,
    "InputCamera2LineCrossingY2": 0,
    "InputCamera2Location": "",
    "InputCamera2Mask": "",
    "InputCamera2MaskInvert": 0,
    "InputCamera2Name": "",
    "InputCamera2Pass": "",
    "InputCamera2RoiH": 0,
    "InputCamera2RoiW": 0,
    "InputCamera2RoiX": 0,
    "InputCamera2RoiY": 0,
    "InputCamera2ShmF": 25,
    "InputCamera2ShmH": 360,
    "InputCamera2ShmW": 640,
    "InputCamera2Username": "",
    "InputCamera3AspectRatio": 1,
    "InputCamera3AssignedModels": [
      0
    ],
    "InputCamera3Format": "",
    "InputCamera3IP": "",
    "InputCamera3LineCrossingDistanceThreshold": 100,
    "InputCamera3LineCrossingKeepTrackOfCentroidWhenDisappearedStep": 2,
    "InputCamera3LineCrossingMaxFollowPathOfOneCentroidStep": 4,
    "InputCamera3LineCrossingMaxNoCentroidsFollowingAtSameTime": 5,
    "InputCamera3LineCrossingTopBottomMarginsOfThreshold": 10,
    "InputCamera3LineCrossingX1": 0,
    "InputCamera3LineCrossingX2": 0,
    "InputCamera3LineCrossingY1": 0,
    "InputCamera3LineCrossingY2": 0,
    "InputCamera3Location": "",
    "InputCamera3Mask": "",
    "InputCamera3MaskInvert": 0,
    "InputCamera3Name": "",
    "InputCamera3Pass": "",
    "InputCamera3RoiH": 0,
    "InputCamera3RoiW": 0,
    "InputCamera3RoiX": 0,
    "InputCamera3RoiY": 0,
    "InputCamera3ShmF": 25,
    "InputCamera3ShmH": 360,
    "InputCamera3ShmW": 640,
    "InputCamera3Username": "",
    "InputCamera4AspectRatio": 1,
    "InputCamera4AssignedModels": [
      0
    ],
    "InputCamera4Format": "",
    "InputCamera4IP": "",
    "InputCamera4LineCrossingDistanceThreshold": 100,
    "InputCamera4LineCrossingKeepTrackOfCentroidWhenDisappearedStep": 2,
    "InputCamera4LineCrossingMaxFollowPathOfOneCentroidStep": 4,
    "InputCamera4LineCrossingMaxNoCentroidsFollowingAtSameTime": 5,
    "InputCamera4LineCrossingTopBottomMarginsOfThreshold": 10,
    "InputCamera4LineCrossingX1": 0,
    "InputCamera4LineCrossingX2": 0,
    "InputCamera4LineCrossingY1": 0,
    "InputCamera4LineCrossingY2": 0,
    "InputCamera4Location": "",
    "InputCamera4Mask": "",
    "InputCamera4MaskInvert": 0,
    "InputCamera4Name": "",
    "InputCamera4Pass": "",
    "InputCamera4RoiH": 0,
    "InputCamera4RoiW": 0,
    "InputCamera4RoiX": 0,
    "InputCamera4RoiY": 0,
    "InputCamera4ShmF": 25,
    "InputCamera4ShmH": 360,
    "InputCamera4ShmW": 640,
    "InputCamera4Username": "",
    "InputDriver": "VIDEO",
    "LocalPassword": "",
    "MaxVideoPreResize": 640,
    "ModuleSection": 1,
    "OnChange": 0,
    "OnChangeImageThreshold": 5,
    "OnChangePixelThreshold": 20,
    "OutputFormat": "JSON",
    "OutputFrequency": -1,
    "OutputLocation": "http://localhost:8012/output",
    "OutputProtocol": "REST",
    "OutputThresholdType": 0,
    "RgbAsGray": 0,
    "RunAtStartup": 0,
    "SaveConditional": 0,
    "SaveConditionalProbs": 0,
    "SaveConditionalServer": "",
    "SaveImages": 0,
    "SaveImagesPath": "",
    "SettingsUpload": 0,
    "SocketRuntimes": 1,
    "UploadImageOnAlarm": 0,
    "OmitRawModelOutput": 0
  }
}
Definitions of individual settings in the order they appear in the settings and defaults file.
The RefreshToken is a unique token that identifies this device to the Scailable Cloud. It is assigned when registering at the Scailable platform.
Without a RefreshToken, LicenseKey and DeviceSerial the
sclbld
and sclblmod
binaries will not start. The LicenseKey is a unique token that identifies a specific Scailable license. It is generated when registering at the Scailable platform.
Without a RefreshToken, LicenseKey and DeviceSerial the
sclbld
and sclblmod
binaries will not start. The DeviceSerial is a unique token that identifies a specific device. It is generated when registering a device in the Scailable platform, and will not change for as long as the device's settings file is not deleted and regenerated.
Without a RefreshToken, LicenseKey and DeviceSerial the
sclbld
and sclblmod
binaries will not start. For non-Advantech ICR devices, it is generated using the OS native machine UUID/GUID plus the first detected network card's MAC address:
deviceid=MACHINENAME-FIRSTMACID
The MAC address is a unique identifier assigned to a network interface controller (NIC).
Machine IDs are usually generated during system installation and stay constant for all subsequent boots (though not for docker type instances). The following sources are used:
- BSD uses
/etc/hostid
and smbios.system.uuid
as a fallback - Linux uses
/var/lib/dbus/machine-id
- OS X uses
IOPlatformUUID
- Windows uses the
MachineGuid
from HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Cryptography
For Advantech ICR devices, the Edge AI Manager device ID equals the Advantech device ID.
A DeviceName is a human readable name chosen during device registration. The value does not have to be unique.
A DeviceType is a human readable name representing the type of device chosen during device registration. The value does not have to be unique.
URL of the Scailable device service.
Default value:
https://api.sclbl.net/dev
This value generally does not have to be changed.
URL of the Scailable authentication service file endpoint.
Default value:
https://api.sclbl.net/auth
This value generally does not have to be changed.
URL of the Scailable Admin service.
Default value:
https://admin.sclbl.net
This value generally does not have to be changed.
The RefreshToken is a unique token that identifies this specific device's registration.
The AI Manager IO Module receives important information over a UNIX domain socket, such as control messages, triggers and tensor inputs. This setting controls the path to this socket and is expected to be relative to the executable.
Default value:
../sockets/sclblmod.sock
The Scailable runtime and io module communicate with each other over a UNIX domain socket. Each instance of an Edge AI Manager within the same environment needs to have a unique socket name.
Default value:
../sockets/sclbld_%d.sock
HTTP web server port of the uiprovider.
Default value:
8081
HTTPS web server port of the uiprovider.
Default value:
8443
The log level the
sclblmod
and sclbld
will run at. Only useful for debugging sessions. Default value:
3
Indicates if the AI Manager is currently running an inference(s).
If the AI Manager is running its value is
1
If the AI Manager is not running inferences its value is 0
Default value:
0
The AssignedModels section of the settings is an array which can contain any number of assigned models. The individual model details should be automatically populated when selecting a model from the Cloud Admin.
The Edge AI Manager is designed in such a way that each assigned model will be run for inference on each incoming input. This means that if there are two assigned models, there will be two inferences run on each incoming tensor. The results of these inferences will be sent separately to whichever output is configured.
The AssignedModelCFID is a unique token that identifies the currently selected model.
The AssignedModelSRPID is a unique token that identifies the currently selected model's group.
The AssignedModelFileName is the name of the model file in the cache directory. It differs based on which runtime is installed.
AssignedModelInputNames is a field that contains the names of each of the currently selected input tensors in JSON format. The names follow the order in which they were defined in the original ONNX file.
Image tensor order. Deep learning frameworks generally use either NCHW or NHWC layouts in memory. ONNX uses NCHW by default, but the AI Manager can convert between NCHW and NHWC input tensors.
NCHW order
0
:batch N, channels C, height H, width W
NHWC order
1
:batch N, height H, width W, channels C
Currently, the Scailable Edge AI Manager only supports N = 1
The integer number of colors / channels for image tensor inputs.
The integer image height for image tensor type input.
The integer image width for image tensor type input.
Image model mean normalization input values:
output = (input -
mean
) / var
Has to be formatted as a JSON integer array. Default
[0]
per channel. For instance
[0,0,0]
for C = 3. Image model var normalization input values:
output = (input - mean) /
var
Image model mean normalization input values:
output = (input -
mean
) / var
Has to be formatted as a JSON integer array. Default
[1]
per channel. For instance
[1,1,1]
for C = 3. AssignedModelOutputNames is a field that contains the names of each of the currently selected output tensors in JSON format. The names follow the order in which they were defined in the original ONNX file.
AssignedModelOutput is an optional field that can contain additional information regarding the currently selected model's output tensor(s) in JSON format.
Single string, sets postprocessing type.
Will be obsolete in the next AI Manager, but remains available for backward compatibility.
AssignedModelDetails is a human readable description of the model. It does not have to be unique.
Model confidence threshold. It ranges from 0 to 1.
Period of time (in milliseconds) before considering an object dumped.
Intersection Over Union (IoU) value for considering two objects are the same.
Number of recent object positions to remember.
Number of consecutive frames that an object may be occluded.
Positive integer denoting the maximum euclidean distance an object can cross between two consecutive frames.
InputCameraXIP defines the Xth camera's input address. Here, X is a value from 1 to N.
For instance:
InputCamera1IP=https://192.168.1.10
Depending on InputCameraXFormat, it can be one of the following:
InputCameraXFormat | Type of input | Example |
---|---|---|
Image (JPEG, GIF, PNG) | http:// or https:// URL | http://test.com/test.jpg
https://192.168.1.10 |
Image (JPEG, GIF, PNG) | local file URL | file://path/to/image/test.jpg |
RTSP stream | rtsp:// URL | rtsp://test.com/stream |
MJPEG stream | http:// URL | http://test.com/stream.mjpg |
V4L2 webcam | path to (usb) camera | /dev/video0 |
Gstreamer SHM | SHM path | /tmp/industrialcam |
Controls through which mechanism the data gets to the AI Manager.
Currently there are two supported options:
When InputDriver is set to "VIDEO", the AI Manager will fetch inputs from defined cameras, controlled by the InputCameraXIP setting.
When InputDriver is set to "SOCKET", the AI Manager will expect inputs received on its Unix socket. The path to this socket is controlled by the ModuleSocketName setting.
The module will wait for a message on the socket before performing inference.
Determines how an input image is resized to model tensor input WxH dimensions.
Description | Value |
---|---|
Image is fitted exactly to the tensor input dimensions without taking into account the original image's aspect ratio. As a result, the input tensor can be a "squashed" representation of the original input image. | 0 |
Image is fitted to the tensor input aspect ratio by cutting edges of the image either vertically or horizontally in case the image aspect ratio differs from the tensor aspect ratio. As a result, parts of the input image may not be part of the inference. | 1 |
Image is fitted to the tensor input aspect ratio by adding black borders either horizontally or vertically in case the image aspect ratio differs from the tensor aspect ratio. | 2 |
0

1

2

Coordinates of a polygon or polygons that define an inverse mask or zone for camera no X. The area outside of the zone is "blacked out" and is not available to the model. The coordinates for two polygons A and B defined by three points, each consisting of an integer X and Y coordinate, could be described as:
{"mask":[[A_X1,A_Y1,A_X2,A_Y2,A_X3,A_Y3],[B_X1,B_Y1,B_X2,B_Y2,B_X3,B_Y3]]}
An example using actual coordinates for a zone or inverse mask of one polygon defined by four points would be:
{"mask":[[44,22,100,21,109,108,46,114]]}
The X and Y coordinates are defined relative to the top-left 0,0 coordinate of the image input tensor:

Where Y is within H and X within W.
Optional username for camera X.
Optional password for camera X.
Can be one of the following:
Description | Value |
---|---|
Image (JPEG, GIF, PNG) | jpg |
RTSP stream | rtsp |
MJPEG stream | mjpeg |
V4L2 webcam | v4l2 |
Gstreamer SHM | shm |
User defined name of camera X.
Set automatically by the AI Manager. The number of channels / colors in the output of camera no X.
Set automatically by the AI Manager. The height of the output of camera X in pixels.
Set automatically by the AI Manager. The width of the output of camera X in pixels.
GStreamer
shmsink
video input frequency for camera X. Required when InputCameraXFormat
is set to shm
. Default 25
frames per second. GStreamer
shmsink
video input height for camera X. Required when InputCameraXFormat
is set to shm
. Default 360
pixels. GStreamer
shmsink
video input width for camera X. Required when InputCameraXFormat
is set to shm
. Default 640
pixels. Human readable description of camera X's location.
Camera Region Of Interest top left integer coordinate X value in pixels. Defined relative to the top left (0,0) coordinate of the image input tensor WxH
Camera Region Of Interest top left integer coordinate Y value in pixels. Defined relative to the top left (0,0) coordinate of the image input tensor WxH
Camera Region Of Interest integer width within the coordinate space of the image input tensor WxH
Camera Region Of Interest integer height within the coordinate space of the image input tensor WxH
Camera x-coordinate of first point used to define the line used in line crossing within the coordinate space of the image input tensor WxH
Camera y-coordinate of first point used to define the line used in line crossing within the coordinate space of the image input tensor WxH
Camera x-coordinate of second point used to define the line used in line crossing within the coordinate space of the image input tensor WxH
Camera y-coordinate of second point used to define the line used in line crossing within the coordinate space of the image input tensor WxH

The two points (X1, Y1) and (X2, Y2) defining a line.
Camera's maximum number of frames to wait for an object to reappear before considering it left definitely.
Camera's threshold distance, from line, to consider a centroid moved from one side to the other side of line.
Camera's maximum number of recent positions the object has occupied. This serves to determine the direction of movement for each object.
Camera's maximum number of objects to keep track of at the same time.
Camera's threshold distance (in terms of pixels) between an existing centroid and a new object centroid, above which these two centroids cannot be paired together.
- REST: Endpoint inference output location. For example,
https://example.com/myendpoint
- SOCKET: A filepath on the device. For example,
/opt/sclbl/sockets/output_socket.sock
Whether to turn on HTTP Authentication password:
1
is on, 0
is off. HTTP Authentication password.
The output protocol currently has two options:
- REST (Default)
- SOCKET
When the setting is set to REST, the setting will be sent as an HTTP REST request. When the setting is set to SOCKET, the output will be sent as a Unix socket message. See Edge AI Manager Module Socket Interface .
In both cases, where the output is sent is controlled by OutputLocation. In the case of REST, a URL is expected, and in the case of SOCKET, a filepath is expected.
Output format. Default:
JSON
Select a maximum frequency limit for the inferences in milliseconds, to save bandwidth and/or power. The model may not be fast enough to reach this maximum, but the module will not attempt faster inferences.
Default:
2
, that is, every two seconds. When set to 0
the output frequency is only limited by the inference speed.Inference results can be sent one at a time or aggregated.
the output frequency is only limited by the inference speed. Inference results can be sent one at a time or aggregated.
- If you aggregate the results over one input loop, the results for all input sources will be concatenated into one request to the logging server.
- When you aggregate the inferences over time all results for all inputs will be combined into one request to the logging server.
Description | Value |
---|---|
Post each inference separately | -1 |
Aggregate inferences over one input loop | 0 |
Aggregate every two seconds | 2 |
Aggregate every ten seconds | 10 |
Aggregate every thirty seconds | 30 |
Aggregate every minute | 60 |
Aggregate every X seconds | X |
Upload or post images below some certainty threshold. When posted, it converts the camera image(s) as a base64 encoded string to the defined endpoint. On when set to
1
Probability threshold. Can be set to a value between 0.0 and 1.0. If the threshold value is 0.0 no images will be sent to the API. If the threshold is higher than 0 the input image will be sent to the API when an inference result has a probability value below the threshold. If there are multiple detected classes, any detection in the result that is below the threshold, independent of the detected class, will trigger sending the image to the API.
Whether or not to make video stream images available through a shared memory interface (SHM).
Description | Value |
---|---|
No SHM images available | 0 |
Input images available through SHM | 1 |
Output images available through SHM | 2 |
Whether the Scailable AI Manager will be able to connect to the internet. If not, license and related settings will need to be configured explicitly. Contact us to get more information about our solutions for airgapped and other types of offline deployment.
Next to being able to control AI Managers through our cloud, it is also possible to then control multiple AI Managers through one central AI Manager. Contact us if you want to know more about this option.
A percentage value, for the NMS post-processing, representing the confidence score below which some detected entities are considered false positives. For example, a value equal to 100 will result in fewer predictions than a value equal to 0.
Whether to only run inference when a change in the image is detected.
If
1
inference will only be run if a change between the current and previous frame is detected, controlled by OnChangePixelThreshold and OnChangeImageThreshold. If
0
inference will be run on every frame received from the camera. Threshold of change in a pixel value to be considered a changed pixel.
Percentage of pixels in frame necessary to be changed above OnChangePixelThreshold to be considered a changed frame.
When this is activated, inference will not be run until a signal is received externally. A signal can be sent by calling the
/triggerInference
endpoint. Whether to save input and/or output frames as images locally.
Base path of the save location is the full path set by SaveImagesPath.
Description | Value |
---|---|
Do not save frames | 0 |
Save full size input frames | 1 |
Save model size input frames | 2 |
When SaveImages is not zero, images will be saved with the following directory structure:
[SaveImagesPath]/[%Y]/[%m]/[%d]
Option to exclude the model output from being sent as part of the inference result.
This is useful for when postprocessors are enabled and only the output of the postprocessors are significant.
For example, with a barcode scanning model, by default the bounding boxes of the detected barcodes are included, but this is potentially unwanted. This setting can be enabled to exclude that information from the output.
For example, with raw model data included:
{
"outputType": "json",
"outputFormat": "namedObject",
"outputDims": [
[
1,
4
]
],
"outputDataTypes": [
1
],
"modelId": "0e8f7348-0a96-4c52-8df8-c635edaed0e7",
"modelName": "Bar code scanning",
"sourceId": "input-0",
"sourceName": "",
"output": {
"barcode_bboxes-format:xyxy": [
0.0,
0.0,
512.0,
512.0
],
"barCodes": [
"welcome to scailable!",
"Scailable-2023"
]
}
}
And with raw model data omitted:
{
"outputType": "json",
"outputFormat": "namedObject",
"outputDims": [
[
1,
4
]
],
"outputDataTypes": [
1
],
"modelId": "0e8f7348-0a96-4c52-8df8-c635edaed0e7",
"modelName": "Bar code scanning",
"sourceId": "input-0",
"sourceName": "",
"output": {
"barCodes": [
"welcome to scailable!",
"Scailable-2023"
]
}
}
In this example, the
barCodes
object could be seen as the only significant output, so barcode_bboxes-format:xyxy
is just noise. Then this setting can be used to exclude it. Advantech ICR router-specific setting.
We have the flexibility to handle images of any input size. However, it's worth noting that many existing vision-related DNN models typically don't require large tensor sizes. To accommodate this, we have implemented a default pre-resizing step that ensures the input fits within a maximum 640x640 box without altering the aspect ratio. This pre-resizing is controlled by the "MaxVideoPreResize":640 setting.
Last modified 6d ago