facemap_inference.py

activate(facemap_model_schema_name, fbe_schema_name=None, *, create_schema=True, create_tables=True, linking_module=None)

Activate schema.

Parameters:

facemap_model_schema_name (str, required): Schema name on the database server to activate the facemap_inference schema of element-facemap.
fbe_schema_name (str, default None): Schema name on the database server to activate the facial_behavior_estimation schema.
create_schema (bool, default True): When True (default), create the schema in the database if it does not yet exist.
create_tables (bool, default True): When True (default), create the tables in the database if they do not yet exist.
linking_module (str, default None): A module name or a module containing the required dependencies to activate the facial_behavior_estimation module.

Dependencies:

Upstream tables:

+ Session: A parent table to VideoRecording, identifying a recording session.
+ Equipment: A parent table to VideoRecording, identifying video recording equipment.
+ VideoRecording: A parent table to FacemapInferenceTask, identifying videos to be used in inference.

Source code in element_facemap/facemap_inference.py
def activate(
    facemap_model_schema_name: str,
    fbe_schema_name: str = None,
    *,
    create_schema: bool = True,
    create_tables: bool = True,
    linking_module=None,
):
    """Activate schema.

    Args:
        facemap_model_schema_name (str): Schema name on the database server to activate the
            `facemap_inference` schema of element-facemap
        fbe_schema_name (str): Schema name on the database server to activate the
            `facial_behavior_estimation` schema.
        create_schema (bool): When True (default), create schema in the database if it
            does not yet exist.
        create_tables (bool): When True (default), create tables in the database if
            they do not yet exist.
        linking_module (str): A module name or a module containing the required
            dependencies to activate the `facial_behavior_estimation` module:

    Dependencies:
    Upstream tables:
        + Session: A parent table to VideoRecording, identifying a recording session.
        + Equipment: A parent table to VideoRecording, identifying video recording equipment.
        + VideoRecording: A parent table to FacemapInferenceTask, identifying videos to be used in inference.
    """
    if isinstance(linking_module, str):
        linking_module = importlib.import_module(linking_module)
    assert inspect.ismodule(
        linking_module
    ), "The argument 'dependency' must be a module's name or a module"
    assert hasattr(
        linking_module, "get_facemap_root_data_dir"
    ), "The linking module must specify a lookup function for a root data directory"

    global _linking_module
    _linking_module = linking_module

    fbe.activate(
        fbe_schema_name,
        create_schema=create_schema,
        create_tables=create_tables,
        linking_module=linking_module,
    )
    schema.activate(
        facemap_model_schema_name,
        create_schema=create_schema,
        create_tables=create_tables,
        add_objects=_linking_module.__dict__,
    )
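
A minimal usage sketch. The schema names and the linking module (here a hypothetical workflow.pipeline that defines Session, Equipment, VideoRecording, and get_facemap_root_data_dir) are placeholders, not part of this module:

from element_facemap import facemap_inference

facemap_inference.activate(
    "lab_facemap_inference",            # schema for the facemap_inference module
    "lab_facial_behavior_estimation",   # schema for the facial_behavior_estimation module
    linking_module="workflow.pipeline",  # hypothetical module providing the upstream tables
)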

BodyPart

Bases: Lookup

Body parts tracked by Facemap models.

Attributes:

body_part (str): Body part short name.
body_part_description (str, optional): Detailed body part description.

Source code in element_facemap/facemap_inference.py
@schema
class BodyPart(dj.Lookup):
    """Body parts tracked by Facemap models.

    Attributes:
        body_part (str): Body part short name.
        body_part_description (str, optional): Detailed body part description.

    """

    definition = """
    body_part                : varchar(32)
    ---
    body_part_description='' : varchar(1000)
    """

    # Facemap Default BodyPart list
    contents = [
        ("eye(back)", ""),
        ("eye(bottom)", ""),
        ("eye(front)", ""),
        ("eye(top)", ""),
        ("lowerlip", ""),
        ("mouth", ""),
        ("nose(bottom)", ""),
        ("nose(r)", ""),
        ("nose(tip)", ""),
        ("nose(top)", ""),
        ("nosebridge", ""),
        ("paw", ""),
        ("whisker(I)", ""),
        ("whisker(III)", ""),
        ("whisker(II)", ""),
    ]
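
The default contents above can be extended with a standard DataJoint insert; the body part below is illustrative only:

# Hypothetical additional body part beyond the Facemap defaults.
BodyPart.insert1(("ear(left)", "Left ear keypoint"), skip_duplicates=True)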

FacemapModel

Bases: Manual

Trained Models stored for facial pose inference.

Attributes:

model_id (int): User-specified unique model ID associated with a model.
model_name (str): Name of model.
model_description (str, optional): Detailed model description.

Source code in element_facemap/facemap_inference.py
@schema
class FacemapModel(dj.Manual):
    """Trained Models stored for facial pose inference.

    Attributes:
        model_id (int): User specified unique model ID associated with a model.
        model_name (str): Name of model.
        model_description (str, optional): Detailed model description.
    """

    definition = """
    model_id                  : int              # user assigned ID associated with a unique model
    ---
    model_name                : varchar(64)      # name of model 
    model_description=''      : varchar(1000)    # optional model description
    """

    class BodyPart(dj.Part):
        """Body parts associated with a given model

        Attributes:
            body_part (str): Body part name.
            body_part_description (str): Detailed body part description.
        """

        definition = """
        -> master
        -> BodyPart
        """

    class File(dj.Part):
        """Relative paths of facemap models with respect to facemap_root_data_dir

        Attributes:
            FacemapModel (foreign key): Primary key from FacemapModel.
            model_file (attach): Facemap model file.

        """

        definition = """
        -> master
        ---
        model_file: attach      # model file attachment. Stored as binary in database.
        """

    @classmethod
    def insert_new_model(
        cls,
        model_id: int,
        model_name: str,
        model_description: str,
        full_model_path: str,
    ):
        """Insert a new model into the FacemapModel table and relevant part tables.

        Args:
            model_id (int): User specified unique model ID associated with a model.
            model_name (str): Name of model.
            model_description (str): Detailed model description.
            full_model_path (str): Full path to the model file.
        """
        cls.insert1(
            dict(
                model_id=model_id,
                model_name=model_name,
                model_description=model_description,
            )
        )

        cls.BodyPart.insert(
            [
                dict(
                    model_id=model_id,
                    body_part=part,
                )
                for part in BodyPart.fetch("body_part")
            ]
        )

        cls.File.insert1(
            dict(
                model_id=model_id,
                model_file=full_model_path,
            ),
        )

BodyPart

Bases: Part

Body parts associated with a given model

Attributes:

body_part (str): Body part name.
body_part_description (str): Detailed body part description.

Source code in element_facemap/facemap_inference.py
class BodyPart(dj.Part):
    """Body parts associated with a given model

    Attributes:
        body_part (str): Body part name.
        body_part_description (str): Detailed body part description.
    """

    definition = """
    -> master
    -> BodyPart
    """

File

Bases: Part

Relative paths of facemap models with respect to facemap_root_data_dir

Attributes:

FacemapModel (foreign key): Primary key from FacemapModel.
model_file (attach): Facemap model file.

Source code in element_facemap/facemap_inference.py
class File(dj.Part):
    """Relative paths of facemap models with respect to facemap_root_data_dir

    Attributes:
        FacemapModel (foreign key): Primary key from FacemapModel.
        model_file (attach): Facemap model file.

    """

    definition = """
    -> master
    ---
    model_file: attach      # model file attachment. Stored as binary in database.
    """

insert_new_model(model_id, model_name, model_description, full_model_path) classmethod

Insert a new model into the FacemapModel table and relevant part tables.

Parameters:

model_id (int, required): User-specified unique model ID associated with a model.
model_name (str, required): Name of model.
model_description (str, required): Detailed model description.
full_model_path (str, required): Full path to the model file.
Source code in element_facemap/facemap_inference.py
@classmethod
def insert_new_model(
    cls,
    model_id: int,
    model_name: str,
    model_description: str,
    full_model_path: str,
):
    """Insert a new model into the FacemapModel table and relevant part tables.

    Args:
        model_id (int): User specified unique model ID associated with a model.
        model_name (str): Name of model.
        model_description (str): Detailed model description.
        full_model_path (str): Full path to the model file.
    """
    cls.insert1(
        dict(
            model_id=model_id,
            model_name=model_name,
            model_description=model_description,
        )
    )

    cls.BodyPart.insert(
        [
            dict(
                model_id=model_id,
                body_part=part,
            )
            for part in BodyPart.fetch("body_part")
        ]
    )

    cls.File.insert1(
        dict(
            model_id=model_id,
            model_file=full_model_path,
        ),
    )
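
A sketch of registering a trained model; the ID, name, and file path below are placeholders:

FacemapModel.insert_new_model(
    model_id=0,
    model_name="facemap_default",                            # illustrative name
    model_description="Pretrained Facemap keypoint model",
    full_model_path="/data/models/facemap_model_state.pt",   # hypothetical path
)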

FacemapInferenceTask

Bases: Manual

A pairing of video recordings and Facemap model.

Attributes:

fbe.VideoRecording (foreign key): Primary key from the VideoRecording table.
FacemapModel (foreign key): Primary key from the FacemapModel table.
facemap_inference_output_dir (str): Output directory storing the results of pose analysis.
task_mode (str): One of 'load' (load computed analysis results) or 'trigger' (trigger computation).
bbox (longblob, nullable): Bounding box for cropping the video [x1, x2, y1, y2]. If not set, the entire frame is used.
task_description (str, optional): Task description.

Source code in element_facemap/facemap_inference.py
@schema
class FacemapInferenceTask(dj.Manual):
    """A pairing of video recordings and Facemap model.

    Attributes:
        fbe.VideoRecording (foreign key): Primary key from VideoRecording table.
        FacemapModel (foreign key): Primary key from FacemapModel table.
        facemap_inference_output_dir (str): output dir storing the results of pose analysis.
        task_mode (str): One of 'load' (load computed analysis results) or 'trigger' (trigger computation).
        bbox (longblob, nullable) : Bounding box for cropping the video [x1, x2, y1, y2]. If not set, entire frame is used.
        task_description (str, optional) : Task description.
    """

    definition = """
    # Staging table for pairing of recording and Facemap model.
    -> fbe.VideoRecording
    -> FacemapModel
    ---
    facemap_inference_output_dir    : varchar(255)  # Output directory of processed results of Facemap inference analysis relative to root directory.
    task_description=''             : varchar(128)  # Optional. Additional task description
    task_mode='load'                : enum('load', 'trigger') 
    bbox=null                       : longblob  # list containing bounding box for cropping the video [x1, x2, y1, y2]
    """

    @classmethod
    def infer_output_dir(cls, key, relative=False, mkdir=False):
        """Infer an output directory for an entry in FacemapInferenceTask table.

        Args:
            key (dict): Primary key from the FacemapInferenceTask table.
            relative (bool, optional): If True, facemap_inference_output_dir is returned
                relative to facemap_root_dir. Defaults to False.
            mkdir (bool, optional): If True, create facemap_inference_output_dir. Defaults to False.

        Returns:
            dir (str): A default output directory for inference results (facemap_inference_output_dir
                in FacemapInferenceTask) based on the following convention:
                processed_dir / relative_video_dir / {facemap_recordingid}_{model_id}
                e.g.: sub1/sess1/video_files/facemap_recording_id0_model0
        """
        video_file = (fbe.VideoRecording.File & key).fetch("file_path", limit=1)[0]
        video_dir = find_full_path(get_facemap_root_data_dir(), video_file).parent
        root_dir = find_root_directory(get_facemap_root_data_dir(), video_dir)

        processed_dir = Path(get_facemap_processed_data_dir())
        output_dir = (
            processed_dir
            / video_dir.relative_to(root_dir)
            / f"facemap_recordingid{key['recording_id']}_model{key['model_id']}"
        )

        if mkdir:
            output_dir.mkdir(parents=True, exist_ok=True)

        return output_dir.relative_to(processed_dir) if relative else output_dir

    @classmethod
    def generate(
        cls,
        key,
        task_description: str = "",
        task_mode: str = "load",
        bbox: list = [],
    ):
        """Generate a unique pose estimation task for each of the relative_video_paths

        Args:
            key (dict): Primary key from FacemapInferenceTask table
                e.g.: {subject="sub1",session_id=0,recording_id=0,model_id=0}
            task_description (str, optional): Task description. Defaults to "".
            task_mode (str, optional): 'load' or 'trigger'. Defaults to 'load'.
            bbox (list, optional): Bounding box for processing. Defaults to [].
        """
        facemap_inference_output_dir = cls.infer_output_dir(key)

        cls.insert1(
            dict(
                **key,
                facemap_inference_output_dir=facemap_inference_output_dir,
                task_description=task_description,
                task_mode=task_mode,
                bbox=bbox,
            ),
        )

    insert_facemap_inference_task = generate

infer_output_dir(key, relative=False, mkdir=False) classmethod

Infer an output directory for an entry in FacemapInferenceTask table.

Parameters:

key (dict, required): Primary key from the FacemapInferenceTask table.
relative (bool, default False): If True, facemap_inference_output_dir is returned relative to facemap_root_dir.
mkdir (bool, default False): If True, create facemap_inference_output_dir.

Returns:

dir (str): A default output directory for inference results (facemap_inference_output_dir in FacemapInferenceTask) based on the following convention: processed_dir / relative_video_dir / {facemap_recordingid}_{model_id}, e.g. sub1/sess1/video_files/facemap_recording_id0_model0

Source code in element_facemap/facemap_inference.py
@classmethod
def infer_output_dir(cls, key, relative=False, mkdir=False):
    """Infer an output directory for an entry in FacemapInferenceTask table.

    Args:
        key (dict): Primary key from the FacemapInferenceTask table.
        relative (bool, optional): If True, facemap_inference_output_dir is returned
            relative to facemap_root_dir. Defaults to False.
        mkdir (bool, optional): If True, create facemap_inference_output_dir. Defaults to False.

    Returns:
        dir (str): A default output directory for inference results (facemap_inference_output_dir
            in FacemapInferenceTask) based on the following convention:
            processed_dir / relative_video_dir / {facemap_recordingid}_{model_id}
            e.g.: sub1/sess1/video_files/facemap_recording_id0_model0
    """
    video_file = (fbe.VideoRecording.File & key).fetch("file_path", limit=1)[0]
    video_dir = find_full_path(get_facemap_root_data_dir(), video_file).parent
    root_dir = find_root_directory(get_facemap_root_data_dir(), video_dir)

    processed_dir = Path(get_facemap_processed_data_dir())
    output_dir = (
        processed_dir
        / video_dir.relative_to(root_dir)
        / f"facemap_recordingid{key['recording_id']}_model{key['model_id']}"
    )

    if mkdir:
        output_dir.mkdir(parents=True, exist_ok=True)

    return output_dir.relative_to(processed_dir) if relative else output_dir

generate(key, task_description='', task_mode='load', bbox=[]) classmethod

Generate a unique pose estimation task for the given recording and model.

Parameters:

key (dict, required): Primary key from FacemapInferenceTask table, e.g. {subject="sub1", session_id=0, recording_id=0, model_id=0}.
task_description (str, default ''): Task description.
task_mode (str, default 'load'): 'load' or 'trigger'.
bbox (list, default []): Bounding box for processing.
Source code in element_facemap/facemap_inference.py
@classmethod
def generate(
    cls,
    key,
    task_description: str = "",
    task_mode: str = "load",
    bbox: list = [],
):
    """Generate a unique pose estimation task for each of the relative_video_paths

    Args:
        key (dict): Primary key from FacemapInferenceTask table
            e.g.: {subject="sub1",session_id=0,recording_id=0,model_id=0}
        task_description (str, optional): Task description. Defaults to "".
        task_mode (str, optional): 'load' or 'trigger'. Defaults to 'load'.
        bbox (list, optional): Bounding box for processing. Defaults to [].
    """
    facemap_inference_output_dir = cls.infer_output_dir(key)

    cls.insert1(
        dict(
            **key,
            facemap_inference_output_dir=facemap_inference_output_dir,
            task_description=task_description,
            task_mode=task_mode,
            bbox=bbox,
        ),
    )
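
A sketch of queueing an inference task; the key below is a placeholder and would normally come from fbe.VideoRecording * FacemapModel:

key = dict(subject="sub1", session_id=0, recording_id=0, model_id=0)  # hypothetical key
FacemapInferenceTask.generate(key, task_mode="trigger")
# insert_facemap_inference_task is an alias for generate:
# FacemapInferenceTask.insert_facemap_inference_task(key, task_mode="load")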

FacemapInference

Bases: Computed

Perform facemap pose estimation.

Attributes:

FacemapInferenceTask (foreign key): Primary key from FacemapInferenceTask.
inference_completion_time (datetime): Inference completion datetime.
inference_run_duration (float): Duration of the inference run, in seconds.
total_frame_count (int): Number of frames in all video files.

Source code in element_facemap/facemap_inference.py
@schema
class FacemapInference(dj.Computed):
    """Perform facemap pose estimation.

    Attributes:
        FacemapInferenceTask (foreign key): Primary key from FacemapInferenceTask.
        inference_completion_time (datetime): Inference completion datetime.
        inference_run_duration (float): Duration of the inference run, in seconds.
        total_frame_count (int): Number of frames in all video files.
    """

    definition = """
    -> FacemapInferenceTask
    ---
    inference_completion_time: datetime  # time of generation of this set of facemap results
    inference_run_duration: float # seconds
    total_frame_count: int          # frame count across all video files          
    """

    class BodyPartPosition(dj.Part):
        """Position of individual body parts by frame index.

        Attributes:
            FacemapInference (foreign key): Primary key from FacemapInference.
            FacemapModel.BodyPart (foreign key): Primary key from FacemapModel.BodyPart.
            x_pos (longblob): X position.
            y_pos (longblob): Y position.
            likelihood (longblob): Model confidence."""

        definition = """ # uses facemap h5 output for body part position
        -> master
        -> FacemapModel.BodyPart
        ---
        x_pos       : longblob      # x position
        y_pos       : longblob      # y position
        likelihood  : longblob      # model evaluated likelihood
        """

    def make(self, key):
        """.populate() method will launch training for each FacemapInferenceTask"""
        # ID model and directories
        task_mode, output_dir = (FacemapInferenceTask & key).fetch1(
            "task_mode", "facemap_inference_output_dir"
        )

        if not output_dir:
            output_dir = FacemapInferenceTask.infer_output_dir(
                key, relative=True, mkdir=True
            )
            # update facemap_inference_output_dir
            FacemapInferenceTask.update1(
                {**key, "facemap_inference_output_dir": output_dir.as_posix()}
            )

        output_dir = find_full_path(fbe.get_facemap_root_data_dir(), output_dir)
        video_files = (FacemapInferenceTask * fbe.VideoRecording.File & key).fetch(
            "file_path"
        )

        video_files = [
            find_full_path(fbe.get_facemap_root_data_dir(), video_file)
            for video_file in video_files
        ]
        vid_name = Path(video_files[0]).stem
        facemap_result_path = output_dir / f"{vid_name}_FacemapPose.h5"
        full_metadata_path = output_dir / f"{vid_name}_FacemapPose_metadata.pkl"

        # Load or Trigger Facemap Pose Estimation Inference
        if (
            facemap_result_path.exists() & full_metadata_path.exists()
        ) or task_mode == "load":  # Load results and do not rerun processing
            (
                body_part_position_entry,
                inference_duration,
                total_frame_count,
                creation_time,
            ) = _load_facemap_results(key, facemap_result_path, full_metadata_path)
            self.insert1(
                {
                    **key,
                    "inference_completion_time": creation_time,
                    "inference_run_duration": inference_duration,
                    "total_frame_count": total_frame_count,
                }
            )
            self.BodyPartPosition.insert(body_part_position_entry)
            return

        elif task_mode == "trigger":
            from facemap.pose import pose as facemap_pose, model_loader

            bbox = (FacemapInferenceTask & key).fetch1("bbox") or []

            # Fetch model(.pt) file attachment to present working directory
            facemap_model_name = (
                FacemapModel.File & f'model_id="{key["model_id"]}"'
            ).fetch1("model_file")

            facemap_model_path = Path.cwd() / facemap_model_name
            models_root_dir = model_loader.get_models_dir()

            # Create Symbolic Links to raw video data files from outbox directory
            video_symlinks = []
            for video_file in video_files:
                video_symlink = output_dir / video_file.name
                if video_symlink.exists():
                    video_symlink.unlink()
                video_symlink.symlink_to(video_file)
                video_symlinks.append(video_symlink.as_posix())

            # copy this model file to the facemap model root directory (~/.facemap/models/)
            shutil.copy(facemap_model_path, models_root_dir)

            # Instantiate Pose object, with filenames specified as video files, and bounding specified in params
            # Assumes GUI to be none as we are running CLI implementation
            pose = facemap_pose.Pose(
                filenames=[video_symlinks],
                model_name=facemap_model_path.stem,
                bbox=bbox,
                bbox_set=bool(bbox),
            )
            pose.run()

            (
                body_part_position_entry,
                inference_duration,
                total_frame_count,
                creation_time,
            ) = _load_facemap_results(key, facemap_result_path, full_metadata_path)
            self.insert1(
                {
                    **key,
                    "inference_completion_time": creation_time,
                    "inference_run_duration": inference_duration,
                    "total_frame_count": total_frame_count,
                }
            )
            self.BodyPartPosition.insert(body_part_position_entry)

    @classmethod
    def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame:
        """Returns a pandas dataframe of coordinates of the specified body_part(s)

        Args:
            key (dict): A DataJoint query specifying one FacemapInference entry.
            body_parts (list, optional): Body parts as a list. If "all" (default), all
                tracked body parts are returned.

        Returns:
            df: multi index pandas dataframe with Facemap model name, body_parts
                and x/y coordinates of each body part for a camera_id, similar to
                output of facemap inference data.
        """
        model_name = (FacemapModel & f'model_id={key["model_id"]}').fetch1("model_name")

        if body_parts == "all":
            body_parts = (cls.BodyPartPosition & key).fetch("body_part")
        elif not isinstance(body_parts, list):
            body_parts = list(body_parts)

        df = None
        for body_part in body_parts:
            result_dict = (
                cls.BodyPartPosition
                & {"body_part": body_part}
                & {"recording_id": key["recording_id"]}
                & {"session_id": key["session_id"]}
            ).fetch("x_pos", "y_pos", "likelihood", as_dict=True)[0]
            x_pos = result_dict["x_pos"].tolist()
            y_pos = result_dict["y_pos"].tolist()
            likelihood = result_dict["likelihood"].tolist()
            a = np.vstack((x_pos, y_pos, likelihood))
            a = a.T
            pdindex = pd.MultiIndex.from_product(
                [[model_name], [body_part], ["x", "y", "likelihood"]],
                names=["model", "bodyparts", "coords"],
            )
            frame = pd.DataFrame(a, columns=pdindex, index=range(0, a.shape[0]))
            df = pd.concat([df, frame], axis=1)
        return df

BodyPartPosition

Bases: Part

Position of individual body parts by frame index.

Attributes:

FacemapInference (foreign key): Primary key from FacemapInference.
FacemapModel.BodyPart (foreign key): Primary key from FacemapModel.BodyPart.
x_pos (longblob): X position.
y_pos (longblob): Y position.
likelihood (longblob): Model confidence.

Source code in element_facemap/facemap_inference.py
class BodyPartPosition(dj.Part):
    """Position of individual body parts by frame index.

    Attributes:
        FacemapInference (foreign key): Primary key from FacemapInference.
        FacemapModel.BodyPart (foreign key): Primary key from FacemapModel.BodyPart.
        x_pos (longblob): X position.
        y_pos (longblob): Y position.
        likelihood (longblob): Model confidence."""

    definition = """ # uses facemap h5 output for body part position
    -> master
    -> FacemapModel.BodyPart
    ---
    x_pos       : longblob      # x position
    y_pos       : longblob      # y position
    likelihood  : longblob      # model evaluated likelihood
    """

make(key)

.populate() method will launch inference for each FacemapInferenceTask

Source code in element_facemap/facemap_inference.py
def make(self, key):
    """.populate() method will launch training for each FacemapInferenceTask"""
    # ID model and directories
    task_mode, output_dir = (FacemapInferenceTask & key).fetch1(
        "task_mode", "facemap_inference_output_dir"
    )

    if not output_dir:
        output_dir = FacemapInferenceTask.infer_output_dir(
            key, relative=True, mkdir=True
        )
        # update facemap_inference_output_dir
        FacemapInferenceTask.update1(
            {**key, "facemap_inference_output_dir": output_dir.as_posix()}
        )

    output_dir = find_full_path(fbe.get_facemap_root_data_dir(), output_dir)
    video_files = (FacemapInferenceTask * fbe.VideoRecording.File & key).fetch(
        "file_path"
    )

    video_files = [
        find_full_path(fbe.get_facemap_root_data_dir(), video_file)
        for video_file in video_files
    ]
    vid_name = Path(video_files[0]).stem
    facemap_result_path = output_dir / f"{vid_name}_FacemapPose.h5"
    full_metadata_path = output_dir / f"{vid_name}_FacemapPose_metadata.pkl"

    # Load or Trigger Facemap Pose Estimation Inference
    if (
        facemap_result_path.exists() & full_metadata_path.exists()
    ) or task_mode == "load":  # Load results and do not rerun processing
        (
            body_part_position_entry,
            inference_duration,
            total_frame_count,
            creation_time,
        ) = _load_facemap_results(key, facemap_result_path, full_metadata_path)
        self.insert1(
            {
                **key,
                "inference_completion_time": creation_time,
                "inference_run_duration": inference_duration,
                "total_frame_count": total_frame_count,
            }
        )
        self.BodyPartPosition.insert(body_part_position_entry)
        return

    elif task_mode == "trigger":
        from facemap.pose import pose as facemap_pose, model_loader

        bbox = (FacemapInferenceTask & key).fetch1("bbox") or []

        # Fetch model(.pt) file attachment to present working directory
        facemap_model_name = (
            FacemapModel.File & f'model_id="{key["model_id"]}"'
        ).fetch1("model_file")

        facemap_model_path = Path.cwd() / facemap_model_name
        models_root_dir = model_loader.get_models_dir()

        # Create Symbolic Links to raw video data files from outbox directory
        video_symlinks = []
        for video_file in video_files:
            video_symlink = output_dir / video_file.name
            if video_symlink.exists():
                video_symlink.unlink()
            video_symlink.symlink_to(video_file)
            video_symlinks.append(video_symlink.as_posix())

        # copy this model file to the facemap model root directory (~/.facemap/models/)
        shutil.copy(facemap_model_path, models_root_dir)

        # Instantiate Pose object, with filenames specified as video files, and bounding specified in params
        # Assumes GUI to be none as we are running CLI implementation
        pose = facemap_pose.Pose(
            filenames=[video_symlinks],
            model_name=facemap_model_path.stem,
            bbox=bbox,
            bbox_set=bool(bbox),
        )
        pose.run()

        (
            body_part_position_entry,
            inference_duration,
            total_frame_count,
            creation_time,
        ) = _load_facemap_results(key, facemap_result_path, full_metadata_path)
        self.insert1(
            {
                **key,
                "inference_completion_time": creation_time,
                "inference_run_duration": inference_duration,
                "total_frame_count": total_frame_count,
            }
        )
        self.BodyPartPosition.insert(body_part_position_entry)
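
Inference is launched through the standard DataJoint populate call, which runs make() for each pending FacemapInferenceTask:

FacemapInference.populate(display_progress=True)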

get_trajectory(key, body_parts='all') classmethod

Returns a pandas dataframe of coordinates of the specified body_part(s)

Parameters:

key (dict, required): A DataJoint query specifying one FacemapInference entry.
body_parts (list, default "all"): Body parts as a list. If "all" (default), all tracked body parts are returned.

Returns:

df (DataFrame): Multi-index pandas dataframe with Facemap model name, body_parts, and x/y coordinates of each body part for a camera_id, similar to the output of facemap inference data.

Source code in element_facemap/facemap_inference.py
@classmethod
def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame:
    """Returns a pandas dataframe of coordinates of the specified body_part(s)

    Args:
        key (dict): A DataJoint query specifying one FacemapInference entry.
        body_parts (list, optional): Body parts as a list. If "all" (default), all
            tracked body parts are returned.

    Returns:
        df: multi index pandas dataframe with Facemap model name, body_parts
            and x/y coordinates of each body part for a camera_id, similar to
            output of facemap inference data.
    """
    model_name = (FacemapModel & f'model_id={key["model_id"]}').fetch1("model_name")

    if body_parts == "all":
        body_parts = (cls.BodyPartPosition & key).fetch("body_part")
    elif not isinstance(body_parts, list):
        body_parts = list(body_parts)

    df = None
    for body_part in body_parts:
        result_dict = (
            cls.BodyPartPosition
            & {"body_part": body_part}
            & {"recording_id": key["recording_id"]}
            & {"session_id": key["session_id"]}
        ).fetch("x_pos", "y_pos", "likelihood", as_dict=True)[0]
        x_pos = result_dict["x_pos"].tolist()
        y_pos = result_dict["y_pos"].tolist()
        likelihood = result_dict["likelihood"].tolist()
        a = np.vstack((x_pos, y_pos, likelihood))
        a = a.T
        pdindex = pd.MultiIndex.from_product(
            [[model_name], [body_part], ["x", "y", "likelihood"]],
            names=["model", "bodyparts", "coords"],
        )
        frame = pd.DataFrame(a, columns=pdindex, index=range(0, a.shape[0]))
        df = pd.concat([df, frame], axis=1)
    return df
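
A sketch of retrieving trajectories as a pandas DataFrame; the key is a placeholder:

key = dict(subject="sub1", session_id=0, recording_id=0, model_id=0)  # hypothetical key
df = FacemapInference.get_trajectory(key)  # all body parts by default
# Columns are a MultiIndex (model, bodyparts, coords); e.g. select all x coordinates:
x_coords = df.xs("x", level="coords", axis=1)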