
Commit 2280757

added the DeepfaceAnalyzeFaceAttributes node.
1 parent de16f8b commit 2280757

File tree

8 files changed: +397 −14 lines changed


README.md

Lines changed: 12 additions & 0 deletions
@@ -76,3 +76,15 @@ From initial testing, the filtering effect is better than classifier models such
 
 <img src="assets/detectorForNSFW.png" width="100%"/>
 You can also adjust the confidence levels for various rules, such as buttocks_exposed, to be more lenient or strict. Lower confidence levels will filter out more potentially NSFW images. Setting the value to 1 will stop filtering for that specific feature.
+
+## DeepfaceAnalyzeFaceAttributes
+This node integrates the [deepface](https://github.com/serengil/deepface) library to analyze face attributes (gender, race, emotion, age). It analyzes only the largest face in the image and processes one image at a time.
+<img src="assets/deepfaceAnalyzeFaceAttributes.png" width="100%"/>
+
+If the input image is a standard square face image, you can enable the standard_single_face_image switch. In that case, the node skips face detection and analyzes the attributes directly.
+
+Upon the first run, the node will download the [deepface](https://github.com/serengil/deepface) models, which may take some time.
+
+Note: If you encounter the following exception while running the node:
+ValueError: The layer sequential has never been called and thus has no defined input.
+please set the environment variable TF_USE_LEGACY_KERAS to 1, then restart ComfyUI.
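For reference, a minimal sketch of how this variable can be pinned from Python before TensorFlow/Keras is imported; this mirrors what py/node_face_attributes.py in this commit does at import time, and exporting the variable in your shell or launch script before starting ComfyUI works as well:

```python
import os

# Must run before tensorflow/keras is imported anywhere in the process,
# otherwise deepface can still fail with the
# "layer sequential has never been called" error.
os.environ["TF_USE_LEGACY_KERAS"] = "1"
```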

py/node_face_attributes.py

Lines changed: 141 additions & 0 deletions
@@ -0,0 +1,141 @@
import os
os.environ["TF_USE_LEGACY_KERAS"] = "1"

import numpy as np
from typing import Union, List, Dict, Any
from .utils import tensor2np, np2tensor
from ..r_deepface import demography

import folder_paths
import json
import logging
logger = logging.getLogger(__file__)


def prepare_deepface_home():
    deepface_path = os.path.join(folder_paths.models_dir, "deepface")

    # Deepface requires a specific structure within the DEEPFACE_HOME directory
    deepface_dot_path = os.path.join(deepface_path, ".deepface")
    deepface_weights_path = os.path.join(deepface_dot_path, "weights")
    if not os.path.exists(deepface_weights_path):
        os.makedirs(deepface_weights_path)

    os.environ["DEEPFACE_HOME"] = deepface_path


def get_largest_face(faces):
    largest_face = {}
    largest_area = 0
    if len(faces) == 1:
        return faces[0]

    for face in faces:
        if 'region' in face:
            w = face['region']['w']
            h = face['region']['h']
            area = w * h
            if area > largest_area:
                largest_area = area
                largest_face = face
    return largest_face


class DeepfaceAnalyzeFaceAttributes:
    '''
    - 'gender' (str): The gender of the detected face: "M" or "F".

    - 'emotion' (str): The emotion of the detected face.
      Possible values include "sad", "angry", "surprise", "fear", "happy",
      "disgust" and "neutral".

    - 'race' (str): The race of the detected face.
      Possible values include "indian", "asian", "latino hispanic",
      "black", "middle eastern" and "white".
    '''

    def __init__(self) -> None:
        prepare_deepface_home()

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "detector_backend": ([
                    "opencv",
                    "ssd",
                    "dlib",
                    "mtcnn",
                    "retinaface",
                    "mediapipe",
                    "yolov8",
                    "yunet",
                    "fastmtcnn",
                ], {
                    "default": "yolov8",
                }),
            },
            "optional": {
                "analyze_gender": ("BOOLEAN", {"default": True}),
                "analyze_race": ("BOOLEAN", {"default": True}),
                "analyze_emotion": ("BOOLEAN", {"default": True}),
                "analyze_age": ("BOOLEAN", {"default": True}),
                "standard_single_face_image": ("BOOLEAN", {"default": False}),
            },
        }

    RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING")
    RETURN_NAMES = ("gender", "race", "emotion", "age", "json_info")
    FUNCTION = "analyze_face"
    CATEGORY = "utils/face"

    def analyze_face(self, image, detector_backend, analyze_gender=True, analyze_race=True, analyze_emotion=True, analyze_age=True, standard_single_face_image=False):
        # Convert the image tensor to a numpy array (or a list of arrays for a batch)
        img_np = tensor2np(image)
        if isinstance(img_np, List):
            if len(img_np) > 1:
                logger.warning("DeepfaceAnalyzeFaceAttributes only supports one image and only analyzes the largest face.")
            img_np = img_np[0]

        # Build the list of attributes to analyze
        actions = []
        if analyze_gender:
            actions.append("gender")
        if analyze_race:
            actions.append("race")
        if analyze_emotion:
            actions.append("emotion")
        if analyze_age:
            actions.append("age")

        # Run the deepface analysis
        results = demography.analyze(img_np, actions=actions, detector_backend=detector_backend, enforce_detection=False, is_single_face_image=standard_single_face_image)

        # Keep only the face with the largest area
        largest_face = get_largest_face(results)

        if not standard_single_face_image and largest_face.get("face_confidence") == 0:
            largest_face = {}

        gender_map = {"Woman": "F", "Man": "M", '': ''}
        # Extract the results
        gender = gender_map.get(largest_face.get('dominant_gender', ''), '') if analyze_gender else ''
        race = largest_face.get('dominant_race', '') if analyze_race else ''
        emotion = largest_face.get('dominant_emotion', '') if analyze_emotion else ''
        age = str(largest_face.get('age', '0')) if analyze_age else '0'

        json_info = json.dumps(largest_face)
        return (gender, race, emotion, age, json_info)


NODE_CLASS_MAPPINGS = {
    # image
    "DeepfaceAnalyzeFaceAttributes": DeepfaceAnalyzeFaceAttributes,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    # Image
    "DeepfaceAnalyzeFaceAttributes": "Deepface Analyze Face Attributes",
}
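As a quick illustration of how get_largest_face selects among multiple detections, here is a minimal sketch using hypothetical deepface-style result dictionaries (real results also carry keys such as dominant_gender, dominant_race, age, and face_confidence):

```python
# Hypothetical detections: each entry has a 'region' bounding box, as deepface returns.
faces = [
    {"region": {"x": 10, "y": 10, "w": 64, "h": 64}, "dominant_gender": "Man"},
    {"region": {"x": 200, "y": 40, "w": 128, "h": 160}, "dominant_gender": "Woman"},
]

largest = get_largest_face(faces)
print(largest["region"]["w"], largest["region"]["h"])  # 128 160 -- the larger-area face wins
```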

py/node_nsfw.py

Lines changed: 2 additions & 11 deletions
@@ -8,21 +8,12 @@
 from typing import Union, List
 import json
 import logging
+from .utils import tensor2np, np2tensor
+
 logger = logging.getLogger(__file__)
 
 comfy_paths.folder_names_and_paths["nsfw"] = ([os.path.join(models_dir, "nsfw")], {".pt",".onnx"})
 
-def tensor2np(tensor: torch.Tensor):
-    if len(tensor.shape) == 3:  # Single image
-        return np.clip(255.0 * tensor.cpu().numpy(), 0, 255).astype(np.uint8)
-    else:  # Batch of images
-        return [np.clip(255.0 * t.cpu().numpy(), 0, 255).astype(np.uint8) for t in tensor]
-
-def np2tensor(img_np: Union[np.ndarray, List[np.ndarray]]) -> torch.Tensor:
-    if isinstance(img_np, list):
-        return torch.cat([np2tensor(img) for img in img_np], dim=0)
-    return torch.from_numpy(img_np.astype(np.float32) / 255.0).unsqueeze(0)
-
 
 class DetectorForNSFW:

py/utils.py

Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
import cv2
import numpy as np
from typing import Union, List
import torch


def tensor2np(tensor: torch.Tensor):
    if len(tensor.shape) == 3:  # Single image
        return np.clip(255.0 * tensor.cpu().numpy(), 0, 255).astype(np.uint8)
    else:  # Batch of images
        return [np.clip(255.0 * t.cpu().numpy(), 0, 255).astype(np.uint8) for t in tensor]


def np2tensor(img_np: Union[np.ndarray, List[np.ndarray]]) -> torch.Tensor:
    if isinstance(img_np, list):
        return torch.cat([np2tensor(img) for img in img_np], dim=0)
    return torch.from_numpy(img_np.astype(np.float32) / 255.0).unsqueeze(0)
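A small round-trip sketch of these helpers, assuming they are in scope and that images follow ComfyUI's IMAGE convention of float tensors shaped [batch, height, width, channels] with values in 0..1:

```python
import torch

# Single image: [H, W, C] float tensor in 0..1 -> uint8 ndarray in 0..255
img = torch.rand(64, 64, 3)
arr = tensor2np(img)
restored = np2tensor(arr)          # float32 tensor with batch dim, shape [1, 64, 64, 3]

# Batched images: [B, H, W, C] -> list of uint8 ndarrays, and back to [B, H, W, C]
batch = torch.rand(2, 64, 64, 3)
arrs = tensor2np(batch)
restored_batch = np2tensor(arrs)   # shape [2, 64, 64, 3]
```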

pyproject.toml

Lines changed: 2 additions & 2 deletions
@@ -1,7 +1,7 @@
 [project]
 name = "comfyui-utils-nodes"
-description = "Nodes:LoadImageWithSwitch, ImageBatchOneOrMore, ModifyTextGender, GenderControlOutput, ImageCompositeMaskedWithSwitch, ColorCorrectOfUtils, SplitMask, MaskFastGrow, CheckpointLoaderSimpleWithSwitch, ImageResizeTo8x, MatchImageRatioToPreset, UpscaleImageWithModelIfNeed, MaskFromFaceModel, MaskCoverFourCorners, DetectorForNSFW etc."
-version = "1.1.5"
+description = "Nodes:LoadImageWithSwitch, ImageBatchOneOrMore, ModifyTextGender, GenderControlOutput, ImageCompositeMaskedWithSwitch, ColorCorrectOfUtils, SplitMask, MaskFastGrow, CheckpointLoaderSimpleWithSwitch, ImageResizeTo8x, MatchImageRatioToPreset, UpscaleImageWithModelIfNeed, MaskFromFaceModel, MaskCoverFourCorners, DetectorForNSFW, DeepfaceAnalyzeFaceAttributes etc."
+version = "1.1.6"
 license = { file = "LICENSE" }
 dependencies = []
