@@ -57,46 +57,6 @@ def run(self, faces, criteria, order, take_start, take_count):
         rest = sorted_faces[:take_start] + sorted_faces[take_start + take_count:]
         return (filtered, rest)
 
-# class FaceDetailsDEPRECATED:
-#     @classmethod
-#     def INPUT_TYPES(cls):
-#         return {
-#             'required': {
-#                 'faces': ('FACE',),
-#                 'crop_size': ('INT', {'default': 512, 'min': 512, 'max': 1024, 'step': 128}),
-#                 'crop_factor': ('FLOAT', {'default': 1.5, 'min': 1.0, 'max': 3, 'step': 0.1}),
-#                 'mask_type': (mask_types,)
-#             }
-#         }
-
-#     RETURN_TYPES = ('IMAGE', 'MASK', 'WARP')
-#     RETURN_NAMES = ('crops', 'masks', 'warps')
-#     FUNCTION = 'run'
-#     CATEGORY = 'facetools'
-
-#     def run(self, faces, crop_size, crop_factor, mask_type):
-#         if len(faces) == 0:
-#             empty_crop = torch.zeros((1,512,512,3))
-#             empty_mask = torch.zeros((1,512,512))
-#             empty_warp = np.array([
-#                 [1,0,-512],
-#                 [0,1,-512],
-#             ], dtype=np.float32)
-#             return (empty_crop, empty_mask, [empty_warp])
-
-#         crops = []
-#         masks = []
-#         warps = []
-#         for face in faces:
-#             M, crop = crop_faces(face.image, face, crop_size, crop_factor)
-#             mask = mask_crop(face, M, crop, mask_type)
-#             crops.append(np.array(crop) / 255)
-#             masks.append(np.array(mask))
-#             warps.append(M)
-#         crops = torch.from_numpy(np.array(crops)).type(torch.float32)
-#         masks = torch.from_numpy(np.array(masks)).type(torch.float32)
-#         return (crops, masks, warps)
-
 class DetectFaces:
     @classmethod
     def INPUT_TYPES(cls):
@@ -131,7 +91,7 @@ def run(self, image, threshold, min_size, max_size, mask=None):
                 w = abs(c - a)
                 if (h <= max_size or w <= max_size) and (min_size <= h or min_size <= w):
                     face.image_idx = i
-                    face.image = image[i]
+                    face.img = image[i]
                     faces.append(face)
         return (faces,)
 
@@ -166,47 +126,14 @@ def run(self, faces, crop_size, crop_factor, mask_type):
         masks = []
         warps = []
         for face in faces:
-            M, crop, maskedcrop = face.crop(crop_size, crop_factor)
-            mask = mask_crop(face, M, maskedcrop, mask_type)
-            crops.append(np.array(crop))
-            masks.append(np.array(mask))
+            M, crop = face.crop(crop_size, crop_factor)
+            mask = mask_crop(face, M, crop, mask_type)
+            crops.append(np.array(crop[0]))
+            masks.append(np.array(mask[0]))
             warps.append(M)
         crops = torch.from_numpy(np.array(crops)).type(torch.float32)
         masks = torch.from_numpy(np.array(masks)).type(torch.float32)
         return (crops, masks, warps)
-
-# class AlignFacesDEPRECATED:
-#     @classmethod
-#     def INPUT_TYPES(cls):
-#         return {
-#             'required': {
-#                 'insightface': ('INSIGHTFACE',),
-#                 'image': ('IMAGE',),
-#                 'threshold': ('FLOAT', {'default': 0.5, 'min': 0.5, 'max': 1.0, 'step': 0.01}),
-#                 'min_size': ('INT', {'default': 64, 'max': 512, 'step': 8}),
-#                 'max_size': ('INT', {'default': 512, 'min': 512, 'step': 8}),
-#             }
-#         }
-
-#     RETURN_TYPES = ('FACE',)
-#     RETURN_NAMES = ('faces',)
-#     FUNCTION = 'run'
-#     CATEGORY = 'facetools'
-
-#     def run(self, insightface, image, threshold, min_size, max_size):
-#         faces = []
-#         images = (image * 255).type(torch.uint8).numpy()
-#         for i, img in enumerate(images):
-#             unfiltered_faces = get_faces(img, insightface)
-#             for face in unfiltered_faces:
-#                 a, b, c, d = face.bbox
-#                 h = abs(d-b)
-#                 w = abs(c-a)
-#                 if face.det_score >= threshold and (h <= max_size or w <= max_size) and (min_size <= h or min_size <= w):
-#                     face.image_idx = i
-#                     face.image = img
-#                     faces.append(face)
-#         return (faces,)
 
 class WarpFaceBack:
     RETURN_TYPES = ('IMAGE',)
@@ -228,14 +155,7 @@ def INPUT_TYPES(cls):
     def run(self, images, face, crop, mask, warp):
         groups = defaultdict(list)
         for f,c,m,w in zip(face, crop, mask, warp):
-            # gray = cv2.cvtColor(c.numpy(), cv2.COLOR_RGB2GRAY)
-            # _, gray = cv2.threshold(gray, 0.01, 1, cv2.THRESH_BINARY)
-            # gray = gray.astype(np.uint8)
-            # cnts, _ = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
-            # largeCnts = list(filter(lambda x: cv2.contourArea(x) > 10000, cnts))
-            # gray = cv2.drawContours(gray, largeCnts, -1, 1, -1)
-            # m *= torch.from_numpy(gray)
-            groups[f.image_idx].append((f.image,c,m,w))
+            groups[f.image_idx].append((f.img,c,m,w))
 
         results = []
         for i, image in enumerate(images):
@@ -287,13 +207,89 @@ def run(self, crop0, mask0, warp0, crop1, mask1, warp1):
         masks = torch.vstack((mask0, mask1))
         warps = warp0 + warp1
         return (crops, masks, warps)
+
+class BiSeNetMask:
+    RETURN_TYPES = ('MASK',)
+    FUNCTION = 'run'
+    CATEGORY = 'facetools'
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            'required': {
+                'crop': ('IMAGE',),
+                'skin': ('BOOLEAN', {'default': True}),
+                'left_brow': ('BOOLEAN', {'default': True}),
+                'right_brow': ('BOOLEAN', {'default': True}),
+                'left_eye': ('BOOLEAN', {'default': True}),
+                'right_eye': ('BOOLEAN', {'default': True}),
+                'eyeglasses': ('BOOLEAN', {'default': True}),
+                'left_ear': ('BOOLEAN', {'default': True}),
+                'right_ear': ('BOOLEAN', {'default': True}),
+                'earring': ('BOOLEAN', {'default': True}),
+                'nose': ('BOOLEAN', {'default': True}),
+                'mouth': ('BOOLEAN', {'default': True}),
+                'upper_lip': ('BOOLEAN', {'default': True}),
+                'lower_lip': ('BOOLEAN', {'default': True}),
+                'neck': ('BOOLEAN', {'default': False}),
+                'necklace': ('BOOLEAN', {'default': False}),
+                'cloth': ('BOOLEAN', {'default': False}),
+                'hair': ('BOOLEAN', {'default': False}),
+                'hat': ('BOOLEAN', {'default': False}),
+            }
+        }
 
+    def run(self, crop, skin, left_brow, right_brow, left_eye, right_eye, eyeglasses,
+            left_ear, right_ear, earring, nose, mouth, upper_lip, lower_lip,
+            neck, necklace, cloth, hair, hat):
+        masks = mask_BiSeNet(crop, skin, left_brow, right_brow, left_eye, right_eye, eyeglasses,
+                             left_ear, right_ear, earring, nose, mouth, upper_lip, lower_lip,
+                             neck, necklace, cloth, hair, hat)
+        return (masks, )
+
+class JonathandinuMask:
+    RETURN_TYPES = ('MASK',)
+    FUNCTION = 'run'
+    CATEGORY = 'facetools'
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            'required': {
+                'crop': ('IMAGE',),
+                'skin': ('BOOLEAN', {'default': True}),
+                'nose': ('BOOLEAN', {'default': True}),
+                'eyeglasses': ('BOOLEAN', {'default': False}),
+                'left_eye': ('BOOLEAN', {'default': True}),
+                'right_eye': ('BOOLEAN', {'default': True}),
+                'left_brow': ('BOOLEAN', {'default': True}),
+                'right_brow': ('BOOLEAN', {'default': True}),
+                'left_ear': ('BOOLEAN', {'default': True}),
+                'right_ear': ('BOOLEAN', {'default': True}),
+                'mouth': ('BOOLEAN', {'default': True}),
+                'upper_lip': ('BOOLEAN', {'default': True}),
+                'lower_lip': ('BOOLEAN', {'default': True}),
+                'hair': ('BOOLEAN', {'default': False}),
+                'hat': ('BOOLEAN', {'default': False}),
+                'earring': ('BOOLEAN', {'default': False}),
+                'necklace': ('BOOLEAN', {'default': False}),
+                'neck': ('BOOLEAN', {'default': False}),
+                'cloth': ('BOOLEAN', {'default': False}),
+            }
+        }
+
+    def run(self, crop, skin, nose, eyeglasses, left_eye, right_eye, left_brow, right_brow, left_ear, right_ear,
+            mouth, upper_lip, lower_lip, hair, hat, earring, necklace, neck, cloth):
+        masks = mask_jonathandinu(crop, skin, nose, eyeglasses, left_eye, right_eye, left_brow, right_brow, left_ear, right_ear,
+                                  mouth, upper_lip, lower_lip, hair, hat, earring, necklace, neck, cloth)
+        return (masks, )
+
 NODE_CLASS_MAPPINGS = {
     'DetectFaces': DetectFaces,
     'CropFaces': CropFaces,
-    # 'AlignFaces': AlignFacesDEPRECATED,
     'WarpFacesBack': WarpFaceBack,
-    # 'FaceDetails': FaceDetailsDEPRECATED,
+    'BiSeNetMask': BiSeNetMask,
+    'JonathandinuMask': JonathandinuMask,
     'MergeWarps': MergeWarps,
     'GenderFaceFilter': GenderFaceFilter,
     'OrderedFaceFilter': OrderedFaceFilter,
@@ -302,9 +298,9 @@ def run(self, crop0, mask0, warp0, crop1, mask1, warp1):
 NODE_DISPLAY_NAME_MAPPINGS = {
     'DetectFaces': 'DetectFaces',
     'CropFaces': 'CropFaces',
-    # 'AlignFaces': 'Align Faces (DEPRECATED)',
     'WarpFacesBack': 'Warp Faces Back',
-    # 'FaceDetails': 'Face Details (DEPRECATED)',
+    'BiSeNetMask': 'BiSeNet Mask',
+    'JonathandinuMask': 'Jonathandinu Mask',
     'MergeWarps': 'Merge Warps',
     'GenderFaceFilter': 'Gender Face Filter',
     'OrderedFaceFilter': 'Ordered Face Filter',
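
For readers unfamiliar with the ComfyUI node convention that the two new classes follow, the sketch below is a minimal, self-contained illustration of the same pattern and is not part of this commit; ToyInvertMask and the direct run() call are hypothetical. A node declares RETURN_TYPES, FUNCTION, and CATEGORY, describes its sockets in INPUT_TYPES, and returns a tuple matching RETURN_TYPES, which is how BiSeNetMask and JonathandinuMask above are wired up and registered in the mapping dicts.

import torch

class ToyInvertMask:
    # Hypothetical example node; mirrors the structure of BiSeNetMask/JonathandinuMask.
    RETURN_TYPES = ('MASK',)
    FUNCTION = 'run'          # name of the method ComfyUI will call
    CATEGORY = 'facetools'

    @classmethod
    def INPUT_TYPES(cls):
        # ComfyUI builds the node's input sockets and widgets from this dict.
        return {
            'required': {
                'mask': ('MASK',),
                'enabled': ('BOOLEAN', {'default': True}),
            }
        }

    def run(self, mask, enabled):
        # Nodes return a tuple whose elements correspond to RETURN_TYPES.
        return (1.0 - mask if enabled else mask,)

# Outside a ComfyUI graph the node is plain Python, so it can be exercised directly:
node = ToyInvertMask()
(inverted,) = node.run(torch.zeros(1, 512, 512), enabled=True)
print(inverted.shape)  # torch.Size([1, 512, 512])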