From 1d1face00d9476896e7857d3976afce383585d1b Mon Sep 17 00:00:00 2001 From: torzdf <36920800+torzdf@users.noreply.github.com> Date: Mon, 31 Oct 2022 18:25:32 +0000 Subject: [PATCH] Update Face Filter - Remove old face filter - plugins.extract.pipeline: Expose plugins directly - Change `is_aligned` from plugin level to ExtractMedia level - Allow extract pipeline to take faceswap aligned images - Add ability for recognition plugins to accept aligned faces as input - Add face filter to recognition plugin - Move extractor pipeline IO ops to own class --- lib/align/alignments.py | 1 + lib/align/detected_face.py | 3 +- lib/cli/args.py | 24 +- lib/face_filter.py | 181 --------- lib/image.py | 30 +- locales/es/LC_MESSAGES/lib.cli.args.mo | Bin 45184 -> 46565 bytes locales/es/LC_MESSAGES/lib.cli.args.po | 236 ++++++----- locales/lib.cli.args.pot | 196 +++++---- locales/ru/LC_MESSAGES/lib.cli.args.mo | Bin 58943 -> 60859 bytes locales/ru/LC_MESSAGES/lib.cli.args.po | 235 ++++++----- plugins/extract/align/_base.py | 11 +- plugins/extract/detect/_base.py | 4 + plugins/extract/mask/_base.py | 17 +- plugins/extract/pipeline.py | 90 +++-- plugins/extract/recognition/_base.py | 261 +++++++++++- scripts/extract.py | 539 ++++++++++++++++++++----- scripts/fsmedia.py | 160 -------- tools/manual/manual.py | 3 +- tools/mask/mask.py | 22 +- 19 files changed, 1187 insertions(+), 826 deletions(-) delete mode 100644 lib/face_filter.py diff --git a/lib/align/alignments.py b/lib/align/alignments.py index 560b57e..cc1c52e 100644 --- a/lib/align/alignments.py +++ b/lib/align/alignments.py @@ -68,6 +68,7 @@ class PNGHeaderSourceDict(TypedDict): face_index: int source_filename: str source_is_video: bool + source_frame_dims: Optional[Tuple[int, int]] class AlignmentDict(TypedDict): diff --git a/lib/align/detected_face.py b/lib/align/detected_face.py index c6dfe45..74ca9a9 100644 --- a/lib/align/detected_face.py +++ b/lib/align/detected_face.py @@ -1079,7 +1079,8 @@ def update_legacy_png_header(filename: str, alignments: Alignments original_filename=orig_filename, face_index=face_idx, source_filename=src_fname, - source_is_video=False)) # Can't check so set false + source_is_video=False, # Can't check so set false + source_frame_dims=None)) out_filename = f"{os.path.splitext(filename)[0]}.png" # Make sure saved file is png out_image = encode_image(in_image, ".png", metadata=meta) diff --git a/lib/cli/args.py b/lib/cli/args.py index 1878ca9..fad3634 100644 --- a/lib/cli/args.py +++ b/lib/cli/args.py @@ -521,11 +521,10 @@ class ExtractArgs(ExtractConvertArgs): default=None, nargs="+", group=_("Face Processing"), - help=_("Optionally filter out people who you do not wish to process by passing in an " - "image of that person. Should be a front portrait with a single person in the " - "image. Multiple images can be added space separated. NB: Using face filter " - "will significantly decrease extraction speed and its accuracy cannot be " - "guaranteed."))) + help=_("Optionally filter out people who you do not wish to extract by passing in " + "images of those people. Should be a small variety of images at different " + "angles and in different conditions. Multiple images can be added space " + "separated."))) argument_list.append(dict( opts=("-f", "--filter"), action=FilesFullPaths, @@ -534,11 +533,10 @@ class ExtractArgs(ExtractConvertArgs): default=None, nargs="+", group=_("Face Processing"), - help=_("Optionally select people you wish to process by passing in an image of that " - "person. 
Should be a front portrait with a single person in the image. " - "Multiple images can be added space separated. NB: Using face filter will " - "significantly decrease extraction speed and its accuracy cannot be " - "guaranteed."))) + help=_("Optionally select people you wish to extract by passing in images of that " + "person. Should be a small variety of images at different angles and in " + "different conditions. Multiple identities can be filtered. Multiple images " + "can be added space separated."))) argument_list.append(dict( opts=("-l", "--ref_threshold"), action=Slider, @@ -546,12 +544,10 @@ class ExtractArgs(ExtractConvertArgs): rounding=2, type=float, dest="ref_threshold", - default=0.4, + default=0.65, group=_("Face Processing"), help=_("For use with the optional nfilter/filter files. Threshold for positive face " - "recognition. Lower values are stricter. NB: Using face filter will " - "significantly decrease extraction speed and its accuracy cannot be " - "guaranteed."))) + "recognition. Higher values are stricter."))) argument_list.append(dict( opts=("-sz", "--size"), action=Slider, diff --git a/lib/face_filter.py b/lib/face_filter.py deleted file mode 100644 index c6589a3..0000000 --- a/lib/face_filter.py +++ /dev/null @@ -1,181 +0,0 @@ -#!/usr/bin python3 -""" Face Filterer for extraction in faceswap.py """ - -import logging - -from lib.align import AlignedFace -from lib.vgg_face import VGGFace -from lib.image import read_image -from plugins.extract.pipeline import Extractor, ExtractMedia - -logger = logging.getLogger(__name__) # pylint: disable=invalid-name - - -def avg(arr): - """ Return an average """ - return sum(arr) * 1.0 / len(arr) - - -class FaceFilter(): - """ Face filter for extraction - NB: we take only first face, so the reference file should only contain one face. """ - - def __init__(self, reference_file_paths, nreference_file_paths, detector, aligner, - multiprocess=False, threshold=0.4): - logger.debug("Initializing %s: (reference_file_paths: %s, nreference_file_paths: %s, " - "detector: %s, aligner: %s, multiprocess: %s, threshold: %s)", - self.__class__.__name__, reference_file_paths, nreference_file_paths, - detector, aligner, multiprocess, threshold) - self.vgg_face = VGGFace() - self.filters = self.load_images(reference_file_paths, nreference_file_paths) - # TODO Revert face-filter to use the selected detector and aligner. - # Currently Tensorflow does not release vram after it has been allocated - # Whilst this vram can still be used, the pipeline for the extraction process can't see - # it so thinks there is not enough vram available. - # Either the pipeline will need to be changed to be re-usable by face-filter and extraction - # Or another vram measurement technique will need to be implemented to for when tensorflow - # has already performed allocation. For now we force CPU detectors. 
- - # self.align_faces(detector, aligner, multiprocess) - self.align_faces("cv2-dnn", "cv2-dnn", "none", multiprocess) - - self.get_filter_encodings() - self.threshold = threshold - logger.debug("Initialized %s", self.__class__.__name__) - - @staticmethod - def load_images(reference_file_paths, nreference_file_paths): - """ Load the images """ - retval = dict() - for fpath in reference_file_paths: - retval[fpath] = {"image": read_image(fpath, raise_error=True), - "type": "filter"} - for fpath in nreference_file_paths: - retval[fpath] = {"image": read_image(fpath, raise_error=True), - "type": "nfilter"} - logger.debug("Loaded filter images: %s", {k: v["type"] for k, v in retval.items()}) - return retval - - # Extraction pipeline - def align_faces(self, detector_name, aligner_name, masker_name, multiprocess): - """ Use the requested detectors to retrieve landmarks for filter images """ - extractor = Extractor(detector_name, - aligner_name, - masker_name, - multiprocess=multiprocess) - self.run_extractor(extractor) - del extractor - self.load_aligned_face() - - def run_extractor(self, extractor): - """ Run extractor to get faces """ - for _ in range(extractor.passes): - extractor.launch() - self.queue_images(extractor) - for faces in extractor.detected_faces(): - filename = faces.filename - detected_faces = faces.detected_faces - if len(detected_faces) > 1: - logger.warning("Multiple faces found in %s file: '%s'. Using first detected " - "face.", self.filters[filename]["type"], filename) - self.filters[filename]["detected_face"] = detected_faces[0] - - def queue_images(self, extractor): - """ queue images for detection and alignment """ - in_queue = extractor.input_queue - for fname, img in self.filters.items(): - logger.debug("Adding to filter queue: '%s' (%s)", fname, img["type"]) - feed_dict = ExtractMedia(fname, img["image"], detected_faces=img.get("detected_faces")) - logger.debug("Queueing filename: '%s' items: %s", fname, feed_dict) - in_queue.put(feed_dict) - logger.debug("Sending EOF to filter queue") - in_queue.put("EOF") - - def load_aligned_face(self): - """ Align the faces for vgg_face input """ - for filename, face in self.filters.items(): - logger.debug("Loading aligned face: '%s'", filename) - image = face["image"] - detected_face = face["detected_face"] - detected_face.load_aligned(image, centering="legacy", size=224) - face["face"] = detected_face.aligned.face - del face["image"] - logger.debug("Loaded aligned face: ('%s', shape: %s)", - filename, face["face"].shape) - - def get_filter_encodings(self): - """ Return filter face encodings from Keras VGG Face """ - for filename, face in self.filters.items(): - logger.debug("Getting encodings for: '%s'", filename) - encodings = self.vgg_face.predict(face["face"]) - logger.debug("Filter Filename: %s, encoding shape: %s", filename, encodings.shape) - face["encoding"] = encodings - del face["face"] - - def check(self, image, detected_face): - """ Check the extracted Face - - Parameters - ---------- - image: :class:`numpy.ndarray` - The original frame that contains the face to be checked - detected_face: :class:`lib.align.DetectedFace` - The detected face object that contains the face to be checked - - Returns - ------- - bool - ``True`` if the face matches a filter otherwise ``False`` - """ - logger.trace("Checking face with FaceFilter") - distances = {"filter": list(), "nfilter": list()} - feed = AlignedFace(detected_face.landmarks_xy, image=image, size=224, centering="legacy") - encodings = self.vgg_face.predict(feed.face) - for 
filt in self.filters.values(): - similarity = self.vgg_face.find_cosine_similiarity(filt["encoding"], encodings) - distances[filt["type"]].append(similarity) - - avgs = {key: avg(val) if val else None for key, val in distances.items()} - mins = {key: min(val) if val else None for key, val in distances.items()} - # Filter - if distances["filter"] and avgs["filter"] > self.threshold: - msg = "Rejecting filter face: {} > {}".format(round(avgs["filter"], 2), self.threshold) - retval = False - # nFilter no Filter - elif not distances["filter"] and avgs["nfilter"] < self.threshold: - msg = "Rejecting nFilter face: {} < {}".format(round(avgs["nfilter"], 2), - self.threshold) - retval = False - # Filter with nFilter - elif distances["filter"] and distances["nfilter"] and mins["filter"] > mins["nfilter"]: - msg = ("Rejecting face as distance from nfilter sample is smaller: (filter: {}, " - "nfilter: {})".format(round(mins["filter"], 2), round(mins["nfilter"], 2))) - retval = False - elif distances["filter"] and distances["nfilter"] and avgs["filter"] > avgs["nfilter"]: - msg = ("Rejecting face as average distance from nfilter sample is smaller: (filter: " - "{}, nfilter: {})".format(round(mins["filter"], 2), round(mins["nfilter"], 2))) - retval = False - elif distances["filter"] and distances["nfilter"]: - # k-nearest-neighbor classifier - var_k = min(5, min(len(distances["filter"]), len(distances["nfilter"])) + 1) - var_n = sum(list(map(lambda x: x[0], - list(sorted([(1, d) for d in distances["filter"]] + - [(0, d) for d in distances["nfilter"]], - key=lambda x: x[1]))[:var_k]))) - ratio = var_n/var_k - if ratio < 0.5: - msg = ("Rejecting face as k-nearest neighbors classification is less than " - "0.5: {}".format(round(ratio, 2))) - retval = False - else: - msg = None - retval = True - else: - msg = None - retval = True - if msg: - logger.verbose(msg) - else: - logger.trace("Accepted face: (similarity: %s, threshold: %s)", - distances, self.threshold) - return retval diff --git a/lib/image.py b/lib/image.py index 2aed239..36c60b8 100644 --- a/lib/image.py +++ b/lib/image.py @@ -11,7 +11,7 @@ import sys from ast import literal_eval from bisect import bisect from concurrent import futures -from typing import Optional +from typing import Optional, TYPE_CHECKING, Union from zlib import crc32 import cv2 @@ -24,6 +24,9 @@ from lib.multithreading import MultiThread from lib.queue_manager import queue_manager, QueueEmpty from lib.utils import convert_to_secs, FaceswapError, _video_extensions, get_image_paths +if TYPE_CHECKING: + from lib.align.alignments import PNGHeaderDict + logger = logging.getLogger(__name__) # pylint:disable=invalid-name # ################### # @@ -552,7 +555,9 @@ def update_existing_metadata(filename, metadata): os.replace(tmp_filename, filename) -def encode_image(image, extension, metadata=None): +def encode_image(image: np.ndarray, + extension: str, + metadata: Optional["PNGHeaderDict"] = None) -> bytes: """ Encode an image. Parameters @@ -580,7 +585,7 @@ def encode_image(image, extension, metadata=None): raise ValueError("Metadata is only supported for .png images") retval = cv2.imencode(extension, image)[1] if metadata: - retval = np.frombuffer(png_write_meta(retval.tobytes(), metadata), dtype="uint8") + retval = png_write_meta(retval.tobytes(), metadata) return retval @@ -1032,7 +1037,7 @@ class ImagesLoader(ImageIO): If the given location is a file and does not have a valid video extension. 
""" - if os.path.isdir(self.location): + if not isinstance(self.location, str) or os.path.isdir(self.location): retval = False elif os.path.splitext(self.location)[1].lower() in _video_extensions: retval = True @@ -1423,7 +1428,10 @@ class ImagesSaver(ImageIO): executor.submit(self._save, *item) executor.shutdown() - def _save(self, filename: str, image: bytes, sub_folder: Optional[str]) -> None: + def _save(self, + filename: str, + image: Union[bytes, np.ndarray], + sub_folder: Optional[str]) -> None: """ Save a single image inside a ThreadPoolExecutor Parameters @@ -1431,8 +1439,8 @@ class ImagesSaver(ImageIO): filename: str The filename of the image to be saved. NB: Any folders passed in with the filename will be stripped and replaced with :attr:`location`. - image: bytes - The encoded image to be saved + image: bytes or :class:`numpy.ndarray` + The encoded image or numpy array to be saved subfolder: str or ``None`` If the file should be saved in a subfolder in the output location, the subfolder should be provided here. ``None`` for no subfolder. @@ -1444,15 +1452,19 @@ class ImagesSaver(ImageIO): filename = os.path.join(location, os.path.basename(filename)) try: if self._as_bytes: + assert isinstance(image, bytes) with open(filename, "wb") as out_file: out_file.write(image) else: cv2.imwrite(filename, image) logger.trace("Saved image: '%s'", filename) # type:ignore except Exception as err: # pylint: disable=broad-except - logger.error("Failed to save image '%s'. Original Error: %s", filename, err) + logger.error("Failed to save image '%s'. Original Error: %s", filename, str(err)) - def save(self, filename: str, image: bytes, sub_folder: Optional[str] = None) -> None: + def save(self, + filename: str, + image: Union[bytes, np.ndarray], + sub_folder: Optional[str] = None) -> None: """ Save the given image in the background thread Ensure that :func:`close` is called once all save operations are complete. 
diff --git a/locales/es/LC_MESSAGES/lib.cli.args.mo b/locales/es/LC_MESSAGES/lib.cli.args.mo
index 05ce66fa9c922a9697eec318427b24cfaf556020..574e72c56d864954d89ffc8757ff7d075a54d7d3 100644
Binary files a/locales/es/LC_MESSAGES/lib.cli.args.mo and b/locales/es/LC_MESSAGES/lib.cli.args.mo differ
diff --git a/locales/es/LC_MESSAGES/lib.cli.args.po b/locales/es/LC_MESSAGES/lib.cli.args.po
index 7347451..905452a 100644
--- a/locales/es/LC_MESSAGES/lib.cli.args.po
+++ b/locales/es/LC_MESSAGES/lib.cli.args.po
@@ -6,8 +6,8 @@ msgid ""
 msgstr ""
 "Project-Id-Version: faceswap.spanish\n"
 "Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2022-10-10 13:05+0100\n"
-"PO-Revision-Date: 2022-10-10 13:07+0100\n"
+"POT-Creation-Date: 2022-10-31 11:51+0000\n"
+"PO-Revision-Date: 2022-10-31 11:53+0000\n"
 "Last-Translator: \n"
 "Language-Team: tokafondo\n"
 "Language: es\n"
@@ -55,7 +55,7 @@ msgstr ""
 "almacenarlo en la carpeta pde instalación de faceswap"
 
 #: lib/cli/args.py:320 lib/cli/args.py:329 lib/cli/args.py:337
-#: lib/cli/args.py:386 lib/cli/args.py:672 lib/cli/args.py:681
+#: lib/cli/args.py:386 lib/cli/args.py:668 lib/cli/args.py:677
 msgid "Data"
 msgstr "Datos"
 
@@ -102,8 +102,8 @@ msgstr ""
 
 #: lib/cli/args.py:396 
lib/cli/args.py:412 lib/cli/args.py:424 #: lib/cli/args.py:463 lib/cli/args.py:481 lib/cli/args.py:493 -#: lib/cli/args.py:502 lib/cli/args.py:691 lib/cli/args.py:718 -#: lib/cli/args.py:756 +#: lib/cli/args.py:502 lib/cli/args.py:687 lib/cli/args.py:714 +#: lib/cli/args.py:752 msgid "Plugins" msgstr "Extensiones" @@ -272,9 +272,9 @@ msgstr "" "Obtenga y almacene codificaciones de identidad facial de VGGFace2. Ralentiza " "un poco la extracción, pero ahorrará tiempo si usa 'sort by face'" -#: lib/cli/args.py:513 lib/cli/args.py:523 lib/cli/args.py:536 -#: lib/cli/args.py:550 lib/cli/args.py:793 lib/cli/args.py:807 -#: lib/cli/args.py:820 lib/cli/args.py:834 +#: lib/cli/args.py:513 lib/cli/args.py:523 lib/cli/args.py:535 +#: lib/cli/args.py:548 lib/cli/args.py:789 lib/cli/args.py:803 +#: lib/cli/args.py:816 lib/cli/args.py:830 msgid "Face Processing" msgstr "Proceso de Caras" @@ -287,52 +287,44 @@ msgstr "" "a lo largo de la diagonal del cuadro delimitador. Establecer a 0 para " "desactivar" -#: lib/cli/args.py:524 lib/cli/args.py:808 +#: lib/cli/args.py:524 msgid "" -"Optionally filter out people who you do not wish to process by passing in an " -"image of that person. Should be a front portrait with a single person in the " -"image. Multiple images can be added space separated. NB: Using face filter " -"will significantly decrease extraction speed and its accuracy cannot be " -"guaranteed." +"Optionally filter out people who you do not wish to extract by passing in " +"images of those people. Should be a small variety of images at different " +"angles and in different conditions. Multiple images can be added space " +"separated." msgstr "" -"Opcionalmente, puede filtrar las personas que no desea procesar pasando una " -"imagen de esa persona. Debe ser un retrato frontal con una sola persona en " -"la imagen. Se pueden añadir varias imágenes separadas por espacios. NB: El " -"uso del filtro de caras disminuirá significativamente la velocidad de " -"extracción y no se puede garantizar su precisión." +"Opcionalmente, filtre a las personas que no desea extraer pasando imágenes " +"de esas personas. Debe ser una pequeña variedad de imágenes en diferentes " +"ángulos y en diferentes condiciones. Se pueden agregar varias imágenes " +"separadas por espacios." -#: lib/cli/args.py:537 lib/cli/args.py:821 +#: lib/cli/args.py:536 msgid "" -"Optionally select people you wish to process by passing in an image of that " -"person. Should be a front portrait with a single person in the image. " -"Multiple images can be added space separated. NB: Using face filter will " -"significantly decrease extraction speed and its accuracy cannot be " -"guaranteed." +"Optionally select people you wish to extract by passing in images of that " +"person. Should be a small variety of images at different angles and in " +"different conditions. Multiple identities can be filtered. Multiple images " +"can be added space separated." msgstr "" -"Opcionalmente, seleccione las personas que desea procesar pasando una imagen " -"de esa persona. Debe ser un retrato frontal con una sola persona en la " -"imagen. Se pueden añadir varias imágenes separadas por espacios. NB: El uso " -"del filtro facial disminuirá significativamente la velocidad de extracción y " -"no se puede garantizar su precisión." +"Opcionalmente, seleccione las personas que desea extraer pasando imágenes de " +"esa persona. Debe ser una pequeña variedad de imágenes en diferentes ángulos " +"y en diferentes condiciones. Se pueden filtrar múltiples identidades. 
Se " +"pueden agregar varias imágenes separadas por espacios." -#: lib/cli/args.py:551 lib/cli/args.py:835 +#: lib/cli/args.py:549 msgid "" "For use with the optional nfilter/filter files. Threshold for positive face " -"recognition. Lower values are stricter. NB: Using face filter will " -"significantly decrease extraction speed and its accuracy cannot be " -"guaranteed." +"recognition. Higher values are stricter." msgstr "" -"Para usar con los archivos opcionales nfilter/filter. Umbral para el " -"reconocimiento positivo de caras. Los valores más bajos son más estrictos. " -"NB: El uso del filtro facial disminuirá significativamente la velocidad de " -"extracción y no se puede garantizar su precisión." +"Para usar con los archivos nfilter/filter opcionales. Umbral para el " +"reconocimiento facial positivo. Los valores más altos son más estrictos." -#: lib/cli/args.py:562 lib/cli/args.py:574 lib/cli/args.py:586 -#: lib/cli/args.py:598 +#: lib/cli/args.py:558 lib/cli/args.py:570 lib/cli/args.py:582 +#: lib/cli/args.py:594 msgid "output" msgstr "salida" -#: lib/cli/args.py:563 +#: lib/cli/args.py:559 msgid "" "The output size of extracted faces. Make sure that the model you intend to " "train supports your required size. This will only need to be changed for hi-" @@ -342,7 +334,7 @@ msgstr "" "pretende entrenar admite el tamaño deseado. Esto sólo tendrá que ser " "cambiado para los modelos de alta resolución." -#: lib/cli/args.py:575 +#: lib/cli/args.py:571 msgid "" "Extract every 'nth' frame. This option will skip frames when extracting " "faces. For example a value of 1 will extract faces from every frame, a value " @@ -352,7 +344,7 @@ msgstr "" "extraer las caras. Por ejemplo, un valor de 1 extraerá las caras de cada " "fotograma, un valor de 10 extraerá las caras de cada 10 fotogramas." -#: lib/cli/args.py:587 +#: lib/cli/args.py:583 msgid "" "Automatically save the alignments file after a set amount of frames. By " "default the alignments file is only saved at the end of the extraction " @@ -368,18 +360,18 @@ msgstr "" "ADVERTENCIA: No interrumpa el script al escribir el archivo porque podría " "corromperse. Poner a 0 para desactivar" -#: lib/cli/args.py:599 +#: lib/cli/args.py:595 msgid "Draw landmarks on the ouput faces for debugging purposes." msgstr "" "Dibujar puntos de referencia en las caras de salida para fines de depuración." -#: lib/cli/args.py:605 lib/cli/args.py:614 lib/cli/args.py:622 -#: lib/cli/args.py:629 lib/cli/args.py:847 lib/cli/args.py:858 -#: lib/cli/args.py:866 lib/cli/args.py:885 lib/cli/args.py:891 +#: lib/cli/args.py:601 lib/cli/args.py:610 lib/cli/args.py:618 +#: lib/cli/args.py:625 lib/cli/args.py:843 lib/cli/args.py:854 +#: lib/cli/args.py:862 lib/cli/args.py:881 lib/cli/args.py:887 msgid "settings" msgstr "ajustes" -#: lib/cli/args.py:606 +#: lib/cli/args.py:602 msgid "" "Don't run extraction in parallel. Will run each part of the extraction " "process separately (one after the other) rather than all at the smae time. " @@ -389,7 +381,7 @@ msgstr "" "extracción por separado (una tras otra) en lugar de hacerlo todo al mismo " "tiempo. Útil si la VRAM es escasa." 
-#: lib/cli/args.py:615 +#: lib/cli/args.py:611 msgid "" "Skips frames that have already been extracted and exist in the alignments " "file" @@ -397,19 +389,19 @@ msgstr "" "Omite los fotogramas que ya han sido extraídos y que existen en el archivo " "de alineaciones" -#: lib/cli/args.py:623 +#: lib/cli/args.py:619 msgid "Skip frames that already have detected faces in the alignments file" msgstr "" "Omitir los fotogramas que ya tienen caras detectadas en el archivo de " "alineaciones" -#: lib/cli/args.py:630 +#: lib/cli/args.py:626 msgid "Skip saving the detected faces to disk. Just create an alignments file" msgstr "" "No guardar las caras detectadas en el disco. Crear sólo un archivo de " "alineaciones" -#: lib/cli/args.py:652 +#: lib/cli/args.py:648 msgid "" "Swap the original faces in a source video/images to your final faces.\n" "Conversion plugins can be configured in the 'Settings' Menu" @@ -419,7 +411,7 @@ msgstr "" "Los plugins de conversión pueden ser configurados en el menú " "\"Configuración\"" -#: lib/cli/args.py:673 +#: lib/cli/args.py:669 msgid "" "Only required if converting from images to video. Provide The original video " "that the source frames were extracted from (for extracting the fps and " @@ -429,7 +421,7 @@ msgstr "" "original del que se extrajeron los fotogramas de origen (para extraer los " "fps y el audio)." -#: lib/cli/args.py:682 +#: lib/cli/args.py:678 msgid "" "Model directory. The directory containing the trained model you wish to use " "for conversion." @@ -437,7 +429,7 @@ msgstr "" "Directorio del modelo. El directorio que contiene el modelo entrenado que " "desea utilizar para la conversión." -#: lib/cli/args.py:692 +#: lib/cli/args.py:688 msgid "" "R|Performs color adjustment to the swapped face. Some of these options have " "configurable settings in '/config/convert.ini' or 'Settings > Configure " @@ -477,7 +469,7 @@ msgstr "" "colores. Generalmente no da resultados muy satisfactorios.\n" "L|none: No realice el ajuste de color." -#: lib/cli/args.py:719 +#: lib/cli/args.py:715 msgid "" "R|Masker to use. NB: The mask you require must exist within the alignments " "file. You can add additional masks with the Mask Tool.\n" @@ -553,7 +545,7 @@ msgstr "" "L|predicted: Si la opción 'Learn Mask' se habilitó durante el entrenamiento, " "esto usará la máscara que fue creada por el modelo entrenado." -#: lib/cli/args.py:757 +#: lib/cli/args.py:753 msgid "" "R|The plugin to use to output the converted images. The writers are " "configurable in '/config/convert.ini' or 'Settings > Configure Convert " @@ -579,11 +571,11 @@ msgstr "" "L|pillow: [images] Más lento que opencv, pero tiene más opciones y soporta " "más formatos." -#: lib/cli/args.py:776 lib/cli/args.py:783 lib/cli/args.py:877 +#: lib/cli/args.py:772 lib/cli/args.py:779 lib/cli/args.py:873 msgid "Frame Processing" msgstr "Proceso de fotogramas" -#: lib/cli/args.py:777 +#: lib/cli/args.py:773 #, python-format msgid "" "Scale the final output frames by this amount. 100%% will output the frames " @@ -593,7 +585,7 @@ msgstr "" "a los fotogramas a las dimensiones de origen. 50%% a la mitad de tamaño. " "200%% al doble de tamaño" -#: lib/cli/args.py:784 +#: lib/cli/args.py:780 msgid "" "Frame ranges to apply transfer to e.g. For frames 10 to 50 and 90 to 100 use " "--frame-ranges 10-50 90-100. Frames falling outside of the selected range " @@ -607,7 +599,7 @@ msgstr "" "imágenes, ¡los nombres de los archivos deben terminar con el número de " "fotograma!" 
-#: lib/cli/args.py:794 +#: lib/cli/args.py:790 msgid "" "If you have not cleansed your alignments file, then you can filter out faces " "by defining a folder here that contains the faces extracted from your input " @@ -623,7 +615,47 @@ msgstr "" "especificada. Si se deja en blanco, se convertirán todas las caras que " "existan en el archivo de alineaciones." -#: lib/cli/args.py:848 +#: lib/cli/args.py:804 +msgid "" +"Optionally filter out people who you do not wish to process by passing in an " +"image of that person. Should be a front portrait with a single person in the " +"image. Multiple images can be added space separated. NB: Using face filter " +"will significantly decrease extraction speed and its accuracy cannot be " +"guaranteed." +msgstr "" +"Opcionalmente, puede filtrar las personas que no desea procesar pasando una " +"imagen de esa persona. Debe ser un retrato frontal con una sola persona en " +"la imagen. Se pueden añadir varias imágenes separadas por espacios. NB: El " +"uso del filtro de caras disminuirá significativamente la velocidad de " +"extracción y no se puede garantizar su precisión." + +#: lib/cli/args.py:817 +msgid "" +"Optionally select people you wish to process by passing in an image of that " +"person. Should be a front portrait with a single person in the image. " +"Multiple images can be added space separated. NB: Using face filter will " +"significantly decrease extraction speed and its accuracy cannot be " +"guaranteed." +msgstr "" +"Opcionalmente, seleccione las personas que desea procesar pasando una imagen " +"de esa persona. Debe ser un retrato frontal con una sola persona en la " +"imagen. Se pueden añadir varias imágenes separadas por espacios. NB: El uso " +"del filtro facial disminuirá significativamente la velocidad de extracción y " +"no se puede garantizar su precisión." + +#: lib/cli/args.py:831 +msgid "" +"For use with the optional nfilter/filter files. Threshold for positive face " +"recognition. Lower values are stricter. NB: Using face filter will " +"significantly decrease extraction speed and its accuracy cannot be " +"guaranteed." +msgstr "" +"Para usar con los archivos opcionales nfilter/filter. Umbral para el " +"reconocimiento positivo de caras. Los valores más bajos son más estrictos. " +"NB: El uso del filtro facial disminuirá significativamente la velocidad de " +"extracción y no se puede garantizar su precisión." + +#: lib/cli/args.py:844 msgid "" "The maximum number of parallel processes for performing conversion. " "Converting images is system RAM heavy so it is possible to run out of memory " @@ -640,7 +672,7 @@ msgstr "" "procesos que los disponibles en su sistema. Si 'singleprocess' está " "habilitado, este ajuste será ignorado." -#: lib/cli/args.py:859 +#: lib/cli/args.py:855 msgid "" "[LEGACY] This only needs to be selected if a legacy model is being loaded or " "if there are multiple models in the model folder" @@ -648,7 +680,7 @@ msgstr "" "[LEGACY] Sólo es necesario seleccionar esta opción si se está cargando un " "modelo heredado si hay varios modelos en la carpeta de modelos" -#: lib/cli/args.py:867 +#: lib/cli/args.py:863 msgid "" "Enable On-The-Fly Conversion. NOT recommended. You should generate a clean " "alignments file for your destination video. However, if you wish you can " @@ -663,7 +695,7 @@ msgstr "" "de baja calidad. Si se encuentra un archivo de alineaciones, esta opción " "será ignorada." 
-#: lib/cli/args.py:878 +#: lib/cli/args.py:874 msgid "" "When used with --frame-ranges outputs the unchanged frames that are not " "processed instead of discarding them." @@ -671,16 +703,16 @@ msgstr "" "Cuando se usa con --frame-ranges, la salida incluye los fotogramas no " "procesados en vez de descartarlos." -#: lib/cli/args.py:886 +#: lib/cli/args.py:882 msgid "Swap the model. Instead converting from of A -> B, converts B -> A" msgstr "" "Intercambiar el modelo. En vez de convertir de A a B, convierte de B a A" -#: lib/cli/args.py:892 +#: lib/cli/args.py:888 msgid "Disable multiprocessing. Slower but less resource intensive." msgstr "Desactiva el multiproceso. Es más lento, pero usa menos recursos." -#: lib/cli/args.py:908 +#: lib/cli/args.py:904 msgid "" "Train a model on extracted original (A) and swap (B) faces.\n" "Training models can take a long time. Anything from 24hrs to over a week\n" @@ -692,11 +724,11 @@ msgstr "" "hasta más de una semana.\n" "Los plugins de los modelos pueden configurarse en el menú \"Ajustes\"" -#: lib/cli/args.py:927 lib/cli/args.py:936 +#: lib/cli/args.py:923 lib/cli/args.py:932 msgid "faces" msgstr "caras" -#: lib/cli/args.py:928 +#: lib/cli/args.py:924 msgid "" "Input directory. A directory containing training images for face A. This is " "the original face, i.e. the face that you want to remove and replace with " @@ -706,7 +738,7 @@ msgstr "" "para la cara A. Esta es la cara original, es decir, la cara que se quiere " "eliminar y sustituir por la cara B." -#: lib/cli/args.py:937 +#: lib/cli/args.py:933 msgid "" "Input directory. A directory containing training images for face B. This is " "the swap face, i.e. the face that you want to place onto the head of person " @@ -716,12 +748,12 @@ msgstr "" "para la cara B. Esta es la cara de intercambio, es decir, la cara que se " "quiere colocar en la cabeza de la persona A." -#: lib/cli/args.py:945 lib/cli/args.py:957 lib/cli/args.py:973 -#: lib/cli/args.py:998 lib/cli/args.py:1008 +#: lib/cli/args.py:941 lib/cli/args.py:953 lib/cli/args.py:969 +#: lib/cli/args.py:994 lib/cli/args.py:1004 msgid "model" msgstr "modelo" -#: lib/cli/args.py:946 +#: lib/cli/args.py:942 msgid "" "Model directory. This is where the training data will be stored. You should " "always specify a new folder for new models. If starting a new model, select " @@ -735,7 +767,7 @@ msgstr "" "carpeta que no exista (que se creará). Si continúa entrenando un modelo " "existente, especifique la ubicación del modelo existente." -#: lib/cli/args.py:958 +#: lib/cli/args.py:954 msgid "" "R|Load the weights from a pre-existing model into a newly created model. For " "most models this will load weights from the Encoder of the given model into " @@ -759,7 +791,7 @@ msgstr "" "NB: Los pesos solo se pueden cargar desde modelos del mismo complemento que " "desea entrenar." -#: lib/cli/args.py:974 +#: lib/cli/args.py:970 msgid "" "R|Select which trainer to use. Trainers can be configured from the Settings " "menu or the config folder.\n" @@ -804,7 +836,7 @@ msgstr "" "recursos (se necesita una GPU con una buena cantidad de VRAM). Bueno para " "los detalles, pero más susceptible a las diferencias de color." -#: lib/cli/args.py:999 +#: lib/cli/args.py:995 msgid "" "Output a summary of the model and exit. If a model folder is provided then a " "summary of the saved model is displayed. Otherwise a summary of the model " @@ -816,7 +848,7 @@ msgstr "" "muestra un resumen del modelo que crearía el complemento elegido y los " "ajustes de configuración." 
-#: lib/cli/args.py:1009 +#: lib/cli/args.py:1005 msgid "" "Freeze the weights of the model. Freezing weights means that some of the " "parameters in the model will no longer continue to learn, but those that are " @@ -830,12 +862,12 @@ msgstr "" "congelará el codificador, pero algunos modelos pueden tener opciones de " "configuración para congelar otras capas." -#: lib/cli/args.py:1022 lib/cli/args.py:1034 lib/cli/args.py:1045 -#: lib/cli/args.py:1056 lib/cli/args.py:1139 +#: lib/cli/args.py:1018 lib/cli/args.py:1030 lib/cli/args.py:1041 +#: lib/cli/args.py:1052 lib/cli/args.py:1135 msgid "training" msgstr "entrenamiento" -#: lib/cli/args.py:1023 +#: lib/cli/args.py:1019 msgid "" "Batch size. This is the number of images processed through the model for " "each side per iteration. NB: As the model is fed 2 sides at a time, the " @@ -848,7 +880,7 @@ msgstr "" "momento es el doble del número que se establece aquí. Los lotes más grandes " "requieren más RAM de la GPU." -#: lib/cli/args.py:1035 +#: lib/cli/args.py:1031 msgid "" "Length of training in iterations. This is only really used for automation. " "There is no 'correct' number of iterations a model should be trained for. " @@ -863,7 +895,7 @@ msgstr "" "automáticamente en un número determinado de iteraciones, puede establecer " "ese valor aquí." -#: lib/cli/args.py:1046 +#: lib/cli/args.py:1042 msgid "" "[Deprecated - Use '-D, --distribution-strategy' instead] Use the Tensorflow " "Mirrored Distrubution Strategy to train on multiple GPUs." @@ -871,7 +903,7 @@ msgstr "" "[Obsoleto: use '-D, --distribution-strategy' en su lugar] Use la estrategia " "de distribución duplicada de Tensorflow para entrenar en varias GPU." -#: lib/cli/args.py:1057 +#: lib/cli/args.py:1053 msgid "" "R|Select the distribution stategy to use.\n" "L|default: Use Tensorflow's default distribution strategy.\n" @@ -897,15 +929,15 @@ msgstr "" "locales. Se carga una copia del modelo y todas las variables en cada GPU con " "lotes distribuidos a cada GPU en cada iteración." -#: lib/cli/args.py:1074 lib/cli/args.py:1084 +#: lib/cli/args.py:1070 lib/cli/args.py:1080 msgid "Saving" msgstr "Guardar" -#: lib/cli/args.py:1075 +#: lib/cli/args.py:1071 msgid "Sets the number of iterations between each model save." msgstr "Establece el número de iteraciones entre cada guardado del modelo." -#: lib/cli/args.py:1085 +#: lib/cli/args.py:1081 msgid "" "Sets the number of iterations before saving a backup snapshot of the model " "in it's current state. Set to 0 for off." @@ -913,11 +945,11 @@ msgstr "" "Establece el número de iteraciones antes de guardar una copia de seguridad " "del modelo en su estado actual. Establece 0 para que esté desactivado." -#: lib/cli/args.py:1092 lib/cli/args.py:1103 lib/cli/args.py:1114 +#: lib/cli/args.py:1088 lib/cli/args.py:1099 lib/cli/args.py:1110 msgid "timelapse" msgstr "intervalo" -#: lib/cli/args.py:1093 +#: lib/cli/args.py:1089 msgid "" "Optional for creating a timelapse. Timelapse will save an image of your " "selected faces into the timelapse-output folder at every save iteration. " @@ -931,7 +963,7 @@ msgstr "" "para crear el timelapse. También debe suministrar un parámetro --timelapse-" "output y un parámetro --timelapse-input-B." -#: lib/cli/args.py:1104 +#: lib/cli/args.py:1100 msgid "" "Optional for creating a timelapse. Timelapse will save an image of your " "selected faces into the timelapse-output folder at every save iteration. " @@ -945,7 +977,7 @@ msgstr "" "para crear el timelapse. 
También debe suministrar un parámetro --timelapse-" "output y un parámetro --timelapse-input-A." -#: lib/cli/args.py:1115 +#: lib/cli/args.py:1111 msgid "" "Optional for creating a timelapse. Timelapse will save an image of your " "selected faces into the timelapse-output folder at every save iteration. If " @@ -957,17 +989,17 @@ msgstr "" "Si se suministran las carpetas de entrada pero no la carpeta de salida, se " "guardará por defecto en la carpeta del modelo /timelapse/" -#: lib/cli/args.py:1124 lib/cli/args.py:1131 +#: lib/cli/args.py:1120 lib/cli/args.py:1127 msgid "preview" msgstr "previsualización" -#: lib/cli/args.py:1125 +#: lib/cli/args.py:1121 msgid "Show training preview output. in a separate window." msgstr "" "Mostrar la salida de la vista previa del entrenamiento. en una ventana " "separada." -#: lib/cli/args.py:1132 +#: lib/cli/args.py:1128 msgid "" "Writes the training result to a file. The image will be stored in the root " "of your FaceSwap folder." @@ -975,7 +1007,7 @@ msgstr "" "Escribe el resultado del entrenamiento en un archivo. La imagen se " "almacenará en la raíz de su carpeta FaceSwap." -#: lib/cli/args.py:1140 +#: lib/cli/args.py:1136 msgid "" "Disables TensorBoard logging. NB: Disabling logs means that you will not be " "able to use the graph or analysis for this session in the GUI." @@ -983,12 +1015,12 @@ msgstr "" "Desactiva el registro de TensorBoard. NB: Desactivar los registros significa " "que no podrá utilizar el gráfico o el análisis de esta sesión en la GUI." -#: lib/cli/args.py:1147 lib/cli/args.py:1156 lib/cli/args.py:1165 -#: lib/cli/args.py:1174 +#: lib/cli/args.py:1143 lib/cli/args.py:1152 lib/cli/args.py:1161 +#: lib/cli/args.py:1170 msgid "augmentation" msgstr "aumento" -#: lib/cli/args.py:1148 +#: lib/cli/args.py:1144 msgid "" "Warps training faces to closely matched Landmarks from the opposite face-set " "rather than randomly warping the face. This is the 'dfaker' way of doing " @@ -998,7 +1030,7 @@ msgstr "" "conjunto de caras opuestas en lugar de deformar la cara al azar. Esta es la " "forma 'dfaker' de hacer la deformación." -#: lib/cli/args.py:1157 +#: lib/cli/args.py:1153 msgid "" "To effectively learn, a random set of images are flipped horizontally. " "Sometimes it is desirable for this not to occur. Generally this should be " @@ -1009,7 +1041,7 @@ msgstr "" "general, esto debería dejarse sin efecto, excepto durante el 'entrenamiento " "de ajuste'." -#: lib/cli/args.py:1166 +#: lib/cli/args.py:1162 msgid "" "Color augmentation helps make the model less susceptible to color " "differences between the A and B sets, at an increased training time cost. " @@ -1019,7 +1051,7 @@ msgstr "" "diferencias de color entre los conjuntos A y B, con un mayor coste de tiempo " "de entrenamiento. Activa esta opción para desactivar el aumento de color." -#: lib/cli/args.py:1175 +#: lib/cli/args.py:1171 msgid "" "Warping is integral to training the Neural Network. This option should only " "be enabled towards the very end of training to try to bring out more detail. " @@ -1032,7 +1064,7 @@ msgstr "" "esta opción desde el principio, es probable que arruine el modelo y se " "obtengan resultados terribles." 
-#: lib/cli/args.py:1200 +#: lib/cli/args.py:1196 msgid "Output to Shell console instead of GUI console" msgstr "Salida a la consola Shell en lugar de la consola GUI" diff --git a/locales/lib.cli.args.pot b/locales/lib.cli.args.pot index 48ef542..788ba91 100644 --- a/locales/lib.cli.args.pot +++ b/locales/lib.cli.args.pot @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2022-10-10 13:05+0100\n" +"POT-Creation-Date: 2022-10-31 11:51+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -46,7 +46,7 @@ msgid "Path to store the logfile. Leave blank to store in the faceswap folder" msgstr "" #: lib/cli/args.py:320 lib/cli/args.py:329 lib/cli/args.py:337 -#: lib/cli/args.py:386 lib/cli/args.py:672 lib/cli/args.py:681 +#: lib/cli/args.py:386 lib/cli/args.py:668 lib/cli/args.py:677 msgid "Data" msgstr "" @@ -82,8 +82,8 @@ msgstr "" #: lib/cli/args.py:396 lib/cli/args.py:412 lib/cli/args.py:424 #: lib/cli/args.py:463 lib/cli/args.py:481 lib/cli/args.py:493 -#: lib/cli/args.py:502 lib/cli/args.py:691 lib/cli/args.py:718 -#: lib/cli/args.py:756 +#: lib/cli/args.py:502 lib/cli/args.py:687 lib/cli/args.py:714 +#: lib/cli/args.py:752 msgid "Plugins" msgstr "" @@ -180,9 +180,9 @@ msgid "" "little, but will save time if using 'sort by face'" msgstr "" -#: lib/cli/args.py:513 lib/cli/args.py:523 lib/cli/args.py:536 -#: lib/cli/args.py:550 lib/cli/args.py:793 lib/cli/args.py:807 -#: lib/cli/args.py:820 lib/cli/args.py:834 +#: lib/cli/args.py:513 lib/cli/args.py:523 lib/cli/args.py:535 +#: lib/cli/args.py:548 lib/cli/args.py:789 lib/cli/args.py:803 +#: lib/cli/args.py:816 lib/cli/args.py:830 msgid "Face Processing" msgstr "" @@ -192,52 +192,48 @@ msgid "" "diagonal of the bounding box. Set to 0 for off" msgstr "" -#: lib/cli/args.py:524 lib/cli/args.py:808 +#: lib/cli/args.py:524 msgid "" -"Optionally filter out people who you do not wish to process by passing in an " -"image of that person. Should be a front portrait with a single person in the " -"image. Multiple images can be added space separated. NB: Using face filter " -"will significantly decrease extraction speed and its accuracy cannot be " -"guaranteed." +"Optionally filter out people who you do not wish to extract by passing in " +"images of those people. Should be a small variety of images at different " +"angles and in different conditions. Multiple images can be added space " +"separated." msgstr "" -#: lib/cli/args.py:537 lib/cli/args.py:821 +#: lib/cli/args.py:536 msgid "" -"Optionally select people you wish to process by passing in an image of that " -"person. Should be a front portrait with a single person in the image. " -"Multiple images can be added space separated. NB: Using face filter will " -"significantly decrease extraction speed and its accuracy cannot be " -"guaranteed." +"Optionally select people you wish to extract by passing in images of that " +"person. Should be a small variety of images at different angles and in " +"different conditions. Multiple identities can be filtered. Multiple images " +"can be added space separated." msgstr "" -#: lib/cli/args.py:551 lib/cli/args.py:835 +#: lib/cli/args.py:549 msgid "" "For use with the optional nfilter/filter files. Threshold for positive face " -"recognition. Lower values are stricter. NB: Using face filter will " -"significantly decrease extraction speed and its accuracy cannot be " -"guaranteed." +"recognition. Higher values are stricter." 
msgstr "" -#: lib/cli/args.py:562 lib/cli/args.py:574 lib/cli/args.py:586 -#: lib/cli/args.py:598 +#: lib/cli/args.py:558 lib/cli/args.py:570 lib/cli/args.py:582 +#: lib/cli/args.py:594 msgid "output" msgstr "" -#: lib/cli/args.py:563 +#: lib/cli/args.py:559 msgid "" "The output size of extracted faces. Make sure that the model you intend to " "train supports your required size. This will only need to be changed for hi-" "res models." msgstr "" -#: lib/cli/args.py:575 +#: lib/cli/args.py:571 msgid "" "Extract every 'nth' frame. This option will skip frames when extracting " "faces. For example a value of 1 will extract faces from every frame, a value " "of 10 will extract faces from every 10th frame." msgstr "" -#: lib/cli/args.py:587 +#: lib/cli/args.py:583 msgid "" "Automatically save the alignments file after a set amount of frames. By " "default the alignments file is only saved at the end of the extraction " @@ -247,57 +243,57 @@ msgid "" "turn off" msgstr "" -#: lib/cli/args.py:599 +#: lib/cli/args.py:595 msgid "Draw landmarks on the ouput faces for debugging purposes." msgstr "" -#: lib/cli/args.py:605 lib/cli/args.py:614 lib/cli/args.py:622 -#: lib/cli/args.py:629 lib/cli/args.py:847 lib/cli/args.py:858 -#: lib/cli/args.py:866 lib/cli/args.py:885 lib/cli/args.py:891 +#: lib/cli/args.py:601 lib/cli/args.py:610 lib/cli/args.py:618 +#: lib/cli/args.py:625 lib/cli/args.py:843 lib/cli/args.py:854 +#: lib/cli/args.py:862 lib/cli/args.py:881 lib/cli/args.py:887 msgid "settings" msgstr "" -#: lib/cli/args.py:606 +#: lib/cli/args.py:602 msgid "" "Don't run extraction in parallel. Will run each part of the extraction " "process separately (one after the other) rather than all at the smae time. " "Useful if VRAM is at a premium." msgstr "" -#: lib/cli/args.py:615 +#: lib/cli/args.py:611 msgid "" "Skips frames that have already been extracted and exist in the alignments " "file" msgstr "" -#: lib/cli/args.py:623 +#: lib/cli/args.py:619 msgid "Skip frames that already have detected faces in the alignments file" msgstr "" -#: lib/cli/args.py:630 +#: lib/cli/args.py:626 msgid "Skip saving the detected faces to disk. Just create an alignments file" msgstr "" -#: lib/cli/args.py:652 +#: lib/cli/args.py:648 msgid "" "Swap the original faces in a source video/images to your final faces.\n" "Conversion plugins can be configured in the 'Settings' Menu" msgstr "" -#: lib/cli/args.py:673 +#: lib/cli/args.py:669 msgid "" "Only required if converting from images to video. Provide The original video " "that the source frames were extracted from (for extracting the fps and " "audio)." msgstr "" -#: lib/cli/args.py:682 +#: lib/cli/args.py:678 msgid "" "Model directory. The directory containing the trained model you wish to use " "for conversion." msgstr "" -#: lib/cli/args.py:692 +#: lib/cli/args.py:688 msgid "" "R|Performs color adjustment to the swapped face. Some of these options have " "configurable settings in '/config/convert.ini' or 'Settings > Configure " @@ -318,7 +314,7 @@ msgid "" "L|none: Don't perform color adjustment." msgstr "" -#: lib/cli/args.py:719 +#: lib/cli/args.py:715 msgid "" "R|Masker to use. NB: The mask you require must exist within the alignments " "file. You can add additional masks with the Mask Tool.\n" @@ -355,7 +351,7 @@ msgid "" "will use the mask that was created by the trained model." msgstr "" -#: lib/cli/args.py:757 +#: lib/cli/args.py:753 msgid "" "R|The plugin to use to output the converted images. 
The writers are " "configurable in '/config/convert.ini' or 'Settings > Configure Convert " @@ -370,18 +366,18 @@ msgid "" "more formats." msgstr "" -#: lib/cli/args.py:776 lib/cli/args.py:783 lib/cli/args.py:877 +#: lib/cli/args.py:772 lib/cli/args.py:779 lib/cli/args.py:873 msgid "Frame Processing" msgstr "" -#: lib/cli/args.py:777 +#: lib/cli/args.py:773 #, python-format msgid "" "Scale the final output frames by this amount. 100%% will output the frames " "at source dimensions. 50%% at half size 200%% at double size" msgstr "" -#: lib/cli/args.py:784 +#: lib/cli/args.py:780 msgid "" "Frame ranges to apply transfer to e.g. For frames 10 to 50 and 90 to 100 use " "--frame-ranges 10-50 90-100. Frames falling outside of the selected range " @@ -389,7 +385,7 @@ msgid "" "converting from images, then the filenames must end with the frame-number!" msgstr "" -#: lib/cli/args.py:794 +#: lib/cli/args.py:790 msgid "" "If you have not cleansed your alignments file, then you can filter out faces " "by defining a folder here that contains the faces extracted from your input " @@ -399,7 +395,33 @@ msgid "" "alignments file." msgstr "" -#: lib/cli/args.py:848 +#: lib/cli/args.py:804 +msgid "" +"Optionally filter out people who you do not wish to process by passing in an " +"image of that person. Should be a front portrait with a single person in the " +"image. Multiple images can be added space separated. NB: Using face filter " +"will significantly decrease extraction speed and its accuracy cannot be " +"guaranteed." +msgstr "" + +#: lib/cli/args.py:817 +msgid "" +"Optionally select people you wish to process by passing in an image of that " +"person. Should be a front portrait with a single person in the image. " +"Multiple images can be added space separated. NB: Using face filter will " +"significantly decrease extraction speed and its accuracy cannot be " +"guaranteed." +msgstr "" + +#: lib/cli/args.py:831 +msgid "" +"For use with the optional nfilter/filter files. Threshold for positive face " +"recognition. Lower values are stricter. NB: Using face filter will " +"significantly decrease extraction speed and its accuracy cannot be " +"guaranteed." +msgstr "" + +#: lib/cli/args.py:844 msgid "" "The maximum number of parallel processes for performing conversion. " "Converting images is system RAM heavy so it is possible to run out of memory " @@ -409,13 +431,13 @@ msgid "" "your system. If singleprocess is enabled this setting will be ignored." msgstr "" -#: lib/cli/args.py:859 +#: lib/cli/args.py:855 msgid "" "[LEGACY] This only needs to be selected if a legacy model is being loaded or " "if there are multiple models in the model folder" msgstr "" -#: lib/cli/args.py:867 +#: lib/cli/args.py:863 msgid "" "Enable On-The-Fly Conversion. NOT recommended. You should generate a clean " "alignments file for your destination video. However, if you wish you can " @@ -424,51 +446,51 @@ msgid "" "alignments file is found, this option will be ignored." msgstr "" -#: lib/cli/args.py:878 +#: lib/cli/args.py:874 msgid "" "When used with --frame-ranges outputs the unchanged frames that are not " "processed instead of discarding them." msgstr "" -#: lib/cli/args.py:886 +#: lib/cli/args.py:882 msgid "Swap the model. Instead converting from of A -> B, converts B -> A" msgstr "" -#: lib/cli/args.py:892 +#: lib/cli/args.py:888 msgid "Disable multiprocessing. Slower but less resource intensive." 
msgstr "" -#: lib/cli/args.py:908 +#: lib/cli/args.py:904 msgid "" "Train a model on extracted original (A) and swap (B) faces.\n" "Training models can take a long time. Anything from 24hrs to over a week\n" "Model plugins can be configured in the 'Settings' Menu" msgstr "" -#: lib/cli/args.py:927 lib/cli/args.py:936 +#: lib/cli/args.py:923 lib/cli/args.py:932 msgid "faces" msgstr "" -#: lib/cli/args.py:928 +#: lib/cli/args.py:924 msgid "" "Input directory. A directory containing training images for face A. This is " "the original face, i.e. the face that you want to remove and replace with " "face B." msgstr "" -#: lib/cli/args.py:937 +#: lib/cli/args.py:933 msgid "" "Input directory. A directory containing training images for face B. This is " "the swap face, i.e. the face that you want to place onto the head of person " "A." msgstr "" -#: lib/cli/args.py:945 lib/cli/args.py:957 lib/cli/args.py:973 -#: lib/cli/args.py:998 lib/cli/args.py:1008 +#: lib/cli/args.py:941 lib/cli/args.py:953 lib/cli/args.py:969 +#: lib/cli/args.py:994 lib/cli/args.py:1004 msgid "model" msgstr "" -#: lib/cli/args.py:946 +#: lib/cli/args.py:942 msgid "" "Model directory. This is where the training data will be stored. You should " "always specify a new folder for new models. If starting a new model, select " @@ -477,7 +499,7 @@ msgid "" "the existing model." msgstr "" -#: lib/cli/args.py:958 +#: lib/cli/args.py:954 msgid "" "R|Load the weights from a pre-existing model into a newly created model. For " "most models this will load weights from the Encoder of the given model into " @@ -491,7 +513,7 @@ msgid "" "to train." msgstr "" -#: lib/cli/args.py:974 +#: lib/cli/args.py:970 msgid "" "R|Select which trainer to use. Trainers can be configured from the Settings " "menu or the config folder.\n" @@ -514,7 +536,7 @@ msgid "" "susceptible to color differences." msgstr "" -#: lib/cli/args.py:999 +#: lib/cli/args.py:995 msgid "" "Output a summary of the model and exit. If a model folder is provided then a " "summary of the saved model is displayed. Otherwise a summary of the model " @@ -522,7 +544,7 @@ msgid "" "displayed." msgstr "" -#: lib/cli/args.py:1009 +#: lib/cli/args.py:1005 msgid "" "Freeze the weights of the model. Freezing weights means that some of the " "parameters in the model will no longer continue to learn, but those that are " @@ -531,12 +553,12 @@ msgid "" "layers." msgstr "" -#: lib/cli/args.py:1022 lib/cli/args.py:1034 lib/cli/args.py:1045 -#: lib/cli/args.py:1056 lib/cli/args.py:1139 +#: lib/cli/args.py:1018 lib/cli/args.py:1030 lib/cli/args.py:1041 +#: lib/cli/args.py:1052 lib/cli/args.py:1135 msgid "training" msgstr "" -#: lib/cli/args.py:1023 +#: lib/cli/args.py:1019 msgid "" "Batch size. This is the number of images processed through the model for " "each side per iteration. NB: As the model is fed 2 sides at a time, the " @@ -544,7 +566,7 @@ msgid "" "number that you set here. Larger batches require more GPU RAM." msgstr "" -#: lib/cli/args.py:1035 +#: lib/cli/args.py:1031 msgid "" "Length of training in iterations. This is only really used for automation. " "There is no 'correct' number of iterations a model should be trained for. " @@ -553,13 +575,13 @@ msgid "" "can set that value here." msgstr "" -#: lib/cli/args.py:1046 +#: lib/cli/args.py:1042 msgid "" "[Deprecated - Use '-D, --distribution-strategy' instead] Use the Tensorflow " "Mirrored Distrubution Strategy to train on multiple GPUs." 
msgstr "" -#: lib/cli/args.py:1057 +#: lib/cli/args.py:1053 msgid "" "R|Select the distribution stategy to use.\n" "L|default: Use Tensorflow's default distribution strategy.\n" @@ -572,25 +594,25 @@ msgid "" "batches distributed to each GPU at each iteration." msgstr "" -#: lib/cli/args.py:1074 lib/cli/args.py:1084 +#: lib/cli/args.py:1070 lib/cli/args.py:1080 msgid "Saving" msgstr "" -#: lib/cli/args.py:1075 +#: lib/cli/args.py:1071 msgid "Sets the number of iterations between each model save." msgstr "" -#: lib/cli/args.py:1085 +#: lib/cli/args.py:1081 msgid "" "Sets the number of iterations before saving a backup snapshot of the model " "in it's current state. Set to 0 for off." msgstr "" -#: lib/cli/args.py:1092 lib/cli/args.py:1103 lib/cli/args.py:1114 +#: lib/cli/args.py:1088 lib/cli/args.py:1099 lib/cli/args.py:1110 msgid "timelapse" msgstr "" -#: lib/cli/args.py:1093 +#: lib/cli/args.py:1089 msgid "" "Optional for creating a timelapse. Timelapse will save an image of your " "selected faces into the timelapse-output folder at every save iteration. " @@ -599,7 +621,7 @@ msgid "" "timelapse-input-B parameter." msgstr "" -#: lib/cli/args.py:1104 +#: lib/cli/args.py:1100 msgid "" "Optional for creating a timelapse. Timelapse will save an image of your " "selected faces into the timelapse-output folder at every save iteration. " @@ -608,7 +630,7 @@ msgid "" "timelapse-input-A parameter." msgstr "" -#: lib/cli/args.py:1115 +#: lib/cli/args.py:1111 msgid "" "Optional for creating a timelapse. Timelapse will save an image of your " "selected faces into the timelapse-output folder at every save iteration. If " @@ -616,53 +638,53 @@ msgid "" "model folder /timelapse/" msgstr "" -#: lib/cli/args.py:1124 lib/cli/args.py:1131 +#: lib/cli/args.py:1120 lib/cli/args.py:1127 msgid "preview" msgstr "" -#: lib/cli/args.py:1125 +#: lib/cli/args.py:1121 msgid "Show training preview output. in a separate window." msgstr "" -#: lib/cli/args.py:1132 +#: lib/cli/args.py:1128 msgid "" "Writes the training result to a file. The image will be stored in the root " "of your FaceSwap folder." msgstr "" -#: lib/cli/args.py:1140 +#: lib/cli/args.py:1136 msgid "" "Disables TensorBoard logging. NB: Disabling logs means that you will not be " "able to use the graph or analysis for this session in the GUI." msgstr "" -#: lib/cli/args.py:1147 lib/cli/args.py:1156 lib/cli/args.py:1165 -#: lib/cli/args.py:1174 +#: lib/cli/args.py:1143 lib/cli/args.py:1152 lib/cli/args.py:1161 +#: lib/cli/args.py:1170 msgid "augmentation" msgstr "" -#: lib/cli/args.py:1148 +#: lib/cli/args.py:1144 msgid "" "Warps training faces to closely matched Landmarks from the opposite face-set " "rather than randomly warping the face. This is the 'dfaker' way of doing " "warping." msgstr "" -#: lib/cli/args.py:1157 +#: lib/cli/args.py:1153 msgid "" "To effectively learn, a random set of images are flipped horizontally. " "Sometimes it is desirable for this not to occur. Generally this should be " "left off except for during 'fit training'." msgstr "" -#: lib/cli/args.py:1166 +#: lib/cli/args.py:1162 msgid "" "Color augmentation helps make the model less susceptible to color " "differences between the A and B sets, at an increased training time cost. " "Enable this option to disable color augmentation." msgstr "" -#: lib/cli/args.py:1175 +#: lib/cli/args.py:1171 msgid "" "Warping is integral to training the Neural Network. This option should only " "be enabled towards the very end of training to try to bring out more detail. 
" @@ -670,6 +692,6 @@ msgid "" "likely to kill a model and lead to terrible results." msgstr "" -#: lib/cli/args.py:1200 +#: lib/cli/args.py:1196 msgid "Output to Shell console instead of GUI console" msgstr "" diff --git a/locales/ru/LC_MESSAGES/lib.cli.args.mo b/locales/ru/LC_MESSAGES/lib.cli.args.mo index 6df5de854a2fead5c9b6c4758f914b31a8bea4dc..f13df69ccf0b384bd24ee021bf474d8a6d129660 100644 GIT binary patch delta 3182 zcmcJQeQ;FO6~GTkBnSyXU?TK$o3Oh+kM+QkXLS(W!58 z_IJ;@=iYnHx#ztcKat*ZGCgu?XvR~*a}P2KIgut3fsb6l56^~7k%oRE8(}W|Vt`05 z_WlD!GGHI9fN#RB+CE661MY;=;oK~d2UxcW_MxB37O93m8!VEej~dADgBbE)7`DUT z!ptEe{K@b6DQ1JOVHq4hRD`v%e3;0;U^Q&PZVVTRqkrxZ*#r0IiaZBrUoA3^`IE2( zU0oxx0{#Nt+>cb#^PG(5jud&GiC@8gz}1vvE(`C=cerP?NG0|&V?@rv`AXzQ=8FnM zZbz@Y)>-#5EX4leSdnws-x()z3+x?FB2caqSpZ64rY`k*28$@~X80cZiAf?exrY6C zzm5J$DK>bxOk@rAO_N1h@&9Cs$d?>cOa1%OMed&^GMV`?oK~43vUHw^2fb?{*P{0& z%2xO;48aw5koYtPZ!;)|{qH1GI0G(#o8e;kI%H27zDT4IE{9zl{1My_cl;1yFJB_k z1K)SqzSQZN#dnG9#ctd!vKD46b1E5J#{DO11HTMepV1MSHsS%07y|t7dd%=%Iu4nxmSPXrB z5!y(02WW2my5Je?yCU@>UJUEGkiX&ZA+Nx{q18YG;$RV`j=XfE6TqQ|xL52yd6+8T zm!xbJ%y(~Q48JC!Phq@~N&NOS@#-?%OPM>dk7yBDh5gqw!sFN@g%?(HH8 z9H+O5aBcFJ9lSzt!A_B&y0ji2U z`$eu5|ECYpfSj}LDCN)O{^$4dUUS*rK1#?Xns18yoo>#hzP!?s^FH4JHW~*L=uW6;TAaOBR)RxEvW4)KNdL+Ps3-}_xMSXAOS4LSftSO&AuXTX8*R#*a; z!l5t<`IgC3a5(470S4?NlM%h(+lQj0J{V(M12Lsa|LuGZgBuZj__fF)u4igV>T>Du1BsxmLPmLQqS%D>VGyPklDyMWHge2+=Fkbz>Rm`{5tfCC@TfTj26)_yc$RBRC1^@9G*8-Eje&=Z@#P zoB^jJbC8*??N*qlBfle-owzSV%0uB)iJ6rc5RQUEse$QE*x^jF003GGwWA*%vq7lG`rDUvLkQyHtU#`Q1)ZiVXNEf z;a95x=YSFi#md@eCGAJ8q!m-vanOTwSqVF8#m#(WiG6hCJ9YW&W+K~T_Owhe zfBCGZ=!ia2=aI^>*WRXdrBms>%7F~@JS8hOE1FBp>1)Tv4n%jSjlX;~xjF64D(8+j zG*2*p6U@xNP({1-@)b0T{8{GirW{kX42pd3_4@8|Rs46oeyZFYNcC|TIufW eA=OqrotjFRuRWV9)oAM*%U z{B$VLb3HQXZPA`av(SfAq!(fNRDNiVX;O256ak&EJw!T<|46751lLcOmckm?Vf^qo zObL?~K_9GUUG#m@IqV(b(q_11hBQ<1sE5u@94FuyOvUSfA^fDXFc!LCX@tZSwMI&J z;p=cO{@0?UKJ1y%(qVWW9)xvHsfh8ra4&Y@1JZMF5a!{3IM(d$;WTm5AqFPkZP*si zWG3t)9EP7I@G$&A4@%kC^{@>4SR!wU{UiJV+c{S%fB{Jy92UV6xD66KeGFI78Gu)@ z9cfaIhj35Oxr8H`RYUL(Sd0H`2H|Ewa+dTf8+|}L&jk|VB~ljSb$N(8*q={$vF{X1 z!REoGtb;Ld8|;RU!>Hww8vnBB&>4|4sg+APgu-ENo;->WA|_^)muiJA91 z{09GU1C@d8ZIb?CJetz_7lv=;P>hFrxFB=l#%6ot?>TNI1HF{Q9VY(MLU}R%XDj2- zy-T_Ue=~7p-Zfr;De~=+Hej!5qcX6)uW+~6ImEr6`KL(;qOKhbE`x(`KlFrk*k#oN zbE(ba@E8kz=_0&A-01NFgYn;h zU&Fd9QWp;{9APa7@O{O%g9le!lPi;eqcMWWte~cJlQ>ldWgqba(+~z)=_pgD30{RxC7cmj~SoYIGKTlSt-4CIJ_sTvUXpF00MqcLCxSL}iMXv+mYZN+AkEd{V4L1@~Hs zzn2M1(Idz++wZLOGZmJjXk<#xG|Js-EO4C&sxvb<$g>zL1*M}hl#fg^7mh0}|C_l))1pwZ)#E3>d3#f1&Gr}SY8o0^620xyHidYr9kC8?)tu%4 z@5Q7pcbF?P(dAy8nVpvD@-{t_;_!CYw@9$J?FWTW^e1{|5s# B71{s* diff --git a/locales/ru/LC_MESSAGES/lib.cli.args.po b/locales/ru/LC_MESSAGES/lib.cli.args.po index 362a188..4bae0ff 100644 --- a/locales/ru/LC_MESSAGES/lib.cli.args.po +++ b/locales/ru/LC_MESSAGES/lib.cli.args.po @@ -6,8 +6,8 @@ msgid "" msgstr "" "Project-Id-Version: \n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2022-10-10 13:05+0100\n" -"PO-Revision-Date: 2022-10-10 13:08+0100\n" +"POT-Creation-Date: 2022-10-31 11:51+0000\n" +"PO-Revision-Date: 2022-10-31 11:54+0000\n" "Last-Translator: \n" "Language-Team: \n" "Language: ru\n" @@ -58,7 +58,7 @@ msgstr "" "с faceswap" #: lib/cli/args.py:320 lib/cli/args.py:329 lib/cli/args.py:337 -#: lib/cli/args.py:386 lib/cli/args.py:672 lib/cli/args.py:681 +#: lib/cli/args.py:386 lib/cli/args.py:668 lib/cli/args.py:677 msgid "Data" msgstr "Данные" @@ -102,8 +102,8 @@ msgstr "" #: lib/cli/args.py:396 lib/cli/args.py:412 lib/cli/args.py:424 #: lib/cli/args.py:463 lib/cli/args.py:481 lib/cli/args.py:493 -#: 
lib/cli/args.py:502 lib/cli/args.py:691 lib/cli/args.py:718 -#: lib/cli/args.py:756 +#: lib/cli/args.py:502 lib/cli/args.py:687 lib/cli/args.py:714 +#: lib/cli/args.py:752 msgid "Plugins" msgstr "Плагины" @@ -266,9 +266,9 @@ msgstr "" "Получите и сохраните кодировку идентификации лица от VGGFace2. Немного " "замедляет извлечение, но сэкономит время при использовании «sort by face»" -#: lib/cli/args.py:513 lib/cli/args.py:523 lib/cli/args.py:536 -#: lib/cli/args.py:550 lib/cli/args.py:793 lib/cli/args.py:807 -#: lib/cli/args.py:820 lib/cli/args.py:834 +#: lib/cli/args.py:513 lib/cli/args.py:523 lib/cli/args.py:535 +#: lib/cli/args.py:548 lib/cli/args.py:789 lib/cli/args.py:803 +#: lib/cli/args.py:816 lib/cli/args.py:830 msgid "Face Processing" msgstr "Обработка лиц" @@ -280,51 +280,45 @@ msgstr "" "Отбрасывает лица ниже указанного размера. Длина указывается в пикселях по " "диагонали. Установите в 0 для отключения" -#: lib/cli/args.py:524 lib/cli/args.py:808 +#: lib/cli/args.py:524 msgid "" -"Optionally filter out people who you do not wish to process by passing in an " -"image of that person. Should be a front portrait with a single person in the " -"image. Multiple images can be added space separated. NB: Using face filter " -"will significantly decrease extraction speed and its accuracy cannot be " -"guaranteed." +"Optionally filter out people who you do not wish to extract by passing in " +"images of those people. Should be a small variety of images at different " +"angles and in different conditions. Multiple images can be added space " +"separated." msgstr "" -"Дополнительно вы можете отфильтровать лица людей, которых вы не хотите " -"обрабатывать указав изображение этого человека. На изображении должен быть " -"фронтальный портрет одного человека . Можно указать несколько файлов через " -"пробел. Прим.: Фильтрация лиц существенно снижает скорость извлечения, при " -"этом точность не гарантируется." +"При желании отфильтруйте людей, которых вы не хотите извлекать, передав " +"изображения этих людей. Должно быть небольшое разнообразие снимков под " +"разными углами и в разных условиях. Несколько изображений могут быть " +"добавлены через пробел." -#: lib/cli/args.py:537 lib/cli/args.py:821 +#: lib/cli/args.py:536 msgid "" -"Optionally select people you wish to process by passing in an image of that " -"person. Should be a front portrait with a single person in the image. " -"Multiple images can be added space separated. NB: Using face filter will " -"significantly decrease extraction speed and its accuracy cannot be " -"guaranteed." +"Optionally select people you wish to extract by passing in images of that " +"person. Should be a small variety of images at different angles and in " +"different conditions. Multiple identities can be filtered. Multiple images " +"can be added space separated." msgstr "" -"Дополнительно вы можете выбрать людей, которых вы хотели бы включить в " -"обработку путем указания изображения этого человека. Должен быть фронтальный " -"портрет с лишь одним человеком на картинке. Можно выбрать несколько " -"изображений через пробел. Прим.: Использование фильтра существенно замедлит " -"скорость извлечения. Также точность не гарантируется." +"При желании выберите людей, которых вы хотите извлечь, передав изображения " +"этого человека. Должно быть небольшое разнообразие снимков под разными " +"углами и в разных условиях. Множественные личности могут быть отфильтрованы. " +"Несколько изображений могут быть добавлены через пробел." 
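The matching behaviour these help strings describe comes down to a cosine-similarity test of each face against the reference embeddings, with "higher values are stricter" falling directly out of the threshold comparison. A minimal sketch in plain NumPy; the 4-D vectors and the 0.65 threshold are illustrative stand-ins, not the plugin's real VGGFace2 encodings:

    import numpy as np

    def is_match(references, candidate, threshold=0.65):
        """ ``True`` if the candidate embedding matches any reference embedding """
        sims = references @ candidate / (
            np.linalg.norm(references, axis=1) * np.linalg.norm(candidate))
        return bool(np.any(sims >= threshold))

    refs = np.array([[1.0, 0.0, 0.0, 0.0],   # illustrative reference identities
                     [0.0, 1.0, 0.0, 0.0]])
    face = np.array([0.9, 0.1, 0.0, 0.0])    # illustrative candidate embedding
    print(is_match(refs, face))              # True: similarity ~0.99 >= 0.65

Raising the threshold towards 1.0 accepts only near-identical embeddings, which is why higher values are stricter.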
-#: lib/cli/args.py:551 lib/cli/args.py:835 +#: lib/cli/args.py:549 msgid "" "For use with the optional nfilter/filter files. Threshold for positive face " -"recognition. Lower values are stricter. NB: Using face filter will " -"significantly decrease extraction speed and its accuracy cannot be " -"guaranteed." +"recognition. Higher values are stricter." msgstr "" -"Только при использовании файлов nfilter/filter. Порог для распознавания " -"лица. Чем ниже значения, тем строже. Прим.: Использование фильтра лиц " -"существенно замедлит скорость извлечения. Также точность не гарантируется." +"Для использования с дополнительными файлами nfilter/filter. Порог " +"положительного распознавания лиц. Более высокие значения являются более " +"строгими." -#: lib/cli/args.py:562 lib/cli/args.py:574 lib/cli/args.py:586 -#: lib/cli/args.py:598 +#: lib/cli/args.py:558 lib/cli/args.py:570 lib/cli/args.py:582 +#: lib/cli/args.py:594 msgid "output" msgstr "вывод" -#: lib/cli/args.py:563 +#: lib/cli/args.py:559 msgid "" "The output size of extracted faces. Make sure that the model you intend to " "train supports your required size. This will only need to be changed for hi-" @@ -334,7 +328,7 @@ msgstr "" "поддерживает такой входной размер. Стоит изменять только для моделей " "высокого разрешения." -#: lib/cli/args.py:575 +#: lib/cli/args.py:571 msgid "" "Extract every 'nth' frame. This option will skip frames when extracting " "faces. For example a value of 1 will extract faces from every frame, a value " @@ -344,7 +338,7 @@ msgstr "" "извлечении. Например, значение 1 будет искать лица в каждом кадре, а " "значение 10 в каждом 10том кадре." -#: lib/cli/args.py:587 +#: lib/cli/args.py:583 msgid "" "Automatically save the alignments file after a set amount of frames. By " "default the alignments file is only saved at the end of the extraction " @@ -359,17 +353,17 @@ msgstr "" "только во время второго прохода. ВНИМАНИЕ: Не прерывайте выполнение во время " "записи, так как это может повлечь порчу файла. Установите в 0 для выключения" -#: lib/cli/args.py:599 +#: lib/cli/args.py:595 msgid "Draw landmarks on the ouput faces for debugging purposes." msgstr "Рисовать ландмарки на выходных лицах для нужд отладки." -#: lib/cli/args.py:605 lib/cli/args.py:614 lib/cli/args.py:622 -#: lib/cli/args.py:629 lib/cli/args.py:847 lib/cli/args.py:858 -#: lib/cli/args.py:866 lib/cli/args.py:885 lib/cli/args.py:891 +#: lib/cli/args.py:601 lib/cli/args.py:610 lib/cli/args.py:618 +#: lib/cli/args.py:625 lib/cli/args.py:843 lib/cli/args.py:854 +#: lib/cli/args.py:862 lib/cli/args.py:881 lib/cli/args.py:887 msgid "settings" msgstr "настройки" -#: lib/cli/args.py:606 +#: lib/cli/args.py:602 msgid "" "Don't run extraction in parallel. Will run each part of the extraction " "process separately (one after the other) rather than all at the smae time. " @@ -379,7 +373,7 @@ msgstr "" "стадия извлечения будет запущена отдельно (одна, за другой). Полезно при " "нехватке VRAM." -#: lib/cli/args.py:615 +#: lib/cli/args.py:611 msgid "" "Skips frames that have already been extracted and exist in the alignments " "file" @@ -387,16 +381,16 @@ msgstr "" "Пропускать кадры, которые уже были извлечены и существуют в файле " "выравнивания" -#: lib/cli/args.py:623 +#: lib/cli/args.py:619 msgid "Skip frames that already have detected faces in the alignments file" msgstr "Пропускать кадры, для которых в файле выравнивания есть найденные лица" -#: lib/cli/args.py:630 +#: lib/cli/args.py:626 msgid "Skip saving the detected faces to disk. 
Just create an alignments file" msgstr "" "Не сохранять найденные лица на носитель. Просто создать файл выравнивания" -#: lib/cli/args.py:652 +#: lib/cli/args.py:648 msgid "" "Swap the original faces in a source video/images to your final faces.\n" "Conversion plugins can be configured in the 'Settings' Menu" @@ -404,7 +398,7 @@ msgstr "" "Заменить оригиналы лица в исходном видео/фотографиях новыми.\n" "Плагины конвертации могут быть настроены в меню 'Настройки'" -#: lib/cli/args.py:673 +#: lib/cli/args.py:669 msgid "" "Only required if converting from images to video. Provide The original video " "that the source frames were extracted from (for extracting the fps and " @@ -414,7 +408,7 @@ msgstr "" "Предоставьте исходное видео, из которого были извлечены кадры (для настройки " "частоты кадров, а также аудио)." -#: lib/cli/args.py:682 +#: lib/cli/args.py:678 msgid "" "Model directory. The directory containing the trained model you wish to use " "for conversion." @@ -422,7 +416,7 @@ msgstr "" "Папка с моделью. Папка, содержащая обученную модель, которую вы хотите " "использовать для преобразования." -#: lib/cli/args.py:692 +#: lib/cli/args.py:688 msgid "" "R|Performs color adjustment to the swapped face. Some of these options have " "configurable settings in '/config/convert.ini' or 'Settings > Configure " @@ -461,7 +455,7 @@ msgstr "" "дает удовлетворительных результатов.\n" "L|none: Не производить подгонку цвета." -#: lib/cli/args.py:719 +#: lib/cli/args.py:715 msgid "" "R|Masker to use. NB: The mask you require must exist within the alignments " "file. You can add additional masks with the Mask Tool.\n" @@ -530,7 +524,7 @@ msgstr "" "L| predicted: Если во время обучения была включена опция «Learn Mask», будет " "использоваться маска, созданная обученной моделью." -#: lib/cli/args.py:757 +#: lib/cli/args.py:753 msgid "" "R|The plugin to use to output the converted images. The writers are " "configurable in '/config/convert.ini' or 'Settings > Configure Convert " @@ -556,11 +550,11 @@ msgstr "" "L|pillow: [изображения] Более медленный, чем opencv, но имеет больше опций и " "поддерживает больше форматов." -#: lib/cli/args.py:776 lib/cli/args.py:783 lib/cli/args.py:877 +#: lib/cli/args.py:772 lib/cli/args.py:779 lib/cli/args.py:873 msgid "Frame Processing" msgstr "Обработка кадров" -#: lib/cli/args.py:777 +#: lib/cli/args.py:773 #, python-format msgid "" "Scale the final output frames by this amount. 100%% will output the frames " @@ -570,7 +564,7 @@ msgstr "" "кадры в исходном размере. 50%% половина от размера, а 200%% в удвоенном " "размере" -#: lib/cli/args.py:784 +#: lib/cli/args.py:780 msgid "" "Frame ranges to apply transfer to e.g. For frames 10 to 50 and 90 to 100 use " "--frame-ranges 10-50 90-100. Frames falling outside of the selected range " @@ -583,7 +577,7 @@ msgstr "" "unchanged). Прим.: Если при конверсии используются изображения, то имена " "файлов должны заканчиваться номером кадра!" -#: lib/cli/args.py:794 +#: lib/cli/args.py:790 msgid "" "If you have not cleansed your alignments file, then you can filter out faces " "by defining a folder here that contains the faces extracted from your input " @@ -599,7 +593,46 @@ msgstr "" "Если оставить это поле пустым, то все лица, которые существуют в файле " "выравниваний будут сконвертированы." -#: lib/cli/args.py:848 +#: lib/cli/args.py:804 +msgid "" +"Optionally filter out people who you do not wish to process by passing in an " +"image of that person. Should be a front portrait with a single person in the " +"image. 
Multiple images can be added space separated. NB: Using face filter " +"will significantly decrease extraction speed and its accuracy cannot be " +"guaranteed." +msgstr "" +"Дополнительно вы можете отфильтровать лица людей, которых вы не хотите " +"обрабатывать указав изображение этого человека. На изображении должен быть " +"фронтальный портрет одного человека . Можно указать несколько файлов через " +"пробел. Прим.: Фильтрация лиц существенно снижает скорость извлечения, при " +"этом точность не гарантируется." + +#: lib/cli/args.py:817 +msgid "" +"Optionally select people you wish to process by passing in an image of that " +"person. Should be a front portrait with a single person in the image. " +"Multiple images can be added space separated. NB: Using face filter will " +"significantly decrease extraction speed and its accuracy cannot be " +"guaranteed." +msgstr "" +"Дополнительно вы можете выбрать людей, которых вы хотели бы включить в " +"обработку путем указания изображения этого человека. Должен быть фронтальный " +"портрет с лишь одним человеком на картинке. Можно выбрать несколько " +"изображений через пробел. Прим.: Использование фильтра существенно замедлит " +"скорость извлечения. Также точность не гарантируется." + +#: lib/cli/args.py:831 +msgid "" +"For use with the optional nfilter/filter files. Threshold for positive face " +"recognition. Lower values are stricter. NB: Using face filter will " +"significantly decrease extraction speed and its accuracy cannot be " +"guaranteed." +msgstr "" +"Только при использовании файлов nfilter/filter. Порог для распознавания " +"лица. Чем ниже значения, тем строже. Прим.: Использование фильтра лиц " +"существенно замедлит скорость извлечения. Также точность не гарантируется." + +#: lib/cli/args.py:844 msgid "" "The maximum number of parallel processes for performing conversion. " "Converting images is system RAM heavy so it is possible to run out of memory " @@ -616,7 +649,7 @@ msgstr "" "будет использоваться больше процессов, чем доступно в вашей системе. Если " "включен одиночный процесс, этот параметр будет проигнорирован." -#: lib/cli/args.py:859 +#: lib/cli/args.py:855 msgid "" "[LEGACY] This only needs to be selected if a legacy model is being loaded or " "if there are multiple models in the model folder" @@ -624,7 +657,7 @@ msgstr "" "[СОВМЕСТИМОСТЬ] Это нужно выбирать только в том случае, если загружается " "устаревшая модель или если в папке сохранения есть несколько моделей" -#: lib/cli/args.py:867 +#: lib/cli/args.py:863 msgid "" "Enable On-The-Fly Conversion. NOT recommended. You should generate a clean " "alignments file for your destination video. However, if you wish you can " @@ -638,7 +671,7 @@ msgstr "" "использованию улучшенного конвейера экстракции и некачественных результатов. " "Если файл выравниваний найден, этот параметр будет проигнорирован." -#: lib/cli/args.py:878 +#: lib/cli/args.py:874 msgid "" "When used with --frame-ranges outputs the unchanged frames that are not " "processed instead of discarding them." @@ -646,16 +679,16 @@ msgstr "" "При использовании с --frame-range кадры не попавшие в диапазон выводятся " "неизменными, вместо их пропуска." -#: lib/cli/args.py:886 +#: lib/cli/args.py:882 msgid "Swap the model. Instead converting from of A -> B, converts B -> A" msgstr "" "Поменять модели местами. Вместо преобразования из A -> B, преобразует B -> A" -#: lib/cli/args.py:892 +#: lib/cli/args.py:888 msgid "Disable multiprocessing. Slower but less resource intensive." 
msgstr "Отключить многопроцессорность. Медленнее, но менее ресурсоемко." -#: lib/cli/args.py:908 +#: lib/cli/args.py:904 msgid "" "Train a model on extracted original (A) and swap (B) faces.\n" "Training models can take a long time. Anything from 24hrs to over a week\n" @@ -666,11 +699,11 @@ msgstr "" "Обучение моделей может занять долгое время: от 24 часов до недели\n" "Каждую модель можно отдельно настроить в меню «Настройки»" -#: lib/cli/args.py:927 lib/cli/args.py:936 +#: lib/cli/args.py:923 lib/cli/args.py:932 msgid "faces" msgstr "лица" -#: lib/cli/args.py:928 +#: lib/cli/args.py:924 msgid "" "Input directory. A directory containing training images for face A. This is " "the original face, i.e. the face that you want to remove and replace with " @@ -679,7 +712,7 @@ msgstr "" "Входная папка. Папка содержащая изображения для тренировки лица A. Это " "исходное лицо т.е. лицо, которое вы хотите убрать, заменив лицом B." -#: lib/cli/args.py:937 +#: lib/cli/args.py:933 msgid "" "Input directory. A directory containing training images for face B. This is " "the swap face, i.e. the face that you want to place onto the head of person " @@ -688,12 +721,12 @@ msgstr "" "Входная папка. Папка содержащая изображения для тренировки лица B. Это новое " "лицо т.е. лицо, которое вы хотите поместить на голову человека A." -#: lib/cli/args.py:945 lib/cli/args.py:957 lib/cli/args.py:973 -#: lib/cli/args.py:998 lib/cli/args.py:1008 +#: lib/cli/args.py:941 lib/cli/args.py:953 lib/cli/args.py:969 +#: lib/cli/args.py:994 lib/cli/args.py:1004 msgid "model" msgstr "модель" -#: lib/cli/args.py:946 +#: lib/cli/args.py:942 msgid "" "Model directory. This is where the training data will be stored. You should " "always specify a new folder for new models. If starting a new model, select " @@ -707,7 +740,7 @@ msgstr "" "будет создана). Если вы хотите продолжить тренировку, выберите папку с уже " "существующими сохранениями." -#: lib/cli/args.py:958 +#: lib/cli/args.py:954 msgid "" "R|Load the weights from a pre-existing model into a newly created model. For " "most models this will load weights from the Encoder of the given model into " @@ -731,7 +764,7 @@ msgstr "" "NB: Вес можно загружать только из моделей того же плагина, который вы " "собираетесь тренировать." -#: lib/cli/args.py:974 +#: lib/cli/args.py:970 msgid "" "R|Select which trainer to use. Trainers can be configured from the Settings " "menu or the config folder.\n" @@ -777,7 +810,7 @@ msgstr "" "ресурсам (Вам потребуется GPU с хорошим количеством видеопамяти). Хороша для " "деталей, но подвержена к неправильной передаче цвета." -#: lib/cli/args.py:999 +#: lib/cli/args.py:995 msgid "" "Output a summary of the model and exit. If a model folder is provided then a " "summary of the saved model is displayed. Otherwise a summary of the model " @@ -789,7 +822,7 @@ msgstr "" "сводная информация о модели, которая будет создана выбранным плагином, и " "параметрами конфигурации." -#: lib/cli/args.py:1009 +#: lib/cli/args.py:1005 msgid "" "Freeze the weights of the model. Freezing weights means that some of the " "parameters in the model will no longer continue to learn, but those that are " @@ -803,12 +836,12 @@ msgstr "" "некоторые модели могут иметь параметры конфигурации для замораживания других " "слоев." 
-#: lib/cli/args.py:1022 lib/cli/args.py:1034 lib/cli/args.py:1045 -#: lib/cli/args.py:1056 lib/cli/args.py:1139 +#: lib/cli/args.py:1018 lib/cli/args.py:1030 lib/cli/args.py:1041 +#: lib/cli/args.py:1052 lib/cli/args.py:1135 msgid "training" msgstr "тренировка" -#: lib/cli/args.py:1023 +#: lib/cli/args.py:1019 msgid "" "Batch size. This is the number of images processed through the model for " "each side per iteration. NB: As the model is fed 2 sides at a time, the " @@ -821,7 +854,7 @@ msgstr "" "изображений в два раза больше этого числа. Увеличение размера партии требует " "больше памяти GPU." -#: lib/cli/args.py:1035 +#: lib/cli/args.py:1031 msgid "" "Length of training in iterations. This is only really used for automation. " "There is no 'correct' number of iterations a model should be trained for. " @@ -835,7 +868,7 @@ msgstr "" "Однако, если вы хотите, чтобы тренировка прервалась после указанного кол-ва " "итерация, вы можете ввести это здесь." -#: lib/cli/args.py:1046 +#: lib/cli/args.py:1042 msgid "" "[Deprecated - Use '-D, --distribution-strategy' instead] Use the Tensorflow " "Mirrored Distrubution Strategy to train on multiple GPUs." @@ -844,7 +877,7 @@ msgstr "" "Используйте стратегию зеркального распространения Tensorflow для обучения на " "нескольких графических процессорах." -#: lib/cli/args.py:1057 +#: lib/cli/args.py:1053 msgid "" "R|Select the distribution stategy to use.\n" "L|default: Use Tensorflow's default distribution strategy.\n" @@ -869,15 +902,15 @@ msgstr "" "в каждый GPU, причем пакеты распределяются между каждым GPU на каждой " "итерации." -#: lib/cli/args.py:1074 lib/cli/args.py:1084 +#: lib/cli/args.py:1070 lib/cli/args.py:1080 msgid "Saving" msgstr "Сохранение" -#: lib/cli/args.py:1075 +#: lib/cli/args.py:1071 msgid "Sets the number of iterations between each model save." msgstr "Установка количества итераций между сохранениями модели." -#: lib/cli/args.py:1085 +#: lib/cli/args.py:1081 msgid "" "Sets the number of iterations before saving a backup snapshot of the model " "in it's current state. Set to 0 for off." @@ -885,11 +918,11 @@ msgstr "" "Устанавливает кол-во итераций перед созданием резервной копии модели. " "Установите в 0 для отключения." -#: lib/cli/args.py:1092 lib/cli/args.py:1103 lib/cli/args.py:1114 +#: lib/cli/args.py:1088 lib/cli/args.py:1099 lib/cli/args.py:1110 msgid "timelapse" msgstr "таймлапс" -#: lib/cli/args.py:1093 +#: lib/cli/args.py:1089 msgid "" "Optional for creating a timelapse. Timelapse will save an image of your " "selected faces into the timelapse-output folder at every save iteration. " @@ -902,7 +935,7 @@ msgstr "" "папку лиц набора 'A' для использования при создании таймлапса. Вам также " "нужно указать параметры--timelapse-output и --timelapse-input-B." -#: lib/cli/args.py:1104 +#: lib/cli/args.py:1100 msgid "" "Optional for creating a timelapse. Timelapse will save an image of your " "selected faces into the timelapse-output folder at every save iteration. " @@ -916,7 +949,7 @@ msgstr "" "таймлапса. Вы также должны указать параметр --timelapse-output и --timelapse-" "input-A." -#: lib/cli/args.py:1115 +#: lib/cli/args.py:1111 msgid "" "Optional for creating a timelapse. Timelapse will save an image of your " "selected faces into the timelapse-output folder at every save iteration. 
If " @@ -928,15 +961,15 @@ msgstr "" "указаны только входные папки, то по умолчанию вывод будет сохранен вместе с " "моделью в подкаталог /timelapse/" -#: lib/cli/args.py:1124 lib/cli/args.py:1131 +#: lib/cli/args.py:1120 lib/cli/args.py:1127 msgid "preview" msgstr "предварительный просмотр" -#: lib/cli/args.py:1125 +#: lib/cli/args.py:1121 msgid "Show training preview output. in a separate window." msgstr "Показывать предварительный просмотр в отдельном окне." -#: lib/cli/args.py:1132 +#: lib/cli/args.py:1128 msgid "" "Writes the training result to a file. The image will be stored in the root " "of your FaceSwap folder." @@ -944,7 +977,7 @@ msgstr "" "Записывает результат тренировки в файл. Файл будет сохранен в коренной папке " "FaceSwap." -#: lib/cli/args.py:1140 +#: lib/cli/args.py:1136 msgid "" "Disables TensorBoard logging. NB: Disabling logs means that you will not be " "able to use the graph or analysis for this session in the GUI." @@ -952,12 +985,12 @@ msgstr "" "Отключает журнал TensorBoard. Примечание: Отключение журналов означает, что " "вы не сможете использовать графики или анализ сессии внутри GUI." -#: lib/cli/args.py:1147 lib/cli/args.py:1156 lib/cli/args.py:1165 -#: lib/cli/args.py:1174 +#: lib/cli/args.py:1143 lib/cli/args.py:1152 lib/cli/args.py:1161 +#: lib/cli/args.py:1170 msgid "augmentation" msgstr "аугментация" -#: lib/cli/args.py:1148 +#: lib/cli/args.py:1144 msgid "" "Warps training faces to closely matched Landmarks from the opposite face-set " "rather than randomly warping the face. This is the 'dfaker' way of doing " @@ -967,7 +1000,7 @@ msgstr "" "Ориентирами/Landmarks противоположного набора лиц. Этот способ используется " "пакетом \"dfaker\"." -#: lib/cli/args.py:1157 +#: lib/cli/args.py:1153 msgid "" "To effectively learn, a random set of images are flipped horizontally. " "Sometimes it is desirable for this not to occur. Generally this should be " @@ -978,7 +1011,7 @@ msgstr "" "происходило. Как правило, эту настройку не стоит трогать, за исключением " "периода «финальной шлифовки»." -#: lib/cli/args.py:1166 +#: lib/cli/args.py:1162 msgid "" "Color augmentation helps make the model less susceptible to color " "differences between the A and B sets, at an increased training time cost. " @@ -988,7 +1021,7 @@ msgstr "" "цвета между наборами A and B ценой некоторого замедления скорости " "тренировки. Включите эту опцию для отключения цветовой аугментации." -#: lib/cli/args.py:1175 +#: lib/cli/args.py:1171 msgid "" "Warping is integral to training the Neural Network. This option should only " "be enabled towards the very end of training to try to bring out more detail. " @@ -1001,7 +1034,7 @@ msgstr "" "Включение этой опции с самого начала может убить модель и привести к ужасным " "результатам." 
-#: lib/cli/args.py:1200
+#: lib/cli/args.py:1196
 msgid "Output to Shell console instead of GUI console"
 msgstr "Вывод в системную консоль вместо GUI"
diff --git a/plugins/extract/align/_base.py b/plugins/extract/align/_base.py
index cd0d38e..c5f38eb 100644
--- a/plugins/extract/align/_base.py
+++ b/plugins/extract/align/_base.py
@@ -175,8 +175,8 @@ class Aligner(Extractor):  # pylint:disable=abstract-method
                 logger.trace("EOF received")  # type:ignore
                 exhausted = True
                 break
-            # Put frames with no faces into the out queue to keep TQDM consistent
-            if not item.detected_faces:
+            # Put frames that have no faces, or that are already aligned, into the out queue
+            if not item.detected_faces or item.is_aligned:
                 self._queues["out"].put(item)
                 continue
@@ -192,7 +192,8 @@ class Aligner(Extractor):  # pylint:disable=abstract-method
                     self._rollover = ExtractMedia(
                         item.filename,
                         item.image,
-                        detected_faces=item.detected_faces[f_idx + 1:])
+                        detected_faces=item.detected_faces[f_idx + 1:],
+                        is_aligned=item.is_aligned)
                     logger.trace("Rolled over %s faces of %s to next batch "  # type:ignore
                                  "for '%s'", len(self._rollover.detected_faces), frame_faces,
                                  item.filename)
@@ -536,7 +537,7 @@ class AlignedFilter():
 
         Parameters
         ----------
-        batch: list
+        faces: list
             List of detected face objects to filter out on size
         minimum_dimension: int
             The minimum (height, width) of the original frame
@@ -655,4 +656,4 @@ class AlignedFilter():
                   for key, count in self._counts.items() if count > 0]
         if counts:
-            logger.info("Aligner filtered: [%s)", ", ".join(counts))
+            logger.info("Aligner filtered: (%s)", ", ".join(counts))
diff --git a/plugins/extract/detect/_base.py b/plugins/extract/detect/_base.py
index 3c9bfcd..772852e 100644
--- a/plugins/extract/detect/_base.py
+++ b/plugins/extract/detect/_base.py
@@ -159,6 +159,10 @@ class Detector(Extractor):  # pylint:disable=abstract-method
                 exhausted = True
                 break
             assert isinstance(item, ExtractMedia)
+            # Put items that are already aligned into the out queue
+            if item.is_aligned:
+                self._queues["out"].put(item)
+                continue
             batch.filename.append(item.filename)
             image, scale, pad = self._compile_detection_image(item)
             batch.image.append(image)
diff --git a/plugins/extract/mask/_base.py b/plugins/extract/mask/_base.py
index f2997e1..de9ab9e 100644
--- a/plugins/extract/mask/_base.py
+++ b/plugins/extract/mask/_base.py
@@ -61,9 +61,6 @@ class Masker(Extractor):  # pylint:disable=abstract-method
         https://github.com/deepfakes-models/faceswap-models for more information
     model_filename: str
         The name of the model file to be loaded
-    image_is_aligned: bool, optional
-        Indicates that the passed in image is an aligned face rather than a frame.
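The detector and aligner hunks above apply the same short-circuit: any item flagged as already aligned is forwarded untouched, so downstream consumers still see one item per input. A pared-down sketch of the pattern; the Media class below is a simplified stand-in for ExtractMedia, not the real class:

    from dataclasses import dataclass, field
    from queue import Queue
    from typing import List

    @dataclass
    class Media:
        """ Simplified stand-in for ExtractMedia """
        filename: str
        detected_faces: List[object] = field(default_factory=list)
        is_aligned: bool = False  # pre-aligned faceswap faces bypass detect/align

    def short_circuit(item: Media, out_queue: Queue) -> bool:
        """ Forward pre-aligned or faceless items; return ``True`` if forwarded """
        if item.is_aligned or not item.detected_faces:
            out_queue.put(item)  # keeps progress totals consistent downstream
            return True
        return False  # caller batches the item for the plugin's model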
- Default: ``False`` Other Parameters ---------------- @@ -84,10 +81,8 @@ class Masker(Extractor): # pylint:disable=abstract-method model_filename: Optional[str] = None, configfile: Optional[str] = None, instance: int = 0, - image_is_aligned=False, **kwargs) -> None: - logger.debug("Initializing %s: (configfile: %s, image_is_aligned: %s)", - self.__class__.__name__, configfile, image_is_aligned) + logger.debug("Initializing %s: (configfile: %s)", self.__class__.__name__, configfile) super().__init__(git_model_id, model_filename, configfile=configfile, @@ -97,7 +92,6 @@ class Masker(Extractor): # pylint:disable=abstract-method self.coverage_ratio = 1.0 # Override for model specific coverage_ratio self._plugin_type = "mask" - self._image_is_aligned = image_is_aligned self._storage_name = self.__module__.rsplit(".", maxsplit=1)[-1].replace("_", "-") self._storage_centering: "CenteringType" = "face" # Centering to store the mask at self._storage_size = 128 # Size to store masks at. Leave this at default @@ -154,7 +148,7 @@ class Masker(Extractor): # pylint:disable=abstract-method image = item.get_image_copy(self.color_format) roi = np.ones((*item.image_size[:2], 1), dtype="float32") - if not self._image_is_aligned: + if not item.is_aligned: # Add the ROI mask to image so we can get the ROI mask with a single warp image = np.concatenate([image, roi], axis=-1) @@ -164,10 +158,10 @@ class Masker(Extractor): # pylint:disable=abstract-method size=self.input_size, coverage_ratio=self.coverage_ratio, dtype="float32", - is_aligned=self._image_is_aligned) + is_aligned=item.is_aligned) assert feed_face.face is not None - if not self._image_is_aligned: + if not item.is_aligned: # Split roi mask from feed face alpha channel roi_mask = feed_face.face[..., 3] feed_face._face = feed_face.face[..., :3] # pylint:disable=protected-access @@ -189,7 +183,8 @@ class Masker(Extractor): # pylint:disable=abstract-method self._rollover = ExtractMedia( item.filename, item.image, - detected_faces=item.detected_faces[f_idx + 1:]) + detected_faces=item.detected_faces[f_idx + 1:], + is_aligned=item.is_aligned) logger.trace("Rolled over %s faces of %s to next batch " # type:ignore "for '%s'", len(self._rollover.detected_faces), frame_faces, item.filename) diff --git a/plugins/extract/pipeline.py b/plugins/extract/pipeline.py index 9e120cf..3e4e953 100644 --- a/plugins/extract/pipeline.py +++ b/plugins/extract/pipeline.py @@ -12,7 +12,7 @@ plugins either in parallel or in series, giving easy access to input and output. import logging import sys -from typing import Any, cast, Dict, Generator, List, Optional, Tuple, TYPE_CHECKING, Union +from typing import cast, Dict, Generator, List, Optional, Tuple, TYPE_CHECKING, Union import cv2 @@ -28,6 +28,7 @@ else: if TYPE_CHECKING: import numpy as np + from lib.align.alignments import PNGHeaderSourceDict from lib.align.detected_face import DetectedFace from plugins.extract._base import Extractor as PluginExtractor from plugins.extract.detect._base import Detector @@ -91,9 +92,6 @@ class Extractor(): Default: `0` disable_filter: bool, optional Disable all aligner filters regardless of config option. Default: ``False`` - image_is_aligned: bool, optional - Used to set the :attr:`plugins.extract.mask.image_is_aligned` attribute. Indicates to the - masker that the fed in image is an aligned face rather than a frame. 
Default: ``False`` Attributes ---------- @@ -113,14 +111,13 @@ class Extractor(): min_size: int = 0, normalize_method: Optional[Literal["none", "clahe", "hist", "mean"]] = None, re_feed: int = 0, - disable_filter: bool = False, - image_is_aligned: bool = False,) -> None: + disable_filter: bool = False) -> None: logger.debug("Initializing %s: (detector: %s, aligner: %s, masker: %s, recognition: %s, " "configfile: %s, multiprocess: %s, exclude_gpus: %s, rotate_images: %s, " - "min_size: %s, normalize_method: %s, re_feed: %s, disable_filter: %s, " - "image_is_aligned: %s)", self.__class__.__name__, detector, aligner, masker, - recognition, configfile, multiprocess, exclude_gpus, rotate_images, min_size, - normalize_method, re_feed, disable_filter, image_is_aligned) + "min_size: %s, normalize_method: %s, re_feed: %s, disable_filter: %s, )", + self.__class__.__name__, detector, aligner, masker, recognition, configfile, + multiprocess, exclude_gpus, rotate_images, min_size, normalize_method, + re_feed, disable_filter) self._instance = _get_instance() maskers = [cast(Optional[str], masker)] if not isinstance(masker, list) else cast(List[Optional[str]], masker) @@ -139,7 +136,7 @@ class Extractor(): re_feed, disable_filter) self._recognition = self._load_recognition(recognition, configfile) - self._mask = [self._load_mask(mask, image_is_aligned, configfile) for mask in maskers] + self._mask = [self._load_mask(mask, configfile) for mask in maskers] self._is_parallel = self._set_parallel_processing(multiprocess) self._phases = self._set_phases(multiprocess) self._phase_index = 0 @@ -218,6 +215,18 @@ class Extractor(): logger.trace(retval) # type: ignore return retval + @property + def aligner(self) -> "Aligner": + """ The currently selected aligner plugin """ + assert self._align is not None + return self._align + + @property + def recognition(self) -> "Identity": + """ The currently selected recognition plugin """ + assert self._recognition is not None + return self._recognition + def reset_phase_index(self) -> None: """ Reset the current phase index back to 0. Used for when batch processing is used in extract. """ @@ -625,16 +634,27 @@ class Extractor(): def _load_mask(self, masker: Optional[str], - image_is_aligned: bool, configfile: Optional[str]) -> Optional["Masker"]: - """ Set global arguments and load masker plugin """ + """ Set global arguments and load masker plugin + + Parameters + ---------- + masker: str or ``none`` + The name of the masker plugin to use or ``None`` if no masker + configfile: str + Full path to custom config.ini file or ``None`` to use default + + Returns + ------- + :class:`~plugins.extract.mask._base.Masker` or ``None`` + The masker plugin to use or ``None`` if no masker selected + """ if masker is None or masker.lower() == "none": logger.debug("No masker selected. Returning None") return None masker_name = masker.replace("-", "_").lower() logger.debug("Loading Masker: '%s'", masker_name) plugin = PluginLoader.get_masker(masker_name)(exclude_gpus=self._exclude_gpus, - image_is_aligned=image_is_aligned, configfile=configfile, instance=self._instance) return plugin @@ -700,21 +720,6 @@ class Extractor(): - plugins_required) // len(gpu_plugins) self._set_plugin_batchsize(gpu_plugins, available_vram) - def set_aligner_normalization_method(self, method: Optional[Literal["none", - "clahe", - "hist", - "mean"]]) -> None: - """ Change the normalization method for faces fed into the aligner. 
- - Parameters - ---------- - method: {"none", "clahe", "hist", "mean"} - The normalization method to apply to faces prior to feeding into the aligner's model - """ - assert self._align is not None - logger.debug("Setting to: '%s'", method) - self._align.set_normalize_method(method) - def _set_plugin_batchsize(self, gpu_plugins: List[str], available_vram: float) -> None: """ Set the batch size for the given plugin based on given available vram. Do not update plugins which have a vram_per_batch of 0 (CPU plugins) due to @@ -779,26 +784,32 @@ class ExtractMedia(): filename: str The base name of the original frame's filename image: :class:`numpy.ndarray` - The original frame + The original frame or a faceswap aligned face image detected_faces: list, optional A list of :class:`~lib.align.DetectedFace` objects. Detected faces can be added later with :func:`add_detected_faces`. Setting ``None`` will default to an empty list. Default: ``None`` + is_aligned: bool, optional + ``True`` if the :attr:`image` is an aligned faceswap image otherwise ``False``. Used for + face filtering with vggface2. Aligned faceswap images will automatically skip detection, + alignment and masking. Default: ``False`` """ def __init__(self, filename: str, image: "np.ndarray", - detected_faces: Optional[List["DetectedFace"]] = None) -> None: + detected_faces: Optional[List["DetectedFace"]] = None, + is_aligned: bool = False) -> None: logger.trace("Initializing %s: (filename: '%s', image shape: %s, " # type: ignore - "detected_faces: %s)", self.__class__.__name__, filename, image.shape, - detected_faces) + "detected_faces: %s, is_aligned: %s)", self.__class__.__name__, filename, + image.shape, detected_faces, is_aligned) self._filename = filename self._image: Optional["np.ndarray"] = image self._image_shape = cast(Tuple[int, int, int], image.shape) self._detected_faces: List["DetectedFace"] = ([] if detected_faces is None else detected_faces) - self._frame_metadata: Dict[str, Any] = {} + self._is_aligned = is_aligned + self._frame_metadata: Optional["PNGHeaderSourceDict"] = None self._sub_folders: List[Optional[str]] = [] @property @@ -828,7 +839,12 @@ class ExtractMedia(): return self._detected_faces @property - def frame_metadata(self) -> dict: + def is_aligned(self) -> bool: + """ bool. ``True`` if :attr:`image` is an aligned faceswap image otherwise ``False`` """ + return self._is_aligned + + @property + def frame_metadata(self) -> "PNGHeaderSourceDict": """ dict: The frame metadata that has been added from an aligned image. This property should only be called after :func:`add_frame_metadata` has been called when processing an aligned face. For all other instances an assertion error will be raised. @@ -915,7 +931,7 @@ class ExtractMedia(): self._filename, image.shape) self._image = image - def add_frame_metadata(self, metadata: Dict[str, Any]) -> None: + def add_frame_metadata(self, metadata: "PNGHeaderSourceDict") -> None: """ Add the source frame metadata from an aligned PNG's header data. 
metadata: dict @@ -923,7 +939,7 @@ class ExtractMedia(): """ logger.trace("Adding PNG Source data for '%s': %s", # type:ignore self._filename, metadata) - dims: Tuple[int, int] = metadata["source_frame_dims"] + dims = cast(Tuple[int, int], metadata["source_frame_dims"]) self._image_shape = (*dims, 3) self._frame_metadata = metadata diff --git a/plugins/extract/recognition/_base.py b/plugins/extract/recognition/_base.py index d635915..a472318 100644 --- a/plugins/extract/recognition/_base.py +++ b/plugins/extract/recognition/_base.py @@ -16,6 +16,7 @@ To get a :class:`~lib.align.DetectedFace` object use the function: >>> face = self.to_detected_face(, , , ) """ import logging +import sys from dataclasses import dataclass, field from typing import Generator, List, Optional, Tuple, TYPE_CHECKING @@ -23,14 +24,20 @@ from typing import Generator, List, Optional, Tuple, TYPE_CHECKING import numpy as np from tensorflow.python.framework import errors_impl as tf_errors # pylint:disable=no-name-in-module # noqa -from lib.align import AlignedFace +from lib.align import AlignedFace, DetectedFace +from lib.image import read_image_meta from lib.utils import FaceswapError, get_backend from plugins.extract._base import BatchType, Extractor, ExtractorBatch from plugins.extract.pipeline import ExtractMedia +if sys.version_info < (3, 8): + from typing_extensions import get_args, Literal +else: + from typing import get_args, Literal + + if TYPE_CHECKING: from queue import Queue - from lib.align import DetectedFace from lib.align.aligned_face import CenteringType logger = logging.getLogger(__name__) @@ -58,9 +65,6 @@ class Identity(Extractor): # pylint:disable=abstract-method https://github.com/deepfakes-models/faceswap-models for more information model_filename: str The name of the model file to be loaded - image_is_aligned: bool, optional - Indicates that the passed in image is an aligned face rather than a frame. 
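With `is_aligned` carried on the media object rather than set per plugin, feeding a previously extracted face back through the pipeline looks roughly like this. The filename is hypothetical, and only the header field consumed by `add_frame_metadata` is shown:

    import cv2
    from plugins.extract.pipeline import ExtractMedia

    image = cv2.imread("face_00001.png")  # hypothetical face previously extracted by faceswap
    media = ExtractMedia("face_00001.png", image, is_aligned=True)

    # The source dict from the PNG header restores the original frame geometry;
    # the real header carries more keys than the one shown here
    media.add_frame_metadata({"source_frame_dims": (720, 1280)})  # type: ignore
    assert media.is_aligned and media.image_shape == (720, 1280, 3)

Because the flag travels with the item, the detector and aligner can wave it through while the recognition plugin still knows to treat the image as an already-aligned face.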
- Default: ``False`` Other Parameters ---------------- @@ -81,7 +85,6 @@ class Identity(Extractor): # pylint:disable=abstract-method model_filename: Optional[str] = None, configfile: Optional[str] = None, instance: int = 0, - image_is_aligned=False, **kwargs): logger.debug("Initializing %s", self.__class__.__name__) super().__init__(git_model_id, @@ -94,9 +97,27 @@ class Identity(Extractor): # pylint:disable=abstract-method self.coverage_ratio = 1.0 # Override for model specific coverage_ratio self._plugin_type = "recognition" - self._image_is_aligned = image_is_aligned + self._filter = IdentityFilter(self.config["save_filtered"]) logger.debug("Initialized _base %s", self.__class__.__name__) + def _get_detected_from_aligned(self, item: ExtractMedia) -> None: + """ Obtain detected face objects for when loading in aligned faces and a detected face + object does not exist + + Parameters + ---------- + item: :class:`~plugins.extract.pipeline.ExtractMedia` + The extract media to populate the detected face for + """ + detected_face = DetectedFace() + meta = read_image_meta(item.filename).get("itxt", {}).get("alignments") + if meta: + detected_face.from_png_meta(meta) + item.add_detected_faces([detected_face]) + self._faces_per_filename[item.filename] += 1 # Track this added face + logger.debug("Obtained detected face: (filename: %s, detected_face: %s)", + item.filename, item.detected_faces) + def get_batch(self, queue: "Queue") -> Tuple[bool, RecogBatch]: """ Get items for inputting into the recognition from the queue in batches @@ -140,9 +161,12 @@ class Identity(Extractor): # pylint:disable=abstract-method exhausted = True break # Put frames with no faces into the out queue to keep TQDM consistent - if not item.detected_faces: + if not item.is_aligned and not item.detected_faces: self._queues["out"].put(item) continue + if item.is_aligned and not item.detected_faces: + self._get_detected_from_aligned(item) + for f_idx, face in enumerate(item.detected_faces): image = item.get_image_copy(self.color_format) @@ -152,7 +176,7 @@ class Identity(Extractor): # pylint:disable=abstract-method size=self.input_size, coverage_ratio=self.coverage_ratio, dtype="float32", - is_aligned=self._image_is_aligned) + is_aligned=item.is_aligned) batch.detected_faces.append(face) batch.feed_faces.append(feed_face) @@ -164,7 +188,8 @@ class Identity(Extractor): # pylint:disable=abstract-method self._rollover = ExtractMedia( item.filename, item.image, - detected_faces=item.detected_faces[f_idx + 1:]) + detected_faces=item.detected_faces[f_idx + 1:], + is_aligned=item.is_aligned) logger.trace("Rolled over %s faces of %s to next batch " # type:ignore "for '%s'", len(self._rollover.detected_faces), frame_faces, item.filename) @@ -175,6 +200,11 @@ class Identity(Extractor): # pylint:disable=abstract-method for k, v in batch.__dict__.items()}) else: logger.trace(item) # type:ignore + + # TODO Move to end of process not beginning + if exhausted: + self._filter.output_counts() + return exhausted, batch def _predict(self, batch: BatchType) -> RecogBatch: @@ -239,15 +269,226 @@ class Identity(Extractor): # pylint:disable=abstract-method logger.trace("Item out: %s", # type: ignore {key: val.shape if isinstance(val, np.ndarray) else val for key, val in batch.__dict__.items()}) + for filename, face in zip(batch.filename, batch.detected_faces): self._output_faces.append(face) if len(self._output_faces) != self._faces_per_filename[filename]: continue output = self._extract_media.pop(filename) + self._output_faces = 
self._filter(self._output_faces, output.sub_folders) + output.add_detected_faces(self._output_faces) self._output_faces = [] logger.trace("Yielding: (filename: '%s', image: %s, " # type:ignore "detected_faces: %s)", output.filename, output.image_shape, len(output.detected_faces)) yield output + + def add_identity_filters(self, + filters: np.ndarray, + nfilters: np.ndarray, + threshold: float) -> None: + """ Add identity encodings to filter by identity in the recognition plugin + + Parameters + ---------- + filters: :class:`numpy.ndarray` + The array of filter embeddings to use + nfilters: :class:`numpy.ndarray` + The array of nfilter embeddings to use + threshold: float + The threshold for a positive filter match + """ + logger.debug("Adding identity filters") + self._filter.add_filters(filters, nfilters, threshold) + logger.debug("Added identity filters") + + +class IdentityFilter(): + """ Applies filters on the output of the recognition plugin + + Parameters + ---------- + save_output: bool + ``True`` if the filtered faces should be kept as they are being saved. ``False`` if they + should be deleted + """ + def __init__(self, save_output: bool) -> None: + logger.debug("Initializing %s: (save_output: %s)", self.__class__.__name__, save_output) + self._save_output = save_output + self._filter: Optional[np.ndarray] = None + self._nfilter: Optional[np.ndarray] = None + self._threshold = 0.0 + self._filter_enabled: bool = False + self._nfilter_enabled: bool = False + self._active: bool = False + self._counts = 0 + logger.debug("Initialized %s", self.__class__.__name__) + + def add_filters(self, filters: np.ndarray, nfilters: np.ndarray, threshold) -> None: + """ Add identity encodings to the filter and set whether each filter is enabled + + Parameters + ---------- + filters: :class:`numpy.ndarray` + The array of filter embeddings to use + nfilters: :class:`numpy.ndarray` + The array of nfilter embeddings to use + threshold: float + The threshold for a positive filter match + """ + logger.debug("Adding filters: %s, nfilters: %s, threshold: %s", + filters.shape, nfilters.shape, threshold) + self._filter = filters + self._nfilter = nfilters + self._threshold = threshold + self._filter_enabled = bool(np.any(self._filter)) + self._nfilter_enabled = bool(np.any(self._nfilter)) + self._active = self._filter_enabled or self._nfilter_enabled + logger.debug("filter active: %s, nfilter active: %s, all active: %s", + self._filter_enabled, self._nfilter_enabled, self._active) + + @classmethod + def _find_cosine_similiarity(cls, + source_identities: np.ndarray, + test_identity: np.ndarray) -> np.ndarray: + """ Find the cosine similarity between a source face identity and a test face identity + + Parameters + --------- + source_identities: :class:`numpy.ndarray` + The identity encoding for the source face identities + test_identity: :class:`numpy.ndarray` + The identity encoding for the face identity to test against the sources + + Returns + ------- + :class:`numpy.ndarray`: + The cosine similarity between a face identity and the source identities + """ + s_norm = np.linalg.norm(source_identities, axis=1) + i_norm = np.linalg.norm(test_identity) + retval = source_identities @ test_identity / (s_norm * i_norm) + return retval + + def _get_matches(self, + filter_type: Literal["filter", "nfilter"], + identities: np.ndarray) -> np.ndarray: + """ Obtain the average and minimum distances for each face against the source identities + to test against + + Parameters + ---------- + filter_type ["filter", "nfilter"] + 
The filter type to use for calculating the distance
+        identities: :class:`numpy.ndarray`
+            The identity encodings for the current face(s) being checked
+
+        Returns
+        -------
+        :class:`numpy.ndarray`
+            Boolean array. ``True`` if identity should be filtered otherwise ``False``
+        """
+        encodings = self._filter if filter_type == "filter" else self._nfilter
+        assert encodings is not None
+        distances = np.array([self._find_cosine_similiarity(encodings, identity)
+                              for identity in identities])
+        is_match = np.any(distances >= self._threshold, axis=-1)
+        # Invert for filter (set the `True` match to `False` for should filter)
+        retval = np.invert(is_match) if filter_type == "filter" else is_match
+        logger.trace("filter_type: %s, distances shape: %s, is_match: %s, "  # type: ignore
+                     "retval: %s", filter_type, distances.shape, is_match, retval)
+        return retval
+
+    def _filter_faces(self,
+                      faces: List[DetectedFace],
+                      sub_folders: List[Optional[str]],
+                      should_filter: List[bool]) -> List[DetectedFace]:
+        """ Filter the detected faces, either removing filtered faces from the list of detected
+        faces or setting the output subfolder to `"_identity"` for any filtered faces if saving
+        output is enabled.
+
+        Parameters
+        ----------
+        faces: list
+            List of detected face objects to filter on identity
+        sub_folders: list
+            List of subfolder locations for any faces that have already been filtered when
+            config option `save_filtered` has been enabled.
+        should_filter: list
+            List of `bool` corresponding to faces that have not already been marked for
+            filtering. ``True`` indicates the face should be filtered, ``False`` indicates the
+            face should be kept
+
+        Returns
+        -------
+        detected_faces: list
+            The filtered list of detected face objects if saving filtered faces has not been
+            selected, otherwise the full list of detected faces
+        """
+        retval: List[DetectedFace] = []
+        self._counts += sum(should_filter)
+        for idx, face in enumerate(faces):
+            fldr = sub_folders[idx]
+            if fldr is not None:
+                # Saving to sub folder is selected and face is already filtered
+                # so this face was excluded from identity check
+                retval.append(face)
+                continue
+            to_filter = should_filter.pop(0)
+            if not to_filter or self._save_output:
+                # Keep the face if not marked as filtered or we are to output to a subfolder
+                retval.append(face)
+            if to_filter and self._save_output:
+                sub_folders[idx] = "_identity"
+
+        return retval
+
+    def __call__(self,
+                 faces: List[DetectedFace],
+                 sub_folders: List[Optional[str]]) -> List[DetectedFace]:
+        """ Call the identity filter function
+
+        Parameters
+        ----------
+        faces: list
+            List of detected face objects to filter on identity
+        sub_folders: list
+            List of subfolder locations for any faces that have already been filtered when
+            config option `save_filtered` has been enabled. 
+ + Returns + ------- + detected_faces: list + The filtered list of detected face objects, if saving filtered faces has not been + selected or the full list of detected faces + """ + if not self._active: + return faces + + identities = np.array([face.identity["vggface2"] for face, fldr in zip(faces, sub_folders) + if fldr is None]) + logger.trace("face_count: %s, already_filtered: %s, identity_shape: %s", # type: ignore + len(faces), sum(x is not None for x in sub_folders), identities.shape) + + if not np.any(identities): + logger.trace("All faces already filtered: %s", sub_folders) # type: ignore + return faces + + should_filter: List[np.ndarray] = [] + for f_type in get_args(Literal["filter", "nfilter"]): + if not getattr(self, f"_{f_type}_enabled"): + continue + should_filter.append(self._get_matches(f_type, identities)) + + # If any of the filter or nfilter evaluate to 'should filter' then filter out face + final_filter: List[bool] = np.array(should_filter).max(axis=0).tolist() + logger.trace("should_filter: %s, final_filter: %s", # type: ignore + should_filter, final_filter) + return self._filter_faces(faces, sub_folders, final_filter) + + def output_counts(self): + """ Output the counts of filtered items """ + if not self._active or not self._counts: + return + logger.info("Identity filtered: (%s)", self._counts) diff --git a/scripts/extract.py b/scripts/extract.py index c30c86d..4d54299 100644 --- a/scripts/extract.py +++ b/scripts/extract.py @@ -7,18 +7,22 @@ import logging import os import sys from argparse import Namespace -from typing import List, Dict, Optional +from typing import List, Dict, Optional, Tuple, TYPE_CHECKING, Union +import numpy as np from tqdm import tqdm +from lib.align.alignments import PNGHeaderDict -from lib.image import encode_image, generate_thumbnail, ImagesLoader, ImagesSaver +from lib.image import encode_image, generate_thumbnail, ImagesLoader, ImagesSaver, read_image_meta from lib.multithreading import MultiThread from lib.utils import get_folder, _image_extensions, _video_extensions from plugins.extract.pipeline import Extractor, ExtractMedia from scripts.fsmedia import Alignments, PostProcess, finalize +if TYPE_CHECKING: + from lib.align.alignments import PNGHeaderAlignmentsDict -tqdm.monitor_interval = 0 # workaround for TqdmSynchronisationWarning +# tqdm.monitor_interval = 0 # workaround for TqdmSynchronisationWarning # TODO? logger = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -50,7 +54,9 @@ class Extract(): # pylint:disable=too-few-public-methods normalization = None if self._args.normalization == "none" else self._args.normalization maskers = ["components", "extended"] maskers += self._args.masker if self._args.masker else [] - recognition = "vgg_face2" if arguments.identity else None + recognition = ("vgg_face2" + if arguments.identity or arguments.filter or arguments.nfilter + else None) self._extractor = Extractor(self._args.detector, self._args.aligner, maskers, @@ -62,6 +68,10 @@ class Extract(): # pylint:disable=too-few-public-methods min_size=self._args.min_size, normalize_method=normalization, re_feed=self._args.re_feed) + self._filter = Filter(self._args.ref_threshold, + self._args.filter, + self._args.nfilter, + self._extractor) def _get_input_locations(self) -> List[str]: """ Obtain the full path to input locations. 
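End to end, the handshake between the new Filter class and the recognition plugin's IdentityFilter can be sketched as below. The module path is taken from this diff; the 512-D random embeddings are placeholders for real VGGFace2 encodings:

    import numpy as np
    from plugins.extract.recognition._base import IdentityFilter

    identity_filter = IdentityFilter(save_output=False)  # discard filtered faces outright
    identity_filter.add_filters(filters=np.random.rand(2, 512),  # placeholder embeddings
                                nfilters=np.array([]),           # no exclusion filter
                                threshold=0.65)

    # At output time the recognition plugin calls the filter on each frame's faces.
    # `faces` would be DetectedFace objects with face.identity["vggface2"] populated;
    # `sub_folders` holds one ``None`` entry per face that has not already been filtered:
    # kept = identity_filter(faces, sub_folders=[None] * len(faces))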
Will be a list of locations if batch mode is
@@ -133,7 +143,7 @@ class Extract():  # pylint:disable=too-few-public-methods
         logger.debug("Returning output: '%s' for input: '%s'", retval, input_location)
         return retval
 
-    def process(self):
+    def process(self) -> None:
         """ The entry point for triggering the Extraction Process.
 
         Should only be called from :class:`lib.cli.launcher.ScriptExecutor`
@@ -155,96 +165,349 @@ class Extract():  # pylint:disable=too-few-public-methods
             self._extractor.reset_phase_index()
 
 
-class _Extract():  # pylint:disable=too-few-public-methods
-    """ The Actual extraction process.
-
-    This class is called by the parent :class:`Extract` process
+class Filter():
+    """ Obtains and holds face identity embeddings for any filter/nfilter image files
+    passed in from the command line.
 
     Parameters
     ----------
+    threshold: float
+        The threshold above which a face is considered a positive match for a filter identity
+    filter_files: str, list or ``None``
+        The list of filter file(s) passed in as command line arguments
+    nfilter_files: str, list or ``None``
+        The list of nfilter file(s) passed in as command line arguments
     extractor: :class:`~plugins.extract.pipeline.Extractor`
-        The extractor pipeline for running extractions
-    arguments: :class:`argparse.Namespace`
-        The arguments to be passed to the extraction process as generated from Faceswap's command
-        line arguments
+        The extractor pipeline for obtaining face identity from images
     """
     def __init__(self,
-                 extractor: Extractor,
-                 arguments: Namespace) -> None:
-        logger.debug("Initializing %s: (extractor: %s, args: %s)", self.__class__.__name__,
-                     extractor, arguments)
-        self._args = arguments
-        self._output_dir = None if self._args.skip_saving_faces else get_folder(
-            self._args.output_dir)
+                 threshold: float,
+                 filter_files: Optional[Union[str, List[str]]],
+                 nfilter_files: Optional[Union[str, List[str]]],
+                 extractor: Extractor) -> None:
+        logger.debug("Initializing %s: (threshold: %s, filter_files: %s, nfilter_files: %s, "
+                     "extractor: %s)", self.__class__.__name__, threshold, filter_files,
+                     nfilter_files, extractor)
+        self._threshold = threshold
+        self._filter_files, self._nfilter_files = self._validate_inputs(filter_files,
+                                                                        nfilter_files)
 
-        logger.info("Output Directory: %s", self._output_dir)
-        self._images = ImagesLoader(self._args.input_dir, fast_count=True)
-        self._alignments = Alignments(self._args, True, self._images.is_video)
+        if not self._filter_files and not self._nfilter_files:
+            logger.debug("Filter not selected. Exiting %s", self.__class__.__name__)
+            return
+
+        self._embeddings: List[np.ndarray] = [np.array([]) for _ in self._filter_files]
+        self._nembeddings: List[np.ndarray] = [np.array([]) for _ in self._nfilter_files]
         self._extractor = extractor
-        self._existing_count = 0
-        self._set_skip_list()
-
-        self._post_process = PostProcess(arguments)
-        self._threads: List[MultiThread] = []
-        self._verify_output = False
+        self._get_embeddings()
+        self._extractor.recognition.add_identity_filters(self.embeddings,
+                                                         self.n_embeddings,
+                                                         self._threshold)
        logger.debug("Initialized %s", self.__class__.__name__)
 
     @property
-    def _save_interval(self) -> Optional[int]:
-        """ int: The number of frames to be processed between each saving of the alignments file if
-        it has been provided, otherwise ``None`` """
-        if hasattr(self._args, "save_interval"):
-            return self._args.save_interval
-        return None
+    def active(self) -> bool:
+        """ bool: ``True`` if filter files have been passed in command line arguments. ``False`` if
 
     @property
-    def _skip_num(self) -> int:
-        """ int: Number of frames to skip if extract_every_n has been provided """
-        return self._args.extract_every_n if hasattr(self._args, "extract_every_n") else 1
+    def embeddings(self) -> np.ndarray:
+        """ :class:`numpy.ndarray`: The filter embeddings """
+        if self._embeddings and all(np.any(e) for e in self._embeddings):
+            retval = np.concatenate(self._embeddings, axis=0)
+        else:
+            retval = np.array([])
+        return retval
 
-    def _set_skip_list(self) -> None:
-        """ Add the skip list to the image loader
+    @property
+    def n_embeddings(self) -> np.ndarray:
+        """ :class:`numpy.ndarray`: The n-filter embeddings """
+        if self._nembeddings and all(np.any(e) for e in self._nembeddings):
+            retval = np.concatenate(self._nembeddings, axis=0)
+        else:
+            retval = np.array([])
+        return retval
 
-        Checks against `extract_every_n` and the existence of alignments data (can exist if
-        `skip_existing` or `skip_existing_faces` has been provided) and compiles a list of frame
-        indices that should not be processed, providing these to :class:`lib.image.ImagesLoader`.
+    @classmethod
+    def _validate_inputs(cls,
+                         filter_files: Optional[Union[str, List[str]]],
+                         nfilter_files: Optional[Union[str, List[str]]]) -> Tuple[List[str],
+                                                                                  List[str]]:
+        """ Validate that the given filter/nfilter files exist, are image files and are unique
+
+        Parameters
+        ----------
+        filter_files: str, list or ``None``
+            The list of filter file(s) passed in as command line arguments
+        nfilter_files: str, list or ``None``
+            The list of nfilter file(s) passed in as command line arguments
+
+        Returns
+        -------
+        filter_files: list
+            List of full paths to filter files
+        nfilter_files: list
+            List of full paths to nfilter files
         """
-        if self._skip_num == 1 and not self._alignments.data:
-            logger.debug("No frames to be skipped")
+        error = False
+        retval: List[List[str]] = []
+        for files in (filter_files, nfilter_files):
+            filt_files = [files] if isinstance(files, str) else files
+            filt_files = [] if filt_files is None else filt_files
+            for file in filt_files:
+                if (not os.path.isfile(file) or
+                        os.path.splitext(file)[-1].lower() not in _image_extensions):
+                    logger.warning("Filter file '%s' does not exist or is not an image file", file)
+                    error = True
+            retval.append(filt_files)
+
+        filters = retval[0]
+        nfilters = retval[1]
+        f_fnames = set(os.path.basename(fname) for fname in filters)
+        n_fnames = set(os.path.basename(fname) for fname in nfilters)
+        if f_fnames.intersection(n_fnames):
+            error = True
+            logger.warning("filter and nfilter filenames should be unique. The following "
+                           "filenames exist in both lists: %s", f_fnames.intersection(n_fnames))
+
+        if error:
+            logger.error("There was a problem processing filter files. See the above warnings for "
+                         "details")
+            sys.exit(1)
+        logger.debug("filter_files: %s, nfilter_files: %s", retval[0], retval[1])
+
+        return filters, nfilters
+
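
The `embeddings`/`n_embeddings` properties above only expose a combined matrix once every
reference image has had its slot filled. A standalone illustration of that layout; the 512
embedding dimension is invented for the example:

    import numpy as np

    # One slot per reference image; a slot stays empty until identity is obtained
    per_file = [np.random.rand(1, 512),   # reference image containing a single face
                np.random.rand(2, 512)]   # nfilter image containing two faces
    if per_file and all(np.any(e) for e in per_file):
        combined = np.concatenate(per_file, axis=0)
    else:
        combined = np.array([])           # not ready: at least one slot is still empty
    print(combined.shape)                 # (3, 512)
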
+    @classmethod
+    def _identity_from_extracted(cls, filename: str) -> Tuple[np.ndarray, bool]:
+        """ Test whether the given image is a faceswap extracted face and contains identity
+        information. If so, return the identity embedding
+
+        Parameters
+        ----------
+        filename: str
+            Full path to the image file to load
+
+        Returns
+        -------
+        :class:`numpy.ndarray`
+            The identity embeddings, if they can be obtained from the image header, otherwise an
+            empty array
+        bool
+            ``True`` if the image is a faceswap extracted image otherwise ``False``
+        """
+        if os.path.splitext(filename)[-1].lower() != ".png":
+            logger.info("'%s' not a png. Returning empty array", filename)
+            return np.array([]), False
+
+        meta = read_image_meta(filename)
+        if "itxt" not in meta or "alignments" not in meta["itxt"]:
+            logger.debug("'%s' does not contain faceswap data. Returning empty array", filename)
+            return np.array([]), False
+
+        align: "PNGHeaderAlignmentsDict" = meta["itxt"]["alignments"]
+        if "identity" not in align or "vggface2" not in align["identity"]:
+            logger.debug("'%s' does not contain identity data. Returning empty array", filename)
+            return np.array([]), True
+
+        retval = np.array(align["identity"]["vggface2"])
+        logger.debug("Obtained identity for '%s'. Shape: %s", filename, retval.shape)
+
+        return retval, True
+
+    def _process_extracted(self, item: ExtractMedia) -> None:
+        """ Process the output from the extraction pipeline.
+
+        If no face has been detected, or multiple faces are detected for the inclusive filter,
+        embeddings and filenames are removed from the filter.
+
+        If a single face is detected, or multiple faces are detected for the exclusive filter,
+        embeddings are added to the relevant filter list
+
+        Parameters
+        ----------
+        item: :class:`~plugins.extract.pipeline.ExtractMedia`
+            The output from the extraction pipeline containing the identity encodings
+        """
+        is_filter = item.filename in self._filter_files
+        lbl = "filter" if is_filter else "nfilter"
+        filelist = self._filter_files if is_filter else self._nfilter_files
+        embeddings = self._embeddings if is_filter else self._nembeddings
+        identities = np.array([face.identity["vggface2"] for face in item.detected_faces])
+        idx = filelist.index(item.filename)
+
+        if len(item.detected_faces) == 0:
+            logger.warning("No faces detected for %s in file '%s'. Image will not be used",
+                           lbl, os.path.basename(item.filename))
+            filelist.pop(idx)
+            embeddings.pop(idx)
+            return
+
+        if len(item.detected_faces) == 1:
+            logger.debug("Adding identity for %s from file '%s'", lbl, item.filename)
+            embeddings[idx] = identities
+            return
+
+        if len(item.detected_faces) > 1 and is_filter:
+            logger.warning("%s faces detected for filter in '%s'. These identities will not be "
+                           "used", len(item.detected_faces), os.path.basename(item.filename))
+            filelist.pop(idx)
+            embeddings.pop(idx)
+            return
+
+        if len(item.detected_faces) > 1 and not is_filter:
+            logger.warning("%s faces detected for nfilter in '%s'. All of these identities will "
+                           "be used", len(item.detected_faces), os.path.basename(item.filename))
+            embeddings[idx] = identities
+            return
+
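
`_identity_from_extracted` above relies on faceswap's PNG iTXt header layout. A standalone
sketch of the same probe, assuming the header parses to nested dicts exactly as that method
expects (the file path is hypothetical):

    from lib.image import read_image_meta

    meta = read_image_meta("faces/person_a_0.png")   # hypothetical extracted face
    identity = (meta.get("itxt", {})
                    .get("alignments", {})
                    .get("identity", {})
                    .get("vggface2"))
    if identity is None:
        print("no stored identity: the image needs a pass through the recognition plugin")
    else:
        print(f"stored embedding length: {len(identity)}")
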
+    def _identity_from_extractor(self, file_list: List[str], aligned: List[str]) -> None:
+        """ Obtain the identity embeddings from the extraction pipeline
+
+        Parameters
+        ----------
+        file_list: list
+            List of full paths to images to run through the extraction pipeline
+        aligned: list
+            List of full paths to images that exist in :attr:`file_list` that are faceswap
+            aligned images
+        """
+        logger.info("Extracting faces to obtain identity from images")
+        logger.debug("Files requiring full extraction: %s",
+                     [fname for fname in file_list if fname not in aligned])
+        logger.debug("Aligned files requiring identity info: %s", aligned)
+
+        loader = PipelineLoader(file_list, self._extractor, aligned_filenames=aligned)
+        loader.launch()
+
+        for phase in range(self._extractor.passes):
+            is_final = self._extractor.final_pass
+            detected_faces: Dict[str, ExtractMedia] = {}
+            self._extractor.launch()
+            desc = "Obtaining reference face identity"
+            if self._extractor.passes > 1:
+                desc = (f"{desc} pass {phase + 1} of {self._extractor.passes}: "
+                        f"{self._extractor.phase_text}")
+            for extract_media in tqdm(self._extractor.detected_faces(),
+                                      total=len(file_list),
+                                      file=sys.stdout,
+                                      desc=desc):
+                if is_final:
+                    self._process_extracted(extract_media)
+                else:
+                    extract_media.remove_image()
+                    # cache extract_media for next run
+                    detected_faces[extract_media.filename] = extract_media
+
+            if not is_final:
+                logger.debug("Reloading images")
+                loader.reload(detected_faces)
+
+        self._extractor.reset_phase_index()
+
+    def _get_embeddings(self) -> None:
+        """ Obtain the embeddings for the given filter lists """
+        needs_extraction: List[str] = []
+        aligned: List[str] = []
+
+        for files, embed in zip((self._filter_files, self._nfilter_files),
+                                (self._embeddings, self._nembeddings)):
+            for idx, file in enumerate(files):
+                identity, is_aligned = self._identity_from_extracted(file)
+                if np.any(identity):
+                    logger.debug("Obtained identity from png header: '%s'", file)
+                    embed[idx] = identity[None, ...]
+                    continue
+
+                needs_extraction.append(file)
+                if is_aligned:
+                    aligned.append(file)
+
+        if needs_extraction:
+            self._identity_from_extractor(needs_extraction, aligned)
+
+        if not self._nfilter_files and not self._filter_files:
+            logger.error("No faces were detected from your selected identity filter files")
+            sys.exit(1)
+
+        logger.debug("Filter: (filenames: %s, shape: %s), nFilter: (filenames: %s, shape: %s)",
+                     [os.path.basename(f) for f in self._filter_files],
+                     self.embeddings.shape,
+                     [os.path.basename(f) for f in self._nfilter_files],
+                     self.n_embeddings.shape)
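
Both `_identity_from_extractor` above and `_Extract._run_extraction` below follow the same
multi-phase consumption contract. A condensed sketch of that contract, where `loader`,
`extractor` and `handle` stand in for objects wired up by the caller:

    def consume(loader, extractor, handle):
        """ Drive the extractor through all of its phases (condensed sketch). """
        loader.launch()                            # feed images in a background thread
        for _ in range(extractor.passes):          # e.g. detect -> align -> mask/recognition
            is_final = extractor.final_pass
            extractor.launch()
            cache = {}
            for media in extractor.detected_faces():
                if is_final:
                    handle(media)                  # caller-supplied consumer of ExtractMedia
                else:
                    media.remove_image()           # drop pixel data between passes
                    cache[media.filename] = media
            if not is_final:
                loader.reload(cache)               # re-feed cached media for the next phase
        extractor.reset_phase_index()
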
+
+
+class PipelineLoader():
+    """ Handles loading and reloading images into the extraction pipeline.
+
+    Parameters
+    ----------
+    path: str or list of str
+        Full path to a folder of images or a video file, or a list of image files
+    extractor: :class:`~plugins.extract.pipeline.Extractor`
+        The extractor pipeline for obtaining face identity from images
+    aligned_filenames: list, optional
+        Used when the loader is obtaining face filter embeddings. List of full paths to image
+        files that exist in :attr:`path` that are aligned faceswap images
+    """
+    def __init__(self,
+                 path: Union[str, List[str]],
+                 extractor: Extractor,
+                 aligned_filenames: Optional[List[str]] = None) -> None:
+        logger.debug("Initializing %s: (path: %s, extractor: %s, aligned_filenames: %s)",
+                     self.__class__.__name__, path, extractor, aligned_filenames)
+        self._images = ImagesLoader(path, fast_count=True)
+        self._extractor = extractor
+        self._threads: List[MultiThread] = []
+        self._aligned_filenames = [] if aligned_filenames is None else aligned_filenames
+        logger.debug("Initialized %s", self.__class__.__name__)
+
+    @property
+    def is_video(self) -> bool:
+        """ bool: ``True`` if the input location is a video file, ``False`` if it is a folder of
+        images """
+        return self._images.is_video
+
+    @property
+    def file_list(self) -> List[str]:
+        """ list: A full list of files in the source location. If the input is a video then this
+        is a list of dummy filenames corresponding to an alignments file """
+        return self._images.file_list
+
+    @property
+    def process_count(self) -> int:
+        """ int: The number of images or video frames to be processed (i.e. the total count less
+        items that are to be skipped from the :attr:`skip_list`) """
+        return self._images.process_count
+
+    def add_skip_list(self, skip_list: List[int]) -> None:
+        """ Add a skip list to the :class:`ImagesLoader`
+
+        Parameters
+        ----------
+        skip_list: list
+            A list of indices corresponding to the frame indices that should be skipped by the
+            :func:`load` function.
+        """
         self._images.add_skip_list(skip_list)
 
-    def process(self) -> None:
-        """ The entry point for triggering the Extraction Process.
-
-        Should only be called from :class:`lib.cli.launcher.ScriptExecutor`
-        """
-        # from lib.queue_manager import queue_manager ; queue_manager.debug_monitor(3)
+    def launch(self) -> None:
+        """ Launch the image loading pipeline """
         self._threaded_redirector("load")
-        self._run_extraction()
+
+    def reload(self, detected_faces: Dict[str, ExtractMedia]) -> None:
+        """ Reload images for multiple pipeline passes """
+        self._threaded_redirector("reload", (detected_faces, ))
+
+    def check_thread_error(self) -> None:
+        """ Check if any errors have occurred in the running threads and raise their errors """
+        for thread in self._threads:
+            thread.check_and_raise_error()
+
+    def join(self) -> None:
+        """ Join all open loader threads """
         for thread in self._threads:
             thread.join()
-        self._alignments.save()
-        finalize(self._images.process_count + self._existing_count,
-                 self._alignments.faces_count,
-                 self._verify_output)
 
     def _threaded_redirector(self, task: str, io_args: Optional[tuple] = None) -> None:
         """ Redirect image input/output tasks to relevant queues in background thread
@@ -275,7 +538,8 @@ class _Extract():  # pylint:disable=too-few-public-methods
             if load_queue.shutdown.is_set():
                 logger.debug("Load Queue: Stop signal received. Terminating")
                 break
-            item = ExtractMedia(filename, image[..., :3])
+            is_aligned = filename in self._aligned_filenames
+            item = ExtractMedia(filename, image[..., :3], is_aligned=is_aligned)
             load_queue.put(item)
         load_queue.put("EOF")
         logger.debug("Load Images: Complete")
@@ -308,6 +572,97 @@ class _Extract():  # pylint:disable=too-few-public-methods
         load_queue.put("EOF")
         logger.debug("Reload Images: Complete")
 
+
+class _Extract():  # pylint:disable=too-few-public-methods
+    """ The Actual extraction process.
+
+    This class is called by the parent :class:`Extract` process
+
+    Parameters
+    ----------
+    extractor: :class:`~plugins.extract.pipeline.Extractor`
+        The extractor pipeline for running extractions
+    arguments: :class:`argparse.Namespace`
+        The arguments to be passed to the extraction process as generated from Faceswap's command
+        line arguments
+    """
+    def __init__(self,
+                 extractor: Extractor,
+                 arguments: Namespace) -> None:
+        logger.debug("Initializing %s: (extractor: %s, args: %s)", self.__class__.__name__,
+                     extractor, arguments)
+        self._args = arguments
+        self._output_dir = None if self._args.skip_saving_faces else get_folder(
+            self._args.output_dir)
+
+        logger.info("Output Directory: %s", self._output_dir)
+        self._loader = PipelineLoader(self._args.input_dir, extractor)
+
+        self._alignments = Alignments(self._args, True, self._loader.is_video)
+        self._extractor = extractor
+
+        self._existing_count = 0
+        self._set_skip_list()
+
+        self._post_process = PostProcess(arguments)
+        self._verify_output = False
+        logger.debug("Initialized %s", self.__class__.__name__)
+
+    @property
+    def _save_interval(self) -> Optional[int]:
+        """ int: The number of frames to be processed between each saving of the alignments file if
+        it has been provided, otherwise ``None`` """
+        if hasattr(self._args, "save_interval"):
+            return self._args.save_interval
+        return None
+
+    @property
+    def _skip_num(self) -> int:
+        """ int: Number of frames to skip if extract_every_n has been provided """
+        return self._args.extract_every_n if hasattr(self._args, "extract_every_n") else 1
+
+    def _set_skip_list(self) -> None:
+        """ Add the skip list to the image loader
+
+        Checks against `extract_every_n` and the existence of alignments data (can exist if
+        `skip_existing` or `skip_existing_faces` has been provided) and compiles a list of frame
+        indices that should not be processed, providing these to :class:`lib.image.ImagesLoader`.
+        """
+        if self._skip_num == 1 and not self._alignments.data:
+            logger.debug("No frames to be skipped")
+            return
+        skip_list = []
+        for idx, filename in enumerate(self._loader.file_list):
+            if idx % self._skip_num != 0:
+                logger.trace("Adding image '%s' to skip list due to "  # type: ignore
+                             "extract_every_n = %s", filename, self._skip_num)
+                skip_list.append(idx)
+            # Items may be in the alignments file if skip-existing[-faces] is selected
+            elif os.path.basename(filename) in self._alignments.data:
+                self._existing_count += 1
+                logger.trace("Removing image: '%s' due to previously existing",  # type: ignore
+                             filename)
+                skip_list.append(idx)
+        if self._existing_count != 0:
+            logger.info("Skipping %s frames due to skip_existing/skip_existing_faces.",
+                        self._existing_count)
+        logger.debug("Adding skip list: %s", skip_list)
+        self._loader.add_skip_list(skip_list)
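
`_set_skip_list` above merges two skip sources: the `extract_every_n` stride and frames that
already exist in the alignments file. A condensed illustration with hypothetical filenames:

    file_list = ["f0.png", "f1.png", "f2.png", "f3.png"]
    existing = {"f2.png"}        # frames already present in the alignments file
    extract_every_n = 2
    skip_list = [idx for idx, name in enumerate(file_list)
                 if idx % extract_every_n != 0 or name in existing]
    print(skip_list)             # [1, 2, 3] -> only f0.png will be processed
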
+
+    def process(self) -> None:
+        """ The entry point for triggering the Extraction Process.
+
+        Should only be called from :class:`lib.cli.launcher.ScriptExecutor`
+        """
+        # from lib.queue_manager import queue_manager ; queue_manager.debug_monitor(3)
+        self._loader.launch()
+        self._run_extraction()
+        self._loader.join()
+        self._alignments.save()
+        finalize(self._loader.process_count + self._existing_count,
+                 self._alignments.faces_count,
+                 self._verify_output)
+
     def _run_extraction(self) -> None:
         """ The main Faceswap Extraction process
 
@@ -318,23 +673,19 @@ class _Extract():  # pylint:disable=too-few-public-methods
         size = self._args.size if hasattr(self._args, "size") else 256
         saver = None if self._args.skip_saving_faces else ImagesSaver(self._output_dir,
                                                                       as_bytes=True)
-        exception = False
-
         for phase in range(self._extractor.passes):
-            if exception:
-                break
             is_final = self._extractor.final_pass
-            detected_faces = {}
+            detected_faces: Dict[str, ExtractMedia] = {}
             self._extractor.launch()
-            self._check_thread_error()
+            self._loader.check_thread_error()
             ph_desc = "Extraction" if self._extractor.passes == 1 else self._extractor.phase_text
             desc = f"Running pass {phase + 1} of {self._extractor.passes}: {ph_desc}"
             for idx, extract_media in enumerate(tqdm(self._extractor.detected_faces(),
-                                                     total=self._images.process_count,
+                                                     total=self._loader.process_count,
                                                      file=sys.stdout,
                                                      desc=desc,
                                                      leave=False)):
-                self._check_thread_error()
+                self._loader.check_thread_error()
                 if is_final:
                     self._output_processing(extract_media, size)
                     self._output_faces(saver, extract_media)
@@ -347,15 +698,10 @@ class _Extract():  # pylint:disable=too-few-public-methods
 
             if not is_final:
                 logger.debug("Reloading images")
-                self._threaded_redirector("reload", (detected_faces, ))
+                self._loader.reload(detected_faces)
 
         if saver is not None:
             saver.close()
 
-    def _check_thread_error(self) -> None:
-        """ Check if any errors have occurred in the running threads and their errors """
-        for thread in self._threads:
-            thread.check_and_raise_error()
-
     def _output_processing(self, extract_media: ExtractMedia, size: int) -> None:
         """ Prepare faces for output
 
@@ -408,14 +754,17 @@ class _Extract():  # pylint:disable=too-few-public-methods
         for face_id, face in enumerate(extract_media.detected_faces):
             real_face_id = face_id - skip_idx
             output_filename = f"{filename}_{real_face_id}{extension}"
-            meta = dict(alignments=face.to_png_meta(),
-                        source=dict(alignments_version=self._alignments.version,
-                                    original_filename=output_filename,
-                                    face_index=real_face_id,
-                                    source_filename=os.path.basename(extract_media.filename),
-                                    source_is_video=self._images.is_video,
-                                    source_frame_dims=extract_media.image_size))
-            image = encode_image(face.aligned.face, extension, metadata=meta)
+            aligned = face.aligned.face
+            assert aligned is not None
+            meta: PNGHeaderDict = dict(
+                alignments=face.to_png_meta(),
+                source=dict(alignments_version=self._alignments.version,
+                            original_filename=output_filename,
+                            face_index=real_face_id,
+                            source_filename=os.path.basename(extract_media.filename),
+                            source_is_video=self._loader.is_video,
+                            source_frame_dims=extract_media.image_size))
+            image = encode_image(aligned, extension, metadata=meta)
 
             sub_folder = extract_media.sub_folders[face_id]
             # Binned faces shouldn't risk filename clash, so just use original id
diff --git a/scripts/fsmedia.py b/scripts/fsmedia.py
index 34ee1b9..02339d5 100644
--- a/scripts/fsmedia.py
+++ b/scripts/fsmedia.py
@@ -16,15 +16,9 @@ import numpy as np
 import imageio
 
 from lib.align import Alignments as AlignmentsBase, get_centered_size
-from lib.face_filter import FaceFilter as FilterFunc
 from lib.image import count_frames, read_image
 from lib.utils import (camel_case_split, get_image_paths,
                        _video_extensions)
 
-if sys.version_info < (3, 8):
-    from typing_extensions import get_args, Literal
-else:
-    from typing import get_args, Literal
-
 if TYPE_CHECKING:
     from argparse import Namespace
     from lib.align import AlignedFace
@@ -408,33 +402,6 @@ class PostProcess():  # pylint:disable=too-few-public-methods
         if (hasattr(self._args, 'debug_landmarks') and self._args.debug_landmarks):
             postprocess_items["DebugLandmarks"] = None
 
-        # Face Filter post processing
-        if ((hasattr(self._args, "filter") and self._args.filter is not None) or
-                (hasattr(self._args, "nfilter") and
-                 self._args.nfilter is not None)):
-
-            if hasattr(self._args, "detector"):
-                detector = self._args.detector.replace("-", "_").lower()
-            else:
-                detector = "cv2_dnn"
-            if hasattr(self._args, "aligner"):
-                aligner = self._args.aligner.replace("-", "_").lower()
-            else:
-                aligner = "cv2_dnn"
-
-            face_filter = dict(detector=detector,
-                               aligner=aligner,
-                               multiprocess=not self._args.singleprocess)
-            filter_lists = {}
-            if hasattr(self._args, "ref_threshold"):
-                face_filter["ref_threshold"] = self._args.ref_threshold
-            for filter_type in ('filter', 'nfilter'):
-                filter_args = getattr(self._args, filter_type, None)
-                filter_args = None if not filter_args else filter_args
-                filter_lists[filter_type] = filter_args
-            face_filter["filter_lists"] = filter_lists
-            postprocess_items["FaceFilter"] = {"kwargs": face_filter}
-
         logger.debug("Postprocess Items: %s", postprocess_items)
         return postprocess_items
 
@@ -642,130 +609,3 @@ class DebugLandmarks(PostProcessAction):  # pylint: disable=too-few-public-metho
         roi = face.aligned.get_cropped_roi(face.aligned.size, self._legacy_size, "legacy")
         cv2.rectangle(face.aligned.face, tuple(roi[:2]), tuple(roi[2:]), (0, 0, 255), 1)
         self._print_stats(face.aligned)
-
-
-class FaceFilter(PostProcessAction):
-    """ Filter in or out faces based on input image(s). Extract or Convert
-
-    Parameters
-    -----------
-    args: tuple
-        Unused
-    kwargs: dict
-        Keyword arguments for face filter:
-
-        * **detector** (`str`) - The detector to use
-
-        * **aligner** (`str`) - The aligner to use
-
-        * **multiprocess** (`bool`) - Whether to run the extraction pipeline in single process \
-        mode or not
-
-        * **ref_threshold** (`float`) - The reference threshold for a positive match
-
-        * **filter_lists** (`dict`) - The filter and nfilter image paths
-    """
-
-    def __init__(self, *args, **kwargs) -> None:
-        super().__init__(*args, **kwargs)
-        logger.info("Extracting and aligning face for Face Filter...")
-        self._filter = self._load_face_filter(**kwargs)
-        logger.debug("Initialized %s", self.__class__.__name__)
-
-    def _load_face_filter(self,
-                          filter_lists: Dict[str, str],
-                          ref_threshold: float,
-                          aligner: str,
-                          detector: str,
-                          multiprocess: bool) -> Optional[FilterFunc]:
-        """ Set up and load the :class:`~lib.face_filter.FaceFilter`.
-
-        Parameters
-        ----------
-        filter_lists: dict
-            The filter and nfilter image paths
-        ref_threshold: float
-            The reference threshold for a positive match
-        aligner: str
-            The aligner to use
-        detector: str
-            The detector to use
-        multiprocess: bool
-            Whether to run the extraction pipeline in single process mode or not
-
-        Returns
-        -------
-        :class:`~lib.face_filter.FaceFilter`
-            The face filter
-        """
-        if not any(val for val in filter_lists.values()):
-            return None
-
-        facefilter = None
-        filter_files = [self._set_face_filter(f_type, filter_lists[f_type])
-                        for f_type in get_args(Literal["filter", "nfilter"])]
-
-        if any(filters for filters in filter_files):
-            facefilter = FilterFunc(filter_files[0],
-                                    filter_files[1],
-                                    detector,
-                                    aligner,
-                                    multiprocess,
-                                    ref_threshold)
-            logger.debug("Face filter: %s", facefilter)
-        else:
-            self._valid = False
-        return facefilter
-
-    @classmethod
-    def _set_face_filter(cls,
-                         f_type: Literal["filter", "nfilter"],
-                         f_args: Union[str, List[str]]) -> List[str]:
-        """ Check filter files exist and add the filter file paths to a list.
-
-        Parameters
-        ----------
-        f_type: {"filter", "nfilter"}
-            The type of filter to create this list for
-        f_args: str or list
-            The filter image(s) to use
-
-        Returns
-        -------
-        list
-            The confirmed existing paths to filter files to use
-        """
-        if not f_args:
-            return []
-
-        logger.info("%s: %s", f_type.title(), f_args)
-        filter_files = f_args if isinstance(f_args, list) else [f_args]
-        filter_files = [fpath for fpath in filter_files if os.path.exists(fpath)]
-        if not filter_files:
-            logger.warning("Face %s files were requested, but no files could be found. This "
-                           "filter will not be applied.", f_type)
-        logger.debug("Face Filter files: %s", filter_files)
-        return filter_files
-
-    def process(self, extract_media: "ExtractMedia") -> None:
-        """ Filters in or out any wanted or unwanted faces based on command line arguments.
-
-        Parameters
-        ----------
-        extract_media: :class:`~plugins.extract.pipeline.ExtractMedia`
-            The :class:`~plugins.extract.pipeline.ExtractMedia` object to perform the
-            face filtering on.
-        """
-        if not self._filter:
-            return
-        ret_faces = []
-        for idx, detect_face in enumerate(extract_media.detected_faces):
-            check_item = detect_face["face"] if isinstance(detect_face, dict) else detect_face
-            if not self._filter.check(extract_media.image, check_item):
-                logger.verbose("Skipping not recognized face: (Frame: %s Face %s)",  # type: ignore
-                               extract_media.filename, idx)
-                continue
-            logger.trace("Accepting recognised face. Frame: %s. Face: %s",  # type: ignore
-                         extract_media.filename, idx)
-            ret_faces.append(detect_face)
-        extract_media.add_detected_faces(ret_faces)
diff --git a/tools/manual/manual.py b/tools/manual/manual.py
index b36e2ee..ded172f 100644
--- a/tools/manual/manual.py
+++ b/tools/manual/manual.py
@@ -801,7 +801,8 @@ class Aligner():
         for plugin, aligner in self._aligners.items():
             if plugin == "mask":
                 continue
-            aligner.set_aligner_normalization_method(method)
+            logger.debug("Setting to: '%s'", method)
+            aligner.aligner.set_normalize_method(method)
 
 
 class FrameLoader():
diff --git a/tools/mask/mask.py b/tools/mask/mask.py
index 8ddce41..4202663 100644
--- a/tools/mask/mask.py
+++ b/tools/mask/mask.py
@@ -19,7 +19,7 @@ from plugins.extract.pipeline import Extractor, ExtractMedia
 if TYPE_CHECKING:
     from argparse import Namespace
     from lib.align.aligned_face import CenteringType
-    from lib.align.alignments import AlignmentFileDict
+    from lib.align.alignments import AlignmentFileDict, PNGHeaderDict, PNGHeaderSourceDict
     from lib.queue_manager import EventQueue
 
 logger = logging.getLogger(__name__)  # pylint:disable=invalid-name
@@ -125,9 +125,7 @@ class Mask():  # pylint:disable=too-few-public-methods
             logger.debug("Update type `output` selected. Not launching extractor")
             return None
         logger.debug("masker: %s", self._mask_type)
-        extractor = Extractor(None, None, self._mask_type,
-                              exclude_gpus=exclude_gpus,
-                              image_is_aligned=self._input_is_faces)
+        extractor = Extractor(None, None, self._mask_type, exclude_gpus=exclude_gpus)
         extractor.launch()
         logger.debug(extractor)
         return extractor
@@ -172,7 +170,7 @@ class Mask():  # pylint:disable=too-few-public-methods
     def _process_face(self,
                       filename: str,
                       image: np.ndarray,
-                      metadata: Dict[str, Any]) -> Optional["ExtractMedia"]:
+                      metadata: "PNGHeaderDict") -> Optional["ExtractMedia"]:
         """ Process a single face when masking from face images
 
         filename: str
@@ -190,12 +188,12 @@ class Mask():  # pylint:disable=too-few-public-methods
         """
         frame_name = metadata["source"]["source_filename"]
         face_index = metadata["source"]["face_index"]
-        alignment = self._alignments.get_faces_in_frame(frame_name)
-        if not alignment or face_index > len(alignment) - 1:
+        alignments = self._alignments.get_faces_in_frame(frame_name)
+        if not alignments or face_index > len(alignments) - 1:
             self._counts["skip"] += 1
             logger.warning("Skipping Face not found in alignments file: '%s'", filename)
             return None
-        alignment = alignment[face_index]
+        alignment = alignments[face_index]
         self._counts["face"] += 1
 
         if self._check_for_missing(frame_name, face_index, alignment):
@@ -207,7 +205,7 @@ class Mask():  # pylint:disable=too-few-public-methods
             self._save(frame_name, face_index, detected_face)
             return None
 
-        media = ExtractMedia(filename, image, detected_faces=[detected_face])
+        media = ExtractMedia(filename, image, detected_faces=[detected_face], is_aligned=True)
         media.add_frame_metadata(metadata["source"])
         self._counts["update"] += 1
         return media
@@ -236,7 +234,7 @@ class Mask():  # pylint:disable=too-few-public-methods
                 logger.warning("Legacy face not found in alignments file. This face has not "
                                "been updated: '%s'", filename)
                 continue
-            if "source_frame_dims" not in metadata["source"]:
+            if not metadata["source"].get("source_frame_dims"):
                 logger.error("The faces need to be re-extracted as at least some of them do not "
                              "contain information required to correctly generate masks.")
                 logger.error("You can re-extract the face-set by using the Alignments Tool's "
@@ -404,8 +402,8 @@ class Mask():  # pylint:disable=too-few-public-methods
                          frame_name, face_index)
         self._alignments.update_face(frame_name, face_index, face.to_alignment())
 
-        metadata = dict(alignments=face.to_png_meta(),
-                        source=extractor_output.frame_metadata)
+        metadata: "PNGHeaderDict" = dict(alignments=face.to_png_meta(),
+                                         source=extractor_output.frame_metadata)
         self._faces_saver.save(extractor_output.filename,
                                encode_image(extractor_output.image, ".png", metadata=metadata))
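
End to end, the reworked filter is exercised from the extract command line. A hypothetical
invocation using the long-form flags updated in lib/cli/args.py by this patch (input/output
paths and the threshold value are placeholders):

    python faceswap.py extract -i src/video.mp4 -o faces_out \
        --filter refs/person_a_1.png refs/person_a_2.png \
        --nfilter refs/person_b.png \
        --ref_threshold 0.65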