Merge branch 'staging'

This commit is contained in:
torzdf
2022-11-22 01:16:12 +00:00
18 changed files with 1335 additions and 597 deletions

View File

@@ -7,9 +7,9 @@ The Extract Package handles the various plugins available for extracting face se
.. contents:: Contents
:local:
pipeline module
===============
.. rubric:: Module Summary
.. autosummary::
@@ -25,32 +25,44 @@ pipeline module
:undoc-members:
:show-inheritance:
extract plugins package
=======================
.. contents:: Contents
:local:
_base module
------------
============
.. automodule:: plugins.extract._base
:members:
:undoc-members:
:show-inheritance:
align._base module
------------------
.. automodule:: plugins.extract.align._base
align plugins package
=====================
.. contents:: Contents
:local:
align._base.aligner module
--------------------------
.. automodule:: plugins.extract.align._base.aligner
:members:
:undoc-members:
:show-inheritance:
vgg\_face2\_keras module
------------------------
align._base.processing module
-----------------------------
.. automodule:: plugins.extract.align._base.processing
:members:
:undoc-members:
:show-inheritance:
.. automodule:: plugins.extract.recognition.vgg_face2_keras
align.cv2_dnn module
--------------------
.. automodule:: plugins.extract.align.cv2_dnn
:members:
:undoc-members:
:show-inheritance:
align.fan module
-------------------
.. automodule:: plugins.extract.align.fan
:members:
:undoc-members:
:show-inheritance:
@@ -58,13 +70,11 @@ vgg\_face2\_keras module
detect plugins package
======================
.. contents:: Contents
:local:
detect._base module
-------------------
.. automodule:: plugins.extract.detect._base
:members:
:undoc-members:
@@ -72,7 +82,6 @@ detect._base module
detect.mtcnn module
-------------------
.. automodule:: plugins.extract.detect.mtcnn
:members:
:undoc-members:
@@ -81,13 +90,11 @@ detect.mtcnn module
mask plugins package
====================
.. contents:: Contents
:local:
mask._base module
-----------------
.. automodule:: plugins.extract.mask._base
:members:
:undoc-members:
@@ -98,4 +105,25 @@ mask.bisenet_fp module
.. automodule:: plugins.extract.mask.bisenet_fp
:members:
:undoc-members:
:show-inheritance:
:show-inheritance:
recognition plugins package
===========================
.. contents:: Contents
:local:
recognition._base module
------------------------
.. automodule:: plugins.extract.recognition._base
:members:
:undoc-members:
:show-inheritance:
recognition.vgg_face2 module
----------------------------
.. automodule:: plugins.extract.recognition.vgg_face2
:members:
:undoc-members:
:show-inheritance:

View File

@@ -485,6 +485,15 @@ class ExtractArgs(ExtractConvertArgs):
"remove 'micro-jitter' but at the cost of slower extraction speed. The more "
"times the face is re-fed into the aligner, the less micro-jitter should occur "
"but the longer extraction will take.")))
argument_list.append(dict(
opts=("-a", "--re-align"),
action="store_true",
dest="re_align",
default=False,
group=_("Plugins"),
help=_("Re-feed the initially found aligned face through the aligner. Can help "
"produce better alignments for faces that are rotated beyond 45 degrees in "
"the frame or are at extreme angles. Slows down extraction.")))
argument_list.append(dict(
opts=("-r", "--rotate-images"),
type=str,

View File

@@ -34,7 +34,7 @@ _image_extensions = [ # pylint:disable=invalid-name
_video_extensions = [ # pylint:disable=invalid-name
".avi", ".flv", ".mkv", ".mov", ".mp4", ".mpeg", ".mpg", ".webm", ".wmv",
".ts", ".vob"]
_TF_VERS = None
_TF_VERS: Optional[float] = None
ValidBackends = Literal["amd", "nvidia", "cpu", "apple_silicon"]

View File

@@ -6,8 +6,8 @@ msgid ""
msgstr ""
"Project-Id-Version: faceswap.spanish\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2022-11-02 10:39+0000\n"
"PO-Revision-Date: 2022-11-02 10:41+0000\n"
"POT-Creation-Date: 2022-11-20 01:34+0000\n"
"PO-Revision-Date: 2022-11-20 01:35+0000\n"
"Last-Translator: \n"
"Language-Team: tokafondo\n"
"Language: es\n"
@@ -55,7 +55,7 @@ msgstr ""
"almacenarlo en la carpeta de instalación de faceswap"
#: lib/cli/args.py:320 lib/cli/args.py:329 lib/cli/args.py:337
#: lib/cli/args.py:386 lib/cli/args.py:668 lib/cli/args.py:677
#: lib/cli/args.py:386 lib/cli/args.py:677 lib/cli/args.py:686
msgid "Data"
msgstr "Datos"
@@ -102,8 +102,8 @@ msgstr ""
#: lib/cli/args.py:396 lib/cli/args.py:412 lib/cli/args.py:424
#: lib/cli/args.py:463 lib/cli/args.py:481 lib/cli/args.py:493
#: lib/cli/args.py:502 lib/cli/args.py:687 lib/cli/args.py:714
#: lib/cli/args.py:752
#: lib/cli/args.py:502 lib/cli/args.py:511 lib/cli/args.py:696
#: lib/cli/args.py:723 lib/cli/args.py:761
msgid "Plugins"
msgstr "Extensiones"
@@ -254,6 +254,17 @@ msgstr ""
#: lib/cli/args.py:494
msgid ""
"Re-feed the initially found aligned face through the aligner. Can help "
"produce better alignments for faces that are rotated beyond 45 degrees in "
"the frame or are at extreme angles. Slows down extraction."
msgstr ""
"Vuelva a introducir la cara alineada encontrada inicialmente a través del "
"alineador. Puede ayudar a producir mejores alineaciones para las caras que "
"se giran más de 45 grados en el marco o se encuentran en ángulos extremos. "
"Ralentiza la extracción."
#: lib/cli/args.py:503
msgid ""
"If a face isn't found, rotate the images to try to find a face. Can find "
"more faces at the cost of extraction speed. Pass in a single number to use "
"increments of that size up to 360, or pass in a list of numbers to enumerate "
@@ -264,7 +275,7 @@ msgstr ""
"un solo número para usar incrementos de ese tamaño hasta 360, o pase una "
"lista de números para enumerar exactamente qué ángulos comprobar."
#: lib/cli/args.py:503
#: lib/cli/args.py:512
msgid ""
"Obtain and store face identity encodings from VGGFace2. Slows down extract a "
"little, but will save time if using 'sort by face'"
@@ -272,13 +283,13 @@ msgstr ""
"Obtenga y almacene codificaciones de identidad facial de VGGFace2. Ralentiza "
"un poco la extracción, pero ahorrará tiempo si usa 'sort by face'"
#: lib/cli/args.py:513 lib/cli/args.py:523 lib/cli/args.py:535
#: lib/cli/args.py:548 lib/cli/args.py:789 lib/cli/args.py:803
#: lib/cli/args.py:816 lib/cli/args.py:830
#: lib/cli/args.py:522 lib/cli/args.py:532 lib/cli/args.py:544
#: lib/cli/args.py:557 lib/cli/args.py:798 lib/cli/args.py:812
#: lib/cli/args.py:825 lib/cli/args.py:839
msgid "Face Processing"
msgstr "Proceso de Caras"
#: lib/cli/args.py:514
#: lib/cli/args.py:523
msgid ""
"Filters out faces detected below this size. Length, in pixels across the "
"diagonal of the bounding box. Set to 0 for off"
@@ -287,7 +298,7 @@ msgstr ""
"a lo largo de la diagonal del cuadro delimitador. Establecer a 0 para "
"desactivar"
#: lib/cli/args.py:524
#: lib/cli/args.py:533
msgid ""
"Optionally filter out people who you do not wish to extract by passing in "
"images of those people. Should be a small variety of images at different "
@@ -300,7 +311,7 @@ msgstr ""
"contenga las imágenes requeridas o múltiples archivos de imágenes, separados "
"por espacios."
#: lib/cli/args.py:536
#: lib/cli/args.py:545
msgid ""
"Optionally select people you wish to extract by passing in images of that "
"person. Should be a small variety of images at different angles and in "
@@ -313,7 +324,7 @@ msgstr ""
"contenga las imágenes requeridas o múltiples archivos de imágenes, separados "
"por espacios."
#: lib/cli/args.py:549
#: lib/cli/args.py:558
msgid ""
"For use with the optional nfilter/filter files. Threshold for positive face "
"recognition. Higher values are stricter."
@@ -321,12 +332,12 @@ msgstr ""
"Para usar con los archivos nfilter/filter opcionales. Umbral para el "
"reconocimiento facial positivo. Los valores más altos son más estrictos."
#: lib/cli/args.py:558 lib/cli/args.py:570 lib/cli/args.py:582
#: lib/cli/args.py:594
#: lib/cli/args.py:567 lib/cli/args.py:579 lib/cli/args.py:591
#: lib/cli/args.py:603
msgid "output"
msgstr "salida"
#: lib/cli/args.py:559
#: lib/cli/args.py:568
msgid ""
"The output size of extracted faces. Make sure that the model you intend to "
"train supports your required size. This will only need to be changed for hi-"
@@ -336,7 +347,7 @@ msgstr ""
"pretende entrenar admite el tamaño deseado. Esto sólo tendrá que ser "
"cambiado para los modelos de alta resolución."
#: lib/cli/args.py:571
#: lib/cli/args.py:580
msgid ""
"Extract every 'nth' frame. This option will skip frames when extracting "
"faces. For example a value of 1 will extract faces from every frame, a value "
@@ -346,7 +357,7 @@ msgstr ""
"extraer las caras. Por ejemplo, un valor de 1 extraerá las caras de cada "
"fotograma, un valor de 10 extraerá las caras de cada 10 fotogramas."
#: lib/cli/args.py:583
#: lib/cli/args.py:592
msgid ""
"Automatically save the alignments file after a set amount of frames. By "
"default the alignments file is only saved at the end of the extraction "
@@ -362,18 +373,18 @@ msgstr ""
"ADVERTENCIA: No interrumpa el script al escribir el archivo porque podría "
"corromperse. Poner a 0 para desactivar"
#: lib/cli/args.py:595
#: lib/cli/args.py:604
msgid "Draw landmarks on the ouput faces for debugging purposes."
msgstr ""
"Dibujar puntos de referencia en las caras de salida para fines de depuración."
#: lib/cli/args.py:601 lib/cli/args.py:610 lib/cli/args.py:618
#: lib/cli/args.py:625 lib/cli/args.py:843 lib/cli/args.py:854
#: lib/cli/args.py:862 lib/cli/args.py:881 lib/cli/args.py:887
#: lib/cli/args.py:610 lib/cli/args.py:619 lib/cli/args.py:627
#: lib/cli/args.py:634 lib/cli/args.py:852 lib/cli/args.py:863
#: lib/cli/args.py:871 lib/cli/args.py:890 lib/cli/args.py:896
msgid "settings"
msgstr "ajustes"
#: lib/cli/args.py:602
#: lib/cli/args.py:611
msgid ""
"Don't run extraction in parallel. Will run each part of the extraction "
"process separately (one after the other) rather than all at the smae time. "
@@ -383,7 +394,7 @@ msgstr ""
"extracción por separado (una tras otra) en lugar de hacerlo todo al mismo "
"tiempo. Útil si la VRAM es escasa."
#: lib/cli/args.py:611
#: lib/cli/args.py:620
msgid ""
"Skips frames that have already been extracted and exist in the alignments "
"file"
@@ -391,19 +402,19 @@ msgstr ""
"Omite los fotogramas que ya han sido extraídos y que existen en el archivo "
"de alineaciones"
#: lib/cli/args.py:619
#: lib/cli/args.py:628
msgid "Skip frames that already have detected faces in the alignments file"
msgstr ""
"Omitir los fotogramas que ya tienen caras detectadas en el archivo de "
"alineaciones"
#: lib/cli/args.py:626
#: lib/cli/args.py:635
msgid "Skip saving the detected faces to disk. Just create an alignments file"
msgstr ""
"No guardar las caras detectadas en el disco. Crear sólo un archivo de "
"alineaciones"
#: lib/cli/args.py:648
#: lib/cli/args.py:657
msgid ""
"Swap the original faces in a source video/images to your final faces.\n"
"Conversion plugins can be configured in the 'Settings' Menu"
@@ -413,7 +424,7 @@ msgstr ""
"Los plugins de conversión pueden ser configurados en el menú "
"\"Configuración\""
#: lib/cli/args.py:669
#: lib/cli/args.py:678
msgid ""
"Only required if converting from images to video. Provide The original video "
"that the source frames were extracted from (for extracting the fps and "
@@ -423,7 +434,7 @@ msgstr ""
"original del que se extrajeron los fotogramas de origen (para extraer los "
"fps y el audio)."
#: lib/cli/args.py:678
#: lib/cli/args.py:687
msgid ""
"Model directory. The directory containing the trained model you wish to use "
"for conversion."
@@ -431,7 +442,7 @@ msgstr ""
"Directorio del modelo. El directorio que contiene el modelo entrenado que "
"desea utilizar para la conversión."
#: lib/cli/args.py:688
#: lib/cli/args.py:697
msgid ""
"R|Performs color adjustment to the swapped face. Some of these options have "
"configurable settings in '/config/convert.ini' or 'Settings > Configure "
@@ -471,7 +482,7 @@ msgstr ""
"colores. Generalmente no da resultados muy satisfactorios.\n"
"L|none: No realice el ajuste de color."
#: lib/cli/args.py:715
#: lib/cli/args.py:724
msgid ""
"R|Masker to use. NB: The mask you require must exist within the alignments "
"file. You can add additional masks with the Mask Tool.\n"
@@ -547,7 +558,7 @@ msgstr ""
"L|predicted: Si la opción 'Learn Mask' se habilitó durante el entrenamiento, "
"esto usará la máscara que fue creada por el modelo entrenado."
#: lib/cli/args.py:753
#: lib/cli/args.py:762
msgid ""
"R|The plugin to use to output the converted images. The writers are "
"configurable in '/config/convert.ini' or 'Settings > Configure Convert "
@@ -573,11 +584,11 @@ msgstr ""
"L|pillow: [images] Más lento que opencv, pero tiene más opciones y soporta "
"más formatos."
#: lib/cli/args.py:772 lib/cli/args.py:779 lib/cli/args.py:873
#: lib/cli/args.py:781 lib/cli/args.py:788 lib/cli/args.py:882
msgid "Frame Processing"
msgstr "Proceso de fotogramas"
#: lib/cli/args.py:773
#: lib/cli/args.py:782
#, python-format
msgid ""
"Scale the final output frames by this amount. 100%% will output the frames "
@@ -587,7 +598,7 @@ msgstr ""
"a los fotogramas a las dimensiones de origen. 50%% a la mitad de tamaño. "
"200%% al doble de tamaño"
#: lib/cli/args.py:780
#: lib/cli/args.py:789
msgid ""
"Frame ranges to apply transfer to e.g. For frames 10 to 50 and 90 to 100 use "
"--frame-ranges 10-50 90-100. Frames falling outside of the selected range "
@@ -601,7 +612,7 @@ msgstr ""
"imágenes, ¡los nombres de los archivos deben terminar con el número de "
"fotograma!"
#: lib/cli/args.py:790
#: lib/cli/args.py:799
msgid ""
"If you have not cleansed your alignments file, then you can filter out faces "
"by defining a folder here that contains the faces extracted from your input "
@@ -617,7 +628,7 @@ msgstr ""
"especificada. Si se deja en blanco, se convertirán todas las caras que "
"existan en el archivo de alineaciones."
#: lib/cli/args.py:804
#: lib/cli/args.py:813
msgid ""
"Optionally filter out people who you do not wish to process by passing in an "
"image of that person. Should be a front portrait with a single person in the "
@@ -631,7 +642,7 @@ msgstr ""
"uso del filtro de caras disminuirá significativamente la velocidad de "
"extracción y no se puede garantizar su precisión."
#: lib/cli/args.py:817
#: lib/cli/args.py:826
msgid ""
"Optionally select people you wish to process by passing in an image of that "
"person. Should be a front portrait with a single person in the image. "
@@ -645,7 +656,7 @@ msgstr ""
"del filtro facial disminuirá significativamente la velocidad de extracción y "
"no se puede garantizar su precisión."
#: lib/cli/args.py:831
#: lib/cli/args.py:840
msgid ""
"For use with the optional nfilter/filter files. Threshold for positive face "
"recognition. Lower values are stricter. NB: Using face filter will "
@@ -657,7 +668,7 @@ msgstr ""
"NB: El uso del filtro facial disminuirá significativamente la velocidad de "
"extracción y no se puede garantizar su precisión."
#: lib/cli/args.py:844
#: lib/cli/args.py:853
msgid ""
"The maximum number of parallel processes for performing conversion. "
"Converting images is system RAM heavy so it is possible to run out of memory "
@@ -674,7 +685,7 @@ msgstr ""
"procesos que los disponibles en su sistema. Si 'singleprocess' está "
"habilitado, este ajuste será ignorado."
#: lib/cli/args.py:855
#: lib/cli/args.py:864
msgid ""
"[LEGACY] This only needs to be selected if a legacy model is being loaded or "
"if there are multiple models in the model folder"
@@ -682,7 +693,7 @@ msgstr ""
"[LEGACY] Sólo es necesario seleccionar esta opción si se está cargando un "
"modelo heredado o si hay varios modelos en la carpeta de modelos"
#: lib/cli/args.py:863
#: lib/cli/args.py:872
msgid ""
"Enable On-The-Fly Conversion. NOT recommended. You should generate a clean "
"alignments file for your destination video. However, if you wish you can "
@@ -697,7 +708,7 @@ msgstr ""
"de baja calidad. Si se encuentra un archivo de alineaciones, esta opción "
"será ignorada."
#: lib/cli/args.py:874
#: lib/cli/args.py:883
msgid ""
"When used with --frame-ranges outputs the unchanged frames that are not "
"processed instead of discarding them."
@@ -705,16 +716,16 @@ msgstr ""
"Cuando se usa con --frame-ranges, la salida incluye los fotogramas no "
"procesados en vez de descartarlos."
#: lib/cli/args.py:882
#: lib/cli/args.py:891
msgid "Swap the model. Instead converting from of A -> B, converts B -> A"
msgstr ""
"Intercambiar el modelo. En vez de convertir de A a B, convierte de B a A"
#: lib/cli/args.py:888
#: lib/cli/args.py:897
msgid "Disable multiprocessing. Slower but less resource intensive."
msgstr "Desactiva el multiproceso. Es más lento, pero usa menos recursos."
#: lib/cli/args.py:904
#: lib/cli/args.py:913
msgid ""
"Train a model on extracted original (A) and swap (B) faces.\n"
"Training models can take a long time. Anything from 24hrs to over a week\n"
@@ -726,11 +737,11 @@ msgstr ""
"hasta más de una semana.\n"
"Los plugins de los modelos pueden configurarse en el menú \"Ajustes\""
#: lib/cli/args.py:923 lib/cli/args.py:932
#: lib/cli/args.py:932 lib/cli/args.py:941
msgid "faces"
msgstr "caras"
#: lib/cli/args.py:924
#: lib/cli/args.py:933
msgid ""
"Input directory. A directory containing training images for face A. This is "
"the original face, i.e. the face that you want to remove and replace with "
@@ -740,7 +751,7 @@ msgstr ""
"para la cara A. Esta es la cara original, es decir, la cara que se quiere "
"eliminar y sustituir por la cara B."
#: lib/cli/args.py:933
#: lib/cli/args.py:942
msgid ""
"Input directory. A directory containing training images for face B. This is "
"the swap face, i.e. the face that you want to place onto the head of person "
@@ -750,12 +761,12 @@ msgstr ""
"para la cara B. Esta es la cara de intercambio, es decir, la cara que se "
"quiere colocar en la cabeza de la persona A."
#: lib/cli/args.py:941 lib/cli/args.py:953 lib/cli/args.py:969
#: lib/cli/args.py:994 lib/cli/args.py:1004
#: lib/cli/args.py:950 lib/cli/args.py:962 lib/cli/args.py:978
#: lib/cli/args.py:1003 lib/cli/args.py:1013
msgid "model"
msgstr "modelo"
#: lib/cli/args.py:942
#: lib/cli/args.py:951
msgid ""
"Model directory. This is where the training data will be stored. You should "
"always specify a new folder for new models. If starting a new model, select "
@@ -769,7 +780,7 @@ msgstr ""
"carpeta que no exista (que se creará). Si continúa entrenando un modelo "
"existente, especifique la ubicación del modelo existente."
#: lib/cli/args.py:954
#: lib/cli/args.py:963
msgid ""
"R|Load the weights from a pre-existing model into a newly created model. For "
"most models this will load weights from the Encoder of the given model into "
@@ -793,7 +804,7 @@ msgstr ""
"NB: Los pesos solo se pueden cargar desde modelos del mismo complemento que "
"desea entrenar."
#: lib/cli/args.py:970
#: lib/cli/args.py:979
msgid ""
"R|Select which trainer to use. Trainers can be configured from the Settings "
"menu or the config folder.\n"
@@ -838,7 +849,7 @@ msgstr ""
"recursos (se necesita una GPU con una buena cantidad de VRAM). Bueno para "
"los detalles, pero más susceptible a las diferencias de color."
#: lib/cli/args.py:995
#: lib/cli/args.py:1004
msgid ""
"Output a summary of the model and exit. If a model folder is provided then a "
"summary of the saved model is displayed. Otherwise a summary of the model "
@@ -850,7 +861,7 @@ msgstr ""
"muestra un resumen del modelo que crearía el complemento elegido y los "
"ajustes de configuración."
#: lib/cli/args.py:1005
#: lib/cli/args.py:1014
msgid ""
"Freeze the weights of the model. Freezing weights means that some of the "
"parameters in the model will no longer continue to learn, but those that are "
@@ -864,12 +875,12 @@ msgstr ""
"congelará el codificador, pero algunos modelos pueden tener opciones de "
"configuración para congelar otras capas."
#: lib/cli/args.py:1018 lib/cli/args.py:1030 lib/cli/args.py:1041
#: lib/cli/args.py:1052 lib/cli/args.py:1135
#: lib/cli/args.py:1027 lib/cli/args.py:1039 lib/cli/args.py:1050
#: lib/cli/args.py:1061 lib/cli/args.py:1144
msgid "training"
msgstr "entrenamiento"
#: lib/cli/args.py:1019
#: lib/cli/args.py:1028
msgid ""
"Batch size. This is the number of images processed through the model for "
"each side per iteration. NB: As the model is fed 2 sides at a time, the "
@@ -882,7 +893,7 @@ msgstr ""
"momento es el doble del número que se establece aquí. Los lotes más grandes "
"requieren más RAM de la GPU."
#: lib/cli/args.py:1031
#: lib/cli/args.py:1040
msgid ""
"Length of training in iterations. This is only really used for automation. "
"There is no 'correct' number of iterations a model should be trained for. "
@@ -897,7 +908,7 @@ msgstr ""
"automáticamente en un número determinado de iteraciones, puede establecer "
"ese valor aquí."
#: lib/cli/args.py:1042
#: lib/cli/args.py:1051
msgid ""
"[Deprecated - Use '-D, --distribution-strategy' instead] Use the Tensorflow "
"Mirrored Distrubution Strategy to train on multiple GPUs."
@@ -905,7 +916,7 @@ msgstr ""
"[Obsoleto: use '-D, --distribution-strategy' en su lugar] Use la estrategia "
"de distribución duplicada de Tensorflow para entrenar en varias GPU."
#: lib/cli/args.py:1053
#: lib/cli/args.py:1062
msgid ""
"R|Select the distribution stategy to use.\n"
"L|default: Use Tensorflow's default distribution strategy.\n"
@@ -931,15 +942,15 @@ msgstr ""
"locales. Se carga una copia del modelo y todas las variables en cada GPU con "
"lotes distribuidos a cada GPU en cada iteración."
#: lib/cli/args.py:1070 lib/cli/args.py:1080
#: lib/cli/args.py:1079 lib/cli/args.py:1089
msgid "Saving"
msgstr "Guardar"
#: lib/cli/args.py:1071
#: lib/cli/args.py:1080
msgid "Sets the number of iterations between each model save."
msgstr "Establece el número de iteraciones entre cada guardado del modelo."
#: lib/cli/args.py:1081
#: lib/cli/args.py:1090
msgid ""
"Sets the number of iterations before saving a backup snapshot of the model "
"in it's current state. Set to 0 for off."
@@ -947,11 +958,11 @@ msgstr ""
"Establece el número de iteraciones antes de guardar una copia de seguridad "
"del modelo en su estado actual. Establece 0 para que esté desactivado."
#: lib/cli/args.py:1088 lib/cli/args.py:1099 lib/cli/args.py:1110
#: lib/cli/args.py:1097 lib/cli/args.py:1108 lib/cli/args.py:1119
msgid "timelapse"
msgstr "intervalo"
#: lib/cli/args.py:1089
#: lib/cli/args.py:1098
msgid ""
"Optional for creating a timelapse. Timelapse will save an image of your "
"selected faces into the timelapse-output folder at every save iteration. "
@@ -965,7 +976,7 @@ msgstr ""
"para crear el timelapse. También debe suministrar un parámetro --timelapse-"
"output y un parámetro --timelapse-input-B."
#: lib/cli/args.py:1100
#: lib/cli/args.py:1109
msgid ""
"Optional for creating a timelapse. Timelapse will save an image of your "
"selected faces into the timelapse-output folder at every save iteration. "
@@ -979,7 +990,7 @@ msgstr ""
"para crear el timelapse. También debe suministrar un parámetro --timelapse-"
"output y un parámetro --timelapse-input-A."
#: lib/cli/args.py:1111
#: lib/cli/args.py:1120
msgid ""
"Optional for creating a timelapse. Timelapse will save an image of your "
"selected faces into the timelapse-output folder at every save iteration. If "
@@ -991,17 +1002,17 @@ msgstr ""
"Si se suministran las carpetas de entrada pero no la carpeta de salida, se "
"guardará por defecto en la carpeta del modelo /timelapse/"
#: lib/cli/args.py:1120 lib/cli/args.py:1127
#: lib/cli/args.py:1129 lib/cli/args.py:1136
msgid "preview"
msgstr "previsualización"
#: lib/cli/args.py:1121
#: lib/cli/args.py:1130
msgid "Show training preview output. in a separate window."
msgstr ""
"Mostrar la salida de la vista previa del entrenamiento en una ventana "
"separada."
#: lib/cli/args.py:1128
#: lib/cli/args.py:1137
msgid ""
"Writes the training result to a file. The image will be stored in the root "
"of your FaceSwap folder."
@@ -1009,7 +1020,7 @@ msgstr ""
"Escribe el resultado del entrenamiento en un archivo. La imagen se "
"almacenará en la raíz de su carpeta FaceSwap."
#: lib/cli/args.py:1136
#: lib/cli/args.py:1145
msgid ""
"Disables TensorBoard logging. NB: Disabling logs means that you will not be "
"able to use the graph or analysis for this session in the GUI."
@@ -1017,12 +1028,12 @@ msgstr ""
"Desactiva el registro de TensorBoard. NB: Desactivar los registros significa "
"que no podrá utilizar el gráfico o el análisis de esta sesión en la GUI."
#: lib/cli/args.py:1143 lib/cli/args.py:1152 lib/cli/args.py:1161
#: lib/cli/args.py:1170
#: lib/cli/args.py:1152 lib/cli/args.py:1161 lib/cli/args.py:1170
#: lib/cli/args.py:1179
msgid "augmentation"
msgstr "aumento"
#: lib/cli/args.py:1144
#: lib/cli/args.py:1153
msgid ""
"Warps training faces to closely matched Landmarks from the opposite face-set "
"rather than randomly warping the face. This is the 'dfaker' way of doing "
@@ -1032,7 +1043,7 @@ msgstr ""
"conjunto de caras opuestas en lugar de deformar la cara al azar. Esta es la "
"forma 'dfaker' de hacer la deformación."
#: lib/cli/args.py:1153
#: lib/cli/args.py:1162
msgid ""
"To effectively learn, a random set of images are flipped horizontally. "
"Sometimes it is desirable for this not to occur. Generally this should be "
@@ -1043,7 +1054,7 @@ msgstr ""
"general, esto debería dejarse sin efecto, excepto durante el 'entrenamiento "
"de ajuste'."
#: lib/cli/args.py:1162
#: lib/cli/args.py:1171
msgid ""
"Color augmentation helps make the model less susceptible to color "
"differences between the A and B sets, at an increased training time cost. "
@@ -1053,7 +1064,7 @@ msgstr ""
"diferencias de color entre los conjuntos A y B, con un mayor coste de tiempo "
"de entrenamiento. Activa esta opción para desactivar el aumento de color."
#: lib/cli/args.py:1171
#: lib/cli/args.py:1180
msgid ""
"Warping is integral to training the Neural Network. This option should only "
"be enabled towards the very end of training to try to bring out more detail. "
@@ -1066,7 +1077,7 @@ msgstr ""
"esta opción desde el principio, es probable que arruine el modelo y se "
"obtengan resultados terribles."
#: lib/cli/args.py:1196
#: lib/cli/args.py:1205
msgid "Output to Shell console instead of GUI console"
msgstr "Salida a la consola Shell en lugar de la consola GUI"

View File

@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2022-11-02 10:39+0000\n"
"POT-Creation-Date: 2022-11-20 01:34+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -46,7 +46,7 @@ msgid "Path to store the logfile. Leave blank to store in the faceswap folder"
msgstr ""
#: lib/cli/args.py:320 lib/cli/args.py:329 lib/cli/args.py:337
#: lib/cli/args.py:386 lib/cli/args.py:668 lib/cli/args.py:677
#: lib/cli/args.py:386 lib/cli/args.py:677 lib/cli/args.py:686
msgid "Data"
msgstr ""
@@ -82,8 +82,8 @@ msgstr ""
#: lib/cli/args.py:396 lib/cli/args.py:412 lib/cli/args.py:424
#: lib/cli/args.py:463 lib/cli/args.py:481 lib/cli/args.py:493
#: lib/cli/args.py:502 lib/cli/args.py:687 lib/cli/args.py:714
#: lib/cli/args.py:752
#: lib/cli/args.py:502 lib/cli/args.py:511 lib/cli/args.py:696
#: lib/cli/args.py:723 lib/cli/args.py:761
msgid "Plugins"
msgstr ""
@@ -168,31 +168,38 @@ msgstr ""
#: lib/cli/args.py:494
msgid ""
"Re-feed the initially found aligned face through the aligner. Can help "
"produce better alignments for faces that are rotated beyond 45 degrees in "
"the frame or are at extreme angles. Slows down extraction."
msgstr ""
#: lib/cli/args.py:503
msgid ""
"If a face isn't found, rotate the images to try to find a face. Can find "
"more faces at the cost of extraction speed. Pass in a single number to use "
"increments of that size up to 360, or pass in a list of numbers to enumerate "
"exactly what angles to check."
msgstr ""
#: lib/cli/args.py:503
#: lib/cli/args.py:512
msgid ""
"Obtain and store face identity encodings from VGGFace2. Slows down extract a "
"little, but will save time if using 'sort by face'"
msgstr ""
#: lib/cli/args.py:513 lib/cli/args.py:523 lib/cli/args.py:535
#: lib/cli/args.py:548 lib/cli/args.py:789 lib/cli/args.py:803
#: lib/cli/args.py:816 lib/cli/args.py:830
#: lib/cli/args.py:522 lib/cli/args.py:532 lib/cli/args.py:544
#: lib/cli/args.py:557 lib/cli/args.py:798 lib/cli/args.py:812
#: lib/cli/args.py:825 lib/cli/args.py:839
msgid "Face Processing"
msgstr ""
#: lib/cli/args.py:514
#: lib/cli/args.py:523
msgid ""
"Filters out faces detected below this size. Length, in pixels across the "
"diagonal of the bounding box. Set to 0 for off"
msgstr ""
#: lib/cli/args.py:524
#: lib/cli/args.py:533
msgid ""
"Optionally filter out people who you do not wish to extract by passing in "
"images of those people. Should be a small variety of images at different "
@@ -200,7 +207,7 @@ msgid ""
"or multiple image files, space separated, can be selected."
msgstr ""
#: lib/cli/args.py:536
#: lib/cli/args.py:545
msgid ""
"Optionally select people you wish to extract by passing in images of that "
"person. Should be a small variety of images at different angles and in "
@@ -208,32 +215,32 @@ msgid ""
"image files, space separated, can be selected."
msgstr ""
#: lib/cli/args.py:549
#: lib/cli/args.py:558
msgid ""
"For use with the optional nfilter/filter files. Threshold for positive face "
"recognition. Higher values are stricter."
msgstr ""
#: lib/cli/args.py:558 lib/cli/args.py:570 lib/cli/args.py:582
#: lib/cli/args.py:594
#: lib/cli/args.py:567 lib/cli/args.py:579 lib/cli/args.py:591
#: lib/cli/args.py:603
msgid "output"
msgstr ""
#: lib/cli/args.py:559
#: lib/cli/args.py:568
msgid ""
"The output size of extracted faces. Make sure that the model you intend to "
"train supports your required size. This will only need to be changed for hi-"
"res models."
msgstr ""
#: lib/cli/args.py:571
#: lib/cli/args.py:580
msgid ""
"Extract every 'nth' frame. This option will skip frames when extracting "
"faces. For example a value of 1 will extract faces from every frame, a value "
"of 10 will extract faces from every 10th frame."
msgstr ""
#: lib/cli/args.py:583
#: lib/cli/args.py:592
msgid ""
"Automatically save the alignments file after a set amount of frames. By "
"default the alignments file is only saved at the end of the extraction "
@@ -243,57 +250,57 @@ msgid ""
"turn off"
msgstr ""
#: lib/cli/args.py:595
#: lib/cli/args.py:604
msgid "Draw landmarks on the ouput faces for debugging purposes."
msgstr ""
#: lib/cli/args.py:601 lib/cli/args.py:610 lib/cli/args.py:618
#: lib/cli/args.py:625 lib/cli/args.py:843 lib/cli/args.py:854
#: lib/cli/args.py:862 lib/cli/args.py:881 lib/cli/args.py:887
#: lib/cli/args.py:610 lib/cli/args.py:619 lib/cli/args.py:627
#: lib/cli/args.py:634 lib/cli/args.py:852 lib/cli/args.py:863
#: lib/cli/args.py:871 lib/cli/args.py:890 lib/cli/args.py:896
msgid "settings"
msgstr ""
#: lib/cli/args.py:602
#: lib/cli/args.py:611
msgid ""
"Don't run extraction in parallel. Will run each part of the extraction "
"process separately (one after the other) rather than all at the smae time. "
"Useful if VRAM is at a premium."
msgstr ""
#: lib/cli/args.py:611
#: lib/cli/args.py:620
msgid ""
"Skips frames that have already been extracted and exist in the alignments "
"file"
msgstr ""
#: lib/cli/args.py:619
#: lib/cli/args.py:628
msgid "Skip frames that already have detected faces in the alignments file"
msgstr ""
#: lib/cli/args.py:626
#: lib/cli/args.py:635
msgid "Skip saving the detected faces to disk. Just create an alignments file"
msgstr ""
#: lib/cli/args.py:648
#: lib/cli/args.py:657
msgid ""
"Swap the original faces in a source video/images to your final faces.\n"
"Conversion plugins can be configured in the 'Settings' Menu"
msgstr ""
#: lib/cli/args.py:669
#: lib/cli/args.py:678
msgid ""
"Only required if converting from images to video. Provide The original video "
"that the source frames were extracted from (for extracting the fps and "
"audio)."
msgstr ""
#: lib/cli/args.py:678
#: lib/cli/args.py:687
msgid ""
"Model directory. The directory containing the trained model you wish to use "
"for conversion."
msgstr ""
#: lib/cli/args.py:688
#: lib/cli/args.py:697
msgid ""
"R|Performs color adjustment to the swapped face. Some of these options have "
"configurable settings in '/config/convert.ini' or 'Settings > Configure "
@@ -314,7 +321,7 @@ msgid ""
"L|none: Don't perform color adjustment."
msgstr ""
#: lib/cli/args.py:715
#: lib/cli/args.py:724
msgid ""
"R|Masker to use. NB: The mask you require must exist within the alignments "
"file. You can add additional masks with the Mask Tool.\n"
@@ -351,7 +358,7 @@ msgid ""
"will use the mask that was created by the trained model."
msgstr ""
#: lib/cli/args.py:753
#: lib/cli/args.py:762
msgid ""
"R|The plugin to use to output the converted images. The writers are "
"configurable in '/config/convert.ini' or 'Settings > Configure Convert "
@@ -366,18 +373,18 @@ msgid ""
"more formats."
msgstr ""
#: lib/cli/args.py:772 lib/cli/args.py:779 lib/cli/args.py:873
#: lib/cli/args.py:781 lib/cli/args.py:788 lib/cli/args.py:882
msgid "Frame Processing"
msgstr ""
#: lib/cli/args.py:773
#: lib/cli/args.py:782
#, python-format
msgid ""
"Scale the final output frames by this amount. 100%% will output the frames "
"at source dimensions. 50%% at half size 200%% at double size"
msgstr ""
#: lib/cli/args.py:780
#: lib/cli/args.py:789
msgid ""
"Frame ranges to apply transfer to e.g. For frames 10 to 50 and 90 to 100 use "
"--frame-ranges 10-50 90-100. Frames falling outside of the selected range "
@@ -385,7 +392,7 @@ msgid ""
"converting from images, then the filenames must end with the frame-number!"
msgstr ""
#: lib/cli/args.py:790
#: lib/cli/args.py:799
msgid ""
"If you have not cleansed your alignments file, then you can filter out faces "
"by defining a folder here that contains the faces extracted from your input "
@@ -395,7 +402,7 @@ msgid ""
"alignments file."
msgstr ""
#: lib/cli/args.py:804
#: lib/cli/args.py:813
msgid ""
"Optionally filter out people who you do not wish to process by passing in an "
"image of that person. Should be a front portrait with a single person in the "
@@ -404,7 +411,7 @@ msgid ""
"guaranteed."
msgstr ""
#: lib/cli/args.py:817
#: lib/cli/args.py:826
msgid ""
"Optionally select people you wish to process by passing in an image of that "
"person. Should be a front portrait with a single person in the image. "
@@ -413,7 +420,7 @@ msgid ""
"guaranteed."
msgstr ""
#: lib/cli/args.py:831
#: lib/cli/args.py:840
msgid ""
"For use with the optional nfilter/filter files. Threshold for positive face "
"recognition. Lower values are stricter. NB: Using face filter will "
@@ -421,7 +428,7 @@ msgid ""
"guaranteed."
msgstr ""
#: lib/cli/args.py:844
#: lib/cli/args.py:853
msgid ""
"The maximum number of parallel processes for performing conversion. "
"Converting images is system RAM heavy so it is possible to run out of memory "
@@ -431,13 +438,13 @@ msgid ""
"your system. If singleprocess is enabled this setting will be ignored."
msgstr ""
#: lib/cli/args.py:855
#: lib/cli/args.py:864
msgid ""
"[LEGACY] This only needs to be selected if a legacy model is being loaded or "
"if there are multiple models in the model folder"
msgstr ""
#: lib/cli/args.py:863
#: lib/cli/args.py:872
msgid ""
"Enable On-The-Fly Conversion. NOT recommended. You should generate a clean "
"alignments file for your destination video. However, if you wish you can "
@@ -446,51 +453,51 @@ msgid ""
"alignments file is found, this option will be ignored."
msgstr ""
#: lib/cli/args.py:874
#: lib/cli/args.py:883
msgid ""
"When used with --frame-ranges outputs the unchanged frames that are not "
"processed instead of discarding them."
msgstr ""
#: lib/cli/args.py:882
#: lib/cli/args.py:891
msgid "Swap the model. Instead converting from of A -> B, converts B -> A"
msgstr ""
#: lib/cli/args.py:888
#: lib/cli/args.py:897
msgid "Disable multiprocessing. Slower but less resource intensive."
msgstr ""
#: lib/cli/args.py:904
#: lib/cli/args.py:913
msgid ""
"Train a model on extracted original (A) and swap (B) faces.\n"
"Training models can take a long time. Anything from 24hrs to over a week\n"
"Model plugins can be configured in the 'Settings' Menu"
msgstr ""
#: lib/cli/args.py:923 lib/cli/args.py:932
#: lib/cli/args.py:932 lib/cli/args.py:941
msgid "faces"
msgstr ""
#: lib/cli/args.py:924
#: lib/cli/args.py:933
msgid ""
"Input directory. A directory containing training images for face A. This is "
"the original face, i.e. the face that you want to remove and replace with "
"face B."
msgstr ""
#: lib/cli/args.py:933
#: lib/cli/args.py:942
msgid ""
"Input directory. A directory containing training images for face B. This is "
"the swap face, i.e. the face that you want to place onto the head of person "
"A."
msgstr ""
#: lib/cli/args.py:941 lib/cli/args.py:953 lib/cli/args.py:969
#: lib/cli/args.py:994 lib/cli/args.py:1004
#: lib/cli/args.py:950 lib/cli/args.py:962 lib/cli/args.py:978
#: lib/cli/args.py:1003 lib/cli/args.py:1013
msgid "model"
msgstr ""
#: lib/cli/args.py:942
#: lib/cli/args.py:951
msgid ""
"Model directory. This is where the training data will be stored. You should "
"always specify a new folder for new models. If starting a new model, select "
@@ -499,7 +506,7 @@ msgid ""
"the existing model."
msgstr ""
#: lib/cli/args.py:954
#: lib/cli/args.py:963
msgid ""
"R|Load the weights from a pre-existing model into a newly created model. For "
"most models this will load weights from the Encoder of the given model into "
@@ -513,7 +520,7 @@ msgid ""
"to train."
msgstr ""
#: lib/cli/args.py:970
#: lib/cli/args.py:979
msgid ""
"R|Select which trainer to use. Trainers can be configured from the Settings "
"menu or the config folder.\n"
@@ -536,7 +543,7 @@ msgid ""
"susceptible to color differences."
msgstr ""
#: lib/cli/args.py:995
#: lib/cli/args.py:1004
msgid ""
"Output a summary of the model and exit. If a model folder is provided then a "
"summary of the saved model is displayed. Otherwise a summary of the model "
@@ -544,7 +551,7 @@ msgid ""
"displayed."
msgstr ""
#: lib/cli/args.py:1005
#: lib/cli/args.py:1014
msgid ""
"Freeze the weights of the model. Freezing weights means that some of the "
"parameters in the model will no longer continue to learn, but those that are "
@@ -553,12 +560,12 @@ msgid ""
"layers."
msgstr ""
#: lib/cli/args.py:1018 lib/cli/args.py:1030 lib/cli/args.py:1041
#: lib/cli/args.py:1052 lib/cli/args.py:1135
#: lib/cli/args.py:1027 lib/cli/args.py:1039 lib/cli/args.py:1050
#: lib/cli/args.py:1061 lib/cli/args.py:1144
msgid "training"
msgstr ""
#: lib/cli/args.py:1019
#: lib/cli/args.py:1028
msgid ""
"Batch size. This is the number of images processed through the model for "
"each side per iteration. NB: As the model is fed 2 sides at a time, the "
@@ -566,7 +573,7 @@ msgid ""
"number that you set here. Larger batches require more GPU RAM."
msgstr ""
#: lib/cli/args.py:1031
#: lib/cli/args.py:1040
msgid ""
"Length of training in iterations. This is only really used for automation. "
"There is no 'correct' number of iterations a model should be trained for. "
@@ -575,13 +582,13 @@ msgid ""
"can set that value here."
msgstr ""
#: lib/cli/args.py:1042
#: lib/cli/args.py:1051
msgid ""
"[Deprecated - Use '-D, --distribution-strategy' instead] Use the Tensorflow "
"Mirrored Distrubution Strategy to train on multiple GPUs."
msgstr ""
#: lib/cli/args.py:1053
#: lib/cli/args.py:1062
msgid ""
"R|Select the distribution stategy to use.\n"
"L|default: Use Tensorflow's default distribution strategy.\n"
@@ -594,25 +601,25 @@ msgid ""
"batches distributed to each GPU at each iteration."
msgstr ""
#: lib/cli/args.py:1070 lib/cli/args.py:1080
#: lib/cli/args.py:1079 lib/cli/args.py:1089
msgid "Saving"
msgstr ""
#: lib/cli/args.py:1071
#: lib/cli/args.py:1080
msgid "Sets the number of iterations between each model save."
msgstr ""
#: lib/cli/args.py:1081
#: lib/cli/args.py:1090
msgid ""
"Sets the number of iterations before saving a backup snapshot of the model "
"in it's current state. Set to 0 for off."
msgstr ""
#: lib/cli/args.py:1088 lib/cli/args.py:1099 lib/cli/args.py:1110
#: lib/cli/args.py:1097 lib/cli/args.py:1108 lib/cli/args.py:1119
msgid "timelapse"
msgstr ""
#: lib/cli/args.py:1089
#: lib/cli/args.py:1098
msgid ""
"Optional for creating a timelapse. Timelapse will save an image of your "
"selected faces into the timelapse-output folder at every save iteration. "
@@ -621,7 +628,7 @@ msgid ""
"timelapse-input-B parameter."
msgstr ""
#: lib/cli/args.py:1100
#: lib/cli/args.py:1109
msgid ""
"Optional for creating a timelapse. Timelapse will save an image of your "
"selected faces into the timelapse-output folder at every save iteration. "
@@ -630,7 +637,7 @@ msgid ""
"timelapse-input-A parameter."
msgstr ""
#: lib/cli/args.py:1111
#: lib/cli/args.py:1120
msgid ""
"Optional for creating a timelapse. Timelapse will save an image of your "
"selected faces into the timelapse-output folder at every save iteration. If "
@@ -638,53 +645,53 @@ msgid ""
"model folder /timelapse/"
msgstr ""
#: lib/cli/args.py:1120 lib/cli/args.py:1127
#: lib/cli/args.py:1129 lib/cli/args.py:1136
msgid "preview"
msgstr ""
#: lib/cli/args.py:1121
#: lib/cli/args.py:1130
msgid "Show training preview output. in a separate window."
msgstr ""
#: lib/cli/args.py:1128
#: lib/cli/args.py:1137
msgid ""
"Writes the training result to a file. The image will be stored in the root "
"of your FaceSwap folder."
msgstr ""
#: lib/cli/args.py:1136
#: lib/cli/args.py:1145
msgid ""
"Disables TensorBoard logging. NB: Disabling logs means that you will not be "
"able to use the graph or analysis for this session in the GUI."
msgstr ""
#: lib/cli/args.py:1143 lib/cli/args.py:1152 lib/cli/args.py:1161
#: lib/cli/args.py:1170
#: lib/cli/args.py:1152 lib/cli/args.py:1161 lib/cli/args.py:1170
#: lib/cli/args.py:1179
msgid "augmentation"
msgstr ""
#: lib/cli/args.py:1144
#: lib/cli/args.py:1153
msgid ""
"Warps training faces to closely matched Landmarks from the opposite face-set "
"rather than randomly warping the face. This is the 'dfaker' way of doing "
"warping."
msgstr ""
#: lib/cli/args.py:1153
#: lib/cli/args.py:1162
msgid ""
"To effectively learn, a random set of images are flipped horizontally. "
"Sometimes it is desirable for this not to occur. Generally this should be "
"left off except for during 'fit training'."
msgstr ""
#: lib/cli/args.py:1162
#: lib/cli/args.py:1171
msgid ""
"Color augmentation helps make the model less susceptible to color "
"differences between the A and B sets, at an increased training time cost. "
"Enable this option to disable color augmentation."
msgstr ""
#: lib/cli/args.py:1171
#: lib/cli/args.py:1180
msgid ""
"Warping is integral to training the Neural Network. This option should only "
"be enabled towards the very end of training to try to bring out more detail. "
@@ -692,6 +699,6 @@ msgid ""
"likely to kill a model and lead to terrible results."
msgstr ""
#: lib/cli/args.py:1196
#: lib/cli/args.py:1205
msgid "Output to Shell console instead of GUI console"
msgstr ""

View File

@@ -6,8 +6,8 @@ msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2022-11-02 10:39+0000\n"
"PO-Revision-Date: 2022-11-02 10:42+0000\n"
"POT-Creation-Date: 2022-11-20 01:34+0000\n"
"PO-Revision-Date: 2022-11-20 01:35+0000\n"
"Last-Translator: \n"
"Language-Team: \n"
"Language: ru\n"
@@ -58,7 +58,7 @@ msgstr ""
"с faceswap"
#: lib/cli/args.py:320 lib/cli/args.py:329 lib/cli/args.py:337
#: lib/cli/args.py:386 lib/cli/args.py:668 lib/cli/args.py:677
#: lib/cli/args.py:386 lib/cli/args.py:677 lib/cli/args.py:686
msgid "Data"
msgstr "Данные"
@@ -102,8 +102,8 @@ msgstr ""
#: lib/cli/args.py:396 lib/cli/args.py:412 lib/cli/args.py:424
#: lib/cli/args.py:463 lib/cli/args.py:481 lib/cli/args.py:493
#: lib/cli/args.py:502 lib/cli/args.py:687 lib/cli/args.py:714
#: lib/cli/args.py:752
#: lib/cli/args.py:502 lib/cli/args.py:511 lib/cli/args.py:696
#: lib/cli/args.py:723 lib/cli/args.py:761
msgid "Plugins"
msgstr "Плагины"
@@ -248,6 +248,16 @@ msgstr ""
#: lib/cli/args.py:494
msgid ""
"Re-feed the initially found aligned face through the aligner. Can help "
"produce better alignments for faces that are rotated beyond 45 degrees in "
"the frame or are at extreme angles. Slows down extraction."
msgstr ""
"Повторно подайте первоначально найденное выровненное лицо через элайнер. "
"Может помочь улучшить выравнивание лиц, которые повернуты в кадре более чем "
"на 45 градусов или находятся под экстремальными углами. Замедляет извлечение."
#: lib/cli/args.py:503
msgid ""
"If a face isn't found, rotate the images to try to find a face. Can find "
"more faces at the cost of extraction speed. Pass in a single number to use "
"increments of that size up to 360, or pass in a list of numbers to enumerate "
@@ -258,7 +268,7 @@ msgstr ""
"использовать приращения этого размера до 360, либо передайте список чисел, "
"чтобы точно указать, какие углы проверять."
#: lib/cli/args.py:503
#: lib/cli/args.py:512
msgid ""
"Obtain and store face identity encodings from VGGFace2. Slows down extract a "
"little, but will save time if using 'sort by face'"
@@ -266,13 +276,13 @@ msgstr ""
"Получите и сохраните кодировку идентификации лица от VGGFace2. Немного "
"замедляет извлечение, но сэкономит время при использовании «sort by face»"
#: lib/cli/args.py:513 lib/cli/args.py:523 lib/cli/args.py:535
#: lib/cli/args.py:548 lib/cli/args.py:789 lib/cli/args.py:803
#: lib/cli/args.py:816 lib/cli/args.py:830
#: lib/cli/args.py:522 lib/cli/args.py:532 lib/cli/args.py:544
#: lib/cli/args.py:557 lib/cli/args.py:798 lib/cli/args.py:812
#: lib/cli/args.py:825 lib/cli/args.py:839
msgid "Face Processing"
msgstr "Обработка лиц"
#: lib/cli/args.py:514
#: lib/cli/args.py:523
msgid ""
"Filters out faces detected below this size. Length, in pixels across the "
"diagonal of the bounding box. Set to 0 for off"
@@ -280,7 +290,7 @@ msgstr ""
"Отбрасывает лица ниже указанного размера. Длина указывается в пикселях по "
"диагонали. Установите в 0 для отключения"
#: lib/cli/args.py:524
#: lib/cli/args.py:533
msgid ""
"Optionally filter out people who you do not wish to extract by passing in "
"images of those people. Should be a small variety of images at different "
@@ -292,7 +302,7 @@ msgstr ""
"разными углами и в разных условиях. Можно выбрать папку, содержащую "
"требуемые изображения или несколько файлов изображений, разделенных пробелом."
#: lib/cli/args.py:536
#: lib/cli/args.py:545
msgid ""
"Optionally select people you wish to extract by passing in images of that "
"person. Should be a small variety of images at different angles and in "
@@ -304,7 +314,7 @@ msgstr ""
"углами и в разных условиях. Можно выбрать папку, содержащую необходимые "
"изображения или несколько файлов изображений, разделенных пробелом."
#: lib/cli/args.py:549
#: lib/cli/args.py:558
msgid ""
"For use with the optional nfilter/filter files. Threshold for positive face "
"recognition. Higher values are stricter."
@@ -313,12 +323,12 @@ msgstr ""
"положительного распознавания лиц. Более высокие значения являются более "
"строгими."
#: lib/cli/args.py:558 lib/cli/args.py:570 lib/cli/args.py:582
#: lib/cli/args.py:594
#: lib/cli/args.py:567 lib/cli/args.py:579 lib/cli/args.py:591
#: lib/cli/args.py:603
msgid "output"
msgstr "вывод"
#: lib/cli/args.py:559
#: lib/cli/args.py:568
msgid ""
"The output size of extracted faces. Make sure that the model you intend to "
"train supports your required size. This will only need to be changed for hi-"
@@ -328,7 +338,7 @@ msgstr ""
"поддерживает такой входной размер. Стоит изменять только для моделей "
"высокого разрешения."
#: lib/cli/args.py:571
#: lib/cli/args.py:580
msgid ""
"Extract every 'nth' frame. This option will skip frames when extracting "
"faces. For example a value of 1 will extract faces from every frame, a value "
@@ -338,7 +348,7 @@ msgstr ""
"извлечении. Например, значение 1 будет искать лица в каждом кадре, а "
"значение 10 в каждом 10том кадре."
#: lib/cli/args.py:583
#: lib/cli/args.py:592
msgid ""
"Automatically save the alignments file after a set amount of frames. By "
"default the alignments file is only saved at the end of the extraction "
@@ -353,17 +363,17 @@ msgstr ""
"только во время второго прохода. ВНИМАНИЕ: Не прерывайте выполнение во время "
"записи, так как это может повлечь порчу файла. Установите в 0 для выключения"
#: lib/cli/args.py:595
#: lib/cli/args.py:604
msgid "Draw landmarks on the ouput faces for debugging purposes."
msgstr "Рисовать ландмарки на выходных лицах для нужд отладки."
#: lib/cli/args.py:601 lib/cli/args.py:610 lib/cli/args.py:618
#: lib/cli/args.py:625 lib/cli/args.py:843 lib/cli/args.py:854
#: lib/cli/args.py:862 lib/cli/args.py:881 lib/cli/args.py:887
#: lib/cli/args.py:610 lib/cli/args.py:619 lib/cli/args.py:627
#: lib/cli/args.py:634 lib/cli/args.py:852 lib/cli/args.py:863
#: lib/cli/args.py:871 lib/cli/args.py:890 lib/cli/args.py:896
msgid "settings"
msgstr "настройки"
#: lib/cli/args.py:602
#: lib/cli/args.py:611
msgid ""
"Don't run extraction in parallel. Will run each part of the extraction "
"process separately (one after the other) rather than all at the smae time. "
@@ -373,7 +383,7 @@ msgstr ""
"стадия извлечения будет запущена отдельно (одна, за другой). Полезно при "
"нехватке VRAM."
#: lib/cli/args.py:611
#: lib/cli/args.py:620
msgid ""
"Skips frames that have already been extracted and exist in the alignments "
"file"
@@ -381,16 +391,16 @@ msgstr ""
"Пропускать кадры, которые уже были извлечены и существуют в файле "
"выравнивания"
#: lib/cli/args.py:619
#: lib/cli/args.py:628
msgid "Skip frames that already have detected faces in the alignments file"
msgstr "Пропускать кадры, для которых в файле выравнивания есть найденные лица"
#: lib/cli/args.py:626
#: lib/cli/args.py:635
msgid "Skip saving the detected faces to disk. Just create an alignments file"
msgstr ""
"Не сохранять найденные лица на носитель. Просто создать файл выравнивания"
#: lib/cli/args.py:648
#: lib/cli/args.py:657
msgid ""
"Swap the original faces in a source video/images to your final faces.\n"
"Conversion plugins can be configured in the 'Settings' Menu"
@@ -398,7 +408,7 @@ msgstr ""
"Заменить оригиналы лица в исходном видео/фотографиях новыми.\n"
"Плагины конвертации могут быть настроены в меню 'Настройки'"
#: lib/cli/args.py:669
#: lib/cli/args.py:678
msgid ""
"Only required if converting from images to video. Provide The original video "
"that the source frames were extracted from (for extracting the fps and "
@@ -408,7 +418,7 @@ msgstr ""
"Предоставьте исходное видео, из которого были извлечены кадры (для настройки "
"частоты кадров, а также аудио)."
#: lib/cli/args.py:678
#: lib/cli/args.py:687
msgid ""
"Model directory. The directory containing the trained model you wish to use "
"for conversion."
@@ -416,7 +426,7 @@ msgstr ""
"Папка с моделью. Папка, содержащая обученную модель, которую вы хотите "
"использовать для преобразования."
#: lib/cli/args.py:688
#: lib/cli/args.py:697
msgid ""
"R|Performs color adjustment to the swapped face. Some of these options have "
"configurable settings in '/config/convert.ini' or 'Settings > Configure "
@@ -455,7 +465,7 @@ msgstr ""
"дает удовлетворительных результатов.\n"
"L|none: Не производить подгонку цвета."
#: lib/cli/args.py:715
#: lib/cli/args.py:724
msgid ""
"R|Masker to use. NB: The mask you require must exist within the alignments "
"file. You can add additional masks with the Mask Tool.\n"
@@ -524,7 +534,7 @@ msgstr ""
"L| predicted: Если во время обучения была включена опция «Learn Mask», будет "
"использоваться маска, созданная обученной моделью."
#: lib/cli/args.py:753
#: lib/cli/args.py:762
msgid ""
"R|The plugin to use to output the converted images. The writers are "
"configurable in '/config/convert.ini' or 'Settings > Configure Convert "
@@ -550,11 +560,11 @@ msgstr ""
"L|pillow: [изображения] Более медленный, чем opencv, но имеет больше опций и "
"поддерживает больше форматов."
#: lib/cli/args.py:772 lib/cli/args.py:779 lib/cli/args.py:873
#: lib/cli/args.py:781 lib/cli/args.py:788 lib/cli/args.py:882
msgid "Frame Processing"
msgstr "Обработка кадров"
#: lib/cli/args.py:773
#: lib/cli/args.py:782
#, python-format
msgid ""
"Scale the final output frames by this amount. 100%% will output the frames "
@@ -564,7 +574,7 @@ msgstr ""
"кадры в исходном размере. 50%% половина от размера, а 200%% в удвоенном "
"размере"
#: lib/cli/args.py:780
#: lib/cli/args.py:789
msgid ""
"Frame ranges to apply transfer to e.g. For frames 10 to 50 and 90 to 100 use "
"--frame-ranges 10-50 90-100. Frames falling outside of the selected range "
@@ -577,7 +587,7 @@ msgstr ""
"unchanged). Прим.: Если при конверсии используются изображения, то имена "
"файлов должны заканчиваться номером кадра!"
#: lib/cli/args.py:790
#: lib/cli/args.py:799
msgid ""
"If you have not cleansed your alignments file, then you can filter out faces "
"by defining a folder here that contains the faces extracted from your input "
@@ -593,7 +603,7 @@ msgstr ""
"Если оставить это поле пустым, то все лица, которые существуют в файле "
"выравниваний будут сконвертированы."
#: lib/cli/args.py:804
#: lib/cli/args.py:813
msgid ""
"Optionally filter out people who you do not wish to process by passing in an "
"image of that person. Should be a front portrait with a single person in the "
@@ -607,7 +617,7 @@ msgstr ""
"пробел. Прим.: Фильтрация лиц существенно снижает скорость извлечения, при "
"этом точность не гарантируется."
#: lib/cli/args.py:817
#: lib/cli/args.py:826
msgid ""
"Optionally select people you wish to process by passing in an image of that "
"person. Should be a front portrait with a single person in the image. "
@@ -621,7 +631,7 @@ msgstr ""
"изображений через пробел. Прим.: Использование фильтра существенно замедлит "
"скорость извлечения. Также точность не гарантируется."
#: lib/cli/args.py:831
#: lib/cli/args.py:840
msgid ""
"For use with the optional nfilter/filter files. Threshold for positive face "
"recognition. Lower values are stricter. NB: Using face filter will "
@@ -632,7 +642,7 @@ msgstr ""
"лица. Чем ниже значения, тем строже. Прим.: Использование фильтра лиц "
"существенно замедлит скорость извлечения. Также точность не гарантируется."
#: lib/cli/args.py:844
#: lib/cli/args.py:853
msgid ""
"The maximum number of parallel processes for performing conversion. "
"Converting images is system RAM heavy so it is possible to run out of memory "
@@ -649,7 +659,7 @@ msgstr ""
"будет использоваться больше процессов, чем доступно в вашей системе. Если "
"включен одиночный процесс, этот параметр будет проигнорирован."
#: lib/cli/args.py:855
#: lib/cli/args.py:864
msgid ""
"[LEGACY] This only needs to be selected if a legacy model is being loaded or "
"if there are multiple models in the model folder"
@@ -657,7 +667,7 @@ msgstr ""
"[СОВМЕСТИМОСТЬ] Это нужно выбирать только в том случае, если загружается "
"устаревшая модель или если в папке сохранения есть несколько моделей"
#: lib/cli/args.py:863
#: lib/cli/args.py:872
msgid ""
"Enable On-The-Fly Conversion. NOT recommended. You should generate a clean "
"alignments file for your destination video. However, if you wish you can "
@@ -671,7 +681,7 @@ msgstr ""
"использованию улучшенного конвейера экстракции и некачественных результатов. "
"Если файл выравниваний найден, этот параметр будет проигнорирован."
#: lib/cli/args.py:874
#: lib/cli/args.py:883
msgid ""
"When used with --frame-ranges outputs the unchanged frames that are not "
"processed instead of discarding them."
@@ -679,16 +689,16 @@ msgstr ""
"При использовании с --frame-ranges кадры, не попавшие в диапазон, выводятся "
"неизменными, вместо их пропуска."
#: lib/cli/args.py:882
#: lib/cli/args.py:891
msgid "Swap the model. Instead converting from of A -> B, converts B -> A"
msgstr ""
"Поменять модели местами. Вместо преобразования из A -> B, преобразует B -> A"
#: lib/cli/args.py:888
#: lib/cli/args.py:897
msgid "Disable multiprocessing. Slower but less resource intensive."
msgstr "Отключить многопроцессорность. Медленнее, но менее ресурсоемко."
#: lib/cli/args.py:904
#: lib/cli/args.py:913
msgid ""
"Train a model on extracted original (A) and swap (B) faces.\n"
"Training models can take a long time. Anything from 24hrs to over a week\n"
@@ -699,11 +709,11 @@ msgstr ""
"Обучение моделей может занять долгое время: от 24 часов до недели\n"
"Каждую модель можно отдельно настроить в меню «Настройки»"
#: lib/cli/args.py:923 lib/cli/args.py:932
#: lib/cli/args.py:932 lib/cli/args.py:941
msgid "faces"
msgstr "лица"
#: lib/cli/args.py:924
#: lib/cli/args.py:933
msgid ""
"Input directory. A directory containing training images for face A. This is "
"the original face, i.e. the face that you want to remove and replace with "
@@ -712,7 +722,7 @@ msgstr ""
"Входная папка. Папка содержащая изображения для тренировки лица A. Это "
"исходное лицо т.е. лицо, которое вы хотите убрать, заменив лицом B."
#: lib/cli/args.py:933
#: lib/cli/args.py:942
msgid ""
"Input directory. A directory containing training images for face B. This is "
"the swap face, i.e. the face that you want to place onto the head of person "
@@ -721,12 +731,12 @@ msgstr ""
"Входная папка. Папка содержащая изображения для тренировки лица B. Это новое "
"лицо т.е. лицо, которое вы хотите поместить на голову человека A."
#: lib/cli/args.py:941 lib/cli/args.py:953 lib/cli/args.py:969
#: lib/cli/args.py:994 lib/cli/args.py:1004
#: lib/cli/args.py:950 lib/cli/args.py:962 lib/cli/args.py:978
#: lib/cli/args.py:1003 lib/cli/args.py:1013
msgid "model"
msgstr "модель"
#: lib/cli/args.py:942
#: lib/cli/args.py:951
msgid ""
"Model directory. This is where the training data will be stored. You should "
"always specify a new folder for new models. If starting a new model, select "
@@ -740,7 +750,7 @@ msgstr ""
"будет создана). Если вы хотите продолжить тренировку, выберите папку с уже "
"существующими сохранениями."
#: lib/cli/args.py:954
#: lib/cli/args.py:963
msgid ""
"R|Load the weights from a pre-existing model into a newly created model. For "
"most models this will load weights from the Encoder of the given model into "
@@ -764,7 +774,7 @@ msgstr ""
"NB: Вес можно загружать только из моделей того же плагина, который вы "
"собираетесь тренировать."
#: lib/cli/args.py:970
#: lib/cli/args.py:979
msgid ""
"R|Select which trainer to use. Trainers can be configured from the Settings "
"menu or the config folder.\n"
@@ -810,7 +820,7 @@ msgstr ""
"ресурсам (Вам потребуется GPU с хорошим количеством видеопамяти). Хороша для "
"деталей, но подвержена к неправильной передаче цвета."
#: lib/cli/args.py:995
#: lib/cli/args.py:1004
msgid ""
"Output a summary of the model and exit. If a model folder is provided then a "
"summary of the saved model is displayed. Otherwise a summary of the model "
@@ -822,7 +832,7 @@ msgstr ""
"сводная информация о модели, которая будет создана выбранным плагином, и "
"параметрами конфигурации."
#: lib/cli/args.py:1005
#: lib/cli/args.py:1014
msgid ""
"Freeze the weights of the model. Freezing weights means that some of the "
"parameters in the model will no longer continue to learn, but those that are "
@@ -836,12 +846,12 @@ msgstr ""
"некоторые модели могут иметь параметры конфигурации для замораживания других "
"слоев."
#: lib/cli/args.py:1018 lib/cli/args.py:1030 lib/cli/args.py:1041
#: lib/cli/args.py:1052 lib/cli/args.py:1135
#: lib/cli/args.py:1027 lib/cli/args.py:1039 lib/cli/args.py:1050
#: lib/cli/args.py:1061 lib/cli/args.py:1144
msgid "training"
msgstr "тренировка"
#: lib/cli/args.py:1019
#: lib/cli/args.py:1028
msgid ""
"Batch size. This is the number of images processed through the model for "
"each side per iteration. NB: As the model is fed 2 sides at a time, the "
@@ -854,7 +864,7 @@ msgstr ""
"изображений в два раза больше этого числа. Увеличение размера партии требует "
"больше памяти GPU."
#: lib/cli/args.py:1031
#: lib/cli/args.py:1040
msgid ""
"Length of training in iterations. This is only really used for automation. "
"There is no 'correct' number of iterations a model should be trained for. "
@@ -868,7 +878,7 @@ msgstr ""
"Однако, если вы хотите, чтобы тренировка прервалась после указанного кол-ва "
"итерация, вы можете ввести это здесь."
#: lib/cli/args.py:1042
#: lib/cli/args.py:1051
msgid ""
"[Deprecated - Use '-D, --distribution-strategy' instead] Use the Tensorflow "
"Mirrored Distrubution Strategy to train on multiple GPUs."
@@ -877,7 +887,7 @@ msgstr ""
"Используйте стратегию зеркального распространения Tensorflow для обучения на "
"нескольких графических процессорах."
#: lib/cli/args.py:1053
#: lib/cli/args.py:1062
msgid ""
"R|Select the distribution stategy to use.\n"
"L|default: Use Tensorflow's default distribution strategy.\n"
@@ -902,15 +912,15 @@ msgstr ""
"в каждый GPU, причем пакеты распределяются между каждым GPU на каждой "
"итерации."
#: lib/cli/args.py:1070 lib/cli/args.py:1080
#: lib/cli/args.py:1079 lib/cli/args.py:1089
msgid "Saving"
msgstr "Сохранение"
#: lib/cli/args.py:1071
#: lib/cli/args.py:1080
msgid "Sets the number of iterations between each model save."
msgstr "Установка количества итераций между сохранениями модели."
#: lib/cli/args.py:1081
#: lib/cli/args.py:1090
msgid ""
"Sets the number of iterations before saving a backup snapshot of the model "
"in it's current state. Set to 0 for off."
@@ -918,11 +928,11 @@ msgstr ""
"Устанавливает кол-во итераций перед созданием резервной копии модели. "
"Установите в 0 для отключения."
#: lib/cli/args.py:1088 lib/cli/args.py:1099 lib/cli/args.py:1110
#: lib/cli/args.py:1097 lib/cli/args.py:1108 lib/cli/args.py:1119
msgid "timelapse"
msgstr "таймлапс"
#: lib/cli/args.py:1089
#: lib/cli/args.py:1098
msgid ""
"Optional for creating a timelapse. Timelapse will save an image of your "
"selected faces into the timelapse-output folder at every save iteration. "
@@ -935,7 +945,7 @@ msgstr ""
"папку лиц набора 'A' для использования при создании таймлапса. Вам также "
"нужно указать параметры --timelapse-output и --timelapse-input-B."
#: lib/cli/args.py:1100
#: lib/cli/args.py:1109
msgid ""
"Optional for creating a timelapse. Timelapse will save an image of your "
"selected faces into the timelapse-output folder at every save iteration. "
@@ -949,7 +959,7 @@ msgstr ""
"таймлапса. Вы также должны указать параметр --timelapse-output и --timelapse-"
"input-A."
#: lib/cli/args.py:1111
#: lib/cli/args.py:1120
msgid ""
"Optional for creating a timelapse. Timelapse will save an image of your "
"selected faces into the timelapse-output folder at every save iteration. If "
@@ -961,15 +971,15 @@ msgstr ""
"указаны только входные папки, то по умолчанию вывод будет сохранен вместе с "
"моделью в подкаталог /timelapse/"
#: lib/cli/args.py:1120 lib/cli/args.py:1127
#: lib/cli/args.py:1129 lib/cli/args.py:1136
msgid "preview"
msgstr "предварительный просмотр"
#: lib/cli/args.py:1121
#: lib/cli/args.py:1130
msgid "Show training preview output. in a separate window."
msgstr "Показывать предварительный просмотр в отдельном окне."
#: lib/cli/args.py:1128
#: lib/cli/args.py:1137
msgid ""
"Writes the training result to a file. The image will be stored in the root "
"of your FaceSwap folder."
@@ -977,7 +987,7 @@ msgstr ""
"Записывает результат тренировки в файл. Файл будет сохранен в коренной папке "
"FaceSwap."
#: lib/cli/args.py:1136
#: lib/cli/args.py:1145
msgid ""
"Disables TensorBoard logging. NB: Disabling logs means that you will not be "
"able to use the graph or analysis for this session in the GUI."
@@ -985,12 +995,12 @@ msgstr ""
"Отключает журнал TensorBoard. Примечание: Отключение журналов означает, что "
"вы не сможете использовать графики или анализ сессии внутри GUI."
#: lib/cli/args.py:1143 lib/cli/args.py:1152 lib/cli/args.py:1161
#: lib/cli/args.py:1170
#: lib/cli/args.py:1152 lib/cli/args.py:1161 lib/cli/args.py:1170
#: lib/cli/args.py:1179
msgid "augmentation"
msgstr "аугментация"
#: lib/cli/args.py:1144
#: lib/cli/args.py:1153
msgid ""
"Warps training faces to closely matched Landmarks from the opposite face-set "
"rather than randomly warping the face. This is the 'dfaker' way of doing "
@@ -1000,7 +1010,7 @@ msgstr ""
"Ориентирами/Landmarks противоположного набора лиц. Этот способ используется "
"пакетом \"dfaker\"."
#: lib/cli/args.py:1153
#: lib/cli/args.py:1162
msgid ""
"To effectively learn, a random set of images are flipped horizontally. "
"Sometimes it is desirable for this not to occur. Generally this should be "
@@ -1011,7 +1021,7 @@ msgstr ""
"происходило. Как правило, эту настройку не стоит трогать, за исключением "
"периода «финальной шлифовки»."
#: lib/cli/args.py:1162
#: lib/cli/args.py:1171
msgid ""
"Color augmentation helps make the model less susceptible to color "
"differences between the A and B sets, at an increased training time cost. "
@@ -1021,7 +1031,7 @@ msgstr ""
"цвета между наборами A and B ценой некоторого замедления скорости "
"тренировки. Включите эту опцию для отключения цветовой аугментации."
#: lib/cli/args.py:1171
#: lib/cli/args.py:1180
msgid ""
"Warping is integral to training the Neural Network. This option should only "
"be enabled towards the very end of training to try to bring out more detail. "
@@ -1034,7 +1044,7 @@ msgstr ""
"Включение этой опции с самого начала может убить модель и привести к ужасным "
"результатам."
#: lib/cli/args.py:1196
#: lib/cli/args.py:1205
msgid "Output to Shell console instead of GUI console"
msgstr "Вывод в системную консоль вместо GUI"

View File

@@ -33,7 +33,7 @@ if TYPE_CHECKING:
from .mask._base import MaskerBatch
from .recognition._base import RecogBatch
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
logger = logging.getLogger(__name__)
# TODO Run with warnings mode
@@ -77,7 +77,7 @@ class ExtractorBatch:
List of :class:`~lib.align.DetectedFace` objects
filename: list
List of original frame filenames for the batch
feed: :class:`numpy.nd.array`
feed: :class:`numpy.ndarray`
Batch of feed images to feed the net with
prediction: :class:`numpy.nd.array`
Batch of predictions. Direct output from the aligner net
@@ -604,6 +604,8 @@ class Extractor():
batch = self._obtain_batch_item(function, in_queue, out_queue)
if batch is None:
break
if not batch.filename: # Batch not populated. Possible during re-aligns
continue
try:
batch = function(batch)
except tf_errors.UnknownError as err:

View File

@@ -101,7 +101,7 @@ class Config(FaceswapConfig):
datatype=bool,
default=True,
group="filters",
info="If enabled, and re-feed has been selected for extraction, then interim "
info="If enabled, and 're-feed' has been selected for extraction, then interim "
"alignments will be filtered prior to averaging the final landmarks. This can "
"help improve the final alignments by removing any obvious misaligns from the "
"interim results, and may also help pick up difficult alignments. If disabled, "
@@ -116,3 +116,20 @@ class Config(FaceswapConfig):
"extraction process. If disabled, filtered faces are deleted. Note: The faces "
"will always be filtered out of the alignments file, regardless of whether you "
"keep the faces or not.")
self.add_item(
section=section,
title="realign_refeeds",
datatype=bool,
default=True,
group="re-align",
info="If enabled, and 're-align' has been selected for extraction, then all re-feed "
"iterations are re-aligned. If disabled, then only the final averaged output "
"from re-feed will be re-aligned.")
self.add_item(
section=section,
title="filter_realign",
datatype=bool,
default=True,
group="re-align",
info="If enabled, and 're-align' has been selected for extraction, then any "
"alignments which would be filtered out will not be re-aligned.")

View File

@@ -0,0 +1,4 @@
#!/usr/bin/env python3
""" Base class for Aligner plugins ALL aligners should at least inherit from this class. """
from .aligner import Aligner, AlignerBatch, BatchType

View File

@@ -16,16 +16,17 @@ import logging
import sys
from dataclasses import dataclass, field
from typing import cast, Dict, Generator, List, Optional, Tuple, TYPE_CHECKING
from time import sleep
from typing import cast, Generator, List, Optional, Tuple, TYPE_CHECKING
import cv2
import numpy as np
from tensorflow.python.framework import errors_impl as tf_errors # pylint:disable=no-name-in-module # noqa
from lib.align import AlignedFace, DetectedFace
from lib.utils import get_backend, FaceswapError
from plugins.extract._base import BatchType, Extractor, ExtractMedia, ExtractorBatch
from .processing import AlignedFilter, ReAlign
if sys.version_info < (3, 8):
from typing_extensions import Literal
@@ -34,8 +35,24 @@ else:
if TYPE_CHECKING:
from queue import Queue
from lib.align import DetectedFace
from lib.align.aligned_face import CenteringType
logger = logging.getLogger(__name__)
_BATCH_IDX: int = 0
def _get_new_batch_id() -> int:
""" Obtain the next available batch index
Returns
-------
int
The next available unique batch id
"""
global _BATCH_IDX # pylint:disable=global-statement
_BATCH_IDX += 1
return _BATCH_IDX
@dataclass
@@ -46,15 +63,46 @@ class AlignerBatch(ExtractorBatch):
Parameters
----------
batch_id: int
A unique integer for tracking this batch
landmarks: list
List of 68 point :class:`numpy.ndarray` landmark points returned from the aligner
refeeds: list
List of :class:`numpy.ndarrays` for holding each of the feeds that will be put through the
model for each refeed
second_pass: bool, optional
``True`` if this batch is passing through the aligner for a second time as re-align has
been selected otherwise ``False``. Default: ``False``
second_pass_masks: :class:`numpy.ndarray`, optional
The masks used to filter out re-feed values for passing to the re-aligner.
"""
batch_id: int = 0
detected_faces: List["DetectedFace"] = field(default_factory=list)
landmarks: np.ndarray = np.array([])
refeeds: List[np.ndarray] = field(default_factory=list)
second_pass: bool = False
second_pass_masks: np.ndarray = np.array([])
    def __repr__(self):
        """ Prettier repr for debug printing.

        Returns
        -------
        str
            Readable representation with each :class:`numpy.ndarray` attribute summarized by
            its shape rather than its full contents
        """
        # Summarize any ndarray values held in the data dicts by their shape to keep the
        # output compact when logged
        data = [{k: v.shape if isinstance(v, np.ndarray) else v for k, v in dat.items()}
                for dat in self.data]
        return ("AlignerBatch("
                f"batch_id={self.batch_id}, "
                f"image={[img.shape for img in self.image]}, "
                f"detected_faces={self.detected_faces}, "
                f"filename={self.filename}, "
                f"feed={self.feed.shape}, "
                f"prediction={self.prediction.shape}, "
                f"data={data}, "
                f"landmarks={self.landmarks.shape}, "
                f"refeeds={[feed.shape for feed in self.refeeds]}, "
                f"second_pass={self.second_pass}, "
                f"second_pass_masks={self.second_pass_masks})")
    def __post_init__(self):
        """ Validate that a unique, non-default batch ID was supplied at creation time.

        Raises
        ------
        AssertionError
            If the batch was created with the default ``batch_id`` of ``0``
        """
        # A batch_id of 0 means the caller forgot to obtain an id from _get_new_batch_id()
        assert self.batch_id != 0, ("A batch ID must be specified for Aligner Batches")
class Aligner(Extractor): # pylint:disable=abstract-method
@@ -74,6 +122,9 @@ class Aligner(Extractor): # pylint:disable=abstract-method
re_feed: int, optional
The number of times to re-feed a slightly adjusted bounding box into the aligner.
Default: `0`
re_align: bool, optional
``True`` to obtain landmarks by passing the initially aligned face back through the
aligner. Default ``False``
disable_filter: bool, optional
Disable all aligner filters regardless of config option. Default: ``False``
Other Parameters
@@ -97,20 +148,22 @@ class Aligner(Extractor): # pylint:disable=abstract-method
instance: int = 0,
normalize_method: Optional[Literal["none", "clahe", "hist", "mean"]] = None,
re_feed: int = 0,
re_align: bool = False,
disable_filter: bool = False,
**kwargs) -> None:
logger.debug("Initializing %s: (normalize_method: %s, re_feed: %s, disable_filter: %s)",
self.__class__.__name__, normalize_method, re_feed, disable_filter)
logger.debug("Initializing %s: (normalize_method: %s, re_feed: %s, re_align: %s, "
"disable_filter: %s)", self.__class__.__name__, normalize_method, re_feed,
re_align, disable_filter)
super().__init__(git_model_id,
model_filename,
configfile=configfile,
instance=instance,
**kwargs)
self._plugin_type = "align"
self.realign_centering: "CenteringType" = "face" # overide for plugin specific centering
self._eof_seen = False
self._normalize_method: Optional[Literal["clahe", "hist", "mean"]] = None
self._re_feed = re_feed
self.set_normalize_method(normalize_method)
self._plugin_type = "align"
self._filter = AlignedFilter(feature_filter=self.config["aligner_features"],
min_scale=self.config["aligner_min_scale"],
max_scale=self.config["aligner_max_scale"],
@@ -118,6 +171,14 @@ class Aligner(Extractor): # pylint:disable=abstract-method
roll=self.config["aligner_roll"],
save_output=self.config["save_filtered"],
disable=disable_filter)
self._re_align = ReAlign(re_align,
self.config["realign_refeeds"],
self.config["filter_realign"])
self._needs_refeed_masks: bool = self._re_feed > 0 and (
self.config["filter_refeed"] or (self._re_align.do_refeeds and
self._re_align.do_filter))
self.set_normalize_method(normalize_method)
logger.debug("Initialized %s", self.__class__.__name__)
def set_normalize_method(self,
@@ -132,7 +193,54 @@ class Aligner(Extractor): # pylint:disable=abstract-method
method = None if method is None or method.lower() == "none" else method
self._normalize_method = cast(Optional[Literal["clahe", "hist", "mean"]], method)
# << QUEUE METHODS >>> #
    def initialize(self, *args, **kwargs) -> None:
        """ Initialize the plugin, first passing this aligner's model input size and centering
        to the re-align processor so that second-pass faces can be generated to match.

        Parameters
        ----------
        args: tuple
            Arguments passed through to the parent initializer
        kwargs: dict
            Keyword arguments passed through to the parent initializer
        """
        self._re_align.set_input_size_and_centering(self.input_size, self.realign_centering)
        super().initialize(*args, **kwargs)
    def _handle_realigns(self, queue: "Queue") -> Optional[Tuple[bool, AlignerBatch]]:
        """ Handle any items waiting for a second pass through the aligner.

        If EOF has been received and items are still being processed through the first pass
        then wait for a short time and try again to collect them.

        On EOF return exhausted flag with an empty batch

        Parameters
        ----------
        queue : queue.Queue()
            The ``queue`` that the plugin will be fed from.

        Returns
        -------
        ``None`` or tuple
            If items are processed then returns (`bool`, :class:`AlignerBatch`) containing the
            exhausted flag and the batch to be processed. If no items are processed returns
            ``None``
        """
        if not self._re_align.active:
            # Re-align not selected: the caller should collect from the queue as normal
            return None
        exhausted = False
        if self._re_align.items_queued:
            # A completed first-pass batch is ready for its second pass
            batch = self._re_align.get_batch()
            logger.trace("Re-align batch: %s", batch)  # type: ignore[attr-defined]
            return exhausted, batch

        if self._eof_seen and self._re_align.items_tracked:
            # EOF seen and items still being processed on first pass
            logger.debug("Tracked re-align items waiting to be flushed, retrying...")
            sleep(0.25)
            # Recurse back through get_batch to give the first-pass items time to arrive
            return self.get_batch(queue)

        if self._eof_seen:
            # Nothing queued or tracked: everything has been through both passes
            exhausted = True
            logger.debug("All items processed. Returning empty batch")
            self._filter.output_counts()
            # batch_id of -1 marks an empty sentinel batch (0 is disallowed by __post_init__)
            return exhausted, AlignerBatch(batch_id=-1)
        return None
def get_batch(self, queue: "Queue") -> Tuple[bool, AlignerBatch]:
""" Get items for inputting into the aligner from the queue in batches
@@ -168,14 +276,21 @@ class Aligner(Extractor): # pylint:disable=abstract-method
The batch object for the current batch
"""
exhausted = False
batch = AlignerBatch()
realign_batch = self._handle_realigns(queue)
if realign_batch is not None:
return realign_batch
batch = AlignerBatch(batch_id=_get_new_batch_id())
idx = 0
while idx < self.batchsize:
item = self.rollover_collector(queue)
if item == "EOF":
logger.trace("EOF received") # type:ignore
exhausted = True
logger.debug("EOF received")
self._eof_seen = True
exhausted = not self._re_align.items_tracked
break
# Put frames with no faces or are already aligned into the out queue
if not item.detected_faces or item.is_aligned:
self._queues["out"].put(item)
@@ -195,16 +310,15 @@ class Aligner(Extractor): # pylint:disable=abstract-method
item.image,
detected_faces=item.detected_faces[f_idx + 1:],
is_aligned=item.is_aligned)
logger.trace("Rolled over %s faces of %s to next batch " # type:ignore
"for '%s'", len(self._rollover.detected_faces), frame_faces,
item.filename)
logger.trace("Rolled over %s faces of %s to " # type: ignore[attr-defined]
"next batch for '%s'", len(self._rollover.detected_faces),
frame_faces, item.filename)
break
if batch.filename:
logger.trace("Returning batch: %s", {k: len(v) # type:ignore
if isinstance(v, (list, np.ndarray)) else v
for k, v in batch.__dict__.items()})
logger.trace("Returning batch: %s", batch) # type: ignore[attr-defined]
self._re_align.track_batch(batch.batch_id)
else:
logger.debug(item) # type:ignore
logger.debug(item)
# TODO Move to end of process not beginning
if exhausted:
@@ -212,6 +326,22 @@ class Aligner(Extractor): # pylint:disable=abstract-method
return exhausted, batch
    def faces_to_feed(self, faces: np.ndarray) -> np.ndarray:
        """ Override for specific plugin processing to convert a batch of face images from UINT8
        (0-255) into the correct format for the plugin's inference

        Parameters
        ----------
        faces: :class:`numpy.ndarray`
            The batch of faces in UINT8 format

        Returns
        -------
        class: `numpy.ndarray`
            The batch of faces in the format to feed through the plugin

        Raises
        ------
        NotImplementedError
            If the child aligner plugin has not overridden this method
        """
        raise NotImplementedError()
# <<< FINALIZE METHODS >>> #
def finalize(self, batch: BatchType) -> Generator[ExtractMedia, None, None]:
""" Finalize the output from Aligner
@@ -231,16 +361,17 @@ class Aligner(Extractor): # pylint:disable=abstract-method
The :attr:`DetectedFaces` list will be populated for this class with the bounding boxes
and landmarks for the detected faces found in the frame.
"""
assert isinstance(batch, AlignerBatch)
if not batch.second_pass and self._re_align.active:
# Add the batch for second pass re-alignment and return
self._re_align.add_batch(batch)
return
for face, landmarks in zip(batch.detected_faces, batch.landmarks):
if not isinstance(landmarks, np.ndarray):
landmarks = np.array(landmarks)
face.add_landmarks_xy(landmarks)
logger.trace("Item out: %s", {key: val.shape # type:ignore
if isinstance(val, np.ndarray) else val
for key, val in batch.__dict__.items()})
logger.trace("Item out: %s", batch) # type: ignore[attr-defined]
for frame, filename, face in zip(batch.image, batch.filename, batch.detected_faces):
self._output_faces.append(face)
@@ -254,54 +385,14 @@ class Aligner(Extractor): # pylint:disable=abstract-method
output.add_sub_folders(folders)
self._output_faces = []
logger.trace("Final Output: (filename: '%s', image shape: %s, " # type:ignore
"detected_faces: %s, item: %s)",
output.filename, output.image_shape, output.detected_faces, output)
logger.trace("Final Output: (filename: '%s', image " # type: ignore[attr-defined]
"shape: %s, detected_faces: %s, item: %s)", output.filename,
output.image_shape, output.detected_faces, output)
yield output
self._re_align.untrack_batch(batch.batch_id)
# <<< PROTECTED METHODS >>> #
# << PROCESS_INPUT WRAPPER >>
def _process_input(self, batch: BatchType) -> AlignerBatch:
""" Process the input to the aligner model multiple times based on the user selected
`re-feed` command line option. This adjusts the bounding box for the face to be fed
into the model by a random amount within 0.05 pixels of the detected face's shortest axis.
References
----------
https://studios.disneyresearch.com/2020/06/29/high-resolution-neural-face-swapping-for-visual-effects/
Parameters
----------
batch: :class:`AlignerBatch`
Contains the batch that is currently being passed through the plugin process
Returns
-------
:class:`AlignerBatch`
The batch with input processed
"""
assert isinstance(batch, AlignerBatch)
original_boxes = np.array([(face.left, face.top, face.width, face.height)
for face in batch.detected_faces])
adjusted_boxes = self._get_adjusted_boxes(original_boxes)
# Put in random re-feed data to the bounding boxes
for bounding_boxes in adjusted_boxes:
for face, box in zip(batch.detected_faces, bounding_boxes):
face.left, face.top, face.width, face.height = box
self.process_input(batch)
# Move the populated feed into the batch refeed list. It will be overwritten at next
# iteration
batch.refeeds.append(batch.feed)
# Place the original bounding box back to detected face objects
for face, box in zip(batch.detected_faces, original_boxes):
face.left, face.top, face.width, face.height = box
return batch
def _get_adjusted_boxes(self, original_boxes: np.ndarray) -> np.ndarray:
""" Obtain an array of adjusted bounding boxes based on the number of re-feed iterations
that have been selected and the minimum dimension of the original bounding box.
@@ -324,9 +415,104 @@ class Aligner(Extractor): # pylint:disable=abstract-method
rands = np.random.rand(self._re_feed, *original_boxes.shape) * 2 - 1
new_boxes = np.rint(original_boxes + (rands * max_shift[None, :, None])).astype("int32")
retval = np.concatenate((original_boxes[None, ...], new_boxes))
logger.trace(retval) # type:ignore
logger.trace(retval) # type: ignore[attr-defined]
return retval
    def _process_input_first_pass(self, batch: AlignerBatch) -> None:
        """ Standard pre-processing for aligners for first pass (if re-align selected) or the
        only pass.

        Process the input to the aligner model multiple times based on the user selected
        `re-feed` command line option. This adjusts the bounding box for the face to be fed
        into the model by a random amount within 0.05 pixels of the detected face's shortest axis.

        References
        ----------
        https://studios.disneyresearch.com/2020/06/29/high-resolution-neural-face-swapping-for-visual-effects/

        Parameters
        ----------
        batch: :class:`AlignerBatch`
            Contains the batch that is currently being passed through the plugin process
        """
        original_boxes = np.array([(face.left, face.top, face.width, face.height)
                                   for face in batch.detected_faces])
        adjusted_boxes = self._get_adjusted_boxes(original_boxes)

        # Put in random re-feed data to the bounding boxes. NOTE: the detected face objects are
        # mutated in place so that process_input() picks up the jittered boxes; the original
        # boxes are restored at the end of this method
        for bounding_boxes in adjusted_boxes:
            for face, box in zip(batch.detected_faces, bounding_boxes):
                face.left, face.top, face.width, face.height = box

            self.process_input(batch)
            batch.feed = self.faces_to_feed(self._normalize_faces(batch.feed))
            # Move the populated feed into the batch refeed list. It will be overwritten at next
            # iteration
            batch.refeeds.append(batch.feed)

        # Place the original bounding box back to detected face objects
        for face, box in zip(batch.detected_faces, original_boxes):
            face.left, face.top, face.width, face.height = box
def _get_realign_masks(self, batch: AlignerBatch) -> np.ndarray:
""" Obtain the masks required for processing re-aligns
Parameters
----------
batch: :class:`AlignerBatch`
Contains the batch that is currently being passed through the plugin process
Returns
-------
:class:`numpy.ndarray`
The filter masks required for masking the re-aligns
"""
if self._re_align.do_refeeds:
retval = batch.second_pass_masks # Masks already calculated during re-feed
elif self._re_align.do_filter:
retval = self._filter.filtered_mask(batch)[None, ...]
else:
retval = np.zeros((batch.landmarks.shape[0], ), dtype="bool")[None, ...]
return retval
    def _process_input_second_pass(self, batch: AlignerBatch) -> None:
        """ Process the input for 2nd-pass re-alignment

        Parameters
        ----------
        batch: :class:`AlignerBatch`
            Contains the batch that is currently being passed through the plugin process
        """
        batch.second_pass_masks = self._get_realign_masks(batch)

        if not self._re_align.do_refeeds:
            # Expand the dimensions for re-aligns for consistent handling of code
            batch.landmarks = batch.landmarks[None, ...]

        # ReAlign supplies the face batches for the second pass; normalize and convert each for
        # feeding through the model
        refeeds = self._re_align.process_batch(batch)
        batch.refeeds = [self.faces_to_feed(self._normalize_faces(faces)) for faces in refeeds]
def _process_input(self, batch: BatchType) -> AlignerBatch:
""" Perform pre-processing depending on whether this is the first/only pass through the
aligner or the 2nd pass when re-align has been selected
Parameters
----------
batch: :class:`AlignerBatch`
Contains the batch that is currently being passed through the plugin process
Returns
-------
:class:`AlignerBatch`
The batch with input processed
"""
assert isinstance(batch, AlignerBatch)
if batch.second_pass:
self._process_input_second_pass(batch)
else:
self._process_input_first_pass(batch)
return batch
# <<< PREDICT WRAPPER >>> #
def _predict(self, batch: BatchType) -> AlignerBatch:
""" Just return the aligner's predict function
@@ -378,7 +564,93 @@ class Aligner(Extractor): # pylint:disable=abstract-method
raise FaceswapError(msg) from err
raise
def _get_mean_landmarks(self, landmarks: np.ndarray, masks: List[List[bool]]) -> np.ndarray:
    def _process_refeeds(self, batch: AlignerBatch) -> List[AlignerBatch]:
        """ Process the output for each selected re-feed

        Parameters
        ----------
        batch: :class:`AlignerBatch`
            The batch object passing through the aligner

        Returns
        -------
        list
            List of :class:`AlignerBatch` objects. Each object in the list contains the
            results for each selected re-feed
        """
        retval: List[AlignerBatch] = []
        if batch.second_pass:
            # Re-insert empty sub-patches for re-population in ReAlign for filtered out batches
            # Only unfiltered re-feeds were fed through the model, so the index into the
            # predictions is tracked separately from the mask index
            selected_idx = 0
            for mask in batch.second_pass_masks:
                all_filtered = np.all(mask)
                if not all_filtered:
                    feed = batch.refeeds[selected_idx]
                    pred = batch.prediction[selected_idx]
                    data = batch.data[selected_idx]
                    selected_idx += 1
                else:  # All results have been filtered out
                    feed = pred = np.array([])
                    data = {}

                subbatch = AlignerBatch(batch_id=batch.batch_id,
                                        image=batch.image,
                                        detected_faces=batch.detected_faces,
                                        filename=batch.filename,
                                        feed=feed,
                                        prediction=pred,
                                        data=[data],
                                        second_pass=batch.second_pass)
                if not all_filtered:
                    # Nothing to post-process when the whole sub-batch was filtered out
                    self.process_output(subbatch)
                retval.append(subbatch)
        else:
            # First pass: one sub-batch per re-feed iteration
            for feed, pred, data in zip(batch.refeeds, batch.prediction, batch.data):
                subbatch = AlignerBatch(batch_id=batch.batch_id,
                                        image=batch.image,
                                        detected_faces=batch.detected_faces,
                                        filename=batch.filename,
                                        feed=feed,
                                        prediction=pred,
                                        data=[data],
                                        second_pass=batch.second_pass)
                self.process_output(subbatch)
                retval.append(subbatch)
        return retval
    def _get_refeed_filter_masks(self,
                                 subbatches: List[AlignerBatch],
                                 original_masks: Optional[np.ndarray] = None) -> np.ndarray:
        """ Obtain the boolean mask array for masking out failed re-feed results if filter refeed
        has been selected

        Parameters
        ----------
        subbatches: list
            List of sub-batch results for each re-feed performed
        original_masks: :class:`numpy.ndarray`, Optional
            If passing in the second pass landmarks, these should be the original filter masks so
            that we don't calculate the mask again for already filtered faces. NOTE: when
            supplied, this array is updated in place and returned. Default: ``None``

        Returns
        -------
        :class:`numpy.ndarray`
            boolean values for every detected face indicating whether the interim landmarks have
            passed the filter test
        """
        retval = np.zeros((len(subbatches), subbatches[0].landmarks.shape[0]), dtype="bool")
        if not self._needs_refeed_masks:
            # No filtering required so nothing is masked out
            return retval
        # Re-use the first pass masks when provided so already-filtered faces stay filtered
        retval = retval if original_masks is None else original_masks
        for subbatch, masks in zip(subbatches, retval):
            # masks[:] writes back into retval's row; np.flatnonzero passes the indices of
            # already-masked faces so the filter can skip them
            masks[:] = self._filter.filtered_mask(subbatch, np.flatnonzero(masks))
        return retval
def _get_mean_landmarks(self, landmarks: np.ndarray, masks: np.ndarray) -> np.ndarray:
""" Obtain the averaged landmarks from the re-fed alignments. If config option
'filter_refeed' is enabled, then average those results which have not been filtered out
otherwise average all results
@@ -387,7 +659,7 @@ class Aligner(Extractor): # pylint:disable=abstract-method
----------
landmarks: :class:`numpy.ndarray`
The batch of re-fed alignments
masks: list
masks: :class:`numpy.ndarray`
List of boolean values indicating whether each re-fed alignments passed or failed
the filter test
@@ -396,20 +668,65 @@ class Aligner(Extractor): # pylint:disable=abstract-method
:class:`numpy.ndarray`
The final averaged landmarks
"""
if not self.config["filter_refeed"]:
return landmarks.mean(axis=0).astype("float32")
mask = np.array(masks)
if any(np.all(masked) for masked in mask.T):
if any(np.all(masked) for masked in masks.T):
# hacky fix for faces which entirely failed the filter
# We just unmask one value as it is junk anyway and will be discarded on output
for idx, masked in enumerate(mask.T):
for idx, masked in enumerate(masks.T):
if np.all(masked):
mask[0, idx] = False
masks[0, idx] = False
mask = np.broadcast_to(np.reshape(mask, (*landmarks.shape[:2], 1, 1)),
landmarks.shape)
return np.ma.array(landmarks, mask=mask).mean(axis=0).data.astype("float32")
masks = np.broadcast_to(np.reshape(masks, (*landmarks.shape[:2], 1, 1)),
landmarks.shape)
return np.ma.array(landmarks, mask=masks).mean(axis=0).data.astype("float32")
    def _process_output_first_pass(self, subbatches: List[AlignerBatch]) -> Tuple[np.ndarray,
                                                                                  np.ndarray]:
        """ Process the output from the aligner if this is the first or only pass.

        Parameters
        ----------
        subbatches: list
            List of sub-batch results for each re-feed performed

        Returns
        -------
        landmarks: :class:`numpy.ndarray`
            If re-align is not selected or if re-align has been selected but only on the final
            output (ie: realign_refeeds is ``False``) then the averaged batch of landmarks for
            all re-feeds is returned.
            If realign_refeeds has been selected, then this will output each batch of re-feed
            landmarks.
        masks: :class:`numpy.ndarray`
            Boolean mask corresponding to the re-fed landmarks output indicating any values which
            should be filtered out prior to further processing
        """
        masks = self._get_refeed_filter_masks(subbatches)
        all_landmarks = np.array([sub.landmarks for sub in subbatches])

        # re-align not selected or not filtering the re-feeds: collapse to averaged landmarks
        if not self._re_align.do_refeeds:
            retval = self._get_mean_landmarks(all_landmarks, masks)
            return retval, masks

        # Re-align selected with filter re-feeds: every re-feed will be re-aligned individually
        return all_landmarks, masks
    def _process_output_second_pass(self,
                                    subbatches: List[AlignerBatch],
                                    masks: np.ndarray) -> np.ndarray:
        """ Process the output from the aligner for the second (re-align) pass.

        Parameters
        ----------
        subbatches: list
            List of sub-batch results for each re-aligned re-feed performed
        masks: :class:`numpy.ndarray`
            The original re-feed filter masks from the first pass

        Returns
        -------
        :class:`numpy.ndarray`
            The final averaged landmarks for the batch
        """
        self._re_align.process_output(subbatches, masks)
        # Re-run the filter on the re-aligned results, skipping faces already masked out on the
        # first pass
        masks = self._get_refeed_filter_masks(subbatches, original_masks=masks)
        all_landmarks = np.array([sub.landmarks for sub in subbatches])
        return self._get_mean_landmarks(all_landmarks, masks)
def _process_output(self, batch: BatchType) -> AlignerBatch:
""" Process the output from the aligner model multiple times based on the user selected
@@ -429,37 +746,24 @@ class Aligner(Extractor): # pylint:disable=abstract-method
The batch item with :attr:`landmarks` populated
"""
assert isinstance(batch, AlignerBatch)
landmark_list: List[np.ndarray] = []
masks: List[List[bool]] = []
for idx in range(self._re_feed + 1):
# Create a pseudo object that only populates the data, feed and prediction slots with
# the current re-feed iteration
subbatch = AlignerBatch(image=batch.image,
detected_faces=batch.detected_faces,
filename=batch.filename,
feed=batch.refeeds[idx],
prediction=batch.prediction[idx],
data=[batch.data[idx]])
self.process_output(subbatch)
landmark_list.append(subbatch.landmarks)
if self.config["filter_refeed"]:
fcs = [DetectedFace(landmarks_xy=lm) for lm in subbatch.landmarks.copy()]
min_sizes = [min(img.shape[:2]) for img in batch.image]
masks.append(self._filter.filtered_mask(fcs, min_sizes))
batch.landmarks = self._get_mean_landmarks(np.array(landmark_list), masks)
subbatches = self._process_refeeds(batch)
if batch.second_pass:
batch.landmarks = self._process_output_second_pass(subbatches, batch.second_pass_masks)
else:
landmarks, masks = self._process_output_first_pass(subbatches)
batch.landmarks = landmarks
batch.second_pass_masks = masks
return batch
# <<< FACE NORMALIZATION METHODS >>> #
def _normalize_faces(self, faces: List[np.ndarray]) -> List[np.ndarray]:
def _normalize_faces(self, faces: np.ndarray) -> np.ndarray:
""" Normalizes the face for feeding into model
The normalization method is dictated by the normalization command line argument
Parameters
----------
faces: :class:`numpy.ndarray`
The faces to normalize
The batch of faces to normalize
Returns
-------
@@ -468,10 +772,10 @@ class Aligner(Extractor): # pylint:disable=abstract-method
"""
if self._normalize_method is None:
return faces
logger.trace("Normalizing faces") # type:ignore
logger.trace("Normalizing faces") # type: ignore[attr-defined]
meth = getattr(self, f"_normalize_{self._normalize_method.lower()}")
faces = [meth(face) for face in faces]
logger.trace("Normalized faces") # type:ignore
faces = np.array([meth(face) for face in faces])
logger.trace("Normalized faces") # type: ignore[attr-defined]
return faces
@classmethod
@@ -480,13 +784,13 @@ class Aligner(Extractor): # pylint:disable=abstract-method
Parameters
----------
faces: :class:`numpy.ndarray`
The faces to normalize
face: :class:`numpy.ndarray`
The face to normalize
Returns
-------
:class:`numpy.ndarray`
The normalized faces
The normalized face
"""
face = face / 255.0
for chan in range(3):
@@ -501,13 +805,13 @@ class Aligner(Extractor): # pylint:disable=abstract-method
Parameters
----------
faces: :class:`numpy.ndarray`
The faces to normalize
face: :class:`numpy.ndarray`
The face to normalize
Returns
-------
:class:`numpy.ndarray`
The normalized faces
The normalized face
"""
for chan in range(3):
face[:, :, chan] = cv2.equalizeHist(face[:, :, chan])
@@ -519,209 +823,15 @@ class Aligner(Extractor): # pylint:disable=abstract-method
Parameters
----------
faces: :class:`numpy.ndarray`
The faces to normalize
face: :class:`numpy.ndarray`
The face to normalize
Returns
-------
:class:`numpy.ndarray`
The normalized faces
The normalized face
"""
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4, 4))
for chan in range(3):
face[:, :, chan] = clahe.apply(face[:, :, chan])
return face
class AlignedFilter():
    """ Applies filters on the output of the aligner

    Parameters
    ----------
    feature_filter: bool
        ``True`` to enable filter to check relative position of eyes/eyebrows and mouth. ``False``
        to disable.
    min_scale: float
        Filters out faces that have been aligned at below this value as a multiplier of the
        minimum frame dimension. Set to ``0`` for off.
    max_scale: float
        Filters out faces that have been aligned at above this value as a multiplier of the
        minimum frame dimension. Set to ``0`` for off.
    distance: float
        Filters out faces that are further than this distance from an "average" face. Set to
        ``0`` for off.
    roll: float
        Filters out faces with a roll value outside of 0 +/- the value given here. Set to ``0``
        for off.
    save_output: bool
        ``True`` if the filtered faces should be kept as they are being saved. ``False`` if they
        should be deleted
    disable: bool, Optional
        ``True`` to disable the filter regardless of config options. Default: ``False``
    """
    def __init__(self,
                 feature_filter: bool,
                 min_scale: float,
                 max_scale: float,
                 distance: float,
                 roll: float,
                 save_output: bool,
                 disable: bool = False) -> None:
        logger.debug("Initializing %s: (feature_filter: %s, min_scale: %s, max_scale: %s, "
                     "distance: %s, roll: %s, save_output: %s, disable: %s)",
                     self.__class__.__name__, feature_filter, min_scale, max_scale, distance, roll,
                     save_output, disable)
        self._features = feature_filter
        self._min_scale = min_scale
        self._max_scale = max_scale
        # Config value appears to be a percentage; stored as a fraction for comparison against
        # the aligned face's average_distance - TODO confirm against config documentation
        self._distance = distance / 100.
        self._roll = roll
        self._save_output = save_output
        # Only active when not explicitly disabled and at least one individual test is enabled
        self._active = not disable and (feature_filter or
                                        max_scale > 0.0 or
                                        min_scale > 0.0 or
                                        distance > 0.0 or
                                        roll > 0.0)
        # Running totals of faces removed by each test. Keys also form the suffix of the
        # sub-folder name that filtered faces are saved to
        self._counts: Dict[str, int] = dict(features=0,
                                            min_scale=0,
                                            max_scale=0,
                                            distance=0,
                                            roll=0)
        logger.debug("Initialized %s", self.__class__.__name__)

    def __call__(self, faces: List["DetectedFace"], minimum_dimension: int
                 ) -> Tuple[List["DetectedFace"], List[Optional[str]]]:
        """ Apply the filter to the incoming batch

        Parameters
        ----------
        faces: list
            List of detected face objects to filter out on size
        minimum_dimension: int
            The minimum (height, width) of the original frame

        Returns
        -------
        detected_faces: list
            The filtered list of detected face objects, if saving filtered faces has not been
            selected or the full list of detected faces
        sub_folders: list
            List of ``Nones`` if saving filtered faces has not been selected or list of ``Nones``
            and sub folder names corresponding the filtered face location
        """
        sub_folders: List[Optional[str]] = [None for _ in range(len(faces))]
        if not self._active:
            return faces, sub_folders
        retval: List["DetectedFace"] = []
        for idx, face in enumerate(faces):
            aligned = AlignedFace(landmarks=face.landmarks_xy, centering="face")
            # Each test short-circuits: the first failed test decides the face's fate
            if self._features and aligned.relative_eye_mouth_position < 0.0:
                self._counts["features"] += 1
                if self._save_output:
                    retval.append(face)
                    sub_folders[idx] = "_align_filt_features"
                continue
            min_max = self._scale_test(aligned, minimum_dimension)
            if min_max in ("min", "max"):
                self._counts[f"{min_max}_scale"] += 1
                if self._save_output:
                    retval.append(face)
                    sub_folders[idx] = f"_align_filt_{min_max}_scale"
                continue
            if 0.0 < self._distance < aligned.average_distance:
                self._counts["distance"] += 1
                if self._save_output:
                    retval.append(face)
                    sub_folders[idx] = "_align_filt_distance"
                continue
            if self._roll != 0.0 and not 0.0 < abs(aligned.pose.roll) < self._roll:
                self._counts["roll"] += 1
                if self._save_output:
                    retval.append(face)
                    sub_folders[idx] = "_align_filt_roll"
                continue
            retval.append(face)
        return retval, sub_folders

    def _scale_test(self,
                    face: "AlignedFace",
                    minimum_dimension: int) -> Optional[Literal["min", "max"]]:
        """ Test if a face is below or above the min/max size thresholds. Returns as soon as a test
        fails.

        Parameters
        ----------
        face: :class:`~lib.aligned.AlignedFace`
            The aligned face to test the original size of.
        minimum_dimension: int
            The minimum (height, width) of the original frame

        Returns
        -------
        "min", "max" or ``None``
            Returns min or max if the face failed the minimum or maximum test respectively.
            ``None`` if all tests passed
        """
        if self._min_scale <= 0.0 and self._max_scale <= 0.0:
            return None
        roi = face.original_roi.astype("int64")
        # Euclidean distance between the first two ROI corners approximates the size of the
        # aligned face within the original frame
        size = ((roi[1][0] - roi[0][0]) ** 2 + (roi[1][1] - roi[0][1]) ** 2) ** 0.5
        if self._min_scale > 0.0 and size < minimum_dimension * self._min_scale:
            return "min"
        if self._max_scale > 0.0 and size > minimum_dimension * self._max_scale:
            return "max"
        return None

    def filtered_mask(self,
                      faces: List["DetectedFace"],
                      minimum_dimension: List[int]) -> List[bool]:
        """ Obtain a list of boolean values for the given faces indicating whether they pass the
        filter test.

        Parameters
        ----------
        faces: list
            List of detected face objects to test the filters for
        minimum_dimension: list
            The minimum (height, width) of the original frames that the faces come from

        Returns
        -------
        list
            List of bools corresponding to any of the input DetectedFace objects that passed a
            test. ``False`` the face passed the test. ``True`` it failed
        """
        retval = [True for _ in range(len(faces))]
        for idx, (face, dim) in enumerate(zip(faces, minimum_dimension)):
            # NOTE(review): no explicit centering given here (unlike __call__ which passes
            # "face") so the AlignedFace default is used - confirm this is intentional
            aligned = AlignedFace(landmarks=face.landmarks_xy)
            if self._features and aligned.relative_eye_mouth_position < 0.0:
                continue
            if self._scale_test(aligned, dim) is not None:
                continue
            if 0.0 < self._distance < aligned.average_distance:
                continue
            if self._roll != 0.0 and not 0.0 < abs(aligned.pose.roll) < self._roll:
                continue
            retval[idx] = False
        return retval

    def output_counts(self):
        """ Output the counts of filtered items """
        if not self._active:
            return
        # Report each test's configured threshold alongside the number of faces it removed.
        # NOTE(review): "distance" reports the stored fraction (config value / 100)
        counts = [f"{key} ({getattr(self, f'_{key}'):.2f}): {count}"
                  for key, count in self._counts.items()
                  if count > 0]
        if counts:
            logger.info("Aligner filtered: (%s)", ", ".join(counts))

View File

@@ -0,0 +1,493 @@
#!/usr/bin/env python3
""" Processing methods for aligner plugins """
import logging
import sys
from threading import Lock
from typing import Dict, List, Optional, Tuple, TYPE_CHECKING, Union
import numpy as np
from lib.align import AlignedFace
if sys.version_info < (3, 8):
from typing_extensions import Literal
else:
from typing import Literal
if TYPE_CHECKING:
from lib.align import DetectedFace
from .aligner import AlignerBatch
from lib.align.aligned_face import CenteringType
logger = logging.getLogger(__name__)
class AlignedFilter():
    """ Applies filters on the output of the aligner

    Parameters
    ----------
    feature_filter: bool
        ``True`` to enable filter to check relative position of eyes/eyebrows and mouth. ``False``
        to disable.
    min_scale: float
        Filters out faces that have been aligned at below this value as a multiplier of the
        minimum frame dimension. Set to ``0`` for off.
    max_scale: float
        Filters out faces that have been aligned at above this value as a multiplier of the
        minimum frame dimension. Set to ``0`` for off.
    distance: float
        Filters out faces that are further than this distance from an "average" face. Set to
        ``0`` for off.
    roll: float
        Filters out faces with a roll value outside of 0 +/- the value given here. Set to ``0``
        for off.
    save_output: bool
        ``True`` if the filtered faces should be kept as they are being saved. ``False`` if they
        should be deleted
    disable: bool, Optional
        ``True`` to disable the filter regardless of config options. Default: ``False``
    """
    def __init__(self,
                 feature_filter: bool,
                 min_scale: float,
                 max_scale: float,
                 distance: float,
                 roll: float,
                 save_output: bool,
                 disable: bool = False) -> None:
        logger.debug("Initializing %s: (feature_filter: %s, min_scale: %s, max_scale: %s, "
                     "distance: %s, roll: %s, save_output: %s, disable: %s)",
                     self.__class__.__name__, feature_filter, min_scale, max_scale, distance, roll,
                     save_output, disable)
        self._features = feature_filter
        self._min_scale = min_scale
        self._max_scale = max_scale
        # Config value appears to be a percentage; stored as a fraction for comparison against
        # the aligned face's average_distance - TODO confirm against config documentation
        self._distance = distance / 100.
        self._roll = roll
        self._save_output = save_output
        # Only active when not explicitly disabled and at least one individual test is enabled
        self._active = not disable and (feature_filter or
                                        max_scale > 0.0 or
                                        min_scale > 0.0 or
                                        distance > 0.0 or
                                        roll > 0.0)
        # Running totals of faces removed by each test. Keys also form the suffix of the
        # sub-folder name used by :func:`_handle_filtered`
        self._counts: Dict[str, int] = dict(features=0,
                                            min_scale=0,
                                            max_scale=0,
                                            distance=0,
                                            roll=0)
        logger.debug("Initialized %s", self.__class__.__name__)

    def _scale_test(self,
                    face: "AlignedFace",
                    minimum_dimension: int) -> Optional[Literal["min", "max"]]:
        """ Test if a face is below or above the min/max size thresholds. Returns as soon as a test
        fails.

        Parameters
        ----------
        face: :class:`~lib.aligned.AlignedFace`
            The aligned face to test the original size of.
        minimum_dimension: int
            The minimum (height, width) of the original frame

        Returns
        -------
        "min", "max" or ``None``
            Returns min or max if the face failed the minimum or maximum test respectively.
            ``None`` if all tests passed
        """
        if self._min_scale <= 0.0 and self._max_scale <= 0.0:
            return None
        roi = face.original_roi.astype("int64")
        # Euclidean distance between the first two ROI corners approximates the size of the
        # aligned face within the original frame
        size = ((roi[1][0] - roi[0][0]) ** 2 + (roi[1][1] - roi[0][1]) ** 2) ** 0.5
        if self._min_scale > 0.0 and size < minimum_dimension * self._min_scale:
            return "min"
        if self._max_scale > 0.0 and size > minimum_dimension * self._max_scale:
            return "max"
        return None

    def _handle_filtered(self,
                         key: str,
                         face: "DetectedFace",
                         faces: List["DetectedFace"],
                         sub_folders: List[Optional[str]],
                         sub_folder_index: int) -> None:
        """ Add the filtered item to the filter counts.

        If config option `save_filtered` has been enabled then add the face to the output faces
        list and update the sub_folder list with the correct name for this face.

        Parameters
        ----------
        key: str
            The key to use for the filter counts dictionary and the sub_folder name
        face: :class:`~lib.align.detected_face.DetectedFace`
            The detected face object to be filtered out
        faces: list
            The list of faces that will be returned from the filter
        sub_folders: list
            List of sub folder names corresponding to the list of detected face objects
        sub_folder_index: int
            The index within the sub-folder list that the filtered face belongs to
        """
        self._counts[key] += 1
        if not self._save_output:
            return
        # Face is kept in the output, but routed to a filter-specific sub-folder for saving
        faces.append(face)
        sub_folders[sub_folder_index] = f"_align_filt_{key}"

    def __call__(self, faces: List["DetectedFace"], minimum_dimension: int
                 ) -> Tuple[List["DetectedFace"], List[Optional[str]]]:
        """ Apply the filter to the incoming batch

        Parameters
        ----------
        faces: list
            List of detected face objects to filter out on size
        minimum_dimension: int
            The minimum (height, width) of the original frame

        Returns
        -------
        detected_faces: list
            The filtered list of detected face objects, if saving filtered faces has not been
            selected or the full list of detected faces
        sub_folders: list
            List of ``Nones`` if saving filtered faces has not been selected or list of ``Nones``
            and sub folder names corresponding the filtered face location
        """
        sub_folders: List[Optional[str]] = [None for _ in range(len(faces))]
        if not self._active:
            return faces, sub_folders
        retval: List["DetectedFace"] = []
        for idx, face in enumerate(faces):
            aligned = AlignedFace(landmarks=face.landmarks_xy, centering="face")
            # Each test short-circuits: the first failed test decides the face's fate
            if self._features and aligned.relative_eye_mouth_position < 0.0:
                self._handle_filtered("features", face, retval, sub_folders, idx)
                continue
            min_max = self._scale_test(aligned, minimum_dimension)
            if min_max in ("min", "max"):
                self._handle_filtered(f"{min_max}_scale", face, retval, sub_folders, idx)
                continue
            if 0.0 < self._distance < aligned.average_distance:
                self._handle_filtered("distance", face, retval, sub_folders, idx)
                continue
            if self._roll != 0.0 and not 0.0 < abs(aligned.pose.roll) < self._roll:
                self._handle_filtered("roll", face, retval, sub_folders, idx)
                continue
            retval.append(face)
        return retval, sub_folders

    def filtered_mask(self,
                      batch: "AlignerBatch",
                      skip: Optional[Union[np.ndarray, List[int]]] = None) -> np.ndarray:
        """ Obtain a list of boolean values for the given batch indicating whether they pass the
        filter test.

        Parameters
        ----------
        batch: :class:`AlignerBatch`
            The batch of face to obtain masks for
        skip: list or :class:`numpy.ndarray`, optional
            List or 1D numpy array of indices indicating faces that have already been filter
            masked and so should not be filtered again. Values in these index positions will be
            returned as ``True``

        Returns
        -------
        :class:`numpy.ndarray`
            Boolean mask array corresponding to any of the input DetectedFace objects that passed a
            test. ``False`` the face passed the test. ``True`` it failed
        """
        skip = [] if skip is None else skip
        retval = np.ones((len(batch.detected_faces), ), dtype="bool")
        for idx, (landmarks, image) in enumerate(zip(batch.landmarks, batch.image)):
            if idx in skip:
                continue
            # NOTE(review): no explicit centering given here (unlike __call__ which passes
            # "face") so the AlignedFace default is used - confirm this is intentional
            face = AlignedFace(landmarks)
            if self._features and face.relative_eye_mouth_position < 0.0:
                continue
            if self._scale_test(face, min(image.shape[:2])) is not None:
                continue
            if 0.0 < self._distance < face.average_distance:
                continue
            if self._roll != 0.0 and not 0.0 < abs(face.pose.roll) < self._roll:
                continue
            retval[idx] = False
        return retval

    def output_counts(self):
        """ Output the counts of filtered items """
        if not self._active:
            return
        # Report each test's configured threshold alongside the number of faces it removed.
        # NOTE(review): "distance" reports the stored fraction (config value / 100)
        counts = [f"{key} ({getattr(self, f'_{key}'):.2f}): {count}"
                  for key, count in self._counts.items()
                  if count > 0]
        if counts:
            logger.info("Aligner filtered: (%s)", ", ".join(counts))
class ReAlign():
    """ Holds data and methods for 2nd pass re-aligns

    Parameters
    ----------
    active: bool
        ``True`` if re-alignment has been requested otherwise ``False``
    do_refeeds: bool
        ``True`` if re-feeds should be re-aligned, ``False`` if just the final output of the
        re-feeds should be aligned
    do_filter: bool
        ``True`` if aligner filtered out faces should not be re-aligned. ``False`` if all faces
        should be re-aligned
    """
    def __init__(self, active: bool, do_refeeds: bool, do_filter: bool) -> None:
        logger.debug("Initializing %s: (active: %s, do_refeeds: %s, do_filter: %s)",
                     self.__class__.__name__, active, do_refeeds, do_filter)
        self._active = active
        self._do_refeeds = do_refeeds
        self._do_filter = do_filter
        # Centering/size are placeholders until the plugin model is loaded
        # (see :func:`set_input_size_and_centering`)
        self._centering: "CenteringType" = "face"
        self._size = 0
        # Locks guard the tracking and queueing dicts/lists as they are accessed from
        # multiple threads
        self._tracked_lock = Lock()
        self._tracked_batchs: Dict[int, Dict[Literal["filtered_landmarks"], List[np.ndarray]]] = {}
        # TODO. Probably does not need to be a list, just alignerbatch
        self._queue_lock = Lock()
        self._queued: List["AlignerBatch"] = []
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def active(self) -> bool:
        """bool: ``True`` if re_aligns have been selected otherwise ``False``"""
        return self._active

    @property
    def do_refeeds(self) -> bool:
        """bool: ``True`` if re-aligning is active and re-aligning re-feeds has been selected
        otherwise ``False``"""
        return self._active and self._do_refeeds

    @property
    def do_filter(self) -> bool:
        """bool: ``True`` if re-aligning is active and faces which failed the aligner filter test
        should not be re-aligned otherwise ``False``"""
        return self._active and self._do_filter

    @property
    def items_queued(self) -> bool:
        """bool: ``True`` if re-align is active and items are queued for a 2nd pass otherwise
        ``False`` """
        with self._queue_lock:
            return self._active and bool(self._queued)

    @property
    def items_tracked(self) -> bool:
        """bool: ``True`` if items exist in the tracker so still need to be processed """
        with self._tracked_lock:
            return bool(self._tracked_batchs)

    def set_input_size_and_centering(self, input_size: int, centering: "CenteringType") -> None:
        """ Set the input size of the loaded plugin once the model has been loaded

        Parameters
        ----------
        input_size: int
            The input size, in pixels, of the aligner plugin
        centering: ["face", "head" or "legacy"]
            The centering to align the image at for re-aligning
        """
        logger.debug("input_size: %s, centering: %s", input_size, centering)
        self._size = input_size
        self._centering = centering

    def track_batch(self, batch_id: int) -> None:
        """ Add newly seen batch id from the aligner to the batch tracker, so that we can keep
        track of whether there are still batches to be processed when the aligner hits 'EOF'

        Parameters
        ----------
        batch_id: int
            The batch id to add to batch tracking
        """
        if not self._active:
            return
        logger.trace("Tracking batch id: %s", batch_id)  # type: ignore[attr-defined]
        with self._tracked_lock:
            self._tracked_batchs[batch_id] = {}

    def untrack_batch(self, batch_id: int) -> None:
        """ Remove the tracked batch from the tracker once the batch has been fully processed

        Parameters
        ----------
        batch_id: int
            The batch id to remove from batch tracking
        """
        if not self._active:
            return
        logger.trace("Removing batch id from tracking: %s", batch_id)  # type: ignore[attr-defined]
        with self._tracked_lock:
            del self._tracked_batchs[batch_id]

    def add_batch(self, batch: "AlignerBatch") -> None:
        """ Add first pass alignments to the queue for picking up for re-alignment, update their
        :attr:`second_pass` attribute to ``True`` and clear attributes not required.

        Parameters
        ----------
        batch: :class:`AlignerBatch`
            aligner batch to perform re-alignment on
        """
        with self._queue_lock:
            logger.trace("Queueing for second pass: %s", batch)  # type: ignore[attr-defined]
            batch.second_pass = True
            # First-pass data is no longer needed and would otherwise be carried around
            batch.feed = np.array([])
            batch.prediction = np.array([])
            batch.refeeds = []
            batch.data = []
            self._queued.append(batch)

    def get_batch(self) -> "AlignerBatch":
        """ Retrieve the next batch currently queued for re-alignment

        Returns
        -------
        :class:`AlignerBatch`
            The next :class:`AlignerBatch` for re-alignment
        """
        with self._queue_lock:
            retval = self._queued.pop(0)
            logger.trace("Retrieving for second pass: %s",  # type: ignore[attr-defined]
                         retval.filename)
        return retval

    def process_batch(self, batch: "AlignerBatch") -> List[np.ndarray]:
        """ Pre process a batch object for re-aligning through the aligner.

        Parameters
        ----------
        batch: :class:`AlignerBatch`
            aligner batch to perform pre-processing on

        Returns
        -------
        list
            List of UINT8 aligned faces batch for each selected refeed
        """
        logger.trace("Processing batch: %s, landmarks: %s",  # type: ignore[attr-defined]
                     batch.filename, [b.shape for b in batch.landmarks])
        retval: List[np.ndarray] = []
        filtered_landmarks: List[np.ndarray] = []
        for landmarks, masks in zip(batch.landmarks, batch.second_pass_masks):
            if not np.all(masks):  # At least one face has not already been filtered
                aligned_faces = [AlignedFace(lms,
                                             image=image,
                                             size=self._size,
                                             centering=self._centering)
                                 for image, lms, msk in zip(batch.image, landmarks, masks)
                                 if not msk]
                faces = np.array([aligned.face for aligned in aligned_faces
                                  if aligned.face is not None])
                retval.append(faces)
                batch.data.append({"aligned_faces": aligned_faces})
            if np.any(masks):
                # Track the original landmarks for re-insertion on the other side
                filtered_landmarks.append(landmarks[masks])
        with self._tracked_lock:
            self._tracked_batchs[batch.batch_id] = {"filtered_landmarks": filtered_landmarks}
        batch.landmarks = np.array([])  # Clear the old landmarks
        return retval

    def _transform_to_frame(self, batch: "AlignerBatch") -> np.ndarray:
        """ Transform the predicted landmarks from the aligned face image back into frame
        co-ordinates

        Parameters
        ----------
        batch: :class:`AlignerBatch`
            An aligner batch containing the aligned faces in the data field and the face
            co-ordinate landmarks in the landmarks field

        Returns
        -------
        :class:`numpy.ndarray`
            The landmarks transformed to frame space
        """
        faces: List[AlignedFace] = batch.data[0]["aligned_faces"]
        retval = np.array([aligned.transform_points(landmarks, invert=True)
                           for landmarks, aligned in zip(batch.landmarks, faces)])
        logger.trace("Transformed points: original max: %s, "  # type: ignore[attr-defined]
                     "new max: %s", batch.landmarks.max(), retval.max())
        return retval

    def _re_insert_filtered(self, batch: "AlignerBatch", masks: np.ndarray) -> np.ndarray:
        """ Re-insert landmarks that were filtered out from the re-align process back into the
        landmark results

        Parameters
        ----------
        batch: :class:`AlignerBatch`
            An aligner batch containing the aligned faces in the data field and the landmarks in
            frame space in the landmarks field
        masks: np.ndarray
            The original filter masks for this batch

        Returns
        -------
        :class:`numpy.ndarray`
            The full batch of landmarks with filtered out values re-inserted
        """
        if not np.any(masks):
            logger.trace("No landmarks to re-insert: %s", masks)  # type: ignore[attr-defined]
            return batch.landmarks

        with self._tracked_lock:
            filtered = self._tracked_batchs[batch.batch_id]["filtered_landmarks"].pop(0)
        if np.all(masks):
            # Every face was filtered: the stored first-pass landmarks are the full result
            retval = filtered
        else:
            # Interleave re-aligned and stored landmarks back into their original positions
            retval = np.empty((masks.shape[0], *filtered.shape[1:]), dtype=filtered.dtype)
            retval[~masks] = batch.landmarks
            retval[masks] = filtered

        logger.trace("Filtered re-inserted: old shape: %s, "  # type: ignore[attr-defined]
                     "new shape: %s", batch.landmarks.shape, retval.shape)
        return retval

    def process_output(self, subbatches: List["AlignerBatch"], batch_masks: np.ndarray) -> None:
        """ Process the output from the re-align pass.

        - Transform landmarks from aligned face space to face space
        - Re-insert faces that were filtered out from the re-align process back into the
        landmarks list

        Parameters
        ----------
        subbatches: list
            List of sub-batch results for each re-aligned re-feed performed
        batch_masks: :class:`numpy.ndarray`
            The original re-feed filter masks from the first pass
        """
        for batch, masks in zip(subbatches, batch_masks):
            if not np.all(masks):
                batch.landmarks = self._transform_to_frame(batch)
            batch.landmarks = self._re_insert_filtered(batch, masks)

View File

@@ -51,12 +51,28 @@ class Align(Aligner):
self.vram = 0 # Doesn't use GPU
self.vram_per_batch = 0
self.batchsize = 1
self.realign_centering = "legacy"
def init_model(self) -> None:
""" Initialize CV2 DNN Detector Model"""
self.model = cv2.dnn.readNetFromTensorflow(self.model_path) # pylint: disable=no-member
self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU) # pylint: disable=no-member
def faces_to_feed(self, faces: np.ndarray) -> np.ndarray:
""" Convert a batch of face images from UINT8 (0-255) to fp32 (0.0-255.0)
Parameters
----------
faces: :class:`numpy.ndarray`
The batch of faces in UINT8 format
Returns
-------
        :class:`numpy.ndarray`
The batch of faces as fp32
"""
return faces.astype("float32").transpose((0, 3, 1, 2))
def process_input(self, batch: BatchType) -> None:
""" Compile the detected faces for prediction
@@ -71,10 +87,9 @@ class Align(Aligner):
The batch item with the :attr:`feed` populated and any required :attr:`data` added
"""
assert isinstance(batch, AlignerBatch)
faces, roi, offsets = self.align_image(batch)
faces = self._normalize_faces(faces)
lfaces, roi, offsets = self.align_image(batch)
batch.feed = np.array(lfaces)[..., :3]
batch.data.append(dict(roi=roi, offsets=offsets))
batch.feed = np.array(faces, dtype="float32")[..., :3].transpose((0, 3, 1, 2))
def _get_box_and_offset(self, face: "DetectedFace") -> Tuple[List[int], int]:
"""Obtain the bounding box and offset from a detected face.
@@ -274,8 +289,7 @@ class Align(Aligner):
assert isinstance(batch, AlignerBatch)
self.get_pts_from_predict(batch)
@classmethod
def get_pts_from_predict(cls, batch: AlignerBatch):
def get_pts_from_predict(self, batch: AlignerBatch):
""" Get points from predictor and populates the :attr:`landmarks` property
Parameters
@@ -284,13 +298,16 @@ class Align(Aligner):
The current batch from the model with :attr:`predictions` populated
"""
landmarks = []
for prediction, roi, offset in zip(batch.prediction,
batch.data[0]["roi"],
batch.data[0]["offsets"]):
points = np.reshape(prediction, (-1, 2))
points *= (roi[2] - roi[0])
points[:, 0] += (roi[0] - offset[0])
points[:, 1] += (roi[1] - offset[1])
landmarks.append(points)
batch.landmarks = np.array(landmarks)
if batch.second_pass:
batch.landmarks = batch.prediction.reshape(self.batchsize, -1, 2) * self.input_size
else:
for prediction, roi, offset in zip(batch.prediction,
batch.data[0]["roi"],
batch.data[0]["offsets"]):
points = np.reshape(prediction, (-1, 2))
points *= (roi[2] - roi[0])
points[:, 0] += (roi[0] - offset[0])
points[:, 1] += (roi[1] - offset[1])
landmarks.append(points)
batch.landmarks = np.array(landmarks)
logger.trace("Predicted Landmarks: %s", batch.landmarks) # type:ignore

View File

@@ -31,6 +31,7 @@ class Align(Aligner):
self.vram = 2240
self.vram_warnings = 512 # Will run at this with warnings
self.vram_per_batch = 64
self.realign_centering = "head"
self.batchsize: int = self.config["batch-size"]
self.reference_scale = 200. / 195.
@@ -48,6 +49,21 @@ class Align(Aligner):
placeholder = np.zeros(placeholder_shape, dtype="float32")
self.model.predict(placeholder)
def faces_to_feed(self, faces: np.ndarray) -> np.ndarray:
""" Convert a batch of face images from UINT8 (0-255) to fp32 (0.0-1.0)
Parameters
----------
faces: :class:`numpy.ndarray`
The batch of faces in UINT8 format
Returns
-------
        :class:`numpy.ndarray`
The batch of faces as fp32 in 0.0 to 1.0 range
"""
return faces.astype("float32") / 255.
def process_input(self, batch: BatchType) -> None:
""" Compile the detected faces for prediction
@@ -59,11 +75,9 @@ class Align(Aligner):
assert isinstance(batch, AlignerBatch)
logger.trace("Aligning faces around center") # type:ignore
center_scale = self.get_center_scale(batch.detected_faces)
faces = self.crop(batch, center_scale)
logger.trace("Aligned image around center") # type:ignore
faces = self._normalize_faces(faces)
batch.feed = np.array(self.crop(batch, center_scale))[..., :3]
batch.data.append(dict(center_scale=center_scale))
batch.feed = np.array(faces, dtype="float32")[..., :3] / 255.0
logger.trace("Aligned image around center") # type:ignore
def get_center_scale(self, detected_faces: List["DetectedFace"]) -> np.ndarray:
""" Get the center and set scale of bounding box
@@ -115,7 +129,7 @@ class Align(Aligner):
new_dim = (bottom_right_height - top_left_height,
bottom_right_width - top_left_width,
3 if image.ndim > 2 else 1)
new_img = np.empty(new_dim, dtype=np.uint8)
new_img = np.zeros(new_dim, dtype=np.uint8)
new_x = slice(max(0, -top_left_width),
min(bottom_right_width, image.shape[1]) - top_left_width)
@@ -256,7 +270,10 @@ class Align(Aligner):
subpixel_landmarks[:, :, 0] = indices[1] + np.sign(x_subpixel_shift) * 0.25 + 0.5
subpixel_landmarks[:, :, 1] = indices[0] + np.sign(y_subpixel_shift) * 0.25 + 0.5
batch.landmarks = self.transform(subpixel_landmarks,
batch.data[0]["center_scale"],
resolution)
if batch.second_pass: # Transformation handled by plugin parent for re-aligned faces
batch.landmarks = subpixel_landmarks[..., :2] * 4.
else:
batch.landmarks = self.transform(subpixel_landmarks,
batch.data[0]["center_scale"],
resolution)
logger.trace("Obtained points from prediction: %s", batch.landmarks) # type:ignore

View File

@@ -90,6 +90,9 @@ class Extractor():
re_feed: int
The number of times to re-feed a slightly adjusted bounding box into the aligner.
Default: `0`
re_align: bool, optional
``True`` to obtain landmarks by passing the initially aligned face back through the
aligner. Default ``False``
disable_filter: bool, optional
Disable all aligner filters regardless of config option. Default: ``False``
@@ -111,13 +114,14 @@ class Extractor():
min_size: int = 0,
normalize_method: Optional[Literal["none", "clahe", "hist", "mean"]] = None,
re_feed: int = 0,
re_align: bool = False,
disable_filter: bool = False) -> None:
logger.debug("Initializing %s: (detector: %s, aligner: %s, masker: %s, recognition: %s, "
"configfile: %s, multiprocess: %s, exclude_gpus: %s, rotate_images: %s, "
"min_size: %s, normalize_method: %s, re_feed: %s, disable_filter: %s, )",
self.__class__.__name__, detector, aligner, masker, recognition, configfile,
multiprocess, exclude_gpus, rotate_images, min_size, normalize_method,
re_feed, disable_filter)
"min_size: %s, normalize_method: %s, re_feed: %s, re_align: %s, "
"disable_filter: %s)", self.__class__.__name__, detector, aligner, masker,
recognition, configfile, multiprocess, exclude_gpus, rotate_images, min_size,
normalize_method, re_feed, re_align, disable_filter)
self._instance = _get_instance()
maskers = [cast(Optional[str],
masker)] if not isinstance(masker, list) else cast(List[Optional[str]], masker)
@@ -134,6 +138,7 @@ class Extractor():
configfile,
normalize_method,
re_feed,
re_align,
disable_filter)
self._recognition = self._load_recognition(recognition, configfile)
self._mask = [self._load_mask(mask, configfile) for mask in maskers]
@@ -581,6 +586,7 @@ class Extractor():
configfile: Optional[str],
normalize_method: Optional[Literal["none", "clahe", "hist", "mean"]],
re_feed: int,
re_align: bool,
disable_filter: bool) -> Optional["Aligner"]:
""" Set global arguments and load aligner plugin
@@ -594,6 +600,9 @@ class Extractor():
Optional normalization method to use
re_feed: int
The number of times to adjust the image and re-feed to get an average score
re_align: bool
``True`` to obtain landmarks by passing the initially aligned face back through the
aligner.
disable_filter: bool
Disable all aligner filters regardless of config option
@@ -610,6 +619,7 @@ class Extractor():
configfile=configfile,
normalize_method=normalize_method,
re_feed=re_feed,
re_align=re_align,
disable_filter=disable_filter,
instance=self._instance)
return plugin

View File

@@ -67,7 +67,8 @@ class Extract(): # pylint:disable=too-few-public-methods
rotate_images=self._args.rotate_images,
min_size=self._args.min_size,
normalize_method=normalization,
re_feed=self._args.re_feed)
re_feed=self._args.re_feed,
re_align=self._args.re_align)
self._filter = Filter(self._args.ref_threshold,
self._args.filter,
self._args.nfilter,

View File

@@ -23,6 +23,8 @@ ignore_missing_imports = True
ignore_missing_imports = True
[mypy-numexpr.*]
ignore_missing_imports = True
[mypy-numpy.core._multiarray_umath.*]
ignore_missing_imports = True
[mypy-pexpect.*]
ignore_missing_imports = True
[mypy-PIL.*]