Minor fixups and linting
torzdf committed Apr 3, 2024
1 parent 9839014 commit 70c064c
Showing 32 changed files with 347 additions and 338 deletions.
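
The bulk of the diff applies two mechanical linting patterns. The snippet below is an illustrative sketch only, with made-up values rather than code lifted from the repository: dict() keyword-argument calls are rewritten as dict literals, and bare "# type:ignore" comments are narrowed to a single mypy error code so unrelated type errors are no longer masked.

import logging

logger = logging.getLogger(__name__)
filename = "example.fsw"  # placeholder value for this sketch

# Before: dict() constructor with keyword arguments and a blanket ignore
task = dict(filename=filename,
            is_project=True)
logger.debug("task: %s", task)  # type:ignore

# After: a dict literal (what "use a literal" lint checks prefer) and an
# ignore scoped to one mypy error code
task = {"filename": filename,
        "is_project": True}
logger.debug("task: %s", task)  # type:ignore[attr-defined]
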
2 changes: 1 addition & 1 deletion lib/gui/display_command.py
@@ -350,7 +350,6 @@ def display_item_process(self) -> None:
             self.after(1000, self.display_item_process)
             return
 
-        logger.debug("Adding graph")
         existing = list(self.subnotebook_get_titles_ids().keys())
 
         loss_keys = self.display_item.get_loss_keys(Session.session_ids[-1])
@@ -367,6 +366,7 @@ def display_item_process(self) -> None:
             tabname = loss_key.replace("_", " ").title()
             if tabname in existing:
                 continue
+            logger.debug("Adding graph '%s'", tabname)
 
             display_keys = [key for key in loss_keys if key.startswith(loss_key)]
             data = Calculations(session_id=Session.session_ids[-1],
6 changes: 3 additions & 3 deletions lib/gui/options.py
@@ -129,9 +129,9 @@ def process_options(self, command_options, command):
                                                helptext=opt["help"],
                                                track_modified=True,
                                                command=command)
-            gui_options[title] = dict(cpanel_option=cpanel_option,
-                                      opts=opt["opts"],
-                                      nargs=opt.get("nargs", None))
+            gui_options[title] = {"cpanel_option": cpanel_option,
+                                  "opts": opt["opts"],
+                                  "nargs": opt.get("nargs", None)}
         logger.trace("Processed: %s", gui_options)
         return gui_options

12 changes: 6 additions & 6 deletions lib/gui/project.py
@@ -90,8 +90,8 @@ def _stored_tab_name(self):
     def _selected_to_choices(self):
         """ dict: The selected value and valid choices for multi-option, radio or combo options.
         """
-        valid_choices = {cmd: {opt: dict(choices=val["cpanel_option"].choices,
-                                         is_multi=val["cpanel_option"].is_multi_option)
+        valid_choices = {cmd: {opt: {"choices": val["cpanel_option"].choices,
+                                     "is_multi": val["cpanel_option"].is_multi_option}
                                for opt, val in data.items()
                                if isinstance(val, dict) and "cpanel_option" in val
                                and val["cpanel_option"].choices is not None}
@@ -600,9 +600,9 @@ def _add_task(self, command):
             The tab that pertains to the currently active task
         """
-        self._tasks[command] = dict(filename=self._filename,
-                                    options=self._options,
-                                    is_project=self._is_project)
+        self._tasks[command] = {"filename": self._filename,
+                                "options": self._options,
+                                "is_project": self._is_project}
 
     def clear_tasks(self):
         """ Clears all of the stored tasks.
@@ -629,7 +629,7 @@ def add_project_task(self, filename, command, options):
         options: dict
             The options for this task loaded from the project
         """
-        self._tasks[command] = dict(filename=filename, options=options, is_project=True)
+        self._tasks[command] = {"filename": filename, "options": options, "is_project": True}
 
     def _set_active_task(self, command=None):
         """ Set the active :attr:`_filename` and :attr:`_options` to currently selected tab's
4 changes: 2 additions & 2 deletions lib/gui/theme.py
@@ -370,7 +370,7 @@ def scrollbar(self, key, trough_color, border_color, control_backgrounds, contro
                 ("disabled", images[f"img_{lookup}_disabled"]),
                 ("pressed !disabled", images[f"img_{lookup}_active"]),
                 ("active !disabled", images[f"img_{lookup}_active"]))
-        kwargs = dict(border=1, sticky="ns") if element == "thumb" else {}
+        kwargs = {"border": 1, "sticky": "ns"} if element == "thumb" else {}
         self._style.element_create(*args, **kwargs)
 
         # Get a configurable trough
@@ -487,7 +487,7 @@ def _get_arrow(cls, dimensions, thickness, direction):
         crop_size = (square_size // 16) * 16
         draw_rows = int(6 * crop_size / 16)
         start_row = dimensions[1] // 2 - draw_rows // 2
-        initial_indent = (2 * (crop_size // 16) + (dimensions[0] - crop_size) // 2)
+        initial_indent = 2 * (crop_size // 16) + (dimensions[0] - crop_size) // 2
 
         retval = np.zeros((dimensions[1], dimensions[0]), dtype="uint8")
         for i in range(start_row, start_row + draw_rows):
4 changes: 2 additions & 2 deletions lib/image.py
@@ -23,7 +23,7 @@
 
 from lib.multithreading import MultiThread
 from lib.queue_manager import queue_manager, QueueEmpty
-from lib.utils import convert_to_secs, FaceswapError, _video_extensions, get_image_paths
+from lib.utils import convert_to_secs, FaceswapError, VIDEO_EXTENSIONS, get_image_paths
 
 if T.TYPE_CHECKING:
     from lib.align.alignments import PNGHeaderDict
@@ -1148,7 +1148,7 @@ def _check_for_video(self):
         """
         if not isinstance(self.location, str) or os.path.isdir(self.location):
             retval = False
-        elif os.path.splitext(self.location)[1].lower() in _video_extensions:
+        elif os.path.splitext(self.location)[1].lower() in VIDEO_EXTENSIONS:
             retval = True
         else:
             raise FaceswapError("The input file '{}' is not a valid video".format(self.location))
2 changes: 1 addition & 1 deletion lib/keypress.py
@@ -43,7 +43,7 @@ def __init__(self, is_gui=False):
         self.old_term = termios.tcgetattr(self.file_desc)
 
         # New terminal setting unbuffered
-        self.new_term[3] = (self.new_term[3] & ~termios.ICANON & ~termios.ECHO)
+        self.new_term[3] = self.new_term[3] & ~termios.ICANON & ~termios.ECHO
         termios.tcsetattr(self.file_desc, termios.TCSAFLUSH, self.new_term)
 
         # Support normal-terminal reset at exit
6 changes: 3 additions & 3 deletions lib/utils.py
@@ -24,9 +24,9 @@
 from http.client import HTTPResponse
 
 # Global variables
-_image_extensions = [  # pylint:disable=invalid-name
+IMAGE_EXTENSIONS = [  # pylint:disable=invalid-name
     ".bmp", ".jpeg", ".jpg", ".png", ".tif", ".tiff"]
-_video_extensions = [  # pylint:disable=invalid-name
+VIDEO_EXTENSIONS = [  # pylint:disable=invalid-name
     ".avi", ".flv", ".mkv", ".mov", ".mp4", ".mpeg", ".mpg", ".webm", ".wmv",
     ".ts", ".vob"]
 _TF_VERS: tuple[int, int] | None = None
@@ -249,7 +249,7 @@ def get_image_paths(directory: str, extension: str | None = None) -> list[str]:
         ['/path/to/directory/image1.jpg']
     """
     logger = logging.getLogger(__name__)
-    image_extensions = _image_extensions if extension is None else [extension]
+    image_extensions = IMAGE_EXTENSIONS if extension is None else [extension]
     dir_contents = []
 
     if not os.path.exists(directory):
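
A minimal usage sketch of the renamed constants and the get_image_paths helper follows; the import locations come from the diff above, while the directory paths are invented for illustration.

import os

from lib.utils import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS, get_image_paths


def is_video(path: str) -> bool:
    """ True if the file's extension is one of the recognised video containers """
    return os.path.splitext(path)[1].lower() in VIDEO_EXTENSIONS


frames = get_image_paths("/path/to/frames")        # every recognised image file
jpgs = get_image_paths("/path/to/frames", ".jpg")  # restrict to a single extension
print(IMAGE_EXTENSIONS)                            # ['.bmp', '.jpeg', ...]
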
2 changes: 1 addition & 1 deletion plugins/convert/mask/mask_blend.py
@@ -13,7 +13,7 @@
 logger = logging.getLogger(__name__)
 
 
-class Mask():  # pylint:disable=too-few-public-methods
+class Mask():
     """ Manipulations to perform to the mask that is to be applied to the output of the Faceswap
     model.
14 changes: 8 additions & 6 deletions plugins/extract/align/cv2_dnn.py
@@ -46,6 +46,7 @@ def __init__(self, **kwargs) -> None:
         super().__init__(git_model_id=git_model_id, model_filename=model_filename, **kwargs)
 
         self.model: cv2.dnn.Net
+        self.model_path: str
         self.name = "cv2-DNN Aligner"
         self.input_size = 128
         self.color_format = "RGB"
@@ -56,8 +57,8 @@ def __init__(self, **kwargs) -> None:
 
     def init_model(self) -> None:
         """ Initialize CV2 DNN Detector Model"""
-        self.model = cv2.dnn.readNetFromTensorflow(self.model_path)  # pylint:disable=no-member
-        self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)  # pylint:disable=no-member
+        self.model = cv2.dnn.readNetFromTensorflow(self.model_path)
+        self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
 
     def faces_to_feed(self, faces: np.ndarray) -> np.ndarray:
         """ Convert a batch of face images from UINT8 (0-255) to fp32 (0.0-255.0)
@@ -136,7 +137,7 @@ def align_image(self, batch: AlignerBatch) -> tuple[list[np.ndarray],
         offsets: list
             List of offsets for the faces
         """
-        logger.trace("Aligning image around center")  # type:ignore
+        logger.trace("Aligning image around center")  # type:ignore[attr-defined]
         sizes = (self.input_size, self.input_size)
         rois = []
         faces = []
@@ -247,7 +248,7 @@ def pad_image(cls, box: list[int], image: np.ndarray) -> tuple[np.ndarray, tuple
         pad_t = 1 - box[1] if box[1] < 0 else 0
         pad_r = box[2] - width if box[2] > width else 0
         pad_b = box[3] - height if box[3] > height else 0
-        logger.trace("Padding: (l: %s, t: %s, r: %s, b: %s)",  # type:ignore
+        logger.trace("Padding: (l: %s, t: %s, r: %s, b: %s)",  # type:ignore[attr-defined]
                      pad_l, pad_t, pad_r, pad_b)
         padded_image = cv2.copyMakeBorder(image.copy(),
                                           pad_t,
@@ -257,7 +258,8 @@ def pad_image(cls, box: list[int], image: np.ndarray) -> tuple[np.ndarray, tuple
                                           cv2.BORDER_CONSTANT,
                                           value=(0, 0, 0))
         offsets = (pad_l - pad_r, pad_t - pad_b)
-        logger.trace("image_shape: %s, Padded shape: %s, box: %s, offsets: %s",  # type:ignore
+        logger.trace("image_shape: %s, Padded shape: %s, box: %s, "  # type:ignore[attr-defined]
+                     "offsets: %s",
                      image.shape, padded_image.shape, box, offsets)
         return padded_image, offsets
 
@@ -311,4 +313,4 @@ def get_pts_from_predict(self, batch: AlignerBatch):
             points[:, 1] += (roi[1] - offset[1])
             landmarks.append(points)
         batch.landmarks = np.array(landmarks)
-        logger.trace("Predicted Landmarks: %s", batch.landmarks)  # type:ignore
+        logger.trace("Predicted Landmarks: %s", batch.landmarks)  # type:ignore[attr-defined]
30 changes: 15 additions & 15 deletions plugins/extract/align/fan_defaults.py
@@ -50,19 +50,19 @@
 
 
 _DEFAULTS = {
-    "batch-size": dict(
-        default=12,
-        info="The batch size to use. To a point, higher batch sizes equal better performance, "
-             "but setting it too high can harm performance.\n"
-             "\n\tNvidia users: If the batchsize is set higher than the your GPU can "
-             "accomodate then this will automatically be lowered."
-             "\n\tAMD users: A batchsize of 8 requires about 4 GB vram.",
-        datatype=int,
-        rounding=1,
-        min_max=(1, 64),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True,
-    )
+    "batch-size": {
+        "default": 12,
+        "info": "The batch size to use. To a point, higher batch sizes equal better performance, "
+                "but setting it too high can harm performance.\n"
+                "\n\tNvidia users: If the batchsize is set higher than the your GPU can "
+                "accomodate then this will automatically be lowered."
+                "\n\tAMD users: A batchsize of 8 requires about 4 GB vram.",
+        "datatype": int,
+        "rounding": 1,
+        "min_max": (1, 64),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True,
+    }
 }
10 changes: 5 additions & 5 deletions plugins/extract/detect/cv2_dnn.py
@@ -26,14 +26,14 @@ def __init__(self, **kwargs) -> None:
     def init_model(self) -> None:
         """ Initialize CV2 DNN Detector Model"""
         assert isinstance(self.model_path, list)
-        self.model = cv2.dnn.readNetFromCaffe(self.model_path[1],  # pylint:disable=no-member
+        self.model = cv2.dnn.readNetFromCaffe(self.model_path[1],
                                               self.model_path[0])
-        self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)  # pylint:disable=no-member
+        self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
 
     def process_input(self, batch: BatchType) -> None:
         """ Compile the detection image(s) for prediction """
         assert isinstance(batch, DetectorBatch)
-        batch.feed = cv2.dnn.blobFromImages(batch.image,  # pylint:disable=no-member
+        batch.feed = cv2.dnn.blobFromImages(batch.image,
                                             scalefactor=1.0,
                                             size=(self.input_size, self.input_size),
                                             mean=[104, 117, 123],
@@ -53,13 +53,13 @@ def finalize_predictions(self, predictions: np.ndarray) -> np.ndarray:
         for i in range(predictions.shape[2]):
             confidence = predictions[0, 0, i, 2]
             if confidence >= self.confidence:
-                logger.trace("Accepting due to confidence %s >= %s",  # type:ignore
+                logger.trace("Accepting due to confidence %s >= %s",  # type:ignore[attr-defined]
                              confidence, self.confidence)
                 faces.append([(predictions[0, 0, i, 3] * self.input_size),
                               (predictions[0, 0, i, 4] * self.input_size),
                               (predictions[0, 0, i, 5] * self.input_size),
                               (predictions[0, 0, i, 6] * self.input_size)])
-        logger.trace("faces: %s", faces)  # type:ignore
+        logger.trace("faces: %s", faces)  # type:ignore[attr-defined]
         return np.array(faces)[None, ...]
 
     def process_output(self, batch: BatchType) -> None:
28 changes: 14 additions & 14 deletions plugins/extract/detect/cv2_dnn_defaults.py
@@ -50,17 +50,17 @@
 )
 
 
-_DEFAULTS = dict(
-    confidence=dict(
-        default=50,
-        info="The confidence level at which the detector has succesfully found a face.\nHigher "
-             "levels will be more discriminating, lower levels will have more false positives.",
-        datatype=int,
-        rounding=5,
-        min_max=(25, 100),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True,
-    ),
-)
+_DEFAULTS = {
+    "confidence": {
+        "default": 50,
+        "info": "The confidence level at which the detector has succesfully found a face.\nHigher "
+                "levels will be more discriminating, lower levels will have more false positives.",
+        "datatype": int,
+        "rounding": 5,
+        "min_max": (25, 100),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True,
+    },
+}
