@Dan64: I compared the speed of AutoGain with PIL and without it:
With:
without:
Using vsViewers Benchmark tool I got:
460-480fps with "clip = autoadjust.AutoGain(clip)"
and
640-650fps with "clip = autoadjust.AutoGain2(clip)"
that's nearly 40% faster. Do you get similar speeds?
Cu Selur
Ps.: Since I don't like how Fred and some other users write their scripts, I also attached a function which wraps the whole thing.
PPs.: added UnsharpenMask to sharpen.py
With:
def AutoGain(clip: vs.VideoNode, clip_limit: float = 1.0, strength: float = 0.5) -> vs.VideoNode:
    """ AutoGain filter using OpenCV.

    Stretches the luma range of each frame towards the legal video range
    and blends the result with the original according to *strength*.

    :param clip: Clip to process (non-RGB24 input is converted internally).
    :param clip_limit: Threshold for contrast limiting, range [0, 50] (default=1.0).
                       A positive value attenuates the computed gain/offset.
    :param strength: Strength of the filter. A strength=0 means that the clip is
                     returned unchanged, range [0, 1] (default=0.5)
    """
    if clip.format.id != vs.RGB24:
        # clip not in RGB24 format, it will be converted
        if clip.format.color_family == vs.ColorFamily.YUV:
            rgb_clip = clip.resize.Bicubic(format=vs.RGB24, matrix_in_s="709", range_s="full",
                                           dither_type="error_diffusion")
        else:
            rgb_clip = clip.resize.Bicubic(format=vs.RGB24, range_s="full")
    else:
        rgb_clip = clip

    # Merge weight: 0 means "use only the filtered clip".
    weight: float = max(min(1.0 - strength, 1.0), 0.0)

    def frame_autogain(n, f, bits_per_pixel: int = 8):
        img = _frame_to_image(f)
        img_np = np.asarray(img)
        # Single RGB->YUV conversion; luma is adjusted in place on this array.
        # (The original code performed the identical cvtColor twice.)
        yuv = cv2.cvtColor(img_np, cv2.COLOR_RGB2YUV)
        dY = yuv[:, :, 0]
        # Frequencies above 16 bits are treated as 16 bit.
        bits_per_pixel_for_freq = min(bits_per_pixel, 16)
        # Legal video range [16, 235], scaled to the source bit depth.
        # NOTE(review): the frame data here is always RGB24 (8 bit); for >8-bit
        # sources these limits exceed the 8-bit data range -- confirm intended.
        loose_max_limit = (235 + 1) << (bits_per_pixel_for_freq - 8)
        loose_min_limit = 16 << (bits_per_pixel_for_freq - 8)
        maxY = min(dY.max(), loose_max_limit)
        minY = max(dY.min(), loose_min_limit)
        y_range = maxY - minY
        y_offset = 0
        y_gain = 0
        if y_range > 0:
            scale = (loose_max_limit - loose_min_limit) / y_range
            y_offset = (loose_min_limit - scale * minY) / (1 << (bits_per_pixel_for_freq - 8))
            y_gain = 256 * (scale - 1.0)
        if clip_limit > 0:
            # Attenuate the correction by the contrast-limiting factor.
            y_offset = y_offset * (1 - clip_limit / 100.0)
            y_gain = y_gain * (1 - clip_limit / 100.0)
        # Apply gain/offset to the luma plane only; chroma is unchanged.
        yuv[:, :, 0] = ((dY + y_offset) * (y_gain / 256 + 1)).clip(min=0, max=255).astype(np.uint8)
        img_new = Image.fromarray(cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB))
        return _image_to_frame(img_new, f.copy())

    # BUGFIX: ModifyFrame must be invoked on the clip whose format matches the
    # frames returned by the selector (RGB24). Invoking it on the original
    # clip raised a format-mismatch error for non-RGB24 inputs (AutoGain2
    # already does this correctly).
    clip_a = rgb_clip.std.ModifyFrame(clips=rgb_clip, selector=partial(frame_autogain,
                                      bits_per_pixel=clip.format.bits_per_sample))
    clip_b = rgb_clip
    if weight > 0:
        clip_rgb = core.std.Merge(clip_a, clip_b, weight)
    else:
        clip_rgb = clip_a

    if clip.format.id != vs.RGB24:
        # convert the format for tweak to YUV 8bits
        clip_new = clip_rgb.resize.Bicubic(format=vs.YUV420P8, matrix_s="709", range_s="limited")
    else:
        clip_new = clip_rgb
    return clip_new
def AutoGain2(clip: vs.VideoNode, clip_limit: float = 1.0, strength: float = 0.5) -> vs.VideoNode:
    """ Faster AutoGain variant that works on the frame planes directly,
    avoiding the PIL round-trip of AutoGain.

    :param clip: Clip to process (non-RGB24 input is converted internally).
    :param clip_limit: Threshold for contrast limiting, range [0, 50] (default=1.0).
                       A positive value attenuates the computed gain/offset.
    :param strength: Strength of the filter. A strength=0 means that the clip is
                     returned unchanged, range [0, 1] (default=0.5)
    """
    if clip.format.id == vs.RGB24:
        rgb_clip = clip
    elif clip.format.color_family == vs.ColorFamily.YUV:
        rgb_clip = clip.resize.Bicubic(format=vs.RGB24, matrix_in_s="709", range_s="full")
    else:
        rgb_clip = clip.resize.Bicubic(format=vs.RGB24, range_s="full")

    # Merge weight: 0 means "use only the filtered clip".
    weight = max(min(1.0 - strength, 1.0), 0.0)

    # Hoist all per-clip constants out of the per-frame callback.
    bits_per_pixel_for_freq = min(clip.format.bits_per_sample, 16)
    shift = bits_per_pixel_for_freq - 8
    # Legal video range [16, 235], scaled to the source bit depth.
    loose_max_limit = (235 + 1) << shift
    loose_min_limit = 16 << shift
    clip_limit_factor = (1 - clip_limit / 100.0) if clip_limit > 0 else 1.0

    def frame_autogain(n, f):
        # Assemble an HxWx3 RGB array straight from the frame planes.
        planes = [np.asarray(f[p], dtype=np.uint8) for p in range(3)]
        img_np = np.stack(planes, axis=2)
        yuv = cv2.cvtColor(img_np, cv2.COLOR_RGB2YUV)
        dY = yuv[:, :, 0]
        maxY = min(dY.max(), loose_max_limit)
        minY = max(dY.min(), loose_min_limit)
        y_range = maxY - minY
        if y_range > 0:
            # Scale luma so the observed range stretches to the legal range,
            # attenuated by the contrast-limiting factor.
            scale = (loose_max_limit - loose_min_limit) / y_range
            y_offset = ((loose_min_limit - scale * minY) / (1 << shift)) * clip_limit_factor
            y_gain = 256 * (scale - 1.0) * clip_limit_factor
            yuv[:, :, 0] = np.clip((dY + y_offset) * (y_gain / 256 + 1), 0, 255)
        rgb_new = cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB)
        # Copy the adjusted planes into a writable copy of the input frame.
        new_frame = f.copy()
        for p in range(3):
            np.copyto(np.asarray(new_frame[p]), rgb_new[:, :, p])
        return new_frame

    clip_a = rgb_clip.std.ModifyFrame(clips=[rgb_clip], selector=frame_autogain)
    clip_rgb = clip_a if weight <= 0 else core.std.Merge(clip_a, rgb_clip, weight)
    if clip.format.id == vs.RGB24:
        return clip_rgb
    return clip_rgb.resize.Bicubic(format=vs.YUV420P8, matrix_s="709", range_s="limited")
Using vsViewers Benchmark tool I got:
460-480fps with "clip = autoadjust.AutoGain(clip)"
and
640-650fps with "clip = autoadjust.AutoGain2(clip)"
that's nearly 40% faster. Do you get similar speeds?
Cu Selur
Ps.: Since I don't like how Fred and some other users write their scripts, I also attached a function which wraps the whole thing.
PPs.: added UnsharpenMask to sharpen.py
----
Dev versions are in the 'experimental'-folder of my GoogleDrive, which is linked on the download page.
Dev versions are in the 'experimental'-folder of my GoogleDrive, which is linked on the download page.