New AutoColor adjustment filter
#9
@Dan64:
Had a quick look at the new autoadjust.
I like the new versions. (just a bit slow)

a. Do they all have to be RGB24 only? IIRC, cv2 should also be able to handle RGB48. ;)
b. Wouldn't it be better to not use PIL?
c. Have you tried cupy? (see the sketch below)
d. It seems wrong to use range_s="full" and later apply TV-range restrictions (16-235) as the limits.
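Regarding (c): cupy mirrors the numpy API closely, so the per-frame math would port almost 1:1; whether it actually helps depends on the host/device copies per frame (cv2 would still run on the CPU). A minimal sketch, assuming cupy is installed; autogain_luma_gpu is a hypothetical helper, not part of the filter:
import cupy as cp
import numpy as np

def autogain_luma_gpu(y_plane: np.ndarray, lo: float = 16.0, hi: float = 236.0) -> np.ndarray:
    # Hypothetical helper: the same linear luma stretch, just on the GPU.
    dY = cp.asarray(y_plane, dtype=cp.float32)   # host -> device copy
    minY = max(float(dY.min()), lo)              # forces a device sync
    maxY = min(float(dY.max()), hi)
    y_range = maxY - minY
    if y_range > 0:
        scale = (hi - lo) / y_range
        dY = cp.clip((dY - minY) * scale + lo, 0, 255)
    return cp.asnumpy(dY)                        # device -> host copy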


Here's a quick try for a high bit depth version without PIL:
import vapoursynth as vs
import numpy as np
import cv2
core = vs.core

def AutoGainTest(clip: vs.VideoNode, clip_limit: float = 1.0, strength: float = 0.5) -> vs.VideoNode:
    if not isinstance(clip, vs.VideoNode):
        raise vs.Error("AutoGain: This is not a clip")
    
    # Convert to RGB if needed (supporting high bit depth)
    rgb_format = vs.RGBS if clip.format.bits_per_sample > 8 else vs.RGB24
    if clip.format.id != rgb_format:
        if clip.format.color_family == vs.YUV:
            rgb_clip = clip.resize.Bicubic(format=rgb_format, matrix_in_s="709", range_s="full")
        else:
            rgb_clip = clip.resize.Bicubic(format=rgb_format, range_s="full")
    else:
        rgb_clip = clip

    weight = max(min(1.0 - strength, 1.0), 0.0)
    # The frame callback always works in a 0-255 float domain (RGBS planes are
    # scaled by 255 below), so the loose TV-range limits stay in 8-bit scale.
    loose_max_limit = 235 + 1
    loose_min_limit = 16
    clip_limit_factor = (1 - clip_limit/100.0) if clip_limit > 0 else 1.0

    def frame_autogain(n, f):
        # Create numpy array from frame planes (handles both 8-bit and high bit depth)
        if rgb_format == vs.RGB24:
            img_np = np.stack([
                np.asarray(f[0]),
                np.asarray(f[1]),
                np.asarray(f[2])
            ], axis=2).astype(np.float32)
        else:
            # For high bit depth (RGBS)
            img_np = np.stack([
                np.asarray(f[0]) * 255,
                np.asarray(f[1]) * 255,
                np.asarray(f[2]) * 255
            ], axis=2).astype(np.float32)

        # Process image
        yuv = cv2.cvtColor(img_np, cv2.COLOR_RGB2YUV)
        dY = yuv[:, :, 0]
        
        maxY = min(dY.max(), loose_max_limit)
        minY = max(dY.min(), loose_min_limit)
        y_range = maxY - minY

        if y_range > 0:
            scale = (loose_max_limit - loose_min_limit) / y_range
            y_offset = loose_min_limit - scale * minY
            y_gain = scale - 1.0

            # Damp both terms with the clip_limit factor (factor 0 = identity)
            y_offset *= clip_limit_factor
            y_gain *= clip_limit_factor

            # Gain first, then offset, so minY maps to the lower limit
            yuv[:, :, 0] = np.clip(dY * (y_gain + 1) + y_offset, 0, 255)
        
        # Convert back to RGB
        rgb_new = cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB)
        
        # Create new frame
        new_frame = f.copy()
        if rgb_format == vs.RGB24:
            for i in range(3):
                np.copyto(np.asarray(new_frame[i]), rgb_new[:, :, i].astype(np.uint8))
        else:
            # For high bit depth (RGBS)
            for i in range(3):
                np.copyto(np.asarray(new_frame[i]), (rgb_new[:, :, i] / 255).astype(np.float32))
        
        return new_frame

    clip_a = rgb_clip.std.ModifyFrame(clips=[rgb_clip], selector=frame_autogain)
    clip_rgb = core.std.Merge(clip_a, rgb_clip, weight) if weight > 0 else clip_a
    
    # Convert back to the original format if needed (matrix hardcoded to BT.709)
    if clip.format.id != rgb_format:
        if clip.format.color_family == vs.YUV:
            return clip_rgb.resize.Bicubic(format=clip.format.id, matrix_s="709", range_s="limited")
        else:
            # _ColorRange is an int prop (0 = full, 1 = limited), not a range_s string
            src_range = clip.get_frame(0).props.get("_ColorRange", 0)
            return clip_rgb.resize.Bicubic(format=clip.format.id, range_s="full" if src_range == 0 else "limited")
    
    return clip_rgb
I'm not sure about the luma range stuff. :)
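For reference, this is how the TV-range luma limits would scale with bit depth for integer formats (for float formats luma is nominally 0.0-1.0 instead); tv_range_limits is just an illustrative helper:
# Illustrative helper: TV-range luma limits per (integer) bit depth.
def tv_range_limits(bits: int) -> tuple[int, int]:
    return 16 << (bits - 8), 235 << (bits - 8)

# tv_range_limits(8)  -> (16, 235)
# tv_range_limits(10) -> (64, 940)
# tv_range_limits(16) -> (4096, 60160)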
# same imports as above: vapoursynth as vs, core, numpy as np, cv2
def AutoGainFull(clip: vs.VideoNode, clip_limit: float = 1.0, strength: float = 0.5) -> vs.VideoNode:
    """
    AutoGain filter for full-range RGB input/output only.
    Uses the full RGB range (0-max) instead of the YUV TV range (16-235).
    
    Args:
        clip: RGB input clip (must be full range)
        clip_limit: Threshold for contrast limiting (0-50)
        strength: Filter strength (0-1)
    """
    if not isinstance(clip, vs.VideoNode):
        raise vs.Error("AutoGain: Input must be a clip")
    
    # Verify input is RGB and full range
    if clip.format.color_family != vs.RGB:
        raise vs.Error("AutoGain: Input must be RGB format")
    
    # _ColorRange frame prop: 0 = full range, 1 = limited range
    if clip.get_frame(0).props.get("_ColorRange", 0) != 0:
        raise vs.Error("AutoGain: Input must be full range (0-255/1023/etc.)")
    
    bits = clip.format.bits_per_sample
    max_val = (1 << bits) - 1              # only meaningful for integer formats
    scale_factor = 255 / max_val if bits > 8 else 1
    
    # Parameters
    weight = max(min(1.0 - strength, 1.0), 0.0)
    clip_limit_factor = (1 - clip_limit/100.0) if clip_limit > 0 else 1.0

    def frame_autogain(n, f):
        # Create numpy array from frame planes (handles all bit depths)
        if clip.format.sample_type == vs.INTEGER:
            img_np = np.stack([
                np.asarray(f[0]),
                np.asarray(f[1]),
                np.asarray(f[2])
            ], axis=2).astype(np.float32) * scale_factor
        else:
            # Floating point format (RGBS)
            img_np = np.stack([
                np.asarray(f[0]) * 255,
                np.asarray(f[1]) * 255,
                np.asarray(f[2]) * 255
            ], axis=2).astype(np.float32)

        # Convert to YUV for processing (still using full range)
        yuv = cv2.cvtColor(img_np, cv2.COLOR_RGB2YUV)
        dY = yuv[:, :, 0]
        
        # Calculate using full range (0-255)
        maxY = dY.max()
        minY = dY.min()
        y_range = maxY - minY

        if y_range > 0:
            scale = 255 / y_range  # Full range scaling
            y_offset = -scale * minY
            y_gain = (scale - 1.0)
            
            # Apply with clip_limit factor
            y_offset *= clip_limit_factor
            y_gain *= clip_limit_factor
            
            # Gain first, then offset, so minY maps to 0 and maxY to 255
            yuv[:, :, 0] = np.clip(dY * (y_gain + 1) + y_offset, 0, 255)
        
        # Convert back to RGB
        rgb_new = cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB)
        
        # Create new frame with proper bit depth
        new_frame = f.copy()
        if clip.format.sample_type == vs.INTEGER:
            output = np.clip(rgb_new / scale_factor, 0, max_val).astype(np.uint16 if bits > 8 else np.uint8)
            for i in range(3):
                np.copyto(np.asarray(new_frame[i]), output[:, :, i])
        else:
            # Floating point format (RGBS): copy each channel separately
            for i in range(3):
                np.copyto(np.asarray(new_frame[i]), (rgb_new[:, :, i] / 255).astype(np.float32))
        
        return new_frame

    # Process and optionally blend with original
    clip_a = clip.std.ModifyFrame(clips=[clip], selector=frame_autogain)
    return core.std.Merge(clip_a, clip, weight) if weight > 0 else clip_a

I think it would be better to require full-range RGB input and only work with full-range luma.
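As a usage sketch for AutoGainFull (assuming BT.709 content; the lsmas source line is just an assumption, any source filter works):
# Hypothetical usage: convert limited-range YUV to full-range RGBS,
# run the gain, then convert back to the source format.
import vapoursynth as vs
core = vs.core

clip = core.lsmas.LWLibavSource("input.mkv")  # assumption: L-SMASH Works source
rgb = clip.resize.Bicubic(format=vs.RGBS, matrix_in_s="709", range_in_s="limited", range_s="full")
adjusted = AutoGainFull(rgb, clip_limit=1.0, strength=0.5)
out = adjusted.resize.Bicubic(format=clip.format.id, matrix_s="709", range_s="limited")
out.set_output()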
Just for inspiration (probably still got some errors in it):
# same imports as above: vapoursynth as vs, core, numpy as np (cv2 is not needed here)
def AutoGainYUV(clip: vs.VideoNode, clip_limit: float = 1.0, strength: float = 0.5, planes: tuple = (0,)) -> vs.VideoNode:
    """
    YUV-only AutoGain that processes luma (and optionally chroma) directly.
    Requires YUV input and avoids all RGB conversions.
    
    Args:
        clip: YUV input clip
        clip_limit: Threshold for contrast limiting (0-50)
        strength: Filter strength (0-1)
        planes: Which planes to process (default=(0,) for luma only)
    """
    if not isinstance(clip, vs.VideoNode):
        raise vs.Error("AutoGainYUV: Input must be a clip")
    
    if clip.format.color_family != vs.YUV:
        raise vs.Error("AutoGainYUV: Input must be YUV format")

    bits = clip.format.bits_per_sample
    max_val = (1 << bits) - 1  # only meaningful for integer formats
    is_float = clip.format.sample_type == vs.FLOAT
    weight = max(min(1.0 - strength, 1.0), 0.0)
    clip_limit_factor = (1 - clip_limit/100.0) if clip_limit > 0 else 1.0

    def frame_autogain(n, f):
        bytes_per_sample = clip.format.bytes_per_sample

        # Process each plane
        processed_planes = []
        for i in range(clip.format.num_planes):
            if i not in planes:
                processed_planes.append(None)  # untouched plane, keep frame data as-is
                continue

            # Normalize the plane to a 0-255 float working range.
            # Caveat: float chroma sits in -0.5..0.5, so processing chroma
            # planes of float clips is not handled correctly here.
            if is_float:
                processing_plane = np.asarray(f[i]).astype(np.float32) * 255
            elif bytes_per_sample == 1:
                processing_plane = np.asarray(f[i]).astype(np.float32)
            else:
                processing_plane = np.asarray(f[i]).astype(np.float32) * (255.0 / max_val)
            
            # Calculate statistics
            maxY = processing_plane.max()
            minY = processing_plane.min()
            y_range = maxY - minY

            if y_range > 0:
                scale = 255 / y_range
                y_offset = -scale * minY
                y_gain = (scale - 1.0)
                
                # Apply with clip_limit factor
                y_offset *= clip_limit_factor
                y_gain *= clip_limit_factor
                
                # Gain first, then offset, so minY maps to 0 and maxY to 255
                processing_plane = np.clip(processing_plane * (y_gain + 1) + y_offset, 0, 255)
            
            # Convert back to the original format and value range
            if is_float:
                result = (processing_plane / 255).astype(np.float32)
            elif bytes_per_sample == 1:
                result = np.clip(processing_plane, 0, 255).astype(np.uint8)
            else:
                result = np.clip(processing_plane * (max_val / 255.0), 0, max_val).astype(np.uint16)
            
            processed_planes.append(result)
        
        # Create new frame, overwriting only the processed planes
        new_frame = f.copy()
        for i in range(clip.format.num_planes):
            if processed_planes[i] is not None:
                np.copyto(np.asarray(new_frame[i]), processed_planes[i])
        
        return new_frame

    # Process and optionally blend with original
    clip_a = clip.std.ModifyFrame(clips=[clip], selector=frame_autogain)
    return core.std.Merge(clip_a, clip, weight) if weight > 0 else clip_a
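And a quick A/B sketch for the YUV version (text.Text and std.Interleave are core plugins; the source line is again just an assumption):
# Hypothetical comparison script: luma-only pass, interleaved with the source.
src = core.lsmas.LWLibavSource("input.mkv")
adj = AutoGainYUV(src, clip_limit=1.0, strength=0.5, planes=(0,))
ab = core.std.Interleave([src.text.Text("src"), adj.text.Text("AutoGainYUV")])
ab.set_output()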

Cu Selur
----
Dev versions are in the 'experimental'-folder of my GoogleDrive, which is linked on the download page.