Updated the DeOldify test version.
I took the latest RC and loaded it into my new DeOldify test version of Hybrid together with "sample2\thing1.mp4"; it didn't crash.
The first frame gets colored, the second one doesn't; some color is added, and it seems like every 24 frames I get a bit more color.
Switching to async, the coloring isn't good, but it's there.
Code:
Failed to evaluate the script:
Python exception: _upsample_bilinear2d_aa() received an invalid combination of arguments - got (Tensor, list, bool, NoneType), but expected one of:
* (Tensor input, tuple of ints output_size, bool align_corners, tuple of floats scale_factors)
didn't match because some of the arguments have invalid types: (Tensor, !list of [bool, int]!, bool, !NoneType!)
* (Tensor input, tuple of ints output_size, bool align_corners, float scales_h = None, float scales_w = None, *, Tensor out = None)
Traceback (most recent call last):
File "src\\cython\\vapoursynth.pyx", line 3365, in vapoursynth._vpy_evaluate
File "src\\cython\\vapoursynth.pyx", line 3366, in vapoursynth._vpy_evaluate
File "J:\tmp\tempPreviewVapoursynthFile09_23_22_212.vpy", line 43, in
clip = HAVC_main(clip=clip, EnableDeepEx=True, DeepExRefMerge=0, ScFrameDir="J:/tmp", DeepExModel=0, DeepExEncMode=2, DeepExMaxMemFrames=0)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\vsdeoldify\__init__.py", line 280, in HAVC_main
clip_colored = HAVC_deepex(clip=clip, clip_ref=clip_ref, method=DeepExMethod, render_speed=DeepExPreset,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\vsdeoldify\__init__.py", line 554, in HAVC_deepex
clip_colored = vs_colormnet(clip, clip_ref, clip_sc, image_size=-1, enable_resize=enable_resize,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\vsdeoldify\vsslib\vsmodels.py", line 61, in vs_colormnet
return vs_colormnet_sync(clip, clip_ref, image_size, enable_resize, frame_propagate, max_memory_frames)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\vsdeoldify\colormnet\__init__.py", line 209, in vs_colormnet_sync
clip_i = vs_colormnet_sync_task(t_start, t_end, vid_render, clip, clip_ref, clip_sc, frame_propagate, ref_weight)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\vsdeoldify\colormnet\__init__.py", line 177, in vs_colormnet_sync_task
out_img = vid_render.colorize_frame(i, img_orig)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\vsdeoldify\colormnet\colormnet_render.py", line 190, in colorize_frame
data = self.get_image(ti, frame_i, self.ref_img)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\vsdeoldify\colormnet\colormnet_render.py", line 259, in get_image
img = self.im_transform(img)
^^^^^^^^^^^^^^^^^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\torchvision\transforms\transforms.py", line 95, in __call__
img = t(img)
^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\torch\nn\modules\module.py", line 1716, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\torch\nn\modules\module.py", line 1727, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\torchvision\transforms\transforms.py", line 354, in forward
return F.resize(img, self.size, self.interpolation, self.max_size, self.antialias)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\torchvision\transforms\functional.py", line 479, in resize
return F_t.resize(img, size=output_size, interpolation=interpolation.value, antialias=antialias)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\torchvision\transforms\_functional_tensor.py", line 465, in resize
img = interpolate(img, size=size, mode=interpolation, align_corners=align_corners, antialias=antialias)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "F:\Hybrid\64bit\Vapoursynth\Lib\site-packages\torch\nn\functional.py", line 4565, in interpolate
return torch._C._nn._upsample_bilinear2d_aa(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: _upsample_bilinear2d_aa() received an invalid combination of arguments - got (Tensor, list, bool, NoneType), but expected one of:
* (Tensor input, tuple of ints output_size, bool align_corners, tuple of floats scale_factors)
didn't match because some of the arguments have invalid types: (Tensor, !list of [bool, int]!, bool, !NoneType!)
* (Tensor input, tuple of ints output_size, bool align_corners, float scales_h = None, float scales_w = None, *, Tensor out = None)
That seems like a bug. Judging by the "!list of [bool, int]!" hint in the error, the first element of the size list that ends up in torchvision's resize is a bool instead of an int; a small sketch of what I mean is below.
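For what it's worth, here is a minimal standalone snippet that triggers the same TypeError outside of Hybrid (depending on the torch/torchvision combo it may fail slightly earlier or differently; the bool-in-the-size-list cause is my guess from the error message, and bad_size is a made-up value, not the one vsdeoldify actually computes):

Code:
import torch
import torchvision.transforms.functional as TF

img = torch.rand(3, 480, 640)  # dummy frame as a CHW tensor

# Fine: size is a list of plain ints
out = TF.resize(img, [240, 320], antialias=True)

# Triggers the reported error: a Python bool in the size list.
# torch's argument parser refuses bools where ints are expected, which
# matches "invalid types: (Tensor, !list of [bool, int]!, bool, !NoneType!)".
bad_size = [True, 320]  # hypothetical; maybe image_size=-1 leaks into a comparison somewhere
out = TF.resize(img, bad_size, antialias=True)  # TypeError as in the traceback above

If that is what's happening, a defensive fix on the vsdeoldify side would be to coerce the size entries with int(...) before building the Resize transform.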