Made 'outplace' decorator (bad idea?). Fixed 'is_playing'. Put most of the audio reading engine in FFMPEGAudioReader.
Zulko committed Feb 25, 2014
1 parent 61cc5ac commit ad5a7a8
Showing 17 changed files with 297 additions and 427 deletions.
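
The 'outplace' decorator named in the commit message is applied to Clip.set_start, set_end, set_duration and set_get_frame below, but its definition lives in moviepy/decorators.py, whose diff is not expanded in this excerpt. A minimal sketch, inferred only from how those setters now mutate self and return nothing, of what the decorator presumably does:

# Hypothetical sketch of 'outplace' (the real definition is in
# moviepy/decorators.py, not shown here): run the decorated method on a
# shallow copy of the clip and return that copy, so the caller's clip is
# left untouched.
def outplace(f):
    def wrapper(clip, *args, **kwargs):
        newclip = clip.copy()      # Clip.copy() is the shallow copy defined in Clip.py
        f(newclip, *args, **kwargs)
        return newclip
    return wrapper

Writing each setter as a plain in-place mutation and restoring copy semantics in one place is what the decorator buys; whether that indirection is worth it is presumably the "bad idea?" the commit message wonders about.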
37 changes: 23 additions & 14 deletions examples/example_with_sound.py
@@ -14,31 +14,40 @@
# LOAD THE MAIN SCENE
# this small video contains the two scenes that we will put together.

main_clip = VideoFileClip("../../charadePhone.mp4")
main_clip = VideoFileClip("../../videos/charadePhone.mp4")
W,H = main_clip.size



# MAKE THE LEFT CLIP : cut, crop, add a mask

clip_left = main_clip.coreader().subclip(0,duration).\
crop( x1=60, x2=60 + 2*W/3)

mask = color_split((2*W/3,H), p1=(W/3,H), p2=(2*W/3,0),
col1=1, col2=0, grad_width=2)
mask = color_split((2*W/3,H),
p1=(W/3,H), p2=(2*W/3,0),
col1=1, col2=0,
grad_width=2)

mask_clip = ImageClip(mask, ismask=True)

clip_left.mask = ImageClip(mask, ismask=True)
clip_left = (main_clip.coreader()
.subclip(0,duration)
.crop( x1=60, x2=60 + 2*W/3)
.set_mask(mask_clip))


# MAKE THE RIGHT CLIP : cut, crop, add a mask

clip_right = main_clip.coreader().subclip(21,21+duration).\
crop(x1=70, x2=70+2*W/3)

mask = color_split((2*W/3,H), p1=(2,H), p2=(W/3+2,0),
col1=0, col2=1, grad_width=2)

clip_right.mask = ImageClip(mask, ismask=True)
mask = color_split((2*W/3,H),
p1=(2,H), p2=(W/3+2,0),
col1=0, col2=1,
grad_width=2)

mask_clip = ImageClip(mask, ismask=True)

clip_right = (main_clip.coreader()
.subclip(21,21+duration)
.crop(x1=70, x2=70+2*W/3)
.set_mask(mask_clip))




6 changes: 3 additions & 3 deletions examples/star_worms.py
@@ -87,7 +87,7 @@ def trapzWarp(pic,cx,cy,ismask=False):

# BACKGROUND IMAGE, DARKENED AT 60%

stars = ImageClip('../../starworms/stars.jpg')
stars = ImageClip('../../videos/stars.jpg')
stars_darkened = stars.fl_image(lambda pic: (0.6*pic).astype('int16'))


@@ -100,7 +100,7 @@ def trapzWarp(pic,cx,cy,ismask=False):

# WRITE TO A FILE

final.set_duration(8).to_videofile("starworms.avi")
final.set_duration(8).to_videofile("starworms.avi", fps=5)

# This script is heavy (30s of computations to render 8s of video)

@@ -175,5 +175,5 @@ def composeCenter(clip):

# Concatenate and write to a file

concatenate(annotated_clips).to_videofile('tutorial.avi')
concatenate(annotated_clips).to_videofile('tutorial.avi', fps=5)

89 changes: 51 additions & 38 deletions moviepy/Clip.py
@@ -7,8 +7,10 @@
from copy import copy
import numpy as np

from moviepy.decorators import ( apply_to_mask, apply_to_audio,
time_can_be_tuple)
from moviepy.decorators import ( apply_to_mask,
apply_to_audio,
time_can_be_tuple,
outplace)


class Clip:
@@ -48,7 +50,6 @@ def __init__(self):
self.duration = None



def copy(self):
"""
Shallow copy of the clip. This method is intensively used
@@ -111,7 +112,8 @@ def fl(self, fun, apply_to=[] , keep_duration=True):
if hasattr(newclip, attr):
a = getattr(newclip, attr)
if a != None:
setattr(newclip, attr, a.fl(fl))
new_a = a.fl(fun, keep_duration=keep_duration)
setattr(newclip, attr, new_a)

return newclip

@@ -183,6 +185,7 @@ def fx(self, func, *args, **kwargs):
@apply_to_mask
@apply_to_audio
@time_can_be_tuple
@outplace
def set_start(self, t, change_end=True):
"""
Returns a copy of the clip, with the ``start`` attribute set
@@ -199,83 +202,93 @@ def set_start(self, t, change_end=True):
These changes are also applied to the ``audio`` and ``mask``
clips of the current clip, if they exist.
"""
newclip = self.copy()
newclip.start = t
if (newclip.duration != None) and change_end:
newclip.end = t + newclip.duration
elif (newclip.end !=None):
newclip.duration = newclip.end - newclip.start

return newclip

self.start = t
if (self.duration != None) and change_end:
self.end = t + self.duration
elif (self.end !=None):
self.duration = self.end - self.start



@apply_to_mask
@apply_to_audio
@time_can_be_tuple
@outplace
def set_end(self, t):
"""
Returns a copy of the clip, with the ``end`` attribute set to
``t``. Also sets the duration of the mask and audio, if any,
of the returned clip.
"""
newclip = self.copy()
newclip.end = t
if newclip.start is None:
if newclip.duration != None:
newclip.start = max(0, t - newclip.duration)
self.end = t
if self.start is None:
if self.duration != None:
self.start = max(0, t - self.duration)
else:
newclip.duration = newclip.end - newclip.start

return newclip
self.duration = self.end - self.start



@apply_to_mask
@apply_to_audio
@time_can_be_tuple
@outplace
def set_duration(self, t, change_end=True):
"""
Returns a copy of the clip, with the ``duration`` attribute
set to ``t``.
Also sets the duration of the mask and audio, if any, of the
returned clip.
"""
newclip = copy(self)
newclip.duration = t
self.duration = t
if change_end:
newclip.end = newclip.start + t
self.end = self.start + t
else:
newclip.start = newclip.end - t


return newclip
self.start = self.end - t



@outplace
def set_get_frame(self, gf):
"""
Sets a ``get_frame`` attribute for the clip. Useful for setting
arbitrary/complicated videoclips.
"""

newclip = copy(self)
newclip.get_frame = gf
return newclip
self.get_frame = gf



@time_can_be_tuple
def is_playing(self, t):
"""
Return true if t is between the start and the end of the clip.
If t is a number, returns true if t is between the start and the
end of the clip.
If t is a numpy array, returns False if none of the values in t
is in the clip, else returns a vector [b_1, b_2, b_3...] where b_i
is true iff t_i is in the clip.
"""

if isinstance(t, np.ndarray):
t = t.max()
return (((self.end is None) and (t >= self.start)) or
(self.start <= t <= self.end))


# is the whole list of t outside the clip?
tmin, tmax = t.min(), t.max()

if (self.end != None) and (tmin > self.end) :
return False

if tmax < self.start:
return False

# If we arrive here, a part of t falls in the clip
result = 1 * (t >= self.start)
if (self.end != None):
result *= (t <= self.end)
return result

else:

return( (t >= self.start) and
((self.end is None) or (t <= self.end) ) )

@time_can_be_tuple
@apply_to_mask
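The rewritten is_playing above returns a boolean for a scalar time and a 0/1 vector (or False) for a numpy array of times. A usage sketch of the intended behaviour, assuming Clip() initialises start=0 and end=None as in the full source (only part of Clip.py is shown above):

import numpy as np
from moviepy.Clip import Clip

# set_start / set_end now return modified copies thanks to @outplace.
clip = Clip().set_start(2).set_end(5)

clip.is_playing(3)                          # True: 3 lies inside [2, 5]
clip.is_playing(np.array([0.0, 1.0]))       # False: every time falls before the clip
clip.is_playing(np.array([1.0, 3.0, 6.0]))  # array([0, 1, 0]): per-sample membership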
42 changes: 31 additions & 11 deletions moviepy/audio/AudioClip.py
@@ -47,8 +47,15 @@ class AudioClip(Clip):
"""

def __init__(self):
def __init__(self, get_frame = None):
Clip.__init__(self)
if get_frame:
self.get_frame = get_frame
frame0 = self.get_frame(0)
if hasattr(frame0, '__iter__'):
self.nchannels = len(list(frame0))
else:
self.nchannels = 1

@requires_duration
def to_soundarray(self,tt=None,fps=None, nbytes=2):
@@ -72,19 +79,24 @@ def to_soundarray(self,tt=None,fps=None, nbytes=2):
tt = np.arange(0,self.duration, 1.0/fps)

snd_array = self.get_frame(tt)
snd_array = np.maximum(-0.999, np.minimum(0.999,snd_array))
snd_array = np.maximum(-0.99,
np.minimum(0.99,snd_array))
inttype = {1:'int8',2:'int16',4:'int32'}[nbytes]
return (2**(8*nbytes-1)*snd_array).astype(inttype)

@property
def nchannels(self):
return len(list(self.get_frame(0)))



@requires_duration
def to_audiofile(self,filename, fps=44100, nbytes=2,
buffersize=5000, codec='libvorbis',
buffersize=2000, codec='libvorbis',
bitrate=None, verbose=True):
"""
codecs = { 'libmp3lame': 'mp3',
'libvorbis':'ogg',
'libfdk_aac':'m4a',
'pcm_s16le':'wav',
'pcm_s32le': 'wav'}
"""

return ffmpeg_audiowrite(self,filename, fps, nbytes, buffersize,
codec, bitrate, verbose)
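
With the optional get_frame argument to AudioClip.__init__, an audio clip can now be built directly from a time-to-amplitude function, and nchannels is inferred from the frame at t=0 instead of being recomputed as a property. A minimal sketch, assuming the constructor and to_soundarray behave as in the diff above (the tone itself is only an illustration):

import numpy as np
from moviepy.audio.AudioClip import AudioClip

# Mono 440 Hz sine. The frame function must accept a scalar time as well as
# a numpy array of times, because to_soundarray calls it with an array.
tone = AudioClip(get_frame=lambda t: np.sin(2 * np.pi * 440 * t))
tone = tone.set_duration(2)              # required by @requires_duration

samples = tone.to_soundarray(fps=22050)  # int16 samples, clipped to +/- 0.99
print(tone.nchannels, samples.shape)     # 1 (sin(0) is not iterable), (44100,)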
@@ -142,6 +154,7 @@ def get_frame(t):
return self.array[i]

self.get_frame = get_frame
self.nchannels = len(list(self.get_frame(0)))


class CompositeAudioClip(AudioClip):
@@ -166,19 +179,26 @@ def __init__(self, clips):
self.clips = clips

ends = [c.end for c in self.clips]
self.nchannels = max([c.nchannels for c in self.clips])
if not any([(e is None) for e in ends]):
self.duration = max(ends)
self.end = max(ends)

def get_frame(t):
# buggy


played_parts = [c.is_playing(t) for c in self.clips]

sounds= [c.get_frame(t - c.start)
for c in clips if c.is_playing(t)]
sounds= [c.get_frame(t - c.start)*np.array([part]).T
for c,part in zip(self.clips, played_parts)
if (part is not False) ]

if isinstance(t,np.ndarray):
zero = np.zeros((len(t),2))
zero = np.zeros((len(t),self.nchannels))

else:
zero = np.zeros(2)
zero = np.zeros(self.nchannels)

return zero + sum(sounds)

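CompositeAudioClip.get_frame now weights each clip's frames with the 0/1 vector returned by is_playing, so a clip that starts or ends inside a requested block of times simply contributes zeros outside its own span, and the silence buffer is sized with max(nchannels) instead of a hard-coded 2. A usage sketch with two hypothetical stereo tones (the stereo_tone helper is illustrative, not part of the library, and the parts of CompositeAudioClip.__init__ collapsed above are assumed to still wire up get_frame and start as before):

import numpy as np
from moviepy.audio.AudioClip import AudioClip, CompositeAudioClip

def stereo_tone(freq):
    """Frame function for a stereo sine at `freq` Hz, valid for a scalar
    time as well as a numpy array of times."""
    def get_frame(t):
        s = 0.3 * np.sin(2 * np.pi * freq * t)
        return np.array([s, s]).T        # shape (..., 2)
    return get_frame

low  = AudioClip(get_frame=stereo_tone(220)).set_duration(3)
high = AudioClip(get_frame=stereo_tone(440)).set_duration(2).set_start(1)

mix = CompositeAudioClip([low, high])    # end = duration = 3, nchannels = 2

# Times straddling the start of `high`: the first samples contain only
# `low`, the later ones the sum of both clips.
block = np.linspace(0.9, 1.1, 5)
print(mix.get_frame(block).shape)        # (5, 2)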
(Diffs for the remaining 13 changed files are not shown here.)
