#!/usr/bin/env python
# encoding: utf=8
"""
action.py

Created by Tristan Jehan and Jason Sundram.
"""
import os
from numpy import zeros, multiply, float32, mean, copy
from math import atan, pi
import sys

from echonest.remix.audio import assemble, AudioData
from cAction import limit, crossfade, fadein, fadeout

import dirac

def rows(m):
    """Returns the number of rows in a numpy matrix."""
    return m.shape[0]

def make_mono(track):
    """Converts stereo tracks to mono; leaves mono tracks alone."""
    if track.data.ndim == 2:
        mono = mean(track.data, 1)
        track.data = mono
        track.numChannels = 1
    return track

def make_stereo(track):
    """If the track is mono, duplicates it into two channels; otherwise, does nothing."""
    if track.data.ndim == 1:
        stereo = zeros((len(track.data), 2))
        stereo[:, 0] = copy(track.data)
        stereo[:, 1] = copy(track.data)
        track.data = stereo
        track.numChannels = 2
    return track
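
# Shape sketch (illustrative, not part of the original module): make_mono
# averages an (N, 2) sample array down to (N,), and make_stereo copies an (N,)
# array into both columns of an (N, 2) array. Anything with .data and
# .numChannels attributes, such as an AudioData, can be passed as `track`:
#
#   track = make_stereo(track)   # no-op if track is already stereo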

def render(actions, filename, verbose=True):
    """Calls render() on each action in actions, concatenates the results,
    encodes them to an audio file at filename, and returns the assembled
    audio along with the encoding result."""
    pieces = [a.render() for a in actions]
    # Stitch the rendered pieces into a single stereo, 44.1 kHz output.
    out = assemble(pieces, numChannels=2, sampleRate=44100, verbose=verbose)
    return out, out.encode(filename)
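
# Usage sketch (illustrative; assumes `track` is an analyzed
# echonest.remix.audio.LocalAudioFile and that "mix.mp3" is writable):
#
#   actions = [Fadein(track, 0.0, 2.0),
#              Playback(track, 2.0, 30.0),
#              Fadeout(track, 32.0, 3.0)]
#   out, encoded = render(actions, "mix.mp3", verbose=False)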


class Playback(object):
    """A snippet of the given track with start and duration. Volume leveling
    may be applied."""
    def __init__(self, track, start, duration):
        self.track = track
        self.start = float(start)
        self.duration = float(duration)

    def render(self):
        # self has start and duration, so it is a valid index into track.
        output = self.track[self]
        # Apply volume leveling if a gain has been set on the track.
        gain = getattr(self.track, 'gain', None)
        if gain is not None:
            # limit() guards against clipping after the gain multiply.
            output.data = limit(multiply(output.data, float32(gain)))

        return output

    def __repr__(self):
        return "<Playback '%s'>" % self.track.analysis.pyechonest_track.title

    def __str__(self):
        args = (self.start, self.start + self.duration,
                self.duration, self.track.analysis.pyechonest_track.title)
        return "Playback\t%.3f\t-> %.3f\t (%.3f)\t%s" % args


class Fadeout(Playback):
    """Fades the snippet out over its duration."""
    def render(self):
        gain = getattr(self.track, 'gain', 1.0)
        output = self.track[self]
        # Apply the fade-out envelope, starting from the track's gain.
        output.data = fadeout(output.data, gain)
        return output

    def __repr__(self):
        return "<Fadeout '%s'>" % self.track.analysis.pyechonest_track.title

    def __str__(self):
        args = (self.start, self.start + self.duration,
                self.duration, self.track.analysis.pyechonest_track.title)
        return "Fade out\t%.3f\t-> %.3f\t (%.3f)\t%s" % args


class Fadein(Playback):
    """Fades the snippet in over its duration."""
    def render(self):
        gain = getattr(self.track, 'gain', 1.0)
        output = self.track[self]
        # Apply the fade-in envelope, ending at the track's gain.
        output.data = fadein(output.data, gain)
        return output

    def __repr__(self):
        return "<Fadein '%s'>" % self.track.analysis.pyechonest_track.title

    def __str__(self):
        args = (self.start, self.start + self.duration,
                self.duration, self.track.analysis.pyechonest_track.title)
        return "Fade in\t%.3f\t-> %.3f\t (%.3f)\t%s" % args


class Edit(object):
    """Refers to a snippet of audio within a track."""
    def __init__(self, track, start, duration):
        self.track = track
        self.start = float(start)
        self.duration = float(duration)

    def __str__(self):
        args = (self.start, self.start + self.duration,
                self.duration, self.track.analysis.pyechonest_track.title)
        return "Edit\t%.3f\t-> %.3f\t (%.3f)\t%s" % args

    def get(self):
        return self.track[self]

    @property
    def end(self):
        return self.start + self.duration


class Crossfade(object):
    """Crossfades between two tracks, at the start points specified,
    for the given duration."""
    def __init__(self, tracks, starts, duration, mode='linear'):
        self.t1, self.t2 = [Edit(t, s, duration) for t, s in zip(tracks, starts)]
        self.duration = self.t1.duration
        self.mode = mode

    def render(self):
        t1, t2 = map(make_stereo, (self.t1.get(), self.t2.get()))
        vecout = crossfade(t1.data, t2.data, self.mode)
        audio_out = AudioData(ndarray=vecout, shape=vecout.shape,
                              sampleRate=t1.sampleRate,
                              numChannels=vecout.shape[1])
        return audio_out

    def __repr__(self):
        args = (self.t1.track.analysis.pyechonest_track.title, self.t2.track.analysis.pyechonest_track.title)
        return "<Crossfade '%s' and '%s'>" % args

    def __str__(self):
        args = (self.t1.start, self.t2.start + self.duration, self.duration,
                self.t1.track.analysis.pyechonest_track.title, self.t2.track.analysis.pyechonest_track.title)
        return "Crossfade\t%.3f\t-> %.3f\t (%.3f)\t%s -> %s" % args
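
# Usage sketch (illustrative): crossfade the last 4 seconds of track1 into the
# first 4 seconds of track2, assuming both are analyzed LocalAudioFile objects:
#
#   xf = Crossfade((track1, track2),
#                  (track1.analysis.duration - 4.0, 0.0),
#                  4.0)
#   out = xf.render()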


class Jump(Crossfade):
    """Jumps from one point in a track to another, crossfading between the
    source and target points for the given duration."""
    def __init__(self, track, source, target, duration):
        self.track = track
        self.t1, self.t2 = (Edit(track, source, duration),
                            Edit(track, target - duration, duration))
        self.duration = float(duration)
        self.mode = 'equal_power'

    @property
    def source(self):
        return self.t1.start

    @property
    def target(self):
        return self.t2.end

    def __repr__(self):
        return "<Jump '%s'>" % (self.t1.track.analysis.pyechonest_track.title)

    def __str__(self):
        args = (self.t1.start, self.t2.end, self.duration,
                self.t1.track.analysis.pyechonest_track.title)
        return "Jump\t\t%.3f\t-> %.3f\t (%.3f)\t%s" % args
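
# Usage sketch (illustrative): skip from 60 s to 120 s in the same track,
# smoothing over the seam with a 1.5 s equal-power crossfade:
#
#   j = Jump(track, 60.0, 120.0, 1.5)
#   out = j.render()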


class Blend(object):
    """Mixes together two lists of beats, one list from each of two tracks."""
    def __init__(self, tracks, lists):
        self.t1, self.t2 = tracks
        self.l1, self.l2 = lists
        assert(len(self.l1) == len(self.l2))

        self.calculate_durations()

    def calculate_durations(self):
        # Each blended beat lasts the average of the two source beat durations.
        zipped = zip(self.l1, self.l2)
        self.durations = [(d1 + d2) / 2.0 for ((s1, d1), (s2, d2)) in zipped]
        self.duration = sum(self.durations)

    def render(self):
        # Not implemented for a plain Blend; the Crossmatch subclass below
        # overrides calculate_durations() and supplies a working render().
        pass

    def __repr__(self):
        args = (self.t1.analysis.pyechonest_track.title, self.t2.analysis.pyechonest_track.title)
        return "<Blend '%s' and '%s'>" % args

    def __str__(self):
        # Start and end times for each of the two beat lists.
        s1, e1 = self.l1[0][0], sum(self.l1[-1])
        s2, e2 = self.l2[0][0], sum(self.l2[-1])
        n1, n2 = self.t1.analysis.pyechonest_track.title, self.t2.analysis.pyechonest_track.title
        args = (s1, s2, e1, e2, self.duration, n1, n2)
        return "Blend [%.3f, %.3f] -> [%.3f, %.3f] (%.3f)\t%s + %s" % args


class Crossmatch(Blend):
    """Makes a beat-matched crossfade between the two input tracks."""
    def calculate_durations(self):
        # Weight each beat pair's duration, shifting gradually from track 1 to track 2.
        c, dec = 1.0, 1.0 / float(len(self.l1) + 1)
        self.durations = []
        for ((s1, d1), (s2, d2)) in zip(self.l1, self.l2):
            c -= dec
            self.durations.append(c * d1 + (1 - c) * d2)
        self.duration = sum(self.durations)

    def stretch(self, t, l):
        """t is a track; l is a list of (start, duration) beat tuples."""
        signal_start = int(l[0][0] * t.sampleRate)
        signal_duration = int((sum(l[-1]) - l[0][0]) * t.sampleRate)
        vecin = t.data[signal_start:signal_start + signal_duration, :]

        # Each rate is (sample offset into vecin, time-stretch ratio for that beat).
        rates = []
        for i in xrange(len(l)):
            rate = (int(l[i][0] * t.sampleRate) - signal_start,
                    self.durations[i] / l[i][1])
            rates.append(rate)

        vecout = dirac.timeScale(vecin, rates, t.sampleRate, 0)
        if hasattr(t, 'gain'):
            vecout = limit(multiply(vecout, float32(t.gain)))

        audio_out = AudioData(ndarray=vecout, shape=vecout.shape,
                              sampleRate=t.sampleRate,
                              numChannels=vecout.shape[1])
        return audio_out

    def render(self):
        # 1) Time-stretch each track's beats to the blended durations
        #    computed in calculate_durations().
        out1 = self.stretch(self.t1, self.l1)
        out2 = self.stretch(self.t2, self.l2)

        # 2) Crossfade the two time-scaled signals together.
        vecout = crossfade(out1.data, out2.data, 'linear')
        audio_out = AudioData(ndarray=vecout, shape=vecout.shape,
                              sampleRate=out1.sampleRate,
                              numChannels=vecout.shape[1])
        return audio_out

    def __repr__(self):
        args = (self.t1.analysis.pyechonest_track.title, self.t2.analysis.pyechonest_track.title)
        return "<Crossmatch '%s' and '%s'>" % args

    def __str__(self):
        # Start of the first beat list, end of the second one.
        s1, e1 = self.l1[0][0], sum(self.l1[-1])
        s2, e2 = self.l2[0][0], sum(self.l2[-1])
        n1, n2 = self.t1.analysis.pyechonest_track.title, self.t2.analysis.pyechonest_track.title
        args = (s1, e2, self.duration, n1, n2)
        return "Crossmatch\t%.3f\t-> %.3f\t (%.3f)\t%s -> %s" % args
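
# Usage sketch (illustrative): beat-match the last 8 beats of track1 into the
# first 8 beats of track2, where each beat list holds (start, duration) pairs:
#
#   l1 = [(b.start, b.duration) for b in track1.analysis.beats[-8:]]
#   l2 = [(b.start, b.duration) for b in track2.analysis.beats[:8]]
#   out = Crossmatch((track1, track2), (l1, l2)).render()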


def humanize_time(secs):
    """Turns seconds into a string of the form HH:MM:SS,
    or MM:SS if less than one hour."""
    mins, secs = divmod(secs, 60)
    hours, mins = divmod(mins, 60)
    if hours > 0:
        return '%02d:%02d:%02d' % (hours, mins, secs)

    return '%02d:%02d' % (mins, secs)
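
# For example: humanize_time(125) returns '02:05', and
# humanize_time(3725) returns '01:02:05'.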


def display_actions(actions):
    """Prints a timeline of the given actions, one per line, prefixed with the
    running start time of each."""
    total = 0
    print
    for a in actions:
        print "%s\t %s" % (humanize_time(total), unicode(a))
        total += a.duration
    print