OpenShot Library | libopenshot  0.2.0
Timeline.cpp
/**
 * @file
 * @brief Source file for Timeline class
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @section LICENSE
 *
 * Copyright (c) 2008-2014 OpenShot Studios, LLC
 * <http://www.openshotstudios.com/>. This file is part of
 * OpenShot Library (libopenshot), an open-source project dedicated to
 * delivering high quality video editing and animation solutions to the
 * world. For more information visit <http://www.openshot.org/>.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */

#include "../include/Timeline.h"

using namespace openshot;

// Default Constructor for the timeline (which sets the canvas width and height)
Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
    is_open(false), auto_map_clips(true)
{
    // Create CrashHandler and attach it (in case of errors)
    CrashHandler::Instance();

    // Init viewport size (curve based, because it can be animated)
    viewport_scale = Keyframe(100.0);
    viewport_x = Keyframe(0.0);
    viewport_y = Keyframe(0.0);

    // Init background color
    color.red = Keyframe(0.0);
    color.green = Keyframe(0.0);
    color.blue = Keyframe(0.0);

    // Init FileInfo struct (clear all values)
    info.width = width;
    info.height = height;
    info.fps = fps;
    info.sample_rate = sample_rate;
    info.channels = channels;
    info.channel_layout = channel_layout;
    info.video_timebase = fps.Reciprocal();
    info.duration = 60 * 30; // 30 minute default duration
    info.has_audio = true;
    info.has_video = true;
    info.video_length = info.fps.ToFloat() * info.duration;

    // Init max image size
    SetMaxSize(info.width, info.height);

    // Init cache
    final_cache = new CacheMemory();
    final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
}
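
// Illustrative usage sketch (not part of the library): constructing a typical
// timeline. The parameter values are assumptions for illustration; LAYOUT_STEREO
// is openshot's ChannelLayout enum value for stereo audio.
#if 0
// A 1080p, 30 fps, 44.1 kHz stereo timeline
Timeline t(1920, 1080, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
#endif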

// Add an openshot::Clip to the timeline
void Timeline::AddClip(Clip* clip)
{
    // All clips should be converted to the frame rate of this timeline
    if (auto_map_clips)
        // Apply framemapper (or update existing framemapper)
        apply_mapper_to_clip(clip);

    // Add clip to list
    clips.push_back(clip);

    // Sort clips
    sort_clips();
}
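
// Illustrative sketch (not part of the library) of adding a clip. "video.mp4"
// is a hypothetical path; Position() and Layer() are the ClipBase setters, and
// "t" is the Timeline from the sketch above.
#if 0
Clip* c = new Clip("video.mp4");  // hypothetical media file
c->Position(2.0);                 // place 2 seconds into the timeline
c->Layer(1);                      // higher layers are composited on top
t.AddClip(c);
#endif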

// Add an effect to the timeline
void Timeline::AddEffect(EffectBase* effect)
{
    // Add effect to list
    effects.push_back(effect);

    // Sort effects
    sort_effects();
}

// Remove an effect from the timeline
void Timeline::RemoveEffect(EffectBase* effect)
{
    effects.remove(effect);
}

// Remove an openshot::Clip from the timeline
void Timeline::RemoveClip(Clip* clip)
{
    clips.remove(clip);
}

// Apply a FrameMapper to a clip which matches the settings of this timeline
void Timeline::apply_mapper_to_clip(Clip* clip)
{
    // Get lock (prevent getting frames while this happens)
    const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

    // Determine type of reader
    ReaderBase* clip_reader = NULL;
    if (clip->Reader()->Name() == "FrameMapper")
    {
        // Get the existing reader
        clip_reader = (ReaderBase*) clip->Reader();

    } else {

        // Create a new FrameMapper to wrap the current reader
        clip_reader = (ReaderBase*) new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
    }

    // Update the mapping
    FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader;
    clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);

    // Update clip reader
    clip->Reader(clip_reader);
}
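
// Hedged sketch (not library code) of what the mapping above accomplishes:
// wrapping a reader so frame requests arrive in timeline units. The argument
// values are assumptions for illustration.
#if 0
// e.g. map a 24 fps source onto a 30 fps, 44.1 kHz stereo timeline
FrameMapper mapped(clip->Reader(), Fraction(30, 1), PULLDOWN_NONE, 44100, 2, LAYOUT_STEREO);
#endif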

// Apply the timeline's framerate and samplerate to all clips
void Timeline::ApplyMapperToClips()
{
    // Clear all cached frames
    ClearAllCache();

    // Loop through all clips
    list<Clip*>::iterator clip_itr;
    for (clip_itr = clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *clip = (*clip_itr);

        // Apply framemapper (or update existing framemapper)
        apply_mapper_to_clip(clip);
    }
}

// Calculate time of a frame number, based on a framerate
double Timeline::calculate_time(int64_t number, Fraction rate)
{
    // Get float version of fps fraction
    double raw_fps = rate.ToFloat();

    // Return the time (in seconds) of this frame
    return double(number - 1) / raw_fps;
}
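
// Worked example: at 30 fps, frame 1 maps to (1 - 1) / 30 = 0.0 s,
// and frame 31 maps to (31 - 1) / 30 = 1.0 s.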

// Apply effects to the source frame (if any)
std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer)
{
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects", "frame->number", frame->number, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1, "", -1);

    // Find Effects at this position and layer
    list<EffectBase*>::iterator effect_itr;
    for (effect_itr = effects.begin(); effect_itr != effects.end(); ++effect_itr)
    {
        // Get effect object from the iterator
        EffectBase *effect = (*effect_itr);

        // Does the effect intersect the current requested time?
        long effect_start_position = round(effect->Position() * info.fps.ToDouble()) + 1;
        long effect_end_position = round((effect->Position() + (effect->Duration())) * info.fps.ToDouble()) + 1;

        bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Does effect intersect)", "effect->Position()", effect->Position(), "does_effect_intersect", does_effect_intersect, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1);

        // Effect is visible
        if (does_effect_intersect)
        {
            // Determine the frame needed for this effect (based on the position on the timeline)
            long effect_start_frame = (effect->Start() * info.fps.ToDouble()) + 1;
            long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Process Effect)", "effect_frame_number", effect_frame_number, "does_effect_intersect", does_effect_intersect, "", -1, "", -1, "", -1, "", -1);

            // Apply the effect to this frame
            frame = effect->GetFrame(frame, effect_frame_number);
        }

    } // end effect loop

    // Return modified frame
    return frame;
}
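
// Worked example of the intersection math above: at 25 fps, an effect with
// Position() = 2.0 and Duration() = 4.0 spans timeline frames
// round(2.0 * 25) + 1 = 51 through round(6.0 * 25) + 1 = 151, inclusive.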

// Get or generate a blank frame
std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
{
    std::shared_ptr<Frame> new_frame;

    // Init some basic properties about this frame
    int samples_in_frame = Frame::GetSamplesPerFrame(number, info.fps, info.sample_rate, info.channels);

    try {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

        // Set max image size (used for performance optimization)
        clip->SetMaxSize(info.width, info.height);

        // Attempt to get a frame (but this could fail if a reader has just been closed)
        #pragma omp critical (T_GetOrCreateFrame)
        new_frame = std::shared_ptr<Frame>(clip->GetFrame(number));

        // Return real frame
        return new_frame;

    } catch (const ReaderClosed & e) {
        // Ignore and fall through (a blank frame is created below)
    } catch (const TooManySeeks & e) {
        // Ignore and fall through
    } catch (const OutOfBoundsFrame & e) {
        // Ignore and fall through
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

    // Create blank frame
    new_frame = std::make_shared<Frame>(number, max_width, max_height, "#000000", samples_in_frame, info.channels);
    #pragma omp critical (T_GetOrCreateFrame)
    {
        new_frame->SampleRate(info.sample_rate);
        new_frame->ChannelsLayout(info.channel_layout);
    }
    return new_frame;
}

// Process a new layer of video or audio
void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume)
{
    // Get the clip's frame & image
    std::shared_ptr<Frame> source_frame;
    #pragma omp critical (T_addLayer)
    source_frame = GetOrCreateFrame(source_clip, clip_frame_number);

    // No frame found... so bail
    if (!source_frame)
        return;

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1, "", -1, "", -1);

    /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
    if (source_clip->Waveform())
    {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

        // Get the color of the waveform
        int red = source_clip->wave_color.red.GetInt(clip_frame_number);
        int green = source_clip->wave_color.green.GetInt(clip_frame_number);
        int blue = source_clip->wave_color.blue.GetInt(clip_frame_number);
        int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number);

        // Generate Waveform Dynamically (the size of the timeline)
        std::shared_ptr<QImage> source_image;
        #pragma omp critical (T_addLayer)
        source_image = source_frame->GetWaveform(max_width, max_height, red, green, blue, alpha);
        source_frame->AddImage(std::shared_ptr<QImage>(source_image));
    }

    /* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
     * effects on the top clip. */
    if (is_top_clip && source_frame)
        source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());

    // Declare an image to hold the source frame's image
    std::shared_ptr<QImage> source_image;

    /* COPY AUDIO - with correct volume */
    if (source_clip->Reader()->info.has_audio) {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

        if (source_frame->GetAudioChannelsCount() == info.channels && source_clip->has_audio.GetInt(clip_frame_number) != 0)
            for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
            {
                // Get volume from previous frame and this frame
                float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1);
                float volume = source_clip->volume.GetValue(clip_frame_number);
                int channel_filter = source_clip->channel_filter.GetInt(clip_frame_number); // optional channel to filter (if not -1)
                int channel_mapping = source_clip->channel_mapping.GetInt(clip_frame_number); // optional channel to map this channel to (if not -1)

                // Apply volume mixing strategy
                if (source_clip->mixing == VOLUME_MIX_AVERAGE && max_volume > 1.0) {
                    // Don't allow this clip to exceed 100% (divide the volume evenly between overlapping clips, so the sum does not exceed 100%)
                    previous_volume = previous_volume / max_volume;
                    volume = volume / max_volume;
                }
                else if (source_clip->mixing == VOLUME_MIX_REDUCE && max_volume > 1.0) {
                    // Reduce clip volume by a bit, hoping it will prevent exceeding 100% (but it is very possible it will)
                    previous_volume = previous_volume * 0.77;
                    volume = volume * 0.77;
                }

                // If channel filter enabled, check for correct channel (and skip non-matching channels)
                if (channel_filter != -1 && channel_filter != channel)
                    continue; // skip to next channel

                // If no volume on this frame or previous frame, do nothing
                if (previous_volume == 0.0 && volume == 0.0)
                    continue; // skip to next channel

                // If channel mapping disabled, just use the current channel
                if (channel_mapping == -1)
                    channel_mapping = channel;

                // Apply ramp to source frame (if needed)
                if (!isEqual(previous_volume, 1.0) || !isEqual(volume, 1.0))
                    source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);

                // TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
                // Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
                // number of samples returned is variable... and does not match the number expected.
                // This is a crude solution at best. =)
                if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
                    // Force timeline frame to match the source frame
                    #pragma omp critical (T_addLayer)
                    new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);

                // Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are
                // added together, so be sure to set the gains correctly (if the sum exceeds 1.0, audio distortion will happen).
                #pragma omp critical (T_addLayer)
                new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);

            }
        else
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

    }

    // Skip out if only an audio frame
    if (!source_clip->Waveform() && !source_clip->Reader()->info.has_video)
        // Skip the rest of the image processing for performance reasons
        return;

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

    // Get actual frame image data
    source_image = source_frame->GetImage();

    /* ALPHA & OPACITY */
    if (source_clip->alpha.GetValue(clip_frame_number) != 1.0)
    {
        float alpha = source_clip->alpha.GetValue(clip_frame_number);

        // Get source image's pixels
        unsigned char *pixels = (unsigned char *) source_image->bits();

        // Loop through pixels
        for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
        {
            // Scale the pixel's alpha byte (RGBA layout)
            pixels[byte_index + 3] *= alpha;
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
    }

    /* RESIZE SOURCE IMAGE - based on scale type */
    QSize source_size = source_image->size();
    switch (source_clip->scale)
    {
        case (SCALE_FIT):
            // keep aspect ratio
            source_size.scale(max_width, max_height, Qt::KeepAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
            break;

        case (SCALE_STRETCH):
            // ignore aspect ratio
            source_size.scale(max_width, max_height, Qt::IgnoreAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
            break;

        case (SCALE_CROP):
            QSize width_size(max_width, round(max_width / (float(source_size.width()) / float(source_size.height()))));
            QSize height_size(round(max_height / (float(source_size.height()) / float(source_size.width()))), max_height);

            // respect aspect ratio
            if (width_size.width() >= max_width && width_size.height() >= max_height)
                source_size.scale(width_size.width(), width_size.height(), Qt::KeepAspectRatio);
            else
                source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
            break;
    }

    /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
    float x = 0.0; // left
    float y = 0.0; // top

    // Adjust size for scale x and scale y
    float sx = source_clip->scale_x.GetValue(clip_frame_number); // percentage X scale
    float sy = source_clip->scale_y.GetValue(clip_frame_number); // percentage Y scale
    float scaled_source_width = source_size.width() * sx;
    float scaled_source_height = source_size.height() * sy;

    switch (source_clip->gravity)
    {
        case (GRAVITY_TOP):
            x = (max_width - scaled_source_width) / 2.0; // center
            break;
        case (GRAVITY_TOP_RIGHT):
            x = max_width - scaled_source_width; // right
            break;
        case (GRAVITY_LEFT):
            y = (max_height - scaled_source_height) / 2.0; // center
            break;
        case (GRAVITY_CENTER):
            x = (max_width - scaled_source_width) / 2.0; // center
            y = (max_height - scaled_source_height) / 2.0; // center
            break;
        case (GRAVITY_RIGHT):
            x = max_width - scaled_source_width; // right
            y = (max_height - scaled_source_height) / 2.0; // center
            break;
        case (GRAVITY_BOTTOM_LEFT):
            y = (max_height - scaled_source_height); // bottom
            break;
        case (GRAVITY_BOTTOM):
            x = (max_width - scaled_source_width) / 2.0; // center
            y = (max_height - scaled_source_height); // bottom
            break;
        case (GRAVITY_BOTTOM_RIGHT):
            x = max_width - scaled_source_width; // right
            y = (max_height - scaled_source_height); // bottom
            break;
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "scaled_source_width", scaled_source_width, "info.height", info.height, "scaled_source_height", scaled_source_height);

    /* LOCATION, ROTATION, AND SCALE */
    float r = source_clip->rotation.GetValue(clip_frame_number); // rotation in degrees
    x += (max_width * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width
    y += (max_height * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height
    float shear_x = source_clip->shear_x.GetValue(clip_frame_number);
    float shear_y = source_clip->shear_y.GetValue(clip_frame_number);

    bool transformed = false;
    QTransform transform;

    // Transform source image (if needed)
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Build QTransform - if needed)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);

    if (!isEqual(r, 0)) {
        // ROTATE CLIP (around its center point)
        float origin_x = x + (scaled_source_width / 2.0);
        float origin_y = y + (scaled_source_height / 2.0);
        transform.translate(origin_x, origin_y);
        transform.rotate(r);
        transform.translate(-origin_x, -origin_y);
        transformed = true;
    }

    if (!isEqual(x, 0) || !isEqual(y, 0)) {
        // TRANSLATE/MOVE CLIP
        transform.translate(x, y);
        transformed = true;
    }

    // SCALE CLIP (if needed)
    float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
    float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;

    if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
        transform.scale(source_width_scale, source_height_scale);
        transformed = true;
    }

    if (!isEqual(shear_x, 0) || !isEqual(shear_y, 0)) {
        // SHEAR HEIGHT/WIDTH
        transform.shear(shear_x, shear_y);
        transformed = true;
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1, "", -1, "", -1);

    /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
    std::shared_ptr<QImage> new_image;
    #pragma omp critical (T_addLayer)
    new_image = new_frame->GetImage();

    // Load timeline's new frame image into a QPainter
    QPainter painter(new_image.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    // Apply transform (translate, rotate, scale)... if any
    if (transformed)
        painter.setTransform(transform);

    // Composite a new layer onto the image
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image);

    // Draw frame #'s on top of image (if needed)
    if (source_clip->display != FRAME_DISPLAY_NONE) {
        stringstream frame_number_str;
        switch (source_clip->display)
        {
            case (FRAME_DISPLAY_CLIP):
                frame_number_str << clip_frame_number;
                break;

            case (FRAME_DISPLAY_TIMELINE):
                frame_number_str << timeline_frame_number;
                break;

            case (FRAME_DISPLAY_BOTH):
                frame_number_str << timeline_frame_number << " (" << clip_frame_number << ")";
                break;
        }

        // Draw frame number on top of image
        painter.setPen(QColor("#ffffff"));
        painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
    }

    painter.end();

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1, "", -1, "", -1);
}
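
// Worked example of the resize/gravity math above: a 640x480 source on a
// 1920x1080 canvas with SCALE_FIT scales to 1440x1080 (aspect ratio kept);
// with GRAVITY_CENTER and sx = sy = 1.0, x = (1920 - 1440) / 2 = 240 and y = 0.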

// Update the list of 'opened' clips
void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
{
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (before)", "does_clip_intersect", does_clip_intersect, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1, "", -1);

    // is clip already in list?
    bool clip_found = open_clips.count(clip);

    if (clip_found && !does_clip_intersect)
    {
        // Remove clip from 'opened' list, because it's closed now
        open_clips.erase(clip);

        // Close clip
        clip->Close();
    }
    else if (!clip_found && does_clip_intersect)
    {
        // Add clip to 'opened' list, because it's missing
        open_clips[clip] = clip;

        // Open the clip
        clip->Open();
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (after)", "does_clip_intersect", does_clip_intersect, "clip_found", clip_found, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1);
}

// Sort clips by position on the timeline
void Timeline::sort_clips()
{
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::SortClips", "clips.size()", clips.size(), "", -1, "", -1, "", -1, "", -1, "", -1);

    // sort clips
    clips.sort(CompareClips());
}

// Sort effects by position on the timeline
void Timeline::sort_effects()
{
    // sort effects
    effects.sort(CompareEffects());
}

// Close the reader (and any resources it was consuming)
void Timeline::Close()
{
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);

    // Close all open clips
    list<Clip*>::iterator clip_itr;
    for (clip_itr = clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *clip = (*clip_itr);

        // Close this clip (no frames will be requested while the timeline is closed)
        update_open_clips(clip, false);
    }

    // Mark timeline as closed
    is_open = false;

    // Clear cache
    final_cache->Clear();
}

// Open the reader (and start consuming resources)
void Timeline::Open()
{
    is_open = true;
}

// Compare 2 floating point numbers for equality
bool Timeline::isEqual(double a, double b)
{
    return fabs(a - b) < 0.000001;
}

// Get an openshot::Frame object for a specific frame number of this timeline
std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
{
    // Adjust out of bounds frame number
    if (requested_frame < 1)
        requested_frame = 1;

    // Check cache
    std::shared_ptr<Frame> frame;
    #pragma omp critical (T_GetFrame)
    frame = final_cache->GetFrame(requested_frame);
    if (frame) {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);

        // Return cached frame
        return frame;
    }
    else
    {
        // Create a scoped lock, allowing only a single thread to run the following code at one time
        const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

        // Check for open reader (or throw exception)
        if (!is_open)
            throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.", "");

        // Check cache again (due to locking)
        #pragma omp critical (T_GetFrame)
        frame = final_cache->GetFrame(requested_frame);
        if (frame) {
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found on 2nd look)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);

            // Return cached frame
            return frame;
        }

        // Minimum number of frames to process (for performance reasons)
        int minimum_frames = OPEN_MP_NUM_PROCESSORS;

        // Get a list of clips that intersect with the requested section of timeline
        // This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
        vector<Clip*> nearby_clips;
        #pragma omp critical (T_GetFrame)
        nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);

        omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
        // Allow nested OpenMP sections
        omp_set_nested(true);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS, "", -1, "", -1, "", -1);

        // GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
        // Determine all clip frames, and request them in order (to keep resampled audio in sequence)
        for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
        {
            // Loop through clips
            for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
            {
                // Get clip object from the list
                Clip *clip = nearby_clips[clip_index];
                long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
                long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

                bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
                if (does_clip_intersect)
                {
                    // Get clip frame #
                    long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
                    long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
                    // Cache clip object
                    clip->GetFrame(clip_frame_number);
                }
            }
        }

        #pragma omp parallel
        {
            // Loop through all requested frames
            #pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames)
            for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
            {
                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);

                // Init some basic properties about this frame
                int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels);

                // Create blank frame (which will become the requested frame)
                std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(frame_number, max_width, max_height, "#000000", samples_in_frame, info.channels));
                #pragma omp critical (T_GetFrame)
                {
                    new_frame->AddAudioSilence(samples_in_frame);
                    new_frame->SampleRate(info.sample_rate);
                    new_frame->ChannelsLayout(info.channel_layout);
                }

                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);

                // Add Background Color to 1st layer (if animated or not black)
                if ((color.red.Points.size() > 1 || color.green.Points.size() > 1 || color.blue.Points.size() > 1) ||
                    (color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0))
                    new_frame->AddColor(max_width, max_height, color.GetColorHex(frame_number));

                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size(), "", -1, "", -1, "", -1);

                // Find Clips near this time
                for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
                {
                    // Get clip object from the list
                    Clip *clip = nearby_clips[clip_index];
                    long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
                    long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

                    bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);

                    // Debug output
                    ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect, "", -1, "", -1);

                    // Clip is visible
                    if (does_clip_intersect)
                    {
                        // Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
                        bool is_top_clip = true;
                        float max_volume = 0.0;
                        for (int top_clip_index = 0; top_clip_index < nearby_clips.size(); top_clip_index++)
                        {
                            Clip *nearby_clip = nearby_clips[top_clip_index];
                            long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
                            long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
                            long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
                            long nearby_clip_frame_number = frame_number - nearby_clip_start_position + nearby_clip_start_frame;

                            // Determine if top clip
                            if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
                                nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number &&
                                nearby_clip_start_position > clip_start_position && is_top_clip == true) {
                                is_top_clip = false;
                            }

                            // Determine max volume of overlapping clips
                            if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
                                nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
                                nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number) {
                                max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
                            }
                        }

                        // Determine the frame needed for this clip (based on the position on the timeline)
                        long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
                        long clip_frame_number = frame_number - clip_start_position + clip_start_frame;

                        // Debug output
                        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number, "", -1, "", -1);

                        // Add clip's frame as layer
                        add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip, max_volume);

                    } else
                        // Debug output
                        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "does_clip_intersect", does_clip_intersect, "", -1, "", -1, "", -1, "", -1);

                } // end clip loop

                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);

                // Set frame # on mapped frame
                #pragma omp ordered
                {
                    new_frame->SetFrameNumber(frame_number);

                    // Add final frame to cache
                    final_cache->Add(new_frame);
                }

            } // end frame loop
        } // end parallel

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);

        // Return frame (or blank frame)
        return final_cache->GetFrame(requested_frame);
    }
}
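
// Illustrative usage sketch (not part of the library) of the open/request/close
// cycle this method expects; GetFrame() throws ReaderClosed unless Open() was
// called first. "t" is the Timeline from the earlier sketches.
#if 0
t.Open();
for (int64_t n = 1; n <= 100; ++n) {
    std::shared_ptr<Frame> f = t.GetFrame(n);  // frames are cached in batches of OPEN_MP_NUM_PROCESSORS
    // ... display or encode f ...
}
t.Close();
#endif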


// Find intersecting clips (or non-intersecting clips)
vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include)
{
    // Find matching clips
    vector<Clip*> matching_clips;

    // Calculate the min and max requested frame numbers
    float min_requested_frame = requested_frame;
    float max_requested_frame = requested_frame + (number_of_frames - 1);

    // Re-Sort Clips (since they likely changed)
    sort_clips();

    // Find Clips at this time
    list<Clip*>::iterator clip_itr;
    for (clip_itr = clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *clip = (*clip_itr);

        // Does clip intersect the current requested time?
        long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
        long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

        bool does_clip_intersect =
            (clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
            (clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_frame", min_requested_frame, "max_requested_frame", max_requested_frame, "clip->Position()", clip->Position(), "does_clip_intersect", does_clip_intersect, "", -1);

        // Open (or schedule for closing) this clip, based on if it's intersecting or not
        #pragma omp critical (reader_lock)
        update_open_clips(clip, does_clip_intersect);

        // Clip is visible
        if (does_clip_intersect && include)
            // Add the intersecting clip
            matching_clips.push_back(clip);

        else if (!does_clip_intersect && !include)
            // Add the non-intersecting clip
            matching_clips.push_back(clip);

    } // end clip loop

    // return list
    return matching_clips;
}

// Set the cache object used by this timeline
void Timeline::SetCache(CacheBase* new_cache) {
    // Set new cache
    final_cache = new_cache;
}
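
// Illustrative sketch (not library code) of swapping in a larger cache. The
// byte-limit constructor on CacheMemory is an assumption and should be checked
// against the installed headers before use.
#if 0
CacheMemory* big_cache = new CacheMemory(512 * 1024 * 1024);  // ~512 MB limit (assumed ctor)
t.SetCache(big_cache);  // note: SetCache() does not free the previous cache
#endif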

// Generate JSON string of this object
string Timeline::Json() {

    // Return formatted string
    return JsonValue().toStyledString();
}

// Generate Json::JsonValue for this object
Json::Value Timeline::JsonValue() {

    // Create root json object
    Json::Value root = ReaderBase::JsonValue(); // get parent properties
    root["type"] = "Timeline";
    root["viewport_scale"] = viewport_scale.JsonValue();
    root["viewport_x"] = viewport_x.JsonValue();
    root["viewport_y"] = viewport_y.JsonValue();
    root["color"] = color.JsonValue();

    // Add array of clips
    root["clips"] = Json::Value(Json::arrayValue);

    // Loop through clips
    list<Clip*>::iterator clip_itr;
    for (clip_itr = clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *existing_clip = (*clip_itr);
        root["clips"].append(existing_clip->JsonValue());
    }

    // Add array of effects
    root["effects"] = Json::Value(Json::arrayValue);

    // Loop through effects
    list<EffectBase*>::iterator effect_itr;
    for (effect_itr = effects.begin(); effect_itr != effects.end(); ++effect_itr)
    {
        // Get effect object from the iterator
        EffectBase *existing_effect = (*effect_itr);
        root["effects"].append(existing_effect->JsonValue());
    }

    // return JsonValue
    return root;
}

// Load JSON string into this object
void Timeline::SetJson(string value) {

    // Get lock (prevent getting frames while this happens)
    const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

    // Parse JSON string into JSON objects
    Json::Value root;
    Json::Reader reader;
    bool success = reader.parse( value, root );
    if (!success)
        // Raise exception
        throw InvalidJSON("JSON could not be parsed (or is invalid)", "");

    try
    {
        // Set all values that match
        SetJsonValue(root);
    }
    catch (const exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
    }
}
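
// Minimal, hedged example document (not from the library docs); the keys
// mirror what JsonValue() above serializes.
#if 0
t.SetJson("{ \"duration\": 300.0, \"clips\": [], \"effects\": [] }");
#endif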

// Load Json::JsonValue into this object
void Timeline::SetJsonValue(Json::Value root) {

    // Close timeline before we do anything (this also removes all open and closing clips)
    bool was_open = is_open;
    Close();

    // Set parent data
    ReaderBase::SetJsonValue(root);

    if (!root["clips"].isNull()) {
        // Clear existing clips
        clips.clear();

        // Loop through clips
        for (int x = 0; x < root["clips"].size(); x++) {
            // Get each clip
            Json::Value existing_clip = root["clips"][x];

            // Create Clip
            Clip *c = new Clip();

            // Load Json into Clip
            c->SetJsonValue(existing_clip);

            // Add Clip to Timeline
            AddClip(c);
        }
    }

    if (!root["effects"].isNull()) {
        // Clear existing effects
        effects.clear();

        // Loop through effects
        for (int x = 0; x < root["effects"].size(); x++) {
            // Get each effect
            Json::Value existing_effect = root["effects"][x];

            // Create Effect
            EffectBase *e = NULL;

            if (!existing_effect["type"].isNull()) {
                // Create instance of effect
                e = EffectInfo().CreateEffect(existing_effect["type"].asString());

                // Load Json into Effect
                e->SetJsonValue(existing_effect);

                // Add Effect to Timeline
                AddEffect(e);
            }
        }
    }

    if (!root["duration"].isNull()) {
        // Update duration of timeline
        info.duration = root["duration"].asDouble();
        info.video_length = info.fps.ToFloat() * info.duration;
    }

    // Re-open if needed
    if (was_open)
        Open();
}

// Apply a special formatted JSON object, which represents a change to the timeline (insert, update, delete)
void Timeline::ApplyJsonDiff(string value) {

    // Get lock (prevent getting frames while this happens)
    const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

    // Parse JSON string into JSON objects
    Json::Value root;
    Json::Reader reader;
    bool success = reader.parse( value, root );
    if (!success || !root.isArray())
        // Raise exception
        throw InvalidJSON("JSON could not be parsed (or is invalid).", "");

    try
    {
        // Process the JSON change array, loop through each item
        for (int x = 0; x < root.size(); x++) {
            // Get each change
            Json::Value change = root[x];
            string root_key = change["key"][(uint)0].asString();

            // Process each type of change
            if (root_key == "clips")
                // Apply to CLIPS
                apply_json_to_clips(change);

            else if (root_key == "effects")
                // Apply to EFFECTS
                apply_json_to_effects(change);

            else
                // Apply to TIMELINE
                apply_json_to_timeline(change);

        }
    }
    catch (const exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
    }
}
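
// Illustrative diff (hypothetical id "c1", not from the docs). Each change
// object carries "type", a "key" path, and a "value", matching the parsing
// in apply_json_to_clips() below.
#if 0
t.ApplyJsonDiff(
    "[{ \"type\": \"update\","
    "   \"key\": [\"clips\", { \"id\": \"c1\" }],"
    "   \"value\": { \"position\": 5.0, \"start\": 0.0, \"end\": 10.0 } }]");
#endif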

// Apply JSON diff to clips
void Timeline::apply_json_to_clips(Json::Value change) {

    // Get key and type of change
    string change_type = change["type"].asString();
    string clip_id = "";
    Clip *existing_clip = NULL;

    // Find id of clip (if any)
    for (int x = 0; x < change["key"].size(); x++) {
        // Get each change
        Json::Value key_part = change["key"][x];

        if (key_part.isObject()) {
            // Check for id
            if (!key_part["id"].isNull()) {
                // Set the id
                clip_id = key_part["id"].asString();

                // Find matching clip in timeline (if any)
                list<Clip*>::iterator clip_itr;
                for (clip_itr = clips.begin(); clip_itr != clips.end(); ++clip_itr)
                {
                    // Get clip object from the iterator
                    Clip *c = (*clip_itr);
                    if (c->Id() == clip_id) {
                        existing_clip = c;
                        break; // clip found, exit loop
                    }
                }
                break; // id found, exit loop
            }
        }
    }

    // Check for a more specific key (targeting this clip's effects)
    // For example: ["clips", {"id": "123"}, "effects", {"id": "432"}]
    if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
    {
        // This change is actually targeting a specific effect under a clip (and not the clip itself)
        Json::Value key_part = change["key"][3];

        if (key_part.isObject()) {
            // Check for id
            if (!key_part["id"].isNull())
            {
                // Set the id
                string effect_id = key_part["id"].asString();

                // Find matching effect in timeline (if any)
                list<EffectBase*> effect_list = existing_clip->Effects();
                list<EffectBase*>::iterator effect_itr;
                for (effect_itr = effect_list.begin(); effect_itr != effect_list.end(); ++effect_itr)
                {
                    // Get effect object from the iterator
                    EffectBase *e = (*effect_itr);
                    if (e->Id() == effect_id) {
                        // Apply the change to the effect directly
                        apply_json_to_effects(change, e);

                        // Calculate start and end frames that this impacts, and remove those frames from the cache
                        int64_t new_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
                        int64_t new_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
                        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);

                        return; // effect found, don't update clip
                    }
                }
            }
        }
    }

    // Calculate start and end frames that this impacts, and remove those frames from the cache
    if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
        int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
        int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
    }

    // Determine type of change operation
    if (change_type == "insert") {

        // Create new clip
        Clip *clip = new Clip();
        clip->SetJsonValue(change["value"]); // Set properties of new clip from JSON
        AddClip(clip); // Add clip to timeline

        // Apply framemapper (or update existing framemapper)
        apply_mapper_to_clip(clip);

    } else if (change_type == "update") {

        // Update existing clip
        if (existing_clip) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
            int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Remove cache on clip's Reader (if found)
            if (existing_clip->Reader() && existing_clip->Reader()->GetCache())
                existing_clip->Reader()->GetCache()->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Update clip properties from JSON
            existing_clip->SetJsonValue(change["value"]);

            // Apply framemapper (or update existing framemapper)
            apply_mapper_to_clip(existing_clip);

            // Clear any cached image sizes (since size might have changed)
            existing_clip->SetMaxSize(0, 0); // force clearing of cached image size
            if (existing_clip->Reader()) {
                existing_clip->Reader()->SetMaxSize(0, 0);
                if (existing_clip->Reader()->Name() == "FrameMapper") {
                    FrameMapper *nested_reader = (FrameMapper *) existing_clip->Reader();
                    if (nested_reader->Reader())
                        nested_reader->Reader()->SetMaxSize(0, 0);
                }
            }
        }

    } else if (change_type == "delete") {

        // Remove existing clip
        if (existing_clip) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
            int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Remove clip from timeline
            RemoveClip(existing_clip);
        }

    }

}

// Apply JSON diff to effects
void Timeline::apply_json_to_effects(Json::Value change) {

    // Get key and type of change
    string change_type = change["type"].asString();
    EffectBase *existing_effect = NULL;

    // Find id of an effect (if any)
    for (int x = 0; x < change["key"].size(); x++) {
        // Get each change
        Json::Value key_part = change["key"][x];

        if (key_part.isObject()) {
            // Check for id
            if (!key_part["id"].isNull())
            {
                // Set the id
                string effect_id = key_part["id"].asString();

                // Find matching effect in timeline (if any)
                list<EffectBase*>::iterator effect_itr;
                for (effect_itr = effects.begin(); effect_itr != effects.end(); ++effect_itr)
                {
                    // Get effect object from the iterator
                    EffectBase *e = (*effect_itr);
                    if (e->Id() == effect_id) {
                        existing_effect = e;
                        break; // effect found, exit loop
                    }
                }
                break; // id found, exit loop
            }
        }
    }

    // Now that we found the effect, apply the change to it
    if (existing_effect || change_type == "insert")
        // Apply change to effect
        apply_json_to_effects(change, existing_effect);
}

// Apply JSON diff to effects (if you already know which effect needs to be updated)
void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) {

    // Get key and type of change
    string change_type = change["type"].asString();

    // Calculate start and end frames that this impacts, and remove those frames from the cache
    if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
        int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
        int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
    }

    // Determine type of change operation
    if (change_type == "insert") {

        // Determine type of effect
        string effect_type = change["value"]["type"].asString();

        // Create Effect
        EffectBase *e = NULL;

        // Init the matching effect object
        e = EffectInfo().CreateEffect(effect_type);

        // Load Json into Effect
        e->SetJsonValue(change["value"]);

        // Add Effect to Timeline
        AddEffect(e);

    } else if (change_type == "update") {

        // Update existing effect
        if (existing_effect) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
            int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Update effect properties from JSON
            existing_effect->SetJsonValue(change["value"]);
        }

    } else if (change_type == "delete") {

        // Remove existing effect
        if (existing_effect) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
            int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Remove effect from timeline
            RemoveEffect(existing_effect);
        }

    }
}

// Apply JSON diff to timeline properties
void Timeline::apply_json_to_timeline(Json::Value change) {

    // Get key and type of change
    string change_type = change["type"].asString();
    string root_key = change["key"][(uint)0].asString();
    string sub_key = "";
    if (change["key"].size() >= 2)
        sub_key = change["key"][(uint)1].asString();

    // Clear entire cache
    final_cache->Clear();

    // Determine type of change operation
    if (change_type == "insert" || change_type == "update") {

        // INSERT / UPDATE
        // Check for valid property
        if (root_key == "color")
            // Set color
            color.SetJsonValue(change["value"]);
        else if (root_key == "viewport_scale")
            // Set viewport scale
            viewport_scale.SetJsonValue(change["value"]);
        else if (root_key == "viewport_x")
            // Set viewport x offset
            viewport_x.SetJsonValue(change["value"]);
        else if (root_key == "viewport_y")
            // Set viewport y offset
            viewport_y.SetJsonValue(change["value"]);
        else if (root_key == "duration") {
            // Update duration of timeline
            info.duration = change["value"].asDouble();
            info.video_length = info.fps.ToFloat() * info.duration;
        }
        else if (root_key == "width")
            // Set width
            info.width = change["value"].asInt();
        else if (root_key == "height")
            // Set height
            info.height = change["value"].asInt();
        else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
            // Set fps fraction
            if (!change["value"]["num"].isNull())
                info.fps.num = change["value"]["num"].asInt();
            if (!change["value"]["den"].isNull())
                info.fps.den = change["value"]["den"].asInt();
        }
        else if (root_key == "fps" && sub_key == "num")
            // Set fps.num
            info.fps.num = change["value"].asInt();
        else if (root_key == "fps" && sub_key == "den")
            // Set fps.den
            info.fps.den = change["value"].asInt();
        else if (root_key == "sample_rate")
            // Set sample rate
            info.sample_rate = change["value"].asInt();
        else if (root_key == "channels")
            // Set channels
            info.channels = change["value"].asInt();
        else if (root_key == "channel_layout")
            // Set channel layout
            info.channel_layout = (ChannelLayout) change["value"].asInt();
        else
            // Error parsing JSON (or missing keys)
            throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

    } else if (change["type"].asString() == "delete") {

        // DELETE / RESET
        // Reset the following properties (since we can't delete them)
        if (root_key == "color") {
            color = Color();
            color.red = Keyframe(0.0);
            color.green = Keyframe(0.0);
            color.blue = Keyframe(0.0);
        }
        else if (root_key == "viewport_scale")
            viewport_scale = Keyframe(1.0);
        else if (root_key == "viewport_x")
            viewport_x = Keyframe(0.0);
        else if (root_key == "viewport_y")
            viewport_y = Keyframe(0.0);
        else
            // Error parsing JSON (or missing keys)
            throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

    }

}

// Clear all caches
void Timeline::ClearAllCache() {

    // Get lock (prevent getting frames while this happens)
    const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

    // Clear primary cache
    final_cache->Clear();

    // Loop through all clips
    list<Clip*>::iterator clip_itr;
    for (clip_itr = clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *clip = (*clip_itr);

        // Clear cache on clip
        clip->Reader()->GetCache()->Clear();

        // Clear nested Reader (if any)
        if (clip->Reader()->Name() == "FrameMapper") {
            FrameMapper* nested_reader = (FrameMapper*) clip->Reader();
            if (nested_reader->Reader() && nested_reader->Reader()->GetCache())
                nested_reader->Reader()->GetCache()->Clear();
        }

    }
}
Definition: Enums.h:39
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: EffectBase.cpp:81
Align clip to the bottom left of its parent.
Definition: Enums.h:43
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Clip.cpp:841
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: KeyFrame.cpp:362
Exception for missing JSON Change key.
Definition: Exceptions.h:182
Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1) ...
Definition: Clip.h:225
Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1) ...
Definition: Clip.h:226
void SetMaxSize(int width, int height)
Set Max Image Size (used for performance optimization)
Definition: ReaderBase.h:144
bool has_audio
Determines if this file has an audio stream.
Definition: ReaderBase.h:62
This class represents a clip (used to arrange readers on the timeline)
Definition: Clip.h:109
void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout)
Change frame rate or audio mapping details.
Keyframe blue
Curve representing the red value (0 - 255)
Definition: Color.h:47
virtual std::shared_ptr< Frame > GetFrame(int64_t frame_number)=0
Get a frame from the cache.
Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
Definition: Clip.h:247
bool Waveform()
Waveform property.
Definition: Clip.h:219
void SetMaxSize(int width, int height)
Set Max Image Size (used for performance optimization)
Definition: ClipBase.h:97
int64_t video_length
The number of frames in the video stream.
Definition: ReaderBase.h:74
ScaleType scale
The scale determines how a clip should be resized to fit it's parent.
Definition: Clip.h:155
int height
The height of the video (in pixels)
Definition: ReaderBase.h:66
Align clip to the bottom center of its parent.
Definition: Enums.h:44
virtual void Remove(int64_t frame_number)=0
Remove a specific frame.
string Id()
Get basic properties.
Definition: ClipBase.h:82
Keyframe channel_filter
Audio channel filter and mappings.
Definition: Clip.h:259
void ClearAllCache()
Clear all cache for this timeline instance, and all clips, mappers, and readers under it...
Definition: Timeline.cpp:1407
float Position()
Get position on timeline (in seconds)
Definition: ClipBase.h:83
static CrashHandler * Instance()
void ApplyMapperToClips()
Apply the timeline's framerate and samplerate to all clips.
Definition: Timeline.cpp:135
void Reader(ReaderBase *new_reader)
Set the current reader.
Definition: Clip.cpp:210
list< EffectBase * > Effects()
Return the list of effects on the timeline.
Definition: Clip.h:182
void AppendDebugMethod(string method_name, string arg1_name, float arg1_value, string arg2_name, float arg2_value, string arg3_name, float arg3_value, string arg4_name, float arg4_value, string arg5_name, float arg5_value, string arg6_name, float arg6_value)
Append debug information.
Definition: ZmqLogger.cpp:162
FrameDisplayType display
The format to display the frame number (if any)
Definition: Clip.h:157
std::shared_ptr< Frame > GetFrame(int64_t requested_frame)
Definition: Timeline.cpp:640
This class represents a fraction.
Definition: Fraction.h:42
All cache managers in libopenshot are based on this CacheBase class.
Definition: CacheBase.h:45
Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel) ...
Definition: Clip.h:260
virtual void Add(std::shared_ptr< Frame > frame)=0
Add a Frame to the cache.
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround...
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:40
void AddClip(Clip *clip)
Add an openshot::Clip to the timeline.
Definition: Timeline.cpp:71
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: ReaderBase.cpp:113
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: EffectBase.cpp:121
void Close()
Close the timeline reader (and any resources it was consuming)
Definition: Timeline.cpp:605
int GetInt(int64_t index)
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:248
Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:230
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: ReaderBase.cpp:168
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:53
Display the clip's internal frame number.
Definition: Enums.h:68
vector< Point > Points
Vector of all Points.
Definition: KeyFrame.h:92
ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:112
Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:248
Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:69
Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
Definition: ReaderBase.h:76
Exception for frames that are out of bounds.
Definition: Exceptions.h:202
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
Definition: FrameMapper.h:139
void Open()
Open the internal reader.
Definition: Clip.cpp:230
This class represents a color (used on the timeline and clips)
Definition: Color.h:42
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method) ...
Definition: ZmqLogger.cpp:38
Reduce volume by about %25, and then mix (louder, but could cause pops if the sum exceeds 100%) ...
Definition: Enums.h:78
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:41
void Open()
Open the reader (and start consuming resources)
Definition: Timeline.cpp:628
void ApplyJsonDiff(string value)
Apply a special formatted JSON object, which represents a change to the timeline (add, update, delete) This is primarily designed to keep the timeline (and its child objects... such as clips and effects) in sync with another application... such as OpenShot Video Editor (http://www.openshot.org).
Definition: Timeline.cpp:1026
double GetValue(int64_t index)
Get the value at a specific index.
Definition: KeyFrame.cpp:226
Display both the clip's and timeline's frame number.
Definition: Enums.h:70
This namespace is the default namespace for all code in the openshot library.
Do not apply pull-down techniques, just repeat or skip entire frames.
Definition: FrameMapper.h:64
virtual void Clear()=0
Clear the cache of all frames.
void RemoveClip(Clip *clip)
Remove an openshot::Clip from the timeline.
Definition: Timeline.cpp:102
void RemoveEffect(EffectBase *effect)
Remove an effect from the timeline.
Definition: Timeline.cpp:96
Exception for invalid JSON.
Definition: Exceptions.h:152
Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:229
virtual CacheBase * GetCache()=0
Get the cache object used by this reader (note: not all readers use cache)
Keyframe viewport_x
Curve representing the x coordinate for the viewport.
Definition: Timeline.h:250
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Color.cpp:129
Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:223
VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
Definition: Clip.h:158
Color color
Background color of timeline canvas.
Definition: Timeline.h:254
Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout)
Default Constructor for the timeline (which sets the canvas width and height and FPS) ...
Definition: Timeline.cpp:33
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:45
Align clip to the top center of its parent.
Definition: Enums.h:38
void SetJson(string value)
Load JSON string into this object.
Definition: Timeline.cpp:934
int den
Denominator for the fraction.
Definition: Fraction.h:45
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:82
A Keyframe is a collection of Point instances, which is used to vary a number or property over time...
Definition: KeyFrame.h:64
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:52
void AddEffect(EffectBase *effect)
Add an effect to the timeline.
Definition: Timeline.cpp:86
int max_width
The maximum image width needed by this clip (used for optimizations)
Definition: ReaderBase.h:103
int GetSamplesPerFrame(Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:519
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: Clip.cpp:760
float Duration()
Get the length of this clip (in seconds)
Definition: ClipBase.h:87
This class is a memory-based cache manager for Frame objects.
Definition: CacheMemory.h:48
float Start()
Get start position (in seconds) of clip (trim start of video)
Definition: ClipBase.h:85
double ToDouble()
Return this fraction as a double (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:46
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:81
Exception when too many seek attempts happen.
Definition: Exceptions.h:254