#include "../include/Timeline.h"

using namespace std;
using namespace openshot;

// Default constructor for the timeline (sets the canvas size, frame rate, and audio settings)
Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
    is_open(false), auto_map_clips(true)
{
    // ... (member initialization elided)
}
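// Typical usage of this class (illustrative sketch, not part of this file;
// `my_clip` is assumed to be an existing openshot::Clip):
//
//   Timeline t(1280, 720, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
//   t.AddClip(&my_clip);
//   t.Open();
//   std::shared_ptr<Frame> f = t.GetFrame(1);  // composite frame 1 of all layers
//   t.Close();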
// Add an openshot::Clip to the timeline
void Timeline::AddClip(Clip* clip)
{
    // ... (when auto_map_clips is enabled:)
    apply_mapper_to_clip(clip);

    // Add the clip to the list of clips
    clips.push_back(clip);

    // ... (re-sort the clips by layer and position)
}
// Add an effect to the timeline
void Timeline::AddEffect(EffectBase* effect)
{
    // Add the effect to the list, then re-sort the effects
    effects.push_back(effect);
    // ...
}

// Remove an effect from the timeline
void Timeline::RemoveEffect(EffectBase* effect)
{
    effects.remove(effect);
}
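// A note on frame mapping: every clip is wrapped in a FrameMapper so that it
// delivers frames at this timeline's frame rate, sample rate, channel count,
// and channel layout, regardless of the source file's native settings.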
// Apply a FrameMapper to a clip (matching the settings of this timeline)
void Timeline::apply_mapper_to_clip(Clip* clip)
{
    // Determine the type of the clip's current reader
    ReaderBase* clip_reader = NULL;
    if (clip->Reader()->Name() == "FrameMapper")
    {
        // Re-use the existing FrameMapper
        clip_reader = (ReaderBase*) clip->Reader();
    }
    else
    {
        // ... (create a new FrameMapper wrapping the clip's reader, targeting the
        //      timeline's fps, sample rate, channels, and channel layout)
    }

    // ... (update the mapping via ChangeMapping, in case the timeline settings changed)

    // Point the clip at the mapped reader
    clip->Reader(clip_reader);
}
// Apply the timeline's frame rate and sample rate to all clips
void Timeline::ApplyMapperToClips()
{
    // Loop through all clips, updating (or adding) their FrameMapper
    list<Clip*>::iterator clip_itr;
    for (clip_itr = clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *clip = (*clip_itr);

        // Apply the FrameMapper (or update the existing one)
        apply_mapper_to_clip(clip);
    }
}
// Calculate the time (in seconds) of a frame number, for a given frame rate
double Timeline::calculate_time(int64_t number, Fraction rate)
{
    // Get the frame rate as a float
    double raw_fps = rate.ToFloat();

    // Frame 1 starts at time 0.0
    return double(number - 1) / raw_fps;
}
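// For example: at 30000/1001 fps (~29.97), frame 31 maps to (31 - 1) / 29.97003,
// which is exactly 1.001 seconds (and frame 1 maps to 0.0).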
// Apply the effects on a given layer to a frame
std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer)
{
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects", "frame->number", frame->number, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1, "", -1);
    // Find all effects that intersect this frame (and layer)
    list<EffectBase*>::iterator effect_itr;
    for (effect_itr = effects.begin(); effect_itr != effects.end(); ++effect_itr)
    {
        // Get effect object from the iterator
        EffectBase *effect = (*effect_itr);
        // ...

        // Does the effect intersect the requested frame (on the requested layer)?
        bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Does effect intersect)", "effect->Position()", effect->Position(), "does_effect_intersect", does_effect_intersect, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1);
        // The effect intersects this frame: apply it
        if (does_effect_intersect)
        {
            // Determine the effect's own frame number (based on its position on the timeline)
            // ... (effect_start_frame = the effect's Start() converted to a frame number)
            long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Process Effect)", "effect_frame_number", effect_frame_number, "does_effect_intersect", does_effect_intersect, "", -1, "", -1, "", -1, "", -1);

            // Apply the effect, replacing the frame with the modified one
            frame = effect->GetFrame(frame, effect_frame_number);
        }

    } // end effect loop

    // Return the frame (modified by any intersecting effects)
    return frame;
}
// Get a frame from the clip's reader (or create a blank frame, if that fails)
std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
{
    std::shared_ptr<Frame> new_frame;

    // ... (samples_in_frame = the number of audio samples needed for this frame number)

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

    // Attempt to get the frame from the clip, inside a named critical section
    // (clip readers are not thread-safe)
    #pragma omp critical (T_GetOrCreateFrame)
    new_frame = std::shared_ptr<Frame>(clip->GetFrame(number));
    // ... (on success this frame is returned; a failure falls through to the blank frame below)
    // The clip did not deliver a frame: debug output, then create a blank frame
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

    #pragma omp critical (T_GetOrCreateFrame)
    // ... (construct an empty Frame, fill it with silent audio, and return it)
}
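// add_layer() composites a single clip onto the timeline frame: it mixes the
// clip's audio into the frame (applying volume and the chosen mixing strategy),
// then scales, positions, transforms, and paints the clip's image on top of the
// timeline image with QPainter.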
void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume)
{
    // Get the clip's frame (or a blank frame if the reader fails)
    std::shared_ptr<Frame> source_frame;
    #pragma omp critical (T_addLayer)
    source_frame = GetOrCreateFrame(source_clip, clip_frame_number);

    // ... (return early if no frame could be created)

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1, "", -1, "", -1);
    // Generate a waveform image (if the clip is in waveform mode)
    if (source_clip->Waveform())
    {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

        // ... (red / green / blue / alpha = the clip's wave_color keyframe values at this frame)

        // Draw the waveform at the timeline's canvas size, and attach it to the frame
        std::shared_ptr<QImage> source_image;
        #pragma omp critical (T_addLayer)
        source_image = source_frame->GetWaveform(max_width, max_height, red, green, blue, alpha);
        source_frame->AddImage(std::shared_ptr<QImage>(source_image));
    }
    // Apply the layer's effects to the frame (only once, via the top clip)
    if (is_top_clip && source_frame)
        source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());
    // Declare an image to hold the source frame's image
    std::shared_ptr<QImage> source_image;

    // COPY AUDIO (with correct volume)
    if (source_clip->Reader()->info.has_audio) {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

        // Only copy audio if the channel counts match the timeline's
        if (source_frame->GetAudioChannelsCount() == info.channels)
            // Loop through each channel of audio
            for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
            {
                // Get the clip's volume at the previous and current frame, plus its
                // channel filter/mapping keyframe values
                float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1);
                float volume = source_clip->volume.GetValue(clip_frame_number);
                int channel_filter = source_clip->channel_filter.GetInt(clip_frame_number);
                int channel_mapping = source_clip->channel_mapping.GetInt(clip_frame_number);

                // Apply the clip's mixing strategy (branch depends on source_clip->mixing):
                // ...
                // VOLUME_MIX_AVERAGE: divide by the summed volume of all overlapping clips
                previous_volume = previous_volume / max_volume;
                volume = volume / max_volume;
                // ...
                // VOLUME_MIX_REDUCE: reduce the volume (x 0.77) before mixing
                previous_volume = previous_volume * 0.77;
                volume = volume * 0.77;
                // ...

                // Skip channels that are filtered out
                if (channel_filter != -1 && channel_filter != channel)
                    continue;

                // Skip channels that are muted for the whole frame
                if (previous_volume == 0.0 && volume == 0.0)
                    continue;

                // By default, map the channel to itself
                if (channel_mapping == -1)
                    channel_mapping = channel;

                // Ramp the gain from the previous frame's volume to this frame's volume
                if (!isEqual(previous_volume, 1.0) || !isEqual(volume, 1.0))
                    source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);
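                // Note: ramping between the two volume values (instead of applying a
                // single gain) prevents audible clicks and pops when a volume keyframe
                // changes from one frame to the next.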
                // If the sample counts differ, resize the timeline frame's audio buffer first
                if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
                {
                    #pragma omp critical (T_addLayer)
                    // ... (resize new_frame's audio buffer to match source_frame)
                }

                // Mix the samples into the timeline frame (replace=false keeps audio
                // already mixed from lower layers; gain 1.0, since the volume ramp
                // was already applied above)
                #pragma omp critical (T_addLayer)
                new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);

            } // end channel loop
        }
        else
            // Debug output (the channel counts don't match; no audio copied)
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);
    }
    // Skip the rest (image compositing) for audio-only clips with no waveform
    if (!source_clip->Waveform() && !source_clip->Reader()->info.has_video)
        return;

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

    // Get the actual frame image data
    source_image = source_frame->GetImage();
    // ALPHA & OPACITY (only applied when the clip's alpha keyframe is not 1.0)
    float alpha = source_clip->alpha.GetValue(clip_frame_number);

    // Get the source image's raw pixels
    unsigned char *pixels = (unsigned char *) source_image->bits();

    // Loop through the pixels, scaling each alpha byte by the clip's alpha value
    for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
    {
        // Get the pixel's current alpha value
        int A = pixels[byte_index + 3];

        // Apply the clip's alpha to the pixel
        pixels[byte_index + 3] = A * alpha;
    }
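    // Note: this assumes a 4-bytes-per-pixel image format with the alpha byte at
    // offset 3 of each pixel (as in Qt's ARGB32-style buffers).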
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
    // RESIZE THE SOURCE IMAGE (based on the clip's scale setting)
    QSize source_size = source_image->size();
    switch (source_clip->scale)
    {
    case (SCALE_FIT):
        // Keep the aspect ratio; fit entirely inside the canvas
        source_size.scale(max_width, max_height, Qt::KeepAspectRatio);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
        break;
    case (SCALE_STRETCH):
        // Distort to fill the canvas exactly
        source_size.scale(max_width, max_height, Qt::IgnoreAspectRatio);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
        break;
    case (SCALE_CROP):
    {
        // Compute the two candidate sizes, then pick whichever covers the whole
        // canvas (the overflow is cropped)
        QSize width_size(max_width, round(max_width / (float(source_size.width()) / float(source_size.height()))));
        QSize height_size(round(max_height / (float(source_size.height()) / float(source_size.width()))), max_height);
        if (width_size.width() >= max_width && width_size.height() >= max_height)
            source_size.scale(width_size.width(), width_size.height(), Qt::KeepAspectRatio);
        else
            source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
        break;
    }
    // ... (SCALE_NONE: leave the image at its original size)
    }
    // GRAVITY: initialize the draw position (top-left corner) and scaled size
    float x = 0.0;
    float y = 0.0;
    // ... (sx / sy = the clip's scale_x / scale_y keyframe values at this frame)
    float scaled_source_width = source_size.width() * sx;
    float scaled_source_height = source_size.height() * sy;

    switch (source_clip->gravity)
    {
    case (GRAVITY_TOP_LEFT):
        // x and y stay 0
        break;
    case (GRAVITY_TOP):
        x = (max_width - scaled_source_width) / 2.0; // center horizontally
        break;
    case (GRAVITY_TOP_RIGHT):
        x = max_width - scaled_source_width; // right
        break;
    case (GRAVITY_LEFT):
        y = (max_height - scaled_source_height) / 2.0; // center vertically
        break;
    case (GRAVITY_CENTER):
        x = (max_width - scaled_source_width) / 2.0; // center horizontally
        y = (max_height - scaled_source_height) / 2.0; // center vertically
        break;
    case (GRAVITY_RIGHT):
        x = max_width - scaled_source_width; // right
        y = (max_height - scaled_source_height) / 2.0; // center vertically
        break;
    case (GRAVITY_BOTTOM_LEFT):
        y = (max_height - scaled_source_height); // bottom
        break;
    case (GRAVITY_BOTTOM):
        x = (max_width - scaled_source_width) / 2.0; // center horizontally
        y = (max_height - scaled_source_height); // bottom
        break;
    case (GRAVITY_BOTTOM_RIGHT):
        x = max_width - scaled_source_width; // right
        y = (max_height - scaled_source_height); // bottom
        break;
    }
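    // (x, y) is now the top-left corner where the scaled clip will be drawn; the
    // clip's location_x / location_y keyframes can offset it further (elided here).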
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "scaled_source_width", scaled_source_width, "info.height", info.height, "scaled_source_height", scaled_source_height);
    // TRANSFORM THE SOURCE IMAGE (translate, rotate, scale, shear)
    bool transformed = false;
    QTransform transform;

    // ... (r = the clip's rotation keyframe value at this frame)

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Build QTransform - if needed)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);

    // ROTATE (around the center of the scaled clip)
    if (!isEqual(r, 0)) {
        float origin_x = x + (scaled_source_width / 2.0);
        float origin_y = y + (scaled_source_height / 2.0);
        transform.translate(origin_x, origin_y);
        transform.rotate(r);
        transform.translate(-origin_x, -origin_y);
        transformed = true;
    }

    // TRANSLATE to the gravity/location position
    if (!isEqual(x, 0) || !isEqual(y, 0)) {
        transform.translate(x, y);
        transformed = true;
    }

    // SCALE (ratio of the scaled size to the raw image size, times the scale keyframes)
    float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
    float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
    if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
        transform.scale(source_width_scale, source_height_scale);
        transformed = true;
    }

    // SHEAR (shear_x / shear_y = the clip's shear keyframe values)
    if (!isEqual(shear_x, 0) || !isEqual(shear_y, 0)) {
        transform.shear(shear_x, shear_y);
        transformed = true;
    }
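    // Note: rotating between translate(origin) and translate(-origin) pivots the
    // rotation around the clip's center rather than the canvas origin.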
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1, "", -1, "", -1);

    // COMPOSITE THE SOURCE IMAGE ONTO THE TIMELINE IMAGE
    std::shared_ptr<QImage> new_image;
    #pragma omp critical (T_addLayer)
    new_image = new_frame->GetImage();

    // Paint the (transformed) source image over the timeline frame's image
    QPainter painter(new_image.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    // Apply the transform (if one was built above)
    if (transformed)
        painter.setTransform(transform);

    // Composite the image using standard source-over alpha blending
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image);
    // DISPLAY THE FRAME NUMBER ON THE IMAGE (if the clip requests it)
    // ... (skipped when source_clip->display == FRAME_DISPLAY_NONE)
    stringstream frame_number_str;
    switch (source_clip->display)
    {
    case (FRAME_DISPLAY_CLIP):
        // Display the clip's internal frame number
        frame_number_str << clip_frame_number;
        break;
    case (FRAME_DISPLAY_TIMELINE):
        // Display the timeline's frame number
        frame_number_str << timeline_frame_number;
        break;
    case (FRAME_DISPLAY_BOTH):
        // Display both, e.g. "120 (30)"
        frame_number_str << timeline_frame_number << " (" << clip_frame_number << ")";
        break;
    }

    // Draw the frame number (white text near the top-left corner)
    painter.setPen(QColor("#ffffff"));
    painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1, "", -1, "", -1);
}
// Update the list of 'open' clips (opening or closing readers as clips come in and out of view)
void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
{
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (before)", "does_clip_intersect", does_clip_intersect, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1, "", -1);

    // Is this clip already in the 'open' map?
    bool clip_found = open_clips.count(clip);

    if (clip_found && !does_clip_intersect)
    {
        // The clip is open but no longer needed: remove it from the map and close it
        open_clips.erase(clip);
        // ...
    }
    else if (!clip_found && does_clip_intersect)
    {
        // The clip is needed but not yet open: add it to the map and open it
        open_clips[clip] = clip;
        // ...
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (after)", "does_clip_intersect", does_clip_intersect, "clip_found", clip_found, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1);
}
// Sort the clips by position on the timeline
void Timeline::sort_clips()
{
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::SortClips", "clips.size()", clips.size(), "", -1, "", -1, "", -1, "", -1, "", -1);

    // ... (sort the clip list)
}

// Sort the effects by position on the timeline
void Timeline::sort_effects()
{
    // ... (sort the effect list)
}

// Close the timeline reader (and any resources it was consuming)
void Timeline::Close()
{
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);

    // Close all open clips
    list<Clip*>::iterator clip_itr;
    for (clip_itr = clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *clip = (*clip_itr);

        // Mark the clip as no longer intersecting (which closes its reader)
        update_open_clips(clip, false);
    }

    // ... (mark the timeline as closed)

    // Clear the cached frames
    final_cache->Clear();
}
// Compare two doubles for near-equality (within 1e-6), to ignore float rounding noise
bool Timeline::isEqual(double a, double b)
{
    return fabs(a - b) < 0.000001;
}
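// For example, isEqual(0.9999999, 1.0) is true, so a scale value that drifts a
// hair off 1.0 from floating-point math will not trigger a needless transform.

// GetFrame() below is the heart of the timeline: it returns a composited frame
// from the cache when possible, and otherwise renders a whole batch of frames
// (one per processor) inside an OpenMP parallel region.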
// Get an openshot::Frame object for a specific frame number of this timeline
std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
{
    // Adjust out-of-bounds frame numbers
    if (requested_frame < 1)
        requested_frame = 1;

    // Check the cache first (the fast path)
    std::shared_ptr<Frame> frame;
    #pragma omp critical (T_GetFrame)
    frame = final_cache->GetFrame(requested_frame);
    if (frame) {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);

        // Return the cached frame
        return frame;
    }

    // The frame must be generated, which requires an open timeline
    if (!is_open)
        // Raise an exception
        throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.", "");

    // Check the cache a second time (another thread may have just generated this frame)
    #pragma omp critical (T_GetFrame)
    frame = final_cache->GetFrame(requested_frame);
    if (frame) {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found on 2nd look)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);

        // Return the cached frame
        return frame;
    }
    // Minimum number of frames to render in one batch (for performance)
    int minimum_frames = OPEN_MP_NUM_PROCESSORS;

    // Find the clips that are near (or intersect) this batch of frames; this also
    // opens their readers (and closes readers that are no longer needed)
    vector<Clip*> nearby_clips;
    #pragma omp critical (T_GetFrame)
    nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);

    // Allow nested OpenMP parallelism (clips may use OpenMP internally)
    omp_set_nested(true);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS, "", -1, "", -1, "", -1);
    // First pass (serial): touch each intersecting clip's frames once, so the
    // clip readers are warmed up before the parallel loop below
    for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
    {
        // Loop through the nearby clips
        for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
        {
            // Get clip object from the list
            Clip *clip = nearby_clips[clip_index];
            // ... (convert the clip's Position()/Duration() into start/end frame positions)

            bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
            if (does_clip_intersect)
            {
                // Determine the clip's own frame number for this timeline frame
                // ... (clip_start_frame = the clip's Start() converted to a frame number)
                long clip_frame_number = frame_number - clip_start_position + clip_start_frame;

                // ... (request this frame from the clip)
            }
        }
    }
    // Render the batch of frames in an OpenMP parallel region
    #pragma omp parallel
    {
        // Loop through all requested frames, in order
        #pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames)
        for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
        {
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);

            // ... (samples_in_frame = the number of audio samples this frame needs)

            // Create a blank frame (the timeline's canvas size, black, with silent audio)
            std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(frame_number, max_width, max_height, "#000000", samples_in_frame, info.channels));
            #pragma omp critical (T_GetFrame)
            new_frame->AddAudioSilence(samples_in_frame);
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);

            // Add the timeline's background color to the new frame
            new_frame->AddColor(max_width, max_height, color.GetColorHex(frame_number));
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size(), "", -1, "", -1, "", -1);
            // Composite each intersecting clip onto this frame
            for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
            {
                // Get clip object from the list
                Clip *clip = nearby_clips[clip_index];
                // ... (clip_start_position / clip_end_position / clip_start_frame are
                //      the clip's Position(), Position() + Duration(), and Start(),
                //      converted from seconds into frame positions)

                bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);

                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect, "", -1, "", -1);
                // Clip is visible on this frame
                if (does_clip_intersect)
                {
                    // Determine if this is the top clip on its layer (only the top
                    // clip triggers the layer's effects), and sum the volume of all
                    // overlapping clips (for the VOLUME_MIX_AVERAGE strategy)
                    bool is_top_clip = true;
                    float max_volume = 0.0;
                    for (int top_clip_index = 0; top_clip_index < nearby_clips.size(); top_clip_index++)
                    {
                        Clip *nearby_clip = nearby_clips[top_clip_index];
                        // ... (convert nearby_clip's Position()/Start()/Duration() into frame positions)
                        long nearby_clip_frame_number = frame_number - nearby_clip_start_position + nearby_clip_start_frame;

                        // Another clip on the same layer that starts later (and also
                        // covers this frame) means this clip is not on top
                        if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
                            nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number &&
                            nearby_clip_start_position > clip_start_position && is_top_clip == true) {
                            is_top_clip = false;
                        }

                        // Accumulate the volume of each overlapping clip that has audio
                        if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
                            /* ... */
                            nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number) {
                            max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
                        }
                    }
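                    // Note: add_layer() divides each clip's volume by max_volume under
                    // VOLUME_MIX_AVERAGE, so however many clips overlap, the mixed
                    // output cannot exceed 100%.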
                    // Determine the clip's own frame number for this timeline frame
                    long clip_frame_number = frame_number - clip_start_position + clip_start_frame;

                    // Debug output
                    ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number, "", -1, "", -1);

                    // Composite the clip's image and audio onto the timeline frame
                    add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip, max_volume);
                }
                else
                {
                    // Debug output
                    ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "does_clip_intersect", does_clip_intersect, "", -1, "", -1, "", -1, "", -1);
                }

            } // end clip loop
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);

            // Update the frame number and add the finished frame to the cache
            new_frame->SetFrameNumber(frame_number);
            final_cache->Add(new_frame);

        } // end frame loop
    } // end omp parallel
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);

    // Return the requested frame (rendered and cached above)
    return final_cache->GetFrame(requested_frame);
}
// Find intersecting (or non-intersecting) clips for a range of frames
vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include)
{
    // The clips that match the request
    vector<Clip*> matching_clips;

    // Calculate the frame range to test against
    float min_requested_frame = requested_frame;
    float max_requested_frame = requested_frame + (number_of_frames - 1);

    // ...

    // Loop through all clips
    list<Clip*>::iterator clip_itr;
    for (clip_itr = clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *clip = (*clip_itr);
        // ... (convert the clip's Position()/Duration() into start/end frame positions)

        // Does the clip overlap the requested frame range?
        bool does_clip_intersect =
            (clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
            (clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_frame", min_requested_frame, "max_requested_frame", max_requested_frame, "clip->Position()", clip->Position(), "does_clip_intersect", does_clip_intersect, "", -1);

        // Open (or close) the clip's reader, depending on whether it intersects
        #pragma omp critical (reader_lock)
        update_open_clips(clip, does_clip_intersect);

        // Collect the clips that match the request
        if (does_clip_intersect && include)
            // Add the intersecting clip
            matching_clips.push_back(clip);
        else if (!does_clip_intersect && !include)
            // Add the non-intersecting clip (include == false inverts the filter)
            matching_clips.push_back(clip);

    } // end clip loop

    // Return the list of matching clips
    return matching_clips;
}
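// Note: both result modes are used. GetFrame() requests the intersecting clips
// (include == true) to know what to render, while the update_open_clips() call
// in the loop above has already closed the readers of clips that no longer
// intersect the requested range.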
// Set the cache object used by this timeline
void Timeline::SetCache(CacheBase* new_cache)
{
    final_cache = new_cache;
}
// Generate Json::JsonValue for this object
Json::Value Timeline::JsonValue() {
    // Create the root JSON object (starting from the base reader values)
    // ...
    root["type"] = "Timeline";
    // ... (viewport, duration, and other timeline properties)

    // Add an array of clips
    root["clips"] = Json::Value(Json::arrayValue);

    // Loop through all clips
    list<Clip*>::iterator clip_itr;
    for (clip_itr = clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Append each clip's JSON
        Clip *existing_clip = (*clip_itr);
        root["clips"].append(existing_clip->JsonValue());
    }

    // Add an array of effects
    root["effects"] = Json::Value(Json::arrayValue);

    // Loop through all effects
    list<EffectBase*>::iterator effect_itr;
    for (effect_itr = effects.begin(); effect_itr != effects.end(); ++effect_itr)
    {
        // Append each effect's JSON
        EffectBase *existing_effect = (*effect_itr);
        root["effects"].append(existing_effect->JsonValue());
    }

    // Return the JSON object
    return root;
}
// Load a JSON string into this timeline
void Timeline::SetJson(string value) {
    // Parse the JSON string into a Json::Value
    Json::Value root;
    Json::Reader reader;
    bool success = reader.parse(value, root);
    if (!success)
        // Raise an exception
        throw InvalidJSON("JSON could not be parsed (or is invalid)", "");

    try
    {
        // Load the parsed values into this timeline
        SetJsonValue(root);
    }
    catch (exception e)
    {
        // Error: required keys were missing or had the wrong types
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
    }
}
// Load Json::JsonValue into this timeline
void Timeline::SetJsonValue(Json::Value root) {
    // Remember whether the timeline was open, so it can be re-opened after loading
    bool was_open = is_open;
    // ...

    if (!root["clips"].isNull()) {
        // Clear the existing clips
        // ...

        // Loop through the array of clips
        for (int x = 0; x < root["clips"].size(); x++) {
            // Get each clip's JSON
            Json::Value existing_clip = root["clips"][x];

            // ... (create a new Clip, load this JSON into it, and add it to the timeline)
        }
    }

    if (!root["effects"].isNull()) {
        // Clear the existing effects
        // ...

        // Loop through the array of effects
        for (int x = 0; x < root["effects"].size(); x++) {
            // Get each effect's JSON
            Json::Value existing_effect = root["effects"][x];

            if (!existing_effect["type"].isNull()) {
                // ... (create an effect of this "type", load the JSON into it, and add it)
            }
        }
    }

    // Set the duration of the timeline (if present in the JSON)
    if (!root["duration"].isNull()) {
        // ...
    }

    // ... (re-open the timeline if it was open before)
}
// Apply a specially formatted JSON diff, which represents changes to the timeline
// (add, update, delete); primarily used to keep the timeline in sync with another
// application, such as OpenShot Video Editor
void Timeline::ApplyJsonDiff(string value) {
    // Parse the JSON string into a Json::Value
    Json::Value root;
    Json::Reader reader;
    bool success = reader.parse(value, root);
    if (!success || !root.isArray())
        // Raise an exception (a diff must be a JSON array of change objects)
        throw InvalidJSON("JSON could not be parsed (or is invalid).", "");

    try
    {
        // Loop through each change in the array
        for (int x = 0; x < root.size(); x++) {
            // Get each change
            Json::Value change = root[x];
            string root_key = change["key"][(uint)0].asString();

            // Dispatch based on the first part of the key
            if (root_key == "clips")
                // Apply the change to the clips
                apply_json_to_clips(change);
            else if (root_key == "effects")
                // Apply the change to the effects
                apply_json_to_effects(change);
            else
                // Apply the change to the timeline's own properties
                apply_json_to_timeline(change);
        }
    }
    catch (exception e)
    {
        // Error: required keys were missing or had the wrong types
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
    }
}
// Apply a JSON change to the timeline's clips
void Timeline::apply_json_to_clips(Json::Value change) {
    // Get the change details
    string change_type = change["type"].asString();
    string clip_id = "";
    Clip *existing_clip = NULL;

    // Find the clip the change refers to (by the "id" in the key path)
    for (int x = 0; x < change["key"].size(); x++) {
        // Get each part of the key
        Json::Value key_part = change["key"][x];

        if (key_part.isObject()) {
            // Check for an id
            if (!key_part["id"].isNull()) {
                // Set the id
                clip_id = key_part["id"].asString();

                // Find the matching clip in the timeline (if any)
                list<Clip*>::iterator clip_itr;
                for (clip_itr = clips.begin(); clip_itr != clips.end(); ++clip_itr)
                {
                    // Get clip object from the iterator
                    Clip *c = (*clip_itr);
                    if (c->Id() == clip_id) {
                        // ... (existing_clip = c; stop searching)
                    }
                }
            }
        }
    }

    // Check for a more specific key targeting one of this clip's effects,
    // e.g. ["clips", {"id": ...}, "effects", {"id": ...}]
    if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
    {
        // Get the effect part of the key
        Json::Value key_part = change["key"][3];

        if (key_part.isObject()) {
            // Check for an id
            if (!key_part["id"].isNull())
            {
                // Set the id
                string effect_id = key_part["id"].asString();

                // Find the matching effect on this clip (if any)
                list<EffectBase*> effect_list = existing_clip->Effects();
                list<EffectBase*>::iterator effect_itr;
                for (effect_itr = effect_list.begin(); effect_itr != effect_list.end(); ++effect_itr)
                {
                    // Get effect object from the iterator
                    EffectBase *e = (*effect_itr);
                    if (e->Id() == effect_id) {
                        // Apply the change directly to the effect
                        apply_json_to_effects(change, e);

                        // Invalidate the affected range of cached frames
                        // ... (compute new_starting_frame / new_ending_frame from the change value)
                        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);

                        return;
                    }
                }
            }
        }
    }

    // Invalidate the cache around the clip's NEW position (if the change carries one)
    if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
        int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
        int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
    }
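    // Example: on a 30 fps timeline, a clip moved to position 2.0s with start 0.0s
    // and end 4.0s gives new_starting_frame = (2.0 * 30) + 1 = 61 and
    // new_ending_frame = (6.0 * 30) + 1 = 181, so frames 53 through 189 are evicted
    // (an 8-frame safety margin on each side).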
    // Determine the change type
    if (change_type == "insert") {
        // ... (create a new Clip from change["value"] and add it to the timeline)
        apply_mapper_to_clip(clip);

    } else if (change_type == "update") {
        // Update the existing clip
        if (existing_clip) {
            // Invalidate the cache around the clip's OLD position
            // ... (compute old_starting_frame / old_ending_frame from the clip's current position)
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Also remove that range from the clip reader's own cache (if it has one)
            if (existing_clip->Reader() && existing_clip->Reader()->GetCache())
                existing_clip->Reader()->GetCache()->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // ... (load the new JSON into the clip, then refresh its frame mapping)
            apply_mapper_to_clip(existing_clip);

            // Clear any max-size optimization on the clip's reader (and on the
            // reader nested inside a FrameMapper)
            if (existing_clip->Reader()) {
                existing_clip->Reader()->SetMaxSize(0, 0);
                if (existing_clip->Reader()->Name() == "FrameMapper") {
                    // ... (nested_reader = the reader wrapped by the FrameMapper)
                    if (nested_reader->Reader())
                        nested_reader->Reader()->SetMaxSize(0, 0);
                }
            }
        }

    } else if (change_type == "delete") {
        // Remove the existing clip
        if (existing_clip) {
            // Invalidate the cache around the clip's old position
            // ...
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // ... (remove the clip from the timeline)
        }
    }
}
// Apply a JSON change to the timeline's effects
void Timeline::apply_json_to_effects(Json::Value change) {
    // Get the change type
    string change_type = change["type"].asString();
    EffectBase *existing_effect = NULL;
    // ...

    // Find the effect the change refers to (by the "id" in the key path)
    for (int x = 0; x < change["key"].size(); x++) {
        // Get each part of the key
        Json::Value key_part = change["key"][x];

        if (key_part.isObject()) {
            // Check for an id
            if (!key_part["id"].isNull())
            {
                // Set the id
                string effect_id = key_part["id"].asString();

                // Find the matching effect in the timeline (if any)
                list<EffectBase*>::iterator effect_itr;
                for (effect_itr = effects.begin(); effect_itr != effects.end(); ++effect_itr)
                {
                    // Get effect object from the iterator
                    EffectBase *e = (*effect_itr);
                    if (e->Id() == effect_id) {
                        existing_effect = e;
                        // ... (stop searching)
                    }
                }
            }
        }
    }

    // Apply the change (an "insert" needs no existing effect)
    if (existing_effect || change_type == "insert")
        apply_json_to_effects(change, existing_effect);
}
// Apply a JSON change to a specific effect (or insert a new one)
void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) {
    // Get the change type
    string change_type = change["type"].asString();

    // Invalidate the cache around the effect's NEW position (if the change carries one)
    if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
        int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
        int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
    }

    // Determine the change type
    if (change_type == "insert") {
        // Determine the type of effect to create
        string effect_type = change["value"]["type"].asString();

        // ... (create an effect of this type, load the JSON into it, and add it to the timeline)

    } else if (change_type == "update") {
        // Update the existing effect
        if (existing_effect) {
            // Invalidate the cache around the effect's OLD position
            // ...
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // ... (load the new JSON into the effect)
        }

    } else if (change_type == "delete") {
        // Remove the existing effect
        if (existing_effect) {
            // Invalidate the cache around the effect's old position
            // ...
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // ... (remove the effect from the timeline)
        }
    }
}
// Apply a JSON change to the timeline's own properties
void Timeline::apply_json_to_timeline(Json::Value change) {
    // Get the change details
    string change_type = change["type"].asString();
    string root_key = change["key"][(uint)0].asString();
    string sub_key = "";
    if (change["key"].size() >= 2)
        sub_key = change["key"][(uint)1].asString();

    // Clear the cache: any change to the timeline's properties invalidates every frame
    final_cache->Clear();

    // Determine the change type
    if (change_type == "insert" || change_type == "update") {
        // INSERT / UPDATE: find the matching property and load the new value
        if (root_key == "color")
            // Set the background color of the timeline
            color.SetJsonValue(change["value"]);
        else if (root_key == "viewport_scale")
            viewport_scale.SetJsonValue(change["value"]);
        else if (root_key == "viewport_x")
            viewport_x.SetJsonValue(change["value"]);
        else if (root_key == "viewport_y")
            viewport_y.SetJsonValue(change["value"]);
        else if (root_key == "duration") {
            info.duration = change["value"].asDouble();
            // ...
        }
        else if (root_key == "width")
            info.width = change["value"].asInt();
        else if (root_key == "height")
            info.height = change["value"].asInt();
        else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
            // Set both parts of the fps fraction
            if (!change["value"]["num"].isNull())
                info.fps.num = change["value"]["num"].asInt();
            if (!change["value"]["den"].isNull())
                info.fps.den = change["value"]["den"].asInt();
        }
        else if (root_key == "fps" && sub_key == "num")
            info.fps.num = change["value"].asInt();
        else if (root_key == "fps" && sub_key == "den")
            info.fps.den = change["value"].asInt();
        else if (root_key == "sample_rate")
            info.sample_rate = change["value"].asInt();
        else if (root_key == "channels")
            info.channels = change["value"].asInt();
        else if (root_key == "channel_layout")
            info.channel_layout = (ChannelLayout) change["value"].asInt();
        else
            // Error: unknown key
            throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

    } else if (change["type"].asString() == "delete") {
        // DELETE: reset the matching property to its default (properties cannot
        // actually be removed from the timeline)
        if (root_key == "color") {
            // ... (reset the background color to its default)
        }
        else if (root_key == "viewport_scale") {
            // ... (reset viewport_scale to its default)
        }
        else if (root_key == "viewport_x") {
            // ... (reset viewport_x to its default)
        }
        else if (root_key == "viewport_y") {
            // ... (reset viewport_y to its default)
        }
        else
            // Error: unknown key
            throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
    }
}
// Clear all caches for this timeline instance, and all clips, mappers, and readers under it
void Timeline::ClearAllCache() {
    // Clear the timeline's own frame cache
    final_cache->Clear();

    // Loop through all clips
    list<Clip*>::iterator clip_itr;
    for (clip_itr = clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *clip = (*clip_itr);

        // Clear the cache of the clip's reader
        clip->Reader()->GetCache()->Clear();

        // If the reader is a FrameMapper, also clear the reader nested inside it
        if (clip->Reader()->Name() == "FrameMapper") {
            // ... (clear the nested reader's cache as well)
        }
    }
}