39 struct CompositeChoice {
const char* name;
CompositeType value; };
// Lookup table of composite (blend) mode choices exposed to the UI dropdown.
// NOTE(review): the initializer entries are not visible in this extract.
40 const CompositeChoice composite_choices[] = {
63 const int composite_choices_count =
sizeof(composite_choices)/
sizeof(CompositeChoice);
100 wave_color =
Color((
unsigned char)0, (
unsigned char)123, (
unsigned char)255, (
unsigned char)255);
125 parentTrackedObject =
nullptr;
126 parentClipObject = NULL;
142 final_cache.SetMaxBytesFromInfo(8,
info.width,
info.height,
info.sample_rate,
info.channels);
148 if (
rotation.GetCount() > 0 || !reader)
151 const auto rotate_meta = reader->info.metadata.find(
"rotate");
152 if (rotate_meta == reader->info.metadata.end()) {
158 float rotate_angle = 0.0f;
160 rotate_angle = strtof(rotate_meta->second.c_str(),
nullptr);
161 }
catch (
const std::exception& e) {
168 auto has_default_scale = [](
const Keyframe& kf) {
169 return kf.GetCount() == 1 && fabs(kf.GetPoint(0).co.Y - 1.0) < 0.00001;
175 if (fabs(rotate_angle) < 0.0001f)
178 float w =
static_cast<float>(reader->info.width);
179 float h =
static_cast<float>(reader->info.height);
180 if (w <= 0.0f || h <= 0.0f)
183 float rad = rotate_angle *
static_cast<float>(M_PI) / 180.0f;
185 float new_width = fabs(w * cos(rad)) + fabs(h * sin(rad));
186 float new_height = fabs(w * sin(rad)) + fabs(h * cos(rad));
187 if (new_width <= 0.0f || new_height <= 0.0f)
190 float uniform_scale = std::min(w / new_width, h / new_height);
197Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
204Clip::Clip(
ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
216 reader->ParentClip(
this);
223Clip::Clip(std::string
path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
229 std::string ext = get_file_extension(
path);
230 std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
233 if (ext==
"avi" || ext==
"mov" || ext==
"mkv" || ext==
"mpg" || ext==
"mpeg" || ext==
"mp3" || ext==
"mp4" || ext==
"mts" ||
234 ext==
"ogg" || ext==
"wav" || ext==
"wmv" || ext==
"webm" || ext==
"vob" || ext==
"gif" ||
path.find(
"%") != std::string::npos)
275 reader->ParentClip(
this);
276 allocated_reader = reader;
286 if (allocated_reader) {
287 delete allocated_reader;
288 allocated_reader = NULL;
308 if (parentTimeline) {
310 std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->
GetTrackedObject(object_id);
311 Clip* clipObject = parentTimeline->
GetClip(object_id);
316 parentClipObject = NULL;
318 else if (clipObject) {
320 parentTrackedObject =
nullptr;
327 parentTrackedObject = trackedObject;
332 parentClipObject = clipObject;
340 bool is_same_reader =
false;
341 if (new_reader && allocated_reader) {
342 if (new_reader->
Name() ==
"FrameMapper") {
345 if (allocated_reader == clip_mapped_reader->
Reader()) {
346 is_same_reader =
true;
351 if (allocated_reader && !is_same_reader) {
353 allocated_reader->Close();
354 delete allocated_reader;
356 allocated_reader = NULL;
364 reader->ParentClip(
this);
378 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
399 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
405 if (is_open && reader) {
421 if (
time.GetCount() > 1)
427 fps = reader->info.fps.ToFloat();
430 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
432 return float(
time.GetLength()) / fps;
456 return GetFrame(NULL, clip_frame_number, NULL);
461std::shared_ptr<Frame>
Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
464 return GetFrame(background_frame, clip_frame_number, NULL);
472 throw ReaderClosed(
"The Clip is closed. Call Open() before calling this method.");
477 std::shared_ptr<Frame> frame = NULL;
480 frame = final_cache.GetFrame(clip_frame_number);
483 frame = GetOrCreateFrame(clip_frame_number);
486 int64_t timeline_frame_number = clip_frame_number;
487 QSize timeline_size(frame->GetWidth(), frame->GetHeight());
488 if (background_frame) {
490 timeline_frame_number = background_frame->number;
491 timeline_size.setWidth(background_frame->GetWidth());
492 timeline_size.setHeight(background_frame->GetHeight());
496 apply_timemapping(frame);
499 apply_waveform(frame, timeline_size);
502 apply_effects(frame, timeline_frame_number, options,
true);
505 apply_keyframes(frame, timeline_size);
508 apply_effects(frame, timeline_frame_number, options,
false);
511 final_cache.Add(frame);
514 if (!background_frame) {
516 background_frame = std::make_shared<Frame>(frame->number, frame->GetWidth(), frame->GetHeight(),
517 "#00000000", frame->GetAudioSamplesCount(),
518 frame->GetAudioChannelsCount());
522 apply_background(frame, background_frame);
529 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
536 for (
const auto& effect : effects) {
537 if (effect->Id() ==
id) {
546 if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
550 return parentClipObject;
555 if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
559 return parentTrackedObject;
563std::string Clip::get_file_extension(std::string
path)
566 const auto dot_pos =
path.find_last_of(
'.');
567 if (dot_pos == std::string::npos || dot_pos + 1 >=
path.size()) {
568 return std::string();
571 return path.substr(dot_pos + 1);
575void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
580 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
583 if (
time.GetLength() > 1)
585 const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
587 int64_t clip_frame_number = frame->number;
588 int64_t new_frame_number = adjust_frame_number_minimum(
time.GetLong(clip_frame_number));
591 juce::AudioBuffer<float> *source_samples =
nullptr;
594 double delta =
time.GetDelta(clip_frame_number + 1);
595 const bool prev_is_increasing =
time.IsIncreasing(clip_frame_number);
596 const bool is_increasing =
time.IsIncreasing(clip_frame_number + 1);
605 int source_sample_count = round(target_sample_count * fabs(delta));
608 AudioLocation location;
611 location.
frame = new_frame_number;
621 resampler =
new AudioResampler(
Reader()->
info.channels);
625 juce::AudioBuffer<float> init_samples(
Reader()->
info.channels, 64);
626 init_samples.clear();
627 resampler->SetBuffer(&init_samples, 1.0);
628 resampler->GetResampledBuffer();
635 if (source_sample_count <= 0) {
637 frame->AddAudioSilence(target_sample_count);
642 source_samples =
new juce::AudioBuffer<float>(
Reader()->
info.channels, source_sample_count);
643 source_samples->clear();
646 int remaining_samples = source_sample_count;
648 while (remaining_samples > 0) {
649 std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.
frame,
false);
650 int frame_sample_count = source_frame->GetAudioSamplesCount() - location.
sample_start;
653 if (
auto *fm =
dynamic_cast<FrameMapper*
>(reader)) {
654 fm->SetDirectionHint(is_increasing);
656 source_frame->SetAudioDirection(is_increasing);
658 if (frame_sample_count == 0) {
668 if (remaining_samples - frame_sample_count >= 0) {
670 for (
int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
671 source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.
sample_start, frame_sample_count, 1.0f);
679 remaining_samples -= frame_sample_count;
680 source_pos += frame_sample_count;
684 for (
int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
685 source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.
sample_start, remaining_samples, 1.0f);
688 remaining_samples = 0;
689 source_pos += remaining_samples;
696 frame->AddAudioSilence(target_sample_count);
698 if (source_sample_count != target_sample_count) {
700 double resample_ratio = double(source_sample_count) / double(target_sample_count);
701 resampler->SetBuffer(source_samples, resample_ratio);
704 juce::AudioBuffer<float> *resampled_buffer = resampler->GetResampledBuffer();
709 frame->AddAudio(
true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
715 frame->AddAudio(
true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
720 delete source_samples;
728int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
731 if (frame_number < 1)
739std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number,
bool enable_time)
743 int64_t clip_frame_number = adjust_frame_number_minimum(number);
744 bool is_increasing =
true;
747 if (enable_time &&
time.GetLength() > 1) {
748 is_increasing =
time.IsIncreasing(clip_frame_number + 1);
749 const int64_t time_frame_number = adjust_frame_number_minimum(
time.GetLong(clip_frame_number));
750 if (
auto *fm =
dynamic_cast<FrameMapper*
>(reader)) {
752 fm->SetDirectionHint(is_increasing);
754 clip_frame_number = time_frame_number;
759 "Clip::GetOrCreateFrame (from reader)",
760 "number", number,
"clip_frame_number", clip_frame_number);
763 auto reader_frame = reader->GetFrame(clip_frame_number);
766 reader_frame->number = number;
767 reader_frame->SetAudioDirection(is_increasing);
773 auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
776 reader_copy->AddColor(QColor(Qt::transparent));
778 if (
has_audio.GetInt(number) == 0 || number > reader->info.video_length) {
780 reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
785 }
catch (
const ReaderClosed & e) {
787 }
catch (
const OutOfBoundsFrame & e) {
792 int estimated_samples_in_frame =
Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);
796 "Clip::GetOrCreateFrame (create blank)",
798 "estimated_samples_in_frame", estimated_samples_in_frame);
801 auto new_frame = std::make_shared<Frame>(
802 number, reader->info.width, reader->info.height,
803 "#000000", estimated_samples_in_frame, reader->info.channels);
804 new_frame->SampleRate(reader->info.sample_rate);
805 new_frame->ChannelsLayout(reader->info.channel_layout);
806 new_frame->AddAudioSilence(estimated_samples_in_frame);
822 root[
"id"] =
add_property_json(
"ID", 0.0,
"string",
Id(), NULL, -1, -1,
true, requested_frame);
823 root[
"position"] =
add_property_json(
"Position",
Position(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
825 root[
"start"] =
add_property_json(
"Start",
Start(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
826 root[
"end"] =
add_property_json(
"End",
End(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
827 root[
"duration"] =
add_property_json(
"Duration",
Duration(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
true, requested_frame);
832 root[
"composite"] =
add_property_json(
"Composite",
composite,
"int",
"", NULL, 0, composite_choices_count - 1,
false, requested_frame);
833 root[
"waveform"] =
add_property_json(
"Waveform", waveform,
"int",
"", NULL, 0, 1,
false, requested_frame);
834 root[
"parentObjectId"] =
add_property_json(
"Parent", 0.0,
"string", parentObjectId, NULL, -1, -1,
false, requested_frame);
865 for (
int i = 0; i < composite_choices_count; ++i)
873 if (parentClipObject)
876 long clip_start_position = round(
Position() *
info.fps.ToDouble()) + 1;
877 long clip_start_frame = (
Start() *
info.fps.ToDouble()) + 1;
878 double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;
881 float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
882 float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
883 float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
884 float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
885 float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
886 float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
887 float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
890 root[
"location_x"] =
add_property_json(
"Location X", parentObject_location_x,
"float",
"", &
location_x, -1.0, 1.0,
false, requested_frame);
891 root[
"location_y"] =
add_property_json(
"Location Y", parentObject_location_y,
"float",
"", &
location_y, -1.0, 1.0,
false, requested_frame);
892 root[
"scale_x"] =
add_property_json(
"Scale X", parentObject_scale_x,
"float",
"", &
scale_x, 0.0, 1.0,
false, requested_frame);
893 root[
"scale_y"] =
add_property_json(
"Scale Y", parentObject_scale_y,
"float",
"", &
scale_y, 0.0, 1.0,
false, requested_frame);
894 root[
"rotation"] =
add_property_json(
"Rotation", parentObject_rotation,
"float",
"", &
rotation, -360, 360,
false, requested_frame);
895 root[
"shear_x"] =
add_property_json(
"Shear X", parentObject_shear_x,
"float",
"", &
shear_x, -1.0, 1.0,
false, requested_frame);
896 root[
"shear_y"] =
add_property_json(
"Shear Y", parentObject_shear_y,
"float",
"", &
shear_y, -1.0, 1.0,
false, requested_frame);
911 root[
"alpha"] =
add_property_json(
"Alpha",
alpha.GetValue(requested_frame),
"float",
"", &
alpha, 0.0, 1.0,
false, requested_frame);
915 root[
"time"] =
add_property_json(
"Time",
time.GetValue(requested_frame),
"float",
"", &
time, 0.0, 30 * 60 * 60 * 48,
false, requested_frame);
936 return root.toStyledString();
944 root[
"parentObjectId"] = parentObjectId;
946 root[
"scale"] =
scale;
951 root[
"waveform"] = waveform;
952 root[
"scale_x"] =
scale_x.JsonValue();
953 root[
"scale_y"] =
scale_y.JsonValue();
956 root[
"alpha"] =
alpha.JsonValue();
957 root[
"rotation"] =
rotation.JsonValue();
958 root[
"time"] =
time.JsonValue();
959 root[
"volume"] =
volume.JsonValue();
961 root[
"shear_x"] =
shear_x.JsonValue();
962 root[
"shear_y"] =
shear_y.JsonValue();
963 root[
"origin_x"] =
origin_x.JsonValue();
964 root[
"origin_y"] =
origin_y.JsonValue();
967 root[
"has_audio"] =
has_audio.JsonValue();
968 root[
"has_video"] =
has_video.JsonValue();
979 root[
"effects"] = Json::Value(Json::arrayValue);
982 for (
auto existing_effect : effects)
984 root[
"effects"].append(existing_effect->JsonValue());
988 root[
"reader"] = reader->JsonValue();
990 root[
"reader"] = Json::Value(Json::objectValue);
1006 catch (
const std::exception& e)
1009 throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)");
1020 if (!root[
"parentObjectId"].isNull()){
1021 parentObjectId = root[
"parentObjectId"].asString();
1022 if (parentObjectId.size() > 0 && parentObjectId !=
""){
1025 parentTrackedObject =
nullptr;
1026 parentClipObject = NULL;
1029 if (!root[
"gravity"].isNull())
1031 if (!root[
"scale"].isNull())
1033 if (!root[
"anchor"].isNull())
1035 if (!root[
"display"].isNull())
1037 if (!root[
"mixing"].isNull())
1039 if (!root[
"composite"].isNull())
1041 if (!root[
"waveform"].isNull())
1042 waveform = root[
"waveform"].asBool();
1043 if (!root[
"scale_x"].isNull())
1044 scale_x.SetJsonValue(root[
"scale_x"]);
1045 if (!root[
"scale_y"].isNull())
1046 scale_y.SetJsonValue(root[
"scale_y"]);
1047 if (!root[
"location_x"].isNull())
1049 if (!root[
"location_y"].isNull())
1051 if (!root[
"alpha"].isNull())
1052 alpha.SetJsonValue(root[
"alpha"]);
1053 if (!root[
"rotation"].isNull())
1054 rotation.SetJsonValue(root[
"rotation"]);
1055 if (!root[
"time"].isNull())
1056 time.SetJsonValue(root[
"time"]);
1057 if (!root[
"volume"].isNull())
1058 volume.SetJsonValue(root[
"volume"]);
1059 if (!root[
"wave_color"].isNull())
1061 if (!root[
"shear_x"].isNull())
1062 shear_x.SetJsonValue(root[
"shear_x"]);
1063 if (!root[
"shear_y"].isNull())
1064 shear_y.SetJsonValue(root[
"shear_y"]);
1065 if (!root[
"origin_x"].isNull())
1066 origin_x.SetJsonValue(root[
"origin_x"]);
1067 if (!root[
"origin_y"].isNull())
1068 origin_y.SetJsonValue(root[
"origin_y"]);
1069 if (!root[
"channel_filter"].isNull())
1071 if (!root[
"channel_mapping"].isNull())
1073 if (!root[
"has_audio"].isNull())
1074 has_audio.SetJsonValue(root[
"has_audio"]);
1075 if (!root[
"has_video"].isNull())
1076 has_video.SetJsonValue(root[
"has_video"]);
1077 if (!root[
"perspective_c1_x"].isNull())
1079 if (!root[
"perspective_c1_y"].isNull())
1081 if (!root[
"perspective_c2_x"].isNull())
1083 if (!root[
"perspective_c2_y"].isNull())
1085 if (!root[
"perspective_c3_x"].isNull())
1087 if (!root[
"perspective_c3_y"].isNull())
1089 if (!root[
"perspective_c4_x"].isNull())
1091 if (!root[
"perspective_c4_y"].isNull())
1093 if (!root[
"effects"].isNull()) {
1099 for (
const auto existing_effect : root[
"effects"]) {
1101 if (existing_effect.isNull()) {
1107 if (!existing_effect[
"type"].isNull()) {
1110 if ( (e =
EffectInfo().CreateEffect(existing_effect[
"type"].asString()))) {
1121 if (!root[
"reader"].isNull())
1123 if (!root[
"reader"][
"type"].isNull())
1126 bool already_open =
false;
1130 already_open = reader->IsOpen();
1137 std::string type = root[
"reader"][
"type"].asString();
1139 if (type ==
"FFmpegReader") {
1143 reader->SetJsonValue(root[
"reader"]);
1145 }
else if (type ==
"QtImageReader") {
1149 reader->SetJsonValue(root[
"reader"]);
1151#ifdef USE_IMAGEMAGICK
1152 }
else if (type ==
"ImageReader") {
1155 reader =
new ImageReader(root[
"reader"][
"path"].asString(),
false);
1156 reader->SetJsonValue(root[
"reader"]);
1158 }
else if (type ==
"TextReader") {
1162 reader->SetJsonValue(root[
"reader"]);
1165 }
else if (type ==
"ChunkReader") {
1169 reader->SetJsonValue(root[
"reader"]);
1171 }
else if (type ==
"DummyReader") {
1175 reader->SetJsonValue(root[
"reader"]);
1177 }
else if (type ==
"Timeline") {
1186 reader->ParentClip(
this);
1187 allocated_reader = reader;
1198 final_cache.Clear();
1202void Clip::sort_effects()
1215 effects.push_back(effect);
1231 if (parentTimeline){
1239 std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);
1242 trackedObjectBBox->ParentClip(
this);
1252 final_cache.Clear();
1258 effects.remove(effect);
1261 final_cache.Clear();
1265void Clip::apply_background(std::shared_ptr<openshot::Frame> frame, std::shared_ptr<openshot::Frame> background_frame) {
// Draw this clip's frame image onto the background frame's canvas using
// the clip's composite (blend) mode, then store the merged canvas back on
// the clip's frame.
// NOTE(review): interior source lines are missing from this extract;
// comments cover only the statements visible here.
1267 std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
1268 QPainter painter(background_canvas.get());
// Select how this clip blends onto lower layers (the 'composite' member)
1271 painter.setCompositionMode(
static_cast<QPainter::CompositionMode
>(
composite));
1272 painter.drawImage(0, 0, *frame->GetImage());
// Replace the clip frame's image with the merged result
1276 frame->AddImage(background_canvas);
1280void Clip::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number,
TimelineInfoStruct* options,
bool before_keyframes)
1282 for (
auto effect : effects)
1285 if (effect->info.apply_before_clip && before_keyframes) {
1286 effect->GetFrame(frame, frame->number);
1287 }
else if (!effect->info.apply_before_clip && !before_keyframes) {
1288 effect->GetFrame(frame, frame->number);
1292 if (
timeline != NULL && options != NULL) {
1294 Timeline* timeline_instance =
static_cast<Timeline*
>(
timeline);
1301bool Clip::isNear(
double a,
double b)
1303 return fabs(a - b) < 0.000001;
1307void Clip::apply_keyframes(std::shared_ptr<Frame> frame, QSize timeline_size) {
1309 if (!frame->has_image_data) {
1315 std::shared_ptr<QImage> source_image = frame->GetImage();
1316 std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(timeline_size.width(),
1317 timeline_size.height(),
1318 QImage::Format_RGBA8888_Premultiplied);
1319 background_canvas->fill(QColor(Qt::transparent));
1322 QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());
1325 QPainter painter(background_canvas.get());
1326 painter.setRenderHint(QPainter::TextAntialiasing,
true);
1327 if (!transform.isIdentity()) {
1328 painter.setRenderHint(QPainter::SmoothPixmapTransform,
true);
1331 painter.setTransform(transform);
1334 painter.setCompositionMode(
static_cast<QPainter::CompositionMode
>(
composite));
1337 const float alpha_value =
alpha.GetValue(frame->number);
1338 if (alpha_value != 1.0f) {
1339 painter.setOpacity(alpha_value);
1340 painter.drawImage(0, 0, *source_image);
1342 painter.setOpacity(1.0);
1344 painter.drawImage(0, 0, *source_image);
1348 Timeline *t =
static_cast<Timeline *
>(
timeline);
1352 std::stringstream frame_number_str;
1359 frame_number_str << frame->number;
1372 painter.setPen(QColor(
"#ffffff"));
1373 painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
1379 frame->AddImage(background_canvas);
1383void Clip::apply_waveform(std::shared_ptr<Frame> frame, QSize timeline_size) {
1391 std::shared_ptr<QImage> source_image = frame->GetImage();
1395 "frame->number", frame->number,
1397 "width", timeline_size.width(),
1398 "height", timeline_size.height());
1401 int red =
wave_color.red.GetInt(frame->number);
1402 int green =
wave_color.green.GetInt(frame->number);
1403 int blue =
wave_color.blue.GetInt(frame->number);
1407 source_image = frame->GetWaveform(timeline_size.width(), timeline_size.height(), red, green, blue,
alpha);
1408 frame->AddImage(source_image);
1412QSize Clip::scale_size(QSize source_size,
ScaleType source_scale,
int target_width,
int target_height) {
1413 switch (source_scale)
1416 source_size.scale(target_width, target_height, Qt::KeepAspectRatio);
1420 source_size.scale(target_width, target_height, Qt::IgnoreAspectRatio);
1424 source_size.scale(target_width, target_height, Qt::KeepAspectRatioByExpanding);;
1433QTransform Clip::get_transform(std::shared_ptr<Frame> frame,
int width,
int height)
1436 std::shared_ptr<QImage> source_image = frame->GetImage();
1439 QSize source_size = scale_size(source_image->size(),
scale, width, height);
1442 float parentObject_location_x = 0.0;
1443 float parentObject_location_y = 0.0;
1444 float parentObject_scale_x = 1.0;
1445 float parentObject_scale_y = 1.0;
1446 float parentObject_shear_x = 0.0;
1447 float parentObject_shear_y = 0.0;
1448 float parentObject_rotation = 0.0;
1453 long parent_start_offset = parentClipObject->Start() *
info.fps.ToDouble();
1454 long parent_frame_number = frame->number + parent_start_offset;
1457 parentObject_location_x = parentClipObject->location_x.GetValue(parent_frame_number);
1458 parentObject_location_y = parentClipObject->location_y.GetValue(parent_frame_number);
1459 parentObject_scale_x = parentClipObject->scale_x.GetValue(parent_frame_number);
1460 parentObject_scale_y = parentClipObject->scale_y.GetValue(parent_frame_number);
1461 parentObject_shear_x = parentClipObject->shear_x.GetValue(parent_frame_number);
1462 parentObject_shear_y = parentClipObject->shear_y.GetValue(parent_frame_number);
1463 parentObject_rotation = parentClipObject->rotation.GetValue(parent_frame_number);
1469 Clip* parentClip = (
Clip*) parentTrackedObject->ParentClip();
1473 long parent_start_offset = parentClip->
Start() *
info.fps.ToDouble();
1474 long parent_frame_number = frame->number + parent_start_offset;
1477 std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parent_frame_number);
1481 parentClip->
scale, width, height);
1484 int trackedWidth = trackedObjectProperties[
"w"] * trackedObjectProperties[
"sx"] * parent_size.width() *
1486 int trackedHeight = trackedObjectProperties[
"h"] * trackedObjectProperties[
"sy"] * parent_size.height() *
1490 source_size = scale_size(source_size,
scale, trackedWidth, trackedHeight);
1493 parentObject_location_x = parentClip->
location_x.
GetValue(parent_frame_number) + ((trackedObjectProperties[
"cx"] - 0.5) * parentClip->
scale_x.
GetValue(parent_frame_number));
1494 parentObject_location_y = parentClip->
location_y.
GetValue(parent_frame_number) + ((trackedObjectProperties[
"cy"] - 0.5) * parentClip->
scale_y.
GetValue(parent_frame_number));
1495 parentObject_rotation = trackedObjectProperties[
"r"] + parentClip->
rotation.
GetValue(parent_frame_number);
1504 float sx =
scale_x.GetValue(frame->number);
1505 float sy =
scale_y.GetValue(frame->number);
1508 if(parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0){
1509 sx*= parentObject_scale_x;
1510 sy*= parentObject_scale_y;
1513 float scaled_source_width = source_size.width() * sx;
1514 float scaled_source_height = source_size.height() * sy;
1522 x = (width - scaled_source_width) / 2.0;
1525 x = width - scaled_source_width;
1528 y = (height - scaled_source_height) / 2.0;
1531 x = (width - scaled_source_width) / 2.0;
1532 y = (height - scaled_source_height) / 2.0;
1535 x = width - scaled_source_width;
1536 y = (height - scaled_source_height) / 2.0;
1539 y = (height - scaled_source_height);
1542 x = (width - scaled_source_width) / 2.0;
1543 y = (height - scaled_source_height);
1546 x = width - scaled_source_width;
1547 y = (height - scaled_source_height);
1553 "Clip::get_transform (Gravity)",
1554 "frame->number", frame->number,
1555 "source_clip->gravity",
gravity,
1556 "scaled_source_width", scaled_source_width,
1557 "scaled_source_height", scaled_source_height);
1559 QTransform transform;
1562 float r =
rotation.GetValue(frame->number) + parentObject_rotation;
1563 x += width * (
location_x.GetValue(frame->number) + parentObject_location_x);
1564 y += height * (
location_y.GetValue(frame->number) + parentObject_location_y);
1565 float shear_x_value =
shear_x.GetValue(frame->number) + parentObject_shear_x;
1566 float shear_y_value =
shear_y.GetValue(frame->number) + parentObject_shear_y;
1567 float origin_x_value =
origin_x.GetValue(frame->number);
1568 float origin_y_value =
origin_y.GetValue(frame->number);
1572 "Clip::get_transform (Build QTransform - if needed)",
1573 "frame->number", frame->number,
1576 "sx", sx,
"sy", sy);
1578 if (!isNear(x, 0) || !isNear(y, 0)) {
1580 transform.translate(x, y);
1582 if (!isNear(r, 0) || !isNear(shear_x_value, 0) || !isNear(shear_y_value, 0)) {
1584 float origin_x_offset = (scaled_source_width * origin_x_value);
1585 float origin_y_offset = (scaled_source_height * origin_y_value);
1586 transform.translate(origin_x_offset, origin_y_offset);
1587 transform.rotate(r);
1588 transform.shear(shear_x_value, shear_y_value);
1589 transform.translate(-origin_x_offset,-origin_y_offset);
1592 float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
1593 float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
1594 if (!isNear(source_width_scale, 1.0) || !isNear(source_height_scale, 1.0)) {
1595 transform.scale(source_width_scale, source_height_scale);
1602int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
// Convert a clip-local frame number into the equivalent timeline frame
// number, using the clip's 'position' and 'start' (seconds) converted to
// frames via the fps in 'info'.
// NOTE(review): original lines 1603-1616 are missing from this extract and
// may contain additional setup; comments cover only the visible statements.
// 1-based frame inside the source corresponding to the clip's trim start
1617 int64_t clip_start_frame = (
start *
info.fps.ToDouble()) + 1;
// 1-based (rounded) timeline frame where this clip begins playing
1618 int64_t clip_start_position = round(
position *
info.fps.ToDouble()) + 1;
// Shift the clip-local frame number onto the timeline
1619 int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
1621 return frame_number;
Header file for AudioResampler class.
Header file for ChunkReader class.
Header file for Clip class.
Header file for DummyReader class.
Header file for all Exception classes.
Header file for FFmpegReader class.
Header file for the FrameMapper class.
Header file for ImageReader class.
Header file for MagickUtilities (IM6/IM7 compatibility overlay).
Header file for QtImageReader class.
Header file for TextReader class.
Header file for Timeline class.
Header file for ZeroMQ-based Logger class.
This class reads a special chunk-formatted file, which can be easily shared in a distributed environment.
float Start() const
Get start position (in seconds) of clip (trim start of video).
float start
The position in seconds to start playing (used to trim the beginning of a clip).
float Duration() const
Get the length of this clip (in seconds).
virtual float End() const
Get end position (in seconds) of clip (trim end of video).
std::string Id() const
Get the Id of this clip object.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Json::Value add_property_choice_json(std::string name, int value, int selected_value) const
Generate JSON choice for a property (dropdown properties).
int Layer() const
Get layer of clip on timeline (lower number is covered by higher numbers).
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
openshot::TimelineBase * timeline
Pointer to the parent timeline instance (if any).
float Position() const
Get position on timeline (in seconds).
virtual openshot::TimelineBase * ParentTimeline()
Get the associated Timeline pointer (if any).
float position
The position on the timeline where this clip should start playing.
float end
The position in seconds to end playing (used to trim the ending of a clip).
std::string previous_properties
This string contains the previous JSON properties.
Json::Value add_property_json(std::string name, float value, std::string type, std::string memo, const Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame) const
Generate JSON for a property.
This class represents a clip (used to arrange readers on the timeline).
void SetAttachedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Set the pointer to the trackedObject this clip is attached to.
openshot::Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1).
openshot::Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1).
openshot::Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right).
openshot::Keyframe perspective_c4_x
Curves representing X for coordinate 4.
openshot::AnchorType anchor
The anchor determines what parent a clip should snap to.
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
void Open() override
Open the internal reader.
openshot::Keyframe rotation
Curve representing the rotation (0 to 360).
openshot::Keyframe channel_filter
A number representing an audio channel to filter (clears all other channels).
openshot::FrameDisplayType display
The format to display the frame number (if any).
void init_reader_rotation()
Update default rotation from reader.
Clip()
Default Constructor.
openshot::Keyframe perspective_c1_x
Curves representing X for coordinate 1.
void AttachToObject(std::string object_id)
Attach clip to Tracked Object or to another Clip.
std::string Json() const override
Generate JSON string of this object.
openshot::EffectBase * GetEffect(const std::string &id)
Look up an effect by ID.
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
openshot::Keyframe alpha
Curve representing the alpha (1 to 0).
openshot::Keyframe has_audio
An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes).
openshot::Keyframe perspective_c3_x
Curves representing X for coordinate 3.
void init_reader_settings()
Init reader info details.
openshot::Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
Json::Value JsonValue() const override
Generate Json::Value for this object.
void SetAttachedClip(Clip *clipObject)
Set the pointer to the clip this clip is attached to.
openshot::TimelineBase * ParentTimeline() override
Get the associated Timeline pointer (if any).
openshot::Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
openshot::Keyframe time
Curve representing the frames over time to play (used for speed and direction of video).
openshot::Clip * GetParentClip()
Return the associated ParentClip (if any).
bool Waveform()
Get the waveform property of this clip.
openshot::CompositeType composite
How this clip is composited onto lower layers.
openshot::GravityType gravity
The gravity of a clip determines where it snaps to its parent.
AudioLocation previous_location
Previous time-mapped audio location.
openshot::Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
std::shared_ptr< openshot::TrackedObjectBase > GetParentTrackedObject()
Return the associated Parent Tracked Object (if any).
void AddEffect(openshot::EffectBase *effect)
Add an effect to the clip.
void Close() override
Close the internal reader.
virtual ~Clip()
Destructor.
openshot::Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
openshot::Keyframe volume
Curve representing the volume (0 to 1).
openshot::Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up).
openshot::Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1).
float End() const override
Get end position (in seconds) of clip (trim end of video), which can be affected by the time curve.
std::shared_ptr< openshot::Frame > GetFrame(int64_t clip_frame_number) override
Get an openshot::Frame object for a specific frame number of this clip. The image size and number of ...
openshot::ReaderBase * Reader()
Get the current reader.
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the clip.
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel).
openshot::Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes).
std::string PropertiesJSON(int64_t requested_frame) const override
void Reader(openshot::ReaderBase *new_reader)
Set the current reader.
openshot::Color wave_color
Curve representing the color of the audio wave form.
void init_settings()
Init default settings for a clip.
openshot::Keyframe perspective_c2_x
Curves representing X for coordinate 2.
openshot::ScaleType scale
The scale determines how a clip should be resized to fit its parent.
openshot::Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1).
openshot::Keyframe origin_x
Curve representing X origin point (0.0=0% (left), 1.0=100% (right)).
std::recursive_mutex getFrameMutex
Mutex for multiple threads.
void SetJson(const std::string value) override
Load JSON string into this object.
openshot::Keyframe origin_y
Curve representing Y origin point (0.0=0% (top), 1.0=100% (bottom)).
This class represents a color (used on the timeline and clips).
This class is used as a simple, dummy reader, which can be very useful when writing unit tests....
This abstract class is the base class, used by all effects in libopenshot.
openshot::ClipBase * ParentClip()
Parent clip object of this effect (which can be unparented and NULL).
virtual void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
EffectInfoStruct info
Information about the current effect.
std::map< int, std::shared_ptr< openshot::TrackedObjectBase > > trackedObjects
Map of Tracked Object's by their indices (used by Effects that track objects on clips).
This class returns a listing of all effects supported by libopenshot.
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5).
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
ReaderBase * Reader()
Get the current reader.
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number).
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Exception for invalid JSON.
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
double GetValue(int64_t index) const
Get the value at a specific index.
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
virtual std::string Name()=0
Return the type name of the class.
openshot::ReaderInfo info
Information about the current media file.
openshot::ClipBase * ParentClip()
Parent clip object of this reader (which can be unparented and NULL).
ReaderBase()
Constructor for the base reader, where many things are initialized.
Exception when a reader is closed, and a frame is requested.
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
This class represents a timeline (used for building generic timeline implementations).
This class represents a timeline.
void AddTrackedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox).
std::shared_ptr< openshot::TrackedObjectBase > GetTrackedObject(std::string id) const
Return tracked object pointer by its id.
openshot::Clip * GetClip(const std::string &id)
Look up a single clip by ID.
std::shared_ptr< openshot::Frame > apply_effects(std::shared_ptr< openshot::Frame > frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct *options)
Apply global/timeline effects to the source frame (if any).
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method).
This namespace is the default namespace for all code in the openshot library.
AnchorType
This enumeration determines what parent a clip should be aligned to.
@ ANCHOR_CANVAS
Anchor the clip to the canvas.
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low,...
GravityType
This enumeration determines how clips are aligned to their parent container.
@ GRAVITY_TOP_LEFT
Align clip to the top left of its parent.
@ GRAVITY_LEFT
Align clip to the left of its parent (middle aligned).
@ GRAVITY_TOP_RIGHT
Align clip to the top right of its parent.
@ GRAVITY_RIGHT
Align clip to the right of its parent (middle aligned).
@ GRAVITY_BOTTOM_LEFT
Align clip to the bottom left of its parent.
@ GRAVITY_BOTTOM
Align clip to the bottom center of its parent.
@ GRAVITY_TOP
Align clip to the top center of its parent.
@ GRAVITY_BOTTOM_RIGHT
Align clip to the bottom right of its parent.
@ GRAVITY_CENTER
Align clip to the center of its parent (middle aligned).
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
@ SCALE_FIT
Scale the clip until either height or width fills the canvas (with no cropping).
@ SCALE_STRETCH
Scale the clip until both height and width fill the canvas (distort to fit).
@ SCALE_CROP
Scale the clip until both height and width fill the canvas (cropping the overlap).
@ SCALE_NONE
Do not scale the clip.
VolumeMixType
This enumeration determines the strategy when mixing audio with other clips.
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100%.
@ VOLUME_MIX_NONE
Do not apply any volume mixing adjustments. Just add the samples together.
@ VOLUME_MIX_REDUCE
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%).
FrameDisplayType
This enumeration determines the display format of the clip's frame number (if any)....
@ FRAME_DISPLAY_CLIP
Display the clip's internal frame number.
@ FRAME_DISPLAY_TIMELINE
Display the timeline's frame number.
@ FRAME_DISPLAY_BOTH
Display both the clip's and timeline's frame number.
@ FRAME_DISPLAY_NONE
Do not display the frame number.
const Json::Value stringToJson(const std::string value)
CompositeType
This enumeration determines how clips are composited onto lower layers.
bool has_tracked_object
Determines if this effect tracks objects through the clip.
int width
The width of the video (in pixels).
int channels
The number of audio channels used in the audio stream.
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps).
int height
The height of the video (in pixels).
This struct contains info about the current Timeline clip instance.
bool is_before_clip_keyframes
Is this before clip keyframes are applied.