Added AutoJoin template parameter to timeline classes

Previously, timelines would always automatically join adjacent elements
if their values were equal. That behavior was usually desired for mouth
shapes, but not for phones, animation rules, etc.
Daniel Wolf 2016-12-08 09:55:49 +01:00
parent 2eb0948c49
commit a24fe8874c
22 changed files with 148 additions and 79 deletions
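In short: Timeline, BoundedTimeline, and ContinuousTimeline now take an AutoJoin parameter that defaults to false, and the new JoiningTimeline/JoiningBoundedTimeline/JoiningContinuousTimeline aliases restore the old merging behavior. A minimal sketch of the difference, using the aliases and initializer syntax from the tests below (values are made up for illustration):

// AutoJoin = false (the new default): equal neighbors stay separate.
Timeline<int> plain{ { 0_cs, 2_cs, 7 }, { 2_cs, 4_cs, 7 } };
// plain.size() == 2; the new joinAdjacent() member merges them explicitly.

// AutoJoin = true: equal neighbors are merged as values are set.
JoiningTimeline<int> joining{ { 0_cs, 2_cs, 7 }, { 2_cs, 4_cs, 7 } };
// joining.size() == 1: a single element covering [0_cs, 4_cs)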

View File

@ -61,4 +61,3 @@ struct ShapeRule {
// The resulting timeline will always cover the entire duration of the phone (starting at 0 cs).
// It may extend into the negative time range if animation is required prior to the sound being heard.
Timeline<ShapeRule> getShapeRules(Phone phone, centiseconds duration, centiseconds previousDuration);

View File

@ -20,11 +20,11 @@ using boost::adaptors::transformed;
using std::pair;
using std::tuple;
Timeline<Shape> createTweens(ContinuousTimeline<Shape> shapes) {
JoiningTimeline<Shape> createTweens(JoiningContinuousTimeline<Shape> shapes) {
centiseconds minTweenDuration = 4_cs;
centiseconds maxTweenDuration = 10_cs;
Timeline<Shape> tweens;
JoiningTimeline<Shape> tweens;
for (auto first = shapes.begin(), second = std::next(shapes.begin());
first != shapes.end() && second != shapes.end();
@ -66,8 +66,8 @@ Timeline<Shape> createTweens(ContinuousTimeline<Shape> shapes) {
return tweens;
}
Timeline<Shape> animatePauses(const ContinuousTimeline<Shape>& shapes) {
Timeline<Shape> result;
JoiningTimeline<Shape> animatePauses(const JoiningContinuousTimeline<Shape>& shapes) {
JoiningTimeline<Shape> result;
// Don't close mouth for short pauses
for_each_adjacent(shapes.begin(), shapes.end(), [&](const Timed<Shape>& lhs, const Timed<Shape>& pause, const Timed<Shape>& rhs) {
@ -96,8 +96,8 @@ Timeline<Shape> animatePauses(const ContinuousTimeline<Shape>& shapes) {
return result;
}
template<typename T>
ContinuousTimeline<optional<T>> boundedTimelinetoContinuousOptional(const BoundedTimeline<T>& timeline) {
template<typename T, bool AutoJoin>
ContinuousTimeline<optional<T>, AutoJoin> boundedTimelinetoContinuousOptional(const BoundedTimeline<T, AutoJoin>& timeline) {
return {
timeline.getRange(), boost::none,
timeline | transformed([](const Timed<T>& timedValue) { return Timed<optional<T>>(timedValue.getTimeRange(), timedValue.getValue()); })
@ -143,8 +143,8 @@ ContinuousTimeline<ShapeRule> getShapeRules(const BoundedTimeline<Phone>& phones
// always choosing a shape from the current set that resembles the last shape and is somewhat relaxed.
// * When speaking, we anticipate vowels, trying to form their shape before the actual vowel.
// So whenever we come across a one-shape set, we backtrack a little, spreading that shape to the left.
ContinuousTimeline<Shape> animate(const ContinuousTimeline<ShapeSet>& shapeSets) {
ContinuousTimeline<Shape> shapes(shapeSets.getRange(), X);
JoiningContinuousTimeline<Shape> animate(const ContinuousTimeline<ShapeSet>& shapeSets) {
JoiningContinuousTimeline<Shape> shapes(shapeSets.getRange(), X);
Shape referenceShape = X;
// Animate forwards
@ -186,7 +186,7 @@ ContinuousTimeline<Shape> animate(const ContinuousTimeline<ShapeSet>& shapeSets)
return shapes;
}
ContinuousTimeline<Shape> animate(const BoundedTimeline<Phone> &phones) {
JoiningContinuousTimeline<Shape> animate(const BoundedTimeline<Phone> &phones) {
// Create timeline of shape rules
ContinuousTimeline<ShapeRule> shapeRules = getShapeRules(phones);
@ -196,16 +196,16 @@ ContinuousTimeline<Shape> animate(const BoundedTimeline<Phone> &phones) {
shapeRules | transformed([](const Timed<ShapeRule>& timedRule) { return Timed<ShapeSet>(timedRule.getTimeRange(), timedRule.getValue().regularShapes); }));
// Animate
ContinuousTimeline<Shape> shapes = animate(shapeSets);
JoiningContinuousTimeline<Shape> shapes = animate(shapeSets);
// Animate pauses
Timeline<Shape> pauses = animatePauses(shapes);
JoiningTimeline<Shape> pauses = animatePauses(shapes);
for (const auto& pause : pauses) {
shapes.set(pause);
}
// Create inbetweens for smoother animation
Timeline<Shape> tweens = createTweens(shapes);
JoiningTimeline<Shape> tweens = createTweens(shapes);
for (const auto& tween : tweens) {
shapes.set(tween);
}

View File

@ -4,4 +4,4 @@
#include "Shape.h"
#include "ContinuousTimeline.h"
ContinuousTimeline<Shape> animate(const BoundedTimeline<Phone>& phones);
JoiningContinuousTimeline<Shape> animate(const BoundedTimeline<Phone>& phones);

View File

@ -9,6 +9,7 @@
#include <gsl_util.h>
#include "parallel.h"
#include "AudioSegment.h"
#include "stringTools.h"
using std::vector;
using boost::adaptors::transformed;
@ -16,7 +17,7 @@ using fmt::format;
using std::runtime_error;
using std::unique_ptr;
BoundedTimeline<void> webRtcDetectVoiceActivity(const AudioClip& audioClip, ProgressSink& progressSink) {
JoiningBoundedTimeline<void> webRtcDetectVoiceActivity(const AudioClip& audioClip, ProgressSink& progressSink) {
VadInst* vadHandle = WebRtcVad_Create();
if (!vadHandle) throw runtime_error("Error creating WebRTC VAD handle.");
@ -34,7 +35,7 @@ BoundedTimeline<void> webRtcDetectVoiceActivity(const AudioClip& audioClip, Prog
ProgressSink& pass2ProgressSink = progressMerger.addSink(0.3);
// Detect activity
BoundedTimeline<void> activity(audioClip.getTruncatedRange());
JoiningBoundedTimeline<void> activity(audioClip.getTruncatedRange());
centiseconds time = 0_cs;
const size_t bufferCapacity = audioClip.getSampleRate() / 100;
auto processBuffer = [&](const vector<int16_t>& buffer) {
@ -66,11 +67,11 @@ BoundedTimeline<void> webRtcDetectVoiceActivity(const AudioClip& audioClip, Prog
return activity;
}
BoundedTimeline<void> detectVoiceActivity(const AudioClip& inputAudioClip, int maxThreadCount, ProgressSink& progressSink) {
JoiningBoundedTimeline<void> detectVoiceActivity(const AudioClip& inputAudioClip, int maxThreadCount, ProgressSink& progressSink) {
// Prepare audio for VAD
const unique_ptr<AudioClip> audioClip = inputAudioClip.clone() | resample(16000) | removeDcOffset();
BoundedTimeline<void> activity(audioClip->getTruncatedRange());
JoiningBoundedTimeline<void> activity(audioClip->getTruncatedRange());
std::mutex activityMutex;
// Split audio into segments and perform parallel VAD
@ -83,7 +84,7 @@ BoundedTimeline<void> detectVoiceActivity(const AudioClip& inputAudioClip, int m
}
runParallel([&](const TimeRange& segmentRange, ProgressSink& segmentProgressSink) {
unique_ptr<AudioClip> audioSegment = audioClip->clone() | segment(segmentRange);
BoundedTimeline<void> activitySegment = webRtcDetectVoiceActivity(*audioSegment, segmentProgressSink);
JoiningBoundedTimeline<void> activitySegment = webRtcDetectVoiceActivity(*audioSegment, segmentProgressSink);
std::lock_guard<std::mutex> lock(activityMutex);
for (auto activityRange : activitySegment) {
@ -102,7 +103,7 @@ BoundedTimeline<void> detectVoiceActivity(const AudioClip& inputAudioClip, int m
// Shorten activities. WebRTC adds a bit of buffer at the end.
const centiseconds tail(5);
for (const auto& utterance : Timeline<void>(activity)) {
for (const auto& utterance : JoiningBoundedTimeline<void>(activity)) {
if (utterance.getDuration() > tail && utterance.getEnd() < audioDuration) {
activity.clear(utterance.getEnd() - tail, utterance.getEnd());
}
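Switching to JoiningBoundedTimeline<void> matters here because the WebRTC VAD classifies short (10 ms) frames individually; with joining enabled, consecutive active frames collapse into single utterance ranges instead of many 1 cs slivers. An illustrative sketch, not taken from the diff, assuming the set(TimeRange) overload used elsewhere in this commit:

JoiningBoundedTimeline<void> activity(TimeRange(0_cs, 100_cs));
activity.set(TimeRange(10_cs, 11_cs));
activity.set(TimeRange(11_cs, 12_cs)); // joins with the previous range into [10_cs, 12_cs)
// Without AutoJoin, iterating over activity would yield two separate 1 cs elements.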

View File

@ -3,4 +3,4 @@
#include <BoundedTimeline.h>
#include <ProgressBar.h>
BoundedTimeline<void> detectVoiceActivity(const AudioClip& audioClip, int maxThreadCount, ProgressSink& progressSink);
JoiningBoundedTimeline<void> detectVoiceActivity(const AudioClip& audioClip, int maxThreadCount, ProgressSink& progressSink);

View File

@ -7,5 +7,5 @@
class Exporter {
public:
virtual ~Exporter() {}
virtual void exportShapes(const boost::filesystem::path& inputFilePath, const ContinuousTimeline<Shape>& shapes, std::ostream& outputStream) = 0;
virtual void exportShapes(const boost::filesystem::path& inputFilePath, const JoiningContinuousTimeline<Shape>& shapes, std::ostream& outputStream) = 0;
};

View File

@ -25,7 +25,7 @@ string escapeJsonString(const string& s) {
return result;
}
void JsonExporter::exportShapes(const boost::filesystem::path& inputFilePath, const ContinuousTimeline<Shape>& shapes, std::ostream& outputStream) {
void JsonExporter::exportShapes(const boost::filesystem::path& inputFilePath, const JoiningContinuousTimeline<Shape>& shapes, std::ostream& outputStream) {
// Export as JSON.
// I'm not using a library because the code is short enough without one and it lets me control the formatting.
outputStream << "{\n";

View File

@ -4,5 +4,5 @@
class JsonExporter : public Exporter {
public:
void exportShapes(const boost::filesystem::path& inputFilePath, const ContinuousTimeline<Shape>& shapes, std::ostream& outputStream) override;
void exportShapes(const boost::filesystem::path& inputFilePath, const JoiningContinuousTimeline<Shape>& shapes, std::ostream& outputStream) override;
};

View File

@ -1,6 +1,6 @@
#include "TsvExporter.h"
void TsvExporter::exportShapes(const boost::filesystem::path& inputFilePath, const ContinuousTimeline<Shape>& shapes, std::ostream& outputStream) {
void TsvExporter::exportShapes(const boost::filesystem::path& inputFilePath, const JoiningContinuousTimeline<Shape>& shapes, std::ostream& outputStream) {
UNUSED(inputFilePath);
// Output shapes with start times

View File

@ -4,6 +4,6 @@
class TsvExporter : public Exporter {
public:
void exportShapes(const boost::filesystem::path& inputFilePath, const ContinuousTimeline<Shape>& shapes, std::ostream& outputStream) override;
void exportShapes(const boost::filesystem::path& inputFilePath, const JoiningContinuousTimeline<Shape>& shapes, std::ostream& outputStream) override;
};

View File

@ -6,7 +6,7 @@
using std::string;
using boost::property_tree::ptree;
void XmlExporter::exportShapes(const boost::filesystem::path& inputFilePath, const ContinuousTimeline<Shape>& shapes, std::ostream& outputStream) {
void XmlExporter::exportShapes(const boost::filesystem::path& inputFilePath, const JoiningContinuousTimeline<Shape>& shapes, std::ostream& outputStream) {
ptree tree;
// Add metadata

View File

@ -4,5 +4,5 @@
class XmlExporter : public Exporter {
public:
void exportShapes(const boost::filesystem::path& inputFilePath, const ContinuousTimeline<Shape>& shapes, std::ostream& outputStream) override;
void exportShapes(const boost::filesystem::path& inputFilePath, const JoiningContinuousTimeline<Shape>& shapes, std::ostream& outputStream) override;
};

View File

@ -1,7 +1,7 @@
#include "exporterTools.h"
// Makes sure there is at least one mouth shape
std::vector<Timed<Shape>> dummyShapeIfEmpty(const Timeline<Shape>& shapes) {
std::vector<Timed<Shape>> dummyShapeIfEmpty(const JoiningTimeline<Shape>& shapes) {
std::vector<Timed<Shape>> result;
std::copy(shapes.begin(), shapes.end(), std::back_inserter(result));
if (result.empty()) {

View File

@ -4,4 +4,4 @@
#include "Timeline.h"
// Makes sure there is at least one mouth shape
std::vector<Timed<Shape>> dummyShapeIfEmpty(const Timeline<Shape>& shapes);
std::vector<Timed<Shape>> dummyShapeIfEmpty(const JoiningTimeline<Shape>& shapes);

View File

@ -10,14 +10,14 @@ using std::u32string;
using boost::filesystem::path;
using std::unique_ptr;
ContinuousTimeline<Shape> animateAudioClip(
JoiningContinuousTimeline<Shape> animateAudioClip(
const AudioClip& audioClip,
optional<u32string> dialog,
int maxThreadCount,
ProgressSink& progressSink)
{
BoundedTimeline<Phone> phones = recognizePhones(audioClip, dialog, maxThreadCount, progressSink);
ContinuousTimeline<Shape> result = animate(phones);
JoiningContinuousTimeline<Shape> result = animate(phones);
return result;
}
@ -29,7 +29,7 @@ unique_ptr<AudioClip> createWaveAudioClip(path filePath) {
}
}
ContinuousTimeline<Shape> animateWaveFile(
JoiningContinuousTimeline<Shape> animateWaveFile(
path filePath,
optional<u32string> dialog,
int maxThreadCount,

View File

@ -6,13 +6,13 @@
#include "ProgressBar.h"
#include <boost/filesystem.hpp>
ContinuousTimeline<Shape> animateAudioClip(
JoiningContinuousTimeline<Shape> animateAudioClip(
const AudioClip& audioClip,
boost::optional<std::u32string> dialog,
int maxThreadCount,
ProgressSink& progressSink);
ContinuousTimeline<Shape> animateWaveFile(
JoiningContinuousTimeline<Shape> animateWaveFile(
boost::filesystem::path filePath,
boost::optional<std::u32string> dialog,
int maxThreadCount,

View File

@ -125,7 +125,7 @@ int main(int argc, char *argv[]) {
vector<char*>(argv, argv + argc) | transformed([](char* arg) { return fmt::format("\"{}\"", arg); }), " "));
std::cerr << "Processing input file. ";
ContinuousTimeline<Shape> animation(TimeRange::zero(), Shape::X);
JoiningContinuousTimeline<Shape> animation(TimeRange::zero(), Shape::X);
{
ProgressBar progressBar;

View File

@ -314,8 +314,8 @@ lambda_unique_ptr<ps_decoder_t> createDecoder(optional<u32string> dialog) {
return decoder;
}
Timeline<void> getNoiseSounds(TimeRange utteranceTimeRange, const Timeline<Phone>& phones) {
Timeline<void> noiseSounds;
JoiningTimeline<void> getNoiseSounds(TimeRange utteranceTimeRange, const Timeline<Phone>& phones) {
JoiningTimeline<void> noiseSounds;
// Find utterance parts without recognized phones
noiseSounds.set(utteranceTimeRange);
@ -325,7 +325,7 @@ Timeline<void> getNoiseSounds(TimeRange utteranceTimeRange, const Timeline<Phone
// Remove undesired elements
const centiseconds minSoundDuration = 12_cs;
for (const auto& unknownSound : Timeline<void>(noiseSounds)) {
for (const auto& unknownSound : JoiningTimeline<void>(noiseSounds)) {
bool startsAtZero = unknownSound.getStart() == 0_cs;
bool tooShort = unknownSound.getDuration() < minSoundDuration;
if (startsAtZero || tooShort) {
@ -386,7 +386,7 @@ Timeline<Phone> utteranceToPhones(
for (const auto& timedWord : words) {
wordIds.push_back(getWordId(timedWord.getValue(), *decoder.dict));
}
if (wordIds.empty()) return Timeline<Phone>();
if (wordIds.empty()) return {};
// Align the words' phones with speech
#if BOOST_VERSION < 105600 // Support legacy syntax
@ -403,7 +403,7 @@ Timeline<Phone> utteranceToPhones(
}
// Guess positions of noise sounds
Timeline<void> noiseSounds = getNoiseSounds(utteranceTimeRange, utterancePhones);
JoiningTimeline<void> noiseSounds = getNoiseSounds(utteranceTimeRange, utterancePhones);
for (const auto& noiseSound : noiseSounds) {
utterancePhones.set(noiseSound.getTimeRange(), Phone::Noise);
}
@ -430,7 +430,7 @@ BoundedTimeline<Phone> recognizePhones(
const unique_ptr<AudioClip> audioClip = inputAudioClip.clone() | removeDcOffset();
// Split audio into utterances
BoundedTimeline<void> utterances;
JoiningBoundedTimeline<void> utterances;
try {
utterances = detectVoiceActivity(*audioClip, maxThreadCount, voiceActivationProgressSink);
}

View File

@ -2,14 +2,14 @@
#include "Timeline.h"
template<typename T>
class BoundedTimeline : public Timeline<T> {
using typename Timeline<T>::time_type;
using Timeline<T>::equals;
template<typename T, bool AutoJoin = false>
class BoundedTimeline : public Timeline<T, AutoJoin> {
using typename Timeline<T, AutoJoin>::time_type;
using Timeline<T, AutoJoin>::equals;
public:
using typename Timeline<T>::iterator;
using Timeline<T>::end;
using typename Timeline<T, AutoJoin>::iterator;
using Timeline<T, AutoJoin>::end;
BoundedTimeline() :
range(TimeRange::zero())
@ -25,7 +25,7 @@ public:
{
for (auto it = first; it != last; ++it) {
// Virtual function call in constructor. Derived constructors shouldn't call this one!
BoundedTimeline<T>::set(*it);
BoundedTimeline::set(*it);
}
}
@ -42,7 +42,7 @@ public:
return range;
}
using Timeline<T>::set;
using Timeline<T, AutoJoin>::set;
iterator set(Timed<T> timedValue) override {
// Exit if the value's range is completely out of bounds
@ -54,16 +54,16 @@ public:
TimeRange& valueRange = timedValue.getTimeRange();
valueRange.resize(max(range.getStart(), valueRange.getStart()), min(range.getEnd(), valueRange.getEnd()));
return Timeline<T>::set(timedValue);
return Timeline<T, AutoJoin>::set(timedValue);
}
void shift(time_type offset) override {
Timeline<T>::shift(offset);
Timeline<T, AutoJoin>::shift(offset);
range.shift(offset);
}
bool operator==(const BoundedTimeline& rhs) const {
return Timeline<T>::equals(rhs) && range == rhs.range;
return Timeline<T, AutoJoin>::equals(rhs) && range == rhs.range;
}
bool operator!=(const BoundedTimeline& rhs) const {
@ -73,3 +73,6 @@ public:
private:
TimeRange range;
};
template<typename T>
using JoiningBoundedTimeline = BoundedTimeline<T, true>;

View File

@ -2,16 +2,16 @@
#include "BoundedTimeline.h"
template<typename T>
class ContinuousTimeline : public BoundedTimeline<T> {
template<typename T, bool AutoJoin = false>
class ContinuousTimeline : public BoundedTimeline<T, AutoJoin> {
public:
ContinuousTimeline(TimeRange range, T defaultValue) :
BoundedTimeline<T>(range),
BoundedTimeline<T, AutoJoin>(range),
defaultValue(defaultValue)
{
// Virtual function call in constructor. Derived constructors shouldn't call this one!
ContinuousTimeline<T>::clear(range);
ContinuousTimeline::clear(range);
}
template<typename InputIterator>
@ -20,7 +20,7 @@ public:
{
// Virtual function calls in constructor. Derived constructors shouldn't call this one!
for (auto it = first; it != last; ++it) {
ContinuousTimeline<T>::set(*it);
ContinuousTimeline::set(*it);
}
}
@ -33,12 +33,15 @@ public:
ContinuousTimeline(range, defaultValue, initializerList.begin(), initializerList.end())
{}
using BoundedTimeline<T>::clear;
using BoundedTimeline<T, AutoJoin>::clear;
void clear(const TimeRange& range) override {
BoundedTimeline<T>::set(Timed<T>(range, defaultValue));
BoundedTimeline<T, AutoJoin>::set(Timed<T>(range, defaultValue));
}
private:
T defaultValue;
};
template<typename T>
using JoiningContinuousTimeline = ContinuousTimeline<T, true>;
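With the alias above, a joining continuous timeline still keeps its whole range covered by the default value, while equal neighbors written via set() coalesce. A rough sketch with illustrative values, assuming the TimeRange constructor and set overloads seen elsewhere in this commit:

JoiningContinuousTimeline<Shape> shapes(TimeRange(0_cs, 10_cs), Shape::X);
shapes.set(TimeRange(2_cs, 4_cs), Shape::A);
shapes.set(TimeRange(4_cs, 6_cs), Shape::A); // extends the previous element to [2_cs, 6_cs)
// Iteration now yields X [0_cs, 2_cs), A [2_cs, 6_cs), X [6_cs, 10_cs).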

View File

@ -26,7 +26,7 @@ namespace internal {
}
}
template<typename T>
template<typename T, bool AutoJoin = false>
class Timeline {
public:
using time_type = TimeRange::time_type;
@ -94,7 +94,7 @@ public:
Timeline(InputIterator first, InputIterator last) {
for (auto it = first; it != last; ++it) {
// Virtual function call in constructor. Derived constructors don't call this one.
Timeline<T>::set(*it);
Timeline::set(*it);
}
}
@ -200,6 +200,7 @@ public:
return end();
}
if (AutoJoin) {
// Extend the timed value if it touches elements with equal value
iterator elementBefore = find(timedValue.getStart(), FindMode::SampleLeft);
if (elementBefore != end() && ::internal::valueEquals(*elementBefore, timedValue)) {
@ -209,6 +210,7 @@ public:
if (elementAfter != end() && ::internal::valueEquals(*elementAfter, timedValue)) {
timedValue.getTimeRange().resize(timedValue.getStart(), elementAfter->getEnd());
}
}
// Erase overlapping elements
Timeline::clear(timedValue.getTimeRange());
@ -242,6 +244,26 @@ public:
return ReferenceWrapper(*this, time);
}
// Combines adjacent equal elements into one
template<bool autoJoin = AutoJoin, typename = std::enable_if_t<!autoJoin>>
void joinAdjacent() {
Timeline copy(*this);
for (auto it = copy.begin(); it != copy.end(); ++it) {
const auto rangeBegin = it;
auto rangeEnd = std::next(rangeBegin);
while (rangeEnd != copy.end() && rangeEnd->getStart() == rangeBegin->getEnd() && ::internal::valueEquals(*rangeEnd, *rangeBegin)) {
++rangeEnd;
}
if (rangeEnd != std::next(rangeBegin)) {
Timed<T> combined = *rangeBegin;
combined.setTimeRange({rangeBegin->getStart(), rangeEnd->getEnd()});
set(combined);
it = rangeEnd;
}
}
}
virtual void shift(time_type offset) {
if (offset == time_type::zero()) return;
@ -290,7 +312,10 @@ private:
};
template<typename T>
std::ostream& operator<<(std::ostream& stream, const Timeline<T>& timeline) {
using JoiningTimeline = Timeline<T, true>;
template<typename T, bool AutoJoin>
std::ostream& operator<<(std::ostream& stream, const Timeline<T, AutoJoin>& timeline) {
stream << "Timeline{";
bool isFirst = true;
for (auto element : timeline) {

View File

@ -231,17 +231,10 @@ void testSetter(std::function<void(const Timed<int>&, Timeline<int>&)> set) {
}
// Check timeline via iterators
Timed<int> lastElement(centiseconds::min(), centiseconds::min(), std::numeric_limits<int>::min());
for (const auto& element : timeline) {
// No element should have zero length
EXPECT_LT(0_cs, element.getDuration());
// No two adjacent elements should have the same value; they should have been merged
if (element.getStart() == lastElement.getEnd()) {
EXPECT_NE(lastElement.getValue(), element.getValue());
}
lastElement = element;
// Element should match expected values
for (centiseconds t = std::max(centiseconds::zero(), element.getStart()); t < element.getEnd(); ++t) {
optional<int> expectedValue = expectedValues[t.count()];
@ -300,6 +293,51 @@ TEST(Timeline, indexer_set) {
});
}
TEST(Timeline, joinAdjacent) {
Timeline<int> timeline{
{1_cs, 2_cs, 1},
{2_cs, 4_cs, 2},
{3_cs, 6_cs, 2},
{6_cs, 7_cs, 2},
// Gap
{8_cs, 10_cs, 2},
{11_cs, 12_cs, 3}
};
EXPECT_EQ(6, timeline.size());
timeline.joinAdjacent();
EXPECT_EQ(4, timeline.size());
Timed<int> expectedJoined[] = {
{1_cs, 2_cs, 1},
{2_cs, 7_cs, 2},
// Gap
{8_cs, 10_cs, 2},
{11_cs, 12_cs, 3}
};
EXPECT_THAT(timeline, ElementsAreArray(expectedJoined));
}
TEST(Timeline, autoJoin) {
JoiningTimeline<int> timeline{
{1_cs, 2_cs, 1},
{2_cs, 4_cs, 2},
{3_cs, 6_cs, 2},
{6_cs, 7_cs, 2},
// Gap
{8_cs, 10_cs, 2},
{11_cs, 12_cs, 3}
};
Timed<int> expectedJoined[] = {
{1_cs, 2_cs, 1},
{2_cs, 7_cs, 2},
// Gap
{8_cs, 10_cs, 2},
{11_cs, 12_cs, 3}
};
EXPECT_EQ(4, timeline.size());
EXPECT_THAT(timeline, ElementsAreArray(expectedJoined));
}
TEST(Timeline, shift) {
Timeline<int> timeline{ { 1_cs, 2_cs, 1 },{ 2_cs, 5_cs, 2 },{ 7_cs, 9_cs, 3 } };
Timeline<int> expected{ { 3_cs, 4_cs, 1 },{ 4_cs, 7_cs, 2 },{ 9_cs, 11_cs, 3 } };