diff --git a/rhubarb/resharper.DotSettings b/rhubarb/resharper.DotSettings
index 168efbe..103065f 100644
--- a/rhubarb/resharper.DotSettings
+++ b/rhubarb/resharper.DotSettings
@@ -1,7 +1,50 @@
+ WARNING
+ WARNING
+ WARNING
+ WARNING
+ WARNING
+ WARNING
+ WARNING
+ WARNING
+
+ WARNING
+ DO_NOT_SHOW
+ WARNING
+
+ WARNING
+ WARNING
+ WARNING
+ WARNING
+ WARNING
+ WARNING
+ WARNING
+ WARNING
+ WARNING
+ WARNING
+ HINT
+ WARNING
+ HINT
HINT
ERROR
+ WARNING
+ WARNING
+ WARNING
+
+
+ DO_NOT_SHOW
+ WARNING
+ WARNING
+ WARNING
+ DO_NOT_SHOW
+ WARNING
+
+ WARNING
+ WARNING
+ WARNING
+
+
DO_NOT_SHOW
@@ -29,9 +72,12 @@
False
ON_SINGLE_LINE
True
+ True
False
+ True
END_OF_LINE
CHOP_ALWAYS
+ False
END_OF_LINE
END_OF_LINE
USE_TABS_ONLY
@@ -58,6 +104,7 @@
USE_TABS_ONLY
USE_TABS_ONLY
USE_TABS_ONLY
+ True
UseExplicitType
UseVarWhenEvident
<NamingElement Priority="10"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="class field" /><type Name="struct field" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="_" Style="aaBb" /></NamingElement>
@@ -73,7 +120,7 @@
<NamingElement Priority="16"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="namespace" /><type Name="namespace alias" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /></NamingElement>
<NamingElement Priority="14"><Descriptor Static="True" Constexpr="Indeterminate" Const="True" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="class field" /><type Name="local variable" /><type Name="struct field" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /></NamingElement>
<NamingElement Priority="5"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="parameter" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /></NamingElement>
- <NamingElement Priority="4"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="template parameter" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></NamingElement>
+
<NamingElement Priority="17"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="type alias" /><type Name="typedef" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></NamingElement>
<NamingElement Priority="12"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="union member" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /></NamingElement>
<NamingElement Priority="3"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="union" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></NamingElement>
@@ -120,18 +167,59 @@
<Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" />
<Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" />
C:\Users\Daniel\AppData\Local\JetBrains\Transient\ReSharperPlatformVs14\v09\SolutionCaches
+ LIVE_MONITOR
+ LIVE_MONITOR
+ LIVE_MONITOR
+ LIVE_MONITOR
+ LIVE_MONITOR
+ LIVE_MONITOR
+ LIVE_MONITOR
+ LIVE_MONITOR
+ LIVE_MONITOR
+ DO_NOTHING
+ LIVE_MONITOR
True
True
True
True
+ True
True
True
True
True
True
+ True
+ True
+ True
+ True
+ True
+ True
True
True
+ True
+ True
+ True
+ True
+ True
+ True
+ True
+ True
+ True
+ True
+ True
+ True
True
True
+ True
+ True
+ True
+ True
+ True
+ True
+ True
+ True
+ True
+ True
+ True
True
\ No newline at end of file
diff --git a/rhubarb/src/animation/ShapeRule.cpp b/rhubarb/src/animation/ShapeRule.cpp
index e9b98c0..ad82900 100644
--- a/rhubarb/src/animation/ShapeRule.cpp
+++ b/rhubarb/src/animation/ShapeRule.cpp
@@ -1,26 +1,36 @@
#include "ShapeRule.h"
#include
+#include
#include "time/ContinuousTimeline.h"
using boost::optional;
using boost::adaptors::transformed;
template
-ContinuousTimeline, AutoJoin> boundedTimelinetoContinuousOptional(const BoundedTimeline& timeline) {
- return{
- timeline.getRange(), boost::none,
- timeline | transformed([](const Timed& timedValue) { return Timed>(timedValue.getTimeRange(), timedValue.getValue()); })
+ContinuousTimeline, AutoJoin> boundedTimelinetoContinuousOptional(
+ const BoundedTimeline& timeline
+) {
+ return {
+ timeline.getRange(),
+ boost::none,
+ timeline | transformed([](const Timed& timedValue) {
+ return Timed>(timedValue.getTimeRange(), timedValue.getValue());
+ })
};
}
-ShapeRule::ShapeRule(const ShapeSet& shapeSet, const optional& phone, TimeRange phoneTiming) :
- shapeSet(shapeSet),
- phone(phone),
+ShapeRule::ShapeRule(
+ ShapeSet shapeSet,
+ optional phone,
+ TimeRange phoneTiming
+) :
+ shapeSet(std::move(shapeSet)),
+ phone(std::move(phone)),
phoneTiming(phoneTiming)
{}
ShapeRule ShapeRule::getInvalid() {
- return {{}, boost::none,{0_cs, 0_cs}};
+ return { {}, boost::none, { 0_cs, 0_cs } };
}
bool ShapeRule::operator==(const ShapeRule& rhs) const {
@@ -43,11 +53,14 @@ ContinuousTimeline getShapeRules(const BoundedTimeline& phones
auto continuousPhones = boundedTimelinetoContinuousOptional(phones);
// Create timeline of shape rules
- ContinuousTimeline shapeRules(phones.getRange(), {{Shape::X}, boost::none, {0_cs, 0_cs}});
+ ContinuousTimeline shapeRules(
+ phones.getRange(),
+ { { Shape::X }, boost::none, { 0_cs, 0_cs } }
+ );
centiseconds previousDuration = 0_cs;
for (const auto& timedPhone : continuousPhones) {
optional phone = timedPhone.getValue();
- centiseconds duration = timedPhone.getDuration();
+ const centiseconds duration = timedPhone.getDuration();
if (phone) {
// Animate one phone
@@ -59,7 +72,10 @@ ContinuousTimeline getShapeRules(const BoundedTimeline& phones
// Copy to timeline.
// Later shape sets may overwrite earlier ones if overlapping.
for (const auto& timedShapeSet : phoneShapeSets) {
- shapeRules.set(timedShapeSet.getTimeRange(), ShapeRule(timedShapeSet.getValue(), phone, timedPhone.getTimeRange()));
+ shapeRules.set(
+ timedShapeSet.getTimeRange(),
+ ShapeRule(timedShapeSet.getValue(), phone, timedPhone.getTimeRange())
+ );
}
}
diff --git a/rhubarb/src/animation/ShapeRule.h b/rhubarb/src/animation/ShapeRule.h
index acb0a08..887dff6 100644
--- a/rhubarb/src/animation/ShapeRule.h
+++ b/rhubarb/src/animation/ShapeRule.h
@@ -11,7 +11,7 @@ struct ShapeRule {
boost::optional phone;
TimeRange phoneTiming;
- ShapeRule(const ShapeSet& shapeSet, const boost::optional& phone, TimeRange phoneTiming);
+ ShapeRule(ShapeSet shapeSet, boost::optional phone, TimeRange phoneTiming);
static ShapeRule getInvalid();
diff --git a/rhubarb/src/animation/animationRules.cpp b/rhubarb/src/animation/animationRules.cpp
index 4bef2e5..5b2b203 100644
--- a/rhubarb/src/animation/animationRules.cpp
+++ b/rhubarb/src/animation/animationRules.cpp
@@ -14,12 +14,14 @@ using std::map;
constexpr size_t shapeValueCount = static_cast(Shape::EndSentinel);
Shape getBasicShape(Shape shape) {
- static constexpr array basicShapes = make_array(A, B, C, D, E, F, B, C, A);
+ static constexpr array basicShapes =
+ make_array(A, B, C, D, E, F, B, C, A);
return basicShapes[static_cast(shape)];
}
Shape relax(Shape shape) {
- static constexpr array relaxedShapes = make_array(A, B, B, C, C, B, X, B, X);
+ static constexpr array relaxedShapes =
+ make_array(A, B, B, C, C, B, X, B, X);
return relaxedShapes[static_cast(shape)];
}
@@ -28,7 +30,8 @@ Shape getClosestShape(Shape reference, ShapeSet shapes) {
throw std::invalid_argument("Cannot select from empty set of shapes.");
}
- // A matrix that for each shape contains all shapes in ascending order of effort required to move to them
+ // A matrix that for each shape contains all shapes in ascending order of effort required to
+ // move to them
constexpr static array, shapeValueCount> effortMatrix = make_array(
/* A */ make_array(A, X, G, B, C, H, E, D, F),
/* B */ make_array(B, G, A, X, C, H, E, D, F),
@@ -38,7 +41,7 @@ Shape getClosestShape(Shape reference, ShapeSet shapes) {
/* F */ make_array(F, B, G, A, X, C, H, E, D),
/* G */ make_array(G, B, C, H, A, X, E, D, F),
/* H */ make_array(H, C, B, G, D, A, X, E, F), // Like C
- /* X */ make_array(X, A, G, B, C, H, E, D, F) // Like A
+ /* X */ make_array(X, A, G, B, C, H, E, D, F) // Like A
);
auto& closestShapes = effortMatrix.at(static_cast(reference));
@@ -55,107 +58,109 @@ optional> getTween(Shape first, Shape second) {
// Note that most of the following rules work in one direction only.
// That's because in animation, the mouth should usually "pop" open without inbetweens,
// then close slowly.
- static const map, pair> lookup{
- {{D, A}, {C, TweenTiming::Early}},
- {{D, B}, {C, TweenTiming::Centered}},
- {{D, G}, {C, TweenTiming::Early}},
- {{D, X}, {C, TweenTiming::Late}},
- {{C, F}, {E, TweenTiming::Centered}}, {{F, C}, {E, TweenTiming::Centered}},
- {{D, F}, {E, TweenTiming::Centered}},
- {{H, F}, {E, TweenTiming::Late}}, {{F, H}, {E, TweenTiming::Early}}
+ static const map, pair> lookup {
+ { { D, A }, { C, TweenTiming::Early } },
+ { { D, B }, { C, TweenTiming::Centered } },
+ { { D, G }, { C, TweenTiming::Early } },
+ { { D, X }, { C, TweenTiming::Late } },
+ { { C, F }, { E, TweenTiming::Centered } }, { { F, C }, { E, TweenTiming::Centered } },
+ { { D, F }, { E, TweenTiming::Centered } },
+ { { H, F }, { E, TweenTiming::Late } }, { { F, H }, { E, TweenTiming::Early } }
};
- auto it = lookup.find({first, second});
+ const auto it = lookup.find({ first, second });
return it != lookup.end() ? it->second : optional>();
}
Timeline getShapeSets(Phone phone, centiseconds duration, centiseconds previousDuration) {
// Returns a timeline with a single shape set
- auto single = [duration](ShapeSet value) {
- return Timeline {{0_cs, duration, value}};
+ const auto single = [duration](ShapeSet value) {
+ return Timeline { { 0_cs, duration, value } };
};
// Returns a timeline with two shape sets, timed as a diphthong
- auto diphthong = [duration](ShapeSet first, ShapeSet second) {
- centiseconds firstDuration = duration_cast(duration * 0.6);
+ const auto diphthong = [duration](ShapeSet first, ShapeSet second) {
+ const centiseconds firstDuration = duration_cast(duration * 0.6);
return Timeline {
- {0_cs, firstDuration, first},
- {firstDuration, duration, second}
+ { 0_cs, firstDuration, first },
+ { firstDuration, duration, second }
};
};
// Returns a timeline with two shape sets, timed as a plosive
- auto plosive = [duration, previousDuration](ShapeSet first, ShapeSet second) {
- centiseconds minOcclusionDuration = 4_cs;
- centiseconds maxOcclusionDuration = 12_cs;
- centiseconds occlusionDuration = clamp(previousDuration / 2, minOcclusionDuration, maxOcclusionDuration);
+ const auto plosive = [duration, previousDuration](ShapeSet first, ShapeSet second) {
+ const centiseconds minOcclusionDuration = 4_cs;
+ const centiseconds maxOcclusionDuration = 12_cs;
+ const centiseconds occlusionDuration =
+ clamp(previousDuration / 2, minOcclusionDuration, maxOcclusionDuration);
return Timeline {
- {-occlusionDuration, 0_cs, first},
- {0_cs, duration, second}
+ { -occlusionDuration, 0_cs, first },
+ { 0_cs, duration, second }
};
};
// Returns the result of `getShapeSets` when called with identical arguments
// except for a different phone.
- auto like = [duration, previousDuration](Phone referencePhone) {
+ const auto like = [duration, previousDuration](Phone referencePhone) {
return getShapeSets(referencePhone, duration, previousDuration);
};
- static const ShapeSet any{A, B, C, D, E, F, G, H, X};
- static const ShapeSet anyOpen{B, C, D, E, F, G, H};
+ static const ShapeSet any { A, B, C, D, E, F, G, H, X };
+ static const ShapeSet anyOpen { B, C, D, E, F, G, H };
// Note:
- // The shapes {A, B, G, X} are very similar. You should avoid regular shape sets containing more than one of these shapes.
+ // The shapes {A, B, G, X} are very similar. You should avoid regular shape sets containing more
+ // than one of these shapes.
// Otherwise, the resulting shape may be more or less random and might not be a good fit.
// As an exception, a very flexible rule may contain *all* these shapes.
switch (phone) {
- case Phone::AO: return single({E});
- case Phone::AA: return single({D});
- case Phone::IY: return single({B});
- case Phone::UW: return single({F});
- case Phone::EH: return single({C});
- case Phone::IH: return single({B});
- case Phone::UH: return single({F});
- case Phone::AH: return duration < 20_cs ? single({C}) : single({D});
- case Phone::Schwa: return single({B, C});
- case Phone::AE: return single({C});
- case Phone::EY: return diphthong({C}, {B});
- case Phone::AY: return duration < 20_cs ? diphthong({C}, {B}) : diphthong({D}, {B});
- case Phone::OW: return single({F});
- case Phone::AW: return duration < 30_cs ? diphthong({C}, {E}) : diphthong({D}, {E});
- case Phone::OY: return diphthong({E}, {B});
- case Phone::ER: return duration < 7_cs ? like(Phone::Schwa) : single({E});
+ case Phone::AO: return single({ E });
+ case Phone::AA: return single({ D });
+ case Phone::IY: return single({ B });
+ case Phone::UW: return single({ F });
+ case Phone::EH: return single({ C });
+ case Phone::IH: return single({ B });
+ case Phone::UH: return single({ F });
+ case Phone::AH: return duration < 20_cs ? single({ C }) : single({ D });
+ case Phone::Schwa: return single({ B, C });
+ case Phone::AE: return single({ C });
+ case Phone::EY: return diphthong({ C }, { B });
+ case Phone::AY: return duration < 20_cs ? diphthong({ C }, { B }) : diphthong({ D }, { B });
+ case Phone::OW: return single({ F });
+ case Phone::AW: return duration < 30_cs ? diphthong({ C }, { E }) : diphthong({ D }, { E });
+ case Phone::OY: return diphthong({ E }, { B });
+ case Phone::ER: return duration < 7_cs ? like(Phone::Schwa) : single({ E });
- case Phone::P:
- case Phone::B: return plosive({A}, any);
- case Phone::T:
- case Phone::D: return plosive({B, F}, anyOpen);
- case Phone::K:
- case Phone::G: return plosive({B, C, E, F, H}, anyOpen);
- case Phone::CH:
- case Phone::JH: return single({B, F});
- case Phone::F:
- case Phone::V: return single({G});
- case Phone::TH:
- case Phone::DH:
- case Phone::S:
- case Phone::Z:
- case Phone::SH:
- case Phone::ZH: return single({B, F});
- case Phone::HH: return single(any); // think "m-hm"
- case Phone::M: return single({A});
- case Phone::N: return single({B, C, F, H});
- case Phone::NG: return single({B, C, E, F});
- case Phone::L: return duration < 20_cs ? single({B, E, F, H}) : single({H});
- case Phone::R: return single({B, E, F});
- case Phone::Y: return single({B, C, F});
- case Phone::W: return single({F});
+ case Phone::P:
+ case Phone::B: return plosive({ A }, any);
+ case Phone::T:
+ case Phone::D: return plosive({ B, F }, anyOpen);
+ case Phone::K:
+ case Phone::G: return plosive({ B, C, E, F, H }, anyOpen);
+ case Phone::CH:
+ case Phone::JH: return single({ B, F });
+ case Phone::F:
+ case Phone::V: return single({ G });
+ case Phone::TH:
+ case Phone::DH:
+ case Phone::S:
+ case Phone::Z:
+ case Phone::SH:
+ case Phone::ZH: return single({ B, F });
+ case Phone::HH: return single(any); // think "m-hm"
+ case Phone::M: return single({ A });
+ case Phone::N: return single({ B, C, F, H });
+ case Phone::NG: return single({ B, C, E, F });
+ case Phone::L: return duration < 20_cs ? single({ B, E, F, H }) : single({ H });
+ case Phone::R: return single({ B, E, F });
+ case Phone::Y: return single({ B, C, F });
+ case Phone::W: return single({ F });
- case Phone::Breath:
- case Phone::Cough:
- case Phone::Smack: return single({C});
- case Phone::Noise: return single({B});
+ case Phone::Breath:
+ case Phone::Cough:
+ case Phone::Smack: return single({ C });
+ case Phone::Noise: return single({ B });
- default: throw std::invalid_argument("Unexpected phone.");
+ default: throw std::invalid_argument("Unexpected phone.");
}
}
diff --git a/rhubarb/src/animation/animationRules.h b/rhubarb/src/animation/animationRules.h
index e3a3b36..46ae857 100644
--- a/rhubarb/src/animation/animationRules.h
+++ b/rhubarb/src/animation/animationRules.h
@@ -31,5 +31,6 @@ boost::optional> getTween(Shape first, Shape secon
// Returns the shape set(s) to use for a given phone.
// The resulting timeline will always cover the entire duration of the phone (starting at 0 cs).
-// It may extend into the negative time range if animation is required prior to the sound being heard.
+// It may extend into the negative time range if animation is required prior to the sound being
+// heard.
Timeline getShapeSets(Phone phone, centiseconds duration, centiseconds previousDuration);
diff --git a/rhubarb/src/animation/mouthAnimation.cpp b/rhubarb/src/animation/mouthAnimation.cpp
index 30add4c..6d8d21d 100644
--- a/rhubarb/src/animation/mouthAnimation.cpp
+++ b/rhubarb/src/animation/mouthAnimation.cpp
@@ -8,17 +8,21 @@
#include "targetShapeSet.h"
#include "staticSegments.h"
-JoiningContinuousTimeline animate(const BoundedTimeline &phones, const ShapeSet& targetShapeSet) {
+JoiningContinuousTimeline animate(
+ const BoundedTimeline& phones,
+ const ShapeSet& targetShapeSet
+) {
// Create timeline of shape rules
ContinuousTimeline shapeRules = getShapeRules(phones);
- // Modify shape rules to only contain allowed shapes -- plus X, which is needed for pauses and will be replaced later
+ // Modify shape rules to only contain allowed shapes -- plus X, which is needed for pauses and
+ // will be replaced later
ShapeSet targetShapeSetPlusX = targetShapeSet;
targetShapeSetPlusX.insert(Shape::X);
shapeRules = convertToTargetShapeSet(shapeRules, targetShapeSetPlusX);
// Animate in multiple steps
- auto performMainAnimationSteps = [&targetShapeSet](const auto& shapeRules) {
+ const auto performMainAnimationSteps = [&targetShapeSet](const auto& shapeRules) {
JoiningContinuousTimeline animation = animateRough(shapeRules);
animation = optimizeTiming(animation);
animation = animatePauses(animation);
@@ -26,7 +30,8 @@ JoiningContinuousTimeline animate(const BoundedTimeline &phones, c
animation = convertToTargetShapeSet(animation, targetShapeSet);
return animation;
};
- const JoiningContinuousTimeline result = avoidStaticSegments(shapeRules, performMainAnimationSteps);
+ const JoiningContinuousTimeline result =
+ avoidStaticSegments(shapeRules, performMainAnimationSteps);
for (const auto& timedShape : result) {
logTimedEvent("shape", timedShape);
diff --git a/rhubarb/src/animation/mouthAnimation.h b/rhubarb/src/animation/mouthAnimation.h
index f001d4a..1c04d0a 100644
--- a/rhubarb/src/animation/mouthAnimation.h
+++ b/rhubarb/src/animation/mouthAnimation.h
@@ -5,4 +5,7 @@
#include "time/ContinuousTimeline.h"
#include "targetShapeSet.h"
-JoiningContinuousTimeline animate(const BoundedTimeline& phones, const ShapeSet& targetShapeSet);
+JoiningContinuousTimeline animate(
+ const BoundedTimeline& phones,
+ const ShapeSet& targetShapeSet
+);
diff --git a/rhubarb/src/animation/pauseAnimation.cpp b/rhubarb/src/animation/pauseAnimation.cpp
index 4f1281e..f7529c4 100644
--- a/rhubarb/src/animation/pauseAnimation.cpp
+++ b/rhubarb/src/animation/pauseAnimation.cpp
@@ -12,7 +12,7 @@ Shape getPauseShape(Shape previous, Shape next, centiseconds duration) {
// It looks odd if the pause shape is identical to the next shape.
// Make sure we find a relaxed shape that's different from the next one.
for (Shape currentRelaxedShape = previous;;) {
- Shape nextRelaxedShape = relax(currentRelaxedShape);
+ const Shape nextRelaxedShape = relax(currentRelaxedShape);
if (nextRelaxedShape != next) {
return nextRelaxedShape;
}
@@ -31,11 +31,18 @@ Shape getPauseShape(Shape previous, Shape next, centiseconds duration) {
JoiningContinuousTimeline animatePauses(const JoiningContinuousTimeline& animation) {
JoiningContinuousTimeline result(animation);
- for_each_adjacent(animation.begin(), animation.end(), [&](const Timed& previous, const Timed& pause, const Timed& next) {
- if (pause.getValue() != Shape::X) return;
+ for_each_adjacent(
+ animation.begin(),
+ animation.end(),
+ [&](const Timed& previous, const Timed& pause, const Timed& next) {
+ if (pause.getValue() != Shape::X) return;
- result.set(pause.getTimeRange(), getPauseShape(previous.getValue(), next.getValue(), pause.getDuration()));
- });
+ result.set(
+ pause.getTimeRange(),
+ getPauseShape(previous.getValue(), next.getValue(), pause.getDuration())
+ );
+ }
+ );
return result;
}
diff --git a/rhubarb/src/animation/roughAnimation.cpp b/rhubarb/src/animation/roughAnimation.cpp
index 1d1d672..af1f462 100644
--- a/rhubarb/src/animation/roughAnimation.cpp
+++ b/rhubarb/src/animation/roughAnimation.cpp
@@ -1,16 +1,17 @@
#include "roughAnimation.h"
#include
-using boost::optional;
-
// Create timeline of shapes using a bidirectional algorithm.
// Here's a rough sketch:
//
-// * Most consonants result in shape sets with multiple options; most vowels have only one shape option.
+// * Most consonants result in shape sets with multiple options; most vowels have only one shape
+// option.
// * When speaking, we tend to slur mouth shapes into each other. So we animate from start to end,
-// always choosing a shape from the current set that resembles the last shape and is somewhat relaxed.
+// always choosing a shape from the current set that resembles the last shape and is somewhat
+// relaxed.
// * When speaking, we anticipate vowels, trying to form their shape before the actual vowel.
-// So whenever we come across a one-shape vowel, we backtrack a little, spreating that shape to the left.
+// So whenever we come across a one-shape vowel, we backtrack a little, spreading that shape to
+// the left.
JoiningContinuousTimeline animateRough(const ContinuousTimeline& shapeRules) {
JoiningContinuousTimeline animation(shapeRules.getRange(), Shape::X);
@@ -21,24 +22,28 @@ JoiningContinuousTimeline animateRough(const ContinuousTimelinegetValue();
const Shape shape = getClosestShape(referenceShape, shapeRule.shapeSet);
animation.set(it->getTimeRange(), shape);
- const bool anticipateShape = shapeRule.phone && isVowel(*shapeRule.phone) && shapeRule.shapeSet.size() == 1;
+ const bool anticipateShape = shapeRule.phone
+ && isVowel(*shapeRule.phone)
+ && shapeRule.shapeSet.size() == 1;
if (anticipateShape) {
// Animate backwards a little
const Shape anticipatedShape = shape;
const centiseconds anticipatedShapeStart = it->getStart();
referenceShape = anticipatedShape;
- for (auto reverseIt = it; reverseIt != shapeRules.begin(); ) {
+ for (auto reverseIt = it; reverseIt != shapeRules.begin();) {
--reverseIt;
// Make sure we haven't animated too far back
centiseconds anticipatingShapeStart = reverseIt->getStart();
if (anticipatingShapeStart == lastAnticipatedShapeStart) break;
const centiseconds maxAnticipationDuration = 20_cs;
- const centiseconds anticipationDuration = anticipatedShapeStart - anticipatingShapeStart;
+ const centiseconds anticipationDuration =
+ anticipatedShapeStart - anticipatingShapeStart;
if (anticipationDuration > maxAnticipationDuration) break;
// Overwrite forward-animated shape with backwards-animated, anticipating shape
- const Shape anticipatingShape = getClosestShape(referenceShape, reverseIt->getValue().shapeSet);
+ const Shape anticipatingShape =
+ getClosestShape(referenceShape, reverseIt->getValue().shapeSet);
animation.set(reverseIt->getTimeRange(), anticipatingShape);
// Make sure the new, backwards-animated shape still resembles the anticipated shape
diff --git a/rhubarb/src/animation/roughAnimation.h b/rhubarb/src/animation/roughAnimation.h
index 83e8567..cb009e6 100644
--- a/rhubarb/src/animation/roughAnimation.h
+++ b/rhubarb/src/animation/roughAnimation.h
@@ -2,5 +2,6 @@
#include "ShapeRule.h"
-// Does a rough animation (no tweening, special pause animation, etc.) using a bidirectional algorithm.
+// Does a rough animation (no tweening, special pause animation, etc.) using a bidirectional
+// algorithm.
JoiningContinuousTimeline animateRough(const ContinuousTimeline& shapeRules);
diff --git a/rhubarb/src/animation/staticSegments.cpp b/rhubarb/src/animation/staticSegments.cpp
index 7c4a7ec..3cc093a 100644
--- a/rhubarb/src/animation/staticSegments.cpp
+++ b/rhubarb/src/animation/staticSegments.cpp
@@ -4,7 +4,6 @@
#include "tools/nextCombination.h"
using std::vector;
-using boost::optional;
int getSyllableCount(const ContinuousTimeline& shapeRules, TimeRange timeRange) {
if (timeRange.empty()) return 0;
@@ -31,16 +30,22 @@ int getSyllableCount(const ContinuousTimeline& shapeRules, TimeRange
}
// A static segment is a prolonged period during which the mouth shape doesn't change
-vector getStaticSegments(const ContinuousTimeline& shapeRules, const JoiningContinuousTimeline& animation) {
+vector getStaticSegments(
+ const ContinuousTimeline& shapeRules,
+ const JoiningContinuousTimeline& animation
+) {
// A static segment must contain a certain number of syllables to look distractingly static
const int minSyllableCount = 3;
- // It must also have a minimum duration. The same number of syllables in fast speech usually looks good.
+ // It must also have a minimum duration. The same number of syllables in fast speech usually
+ // looks good.
const centiseconds minDuration = 75_cs;
vector result;
for (const auto& timedShape : animation) {
const TimeRange timeRange = timedShape.getTimeRange();
- if (timeRange.getDuration() >= minDuration && getSyllableCount(shapeRules, timeRange) >= minSyllableCount) {
+ const bool isStatic = timeRange.getDuration() >= minDuration
+ && getSyllableCount(shapeRules, timeRange) >= minSyllableCount;
+ if (isStatic) {
result.push_back(timeRange);
}
}
@@ -48,20 +53,22 @@ vector getStaticSegments(const ContinuousTimeline& shapeRu
return result;
}
-// Indicates whether this shape rule can potentially be replaced by a modified version that breaks up long static segments
+// Indicates whether this shape rule can potentially be replaced by a modified version that breaks
+// up long static segments
bool canChange(const ShapeRule& rule) {
return rule.phone && isVowel(*rule.phone) && rule.shapeSet.size() == 1;
}
-// Returns a new shape rule that is identical to the specified one, except that it leads to a slightly different visualization
+// Returns a new shape rule that is identical to the specified one, except that it leads to a
+// slightly different visualization
ShapeRule getChangedShapeRule(const ShapeRule& rule) {
assert(canChange(rule));
ShapeRule result(rule);
// So far, I've only encountered B as a static shape.
// If there is ever a problem with another static shape, this function can easily be extended.
- if (rule.shapeSet == ShapeSet{Shape::B}) {
- result.shapeSet = {Shape::C};
+ if (rule.shapeSet == ShapeSet { Shape::B }) {
+ result.shapeSet = { Shape::C };
}
return result;
}
@@ -70,7 +77,10 @@ ShapeRule getChangedShapeRule(const ShapeRule& rule) {
using RuleChanges = vector;
// Replaces the indicated shape rules with slightly different ones, breaking up long static segments
-ContinuousTimeline applyChanges(const ContinuousTimeline& shapeRules, const RuleChanges& changes) {
+ContinuousTimeline applyChanges(
+ const ContinuousTimeline& shapeRules,
+ const RuleChanges& changes
+) {
ContinuousTimeline result(shapeRules);
for (centiseconds changedRuleStart : changes) {
const Timed timedOriginalRule = *shapeRules.get(changedRuleStart);
@@ -85,14 +95,16 @@ public:
RuleChangeScenario(
const ContinuousTimeline& originalRules,
const RuleChanges& changes,
- AnimationFunction animate) :
+ const AnimationFunction& animate
+ ) :
changedRules(applyChanges(originalRules, changes)),
animation(animate(changedRules)),
- staticSegments(getStaticSegments(changedRules, animation)) {}
+ staticSegments(getStaticSegments(changedRules, animation))
+ {}
bool isBetterThan(const RuleChangeScenario& rhs) const {
// We want zero static segments
- if (staticSegments.size() == 0 && rhs.staticSegments.size() > 0) return true;
+ if (staticSegments.empty() && !rhs.staticSegments.empty()) return true;
// Short shapes are better than long ones. Minimize sum-of-squares.
if (getSumOfShapeDurationSquares() < rhs.getSumOfShapeDurationSquares()) return true;
@@ -114,10 +126,17 @@ private:
vector staticSegments;
double getSumOfShapeDurationSquares() const {
- return std::accumulate(animation.begin(), animation.end(), 0.0, [](const double sum, const Timed& timedShape) {
- const double duration = std::chrono::duration_cast>(timedShape.getDuration()).count();
- return sum + duration * duration;
- });
+ return std::accumulate(
+ animation.begin(),
+ animation.end(),
+ 0.0,
+ [](const double sum, const Timed& timedShape) {
+ const double duration = std::chrono::duration_cast>(
+ timedShape.getDuration()
+ ).count();
+ return sum + duration * duration;
+ }
+ );
}
};
@@ -132,8 +151,12 @@ RuleChanges getPossibleRuleChanges(const ContinuousTimeline& shapeRul
return result;
}
-ContinuousTimeline fixStaticSegmentRules(const ContinuousTimeline& shapeRules, AnimationFunction animate) {
- // The complexity of this function is exponential with the number of replacements. So let's cap that value.
+ContinuousTimeline fixStaticSegmentRules(
+ const ContinuousTimeline& shapeRules,
+ const AnimationFunction& animate
+) {
+ // The complexity of this function is exponential with the number of replacements.
+ // So let's cap that value.
const int maxReplacementCount = 3;
// All potential changes
@@ -142,14 +165,18 @@ ContinuousTimeline fixStaticSegmentRules(const ContinuousTimeline 0 && replacementCount <= std::min(static_cast(possibleRuleChanges.size()), maxReplacementCount);
- ++replacementCount
- ) {
+ int replacementCount = 1;
+ bestScenario.getStaticSegmentCount() > 0 && replacementCount <= std::min(static_cast(possibleRuleChanges.size()), maxReplacementCount);
+ ++replacementCount
+ ) {
// Only the first elements of `currentRuleChanges` count
auto currentRuleChanges(possibleRuleChanges);
do {
- RuleChangeScenario currentScenario(shapeRules, {currentRuleChanges.begin(), currentRuleChanges.begin() + replacementCount}, animate);
+ RuleChangeScenario currentScenario(
+ shapeRules,
+ { currentRuleChanges.begin(), currentRuleChanges.begin() + replacementCount },
+ animate
+ );
if (currentScenario.isBetterThan(bestScenario)) {
bestScenario = currentScenario;
}
@@ -164,8 +191,12 @@ bool isFlexible(const ShapeRule& rule) {
return rule.shapeSet.size() > 1;
}
-// Extends the specified time range until it starts and ends with a non-flexible shape rule, if possible
-TimeRange extendToFixedRules(const TimeRange& timeRange, const ContinuousTimeline& shapeRules) {
+// Extends the specified time range until it starts and ends with a non-flexible shape rule, if
+// possible
+TimeRange extendToFixedRules(
+ const TimeRange& timeRange,
+ const ContinuousTimeline& shapeRules
+) {
auto first = shapeRules.find(timeRange.getStart());
while (first != shapeRules.begin() && isFlexible(first->getValue())) {
--first;
@@ -174,10 +205,13 @@ TimeRange extendToFixedRules(const TimeRange& timeRange, const ContinuousTimelin
while (std::next(last) != shapeRules.end() && isFlexible(last->getValue())) {
++last;
}
- return TimeRange(first->getStart(), last->getEnd());
+ return { first->getStart(), last->getEnd() };
}
-JoiningContinuousTimeline avoidStaticSegments(const ContinuousTimeline& shapeRules, AnimationFunction animate) {
+JoiningContinuousTimeline avoidStaticSegments(
+ const ContinuousTimeline& shapeRules,
+ const AnimationFunction& animate
+) {
const auto animation = animate(shapeRules);
const vector staticSegments = getStaticSegments(shapeRules, animation);
if (staticSegments.empty()) {
@@ -187,11 +221,15 @@ JoiningContinuousTimeline avoidStaticSegments(const ContinuousTimeline fixedShapeRules(shapeRules);
for (const TimeRange& staticSegment : staticSegments) {
- // Extend time range to the left and right so we don't lose adjacent rules that might influence the animation
+ // Extend time range to the left and right so we don't lose adjacent rules that might
+ // influence the animation
const TimeRange extendedStaticSegment = extendToFixedRules(staticSegment, shapeRules);
// Fix shape rules within the static segment
- const auto fixedSegmentShapeRules = fixStaticSegmentRules({extendedStaticSegment, ShapeRule::getInvalid(), fixedShapeRules}, animate);
+ const auto fixedSegmentShapeRules = fixStaticSegmentRules(
+ { extendedStaticSegment, ShapeRule::getInvalid(), fixedShapeRules },
+ animate
+ );
for (const auto& timedShapeRule : fixedSegmentShapeRules) {
fixedShapeRules.set(timedShapeRule);
}
diff --git a/rhubarb/src/animation/staticSegments.h b/rhubarb/src/animation/staticSegments.h
index 5b8ff68..843996e 100644
--- a/rhubarb/src/animation/staticSegments.h
+++ b/rhubarb/src/animation/staticSegments.h
@@ -8,7 +8,11 @@
using AnimationFunction = std::function(const ContinuousTimeline&)>;
// Calls the specified animation function with the specified shape rules.
-// If the resulting animation contains long static segments, the shape rules are tweaked and animated again.
+// If the resulting animation contains long static segments, the shape rules are tweaked and
+// animated again.
// Static segments happen rather often.
// See http://animateducated.blogspot.de/2016/10/lip-sync-animation-2.html?showComment=1478861729702#c2940729096183546458.
-JoiningContinuousTimeline avoidStaticSegments(const ContinuousTimeline& shapeRules, AnimationFunction animate);
+JoiningContinuousTimeline<Shape> avoidStaticSegments(
+ const ContinuousTimeline<ShapeRule>& shapeRules,
+ const AnimationFunction& animate
+);
diff --git a/rhubarb/src/animation/targetShapeSet.cpp b/rhubarb/src/animation/targetShapeSet.cpp
index dc9807b..da32ef7 100644
--- a/rhubarb/src/animation/targetShapeSet.cpp
+++ b/rhubarb/src/animation/targetShapeSet.cpp
@@ -4,9 +4,10 @@ Shape convertToTargetShapeSet(Shape shape, const ShapeSet& targetShapeSet) {
if (targetShapeSet.find(shape) != targetShapeSet.end()) {
return shape;
}
- Shape basicShape = getBasicShape(shape);
+ const Shape basicShape = getBasicShape(shape);
if (targetShapeSet.find(basicShape) == targetShapeSet.end()) {
- throw std::invalid_argument(fmt::format("Target shape set must contain basic shape {}.", basicShape));
+ throw std::invalid_argument(
+ fmt::format("Target shape set must contain basic shape {}.", basicShape));
}
return basicShape;
}
@@ -19,7 +20,10 @@ ShapeSet convertToTargetShapeSet(const ShapeSet& shapes, const ShapeSet& targetS
return result;
}
-ContinuousTimeline convertToTargetShapeSet(const ContinuousTimeline& shapeRules, const ShapeSet& targetShapeSet) {
+ContinuousTimeline convertToTargetShapeSet(
+ const ContinuousTimeline& shapeRules,
+ const ShapeSet& targetShapeSet
+) {
ContinuousTimeline result(shapeRules);
for (const auto& timedShapeRule : shapeRules) {
ShapeRule rule = timedShapeRule.getValue();
@@ -29,10 +33,16 @@ ContinuousTimeline convertToTargetShapeSet(const ContinuousTimeline convertToTargetShapeSet(const JoiningContinuousTimeline& animation, const ShapeSet& targetShapeSet) {
+JoiningContinuousTimeline convertToTargetShapeSet(
+ const JoiningContinuousTimeline& animation,
+ const ShapeSet& targetShapeSet
+) {
JoiningContinuousTimeline result(animation);
for (const auto& timedShape : animation) {
- result.set(timedShape.getTimeRange(), convertToTargetShapeSet(timedShape.getValue(), targetShapeSet));
+ result.set(
+ timedShape.getTimeRange(),
+ convertToTargetShapeSet(timedShape.getValue(), targetShapeSet)
+ );
}
return result;
}
diff --git a/rhubarb/src/animation/targetShapeSet.h b/rhubarb/src/animation/targetShapeSet.h
index 0725114..f60379b 100644
--- a/rhubarb/src/animation/targetShapeSet.h
+++ b/rhubarb/src/animation/targetShapeSet.h
@@ -6,11 +6,19 @@
// Returns the closest shape to the specified one that occurs in the target shape set.
Shape convertToTargetShapeSet(Shape shape, const ShapeSet& targetShapeSet);
-// Replaces each shape in the specified set with the closest shape that occurs in the target shape set.
+// Replaces each shape in the specified set with the closest shape that occurs in the target shape
+// set.
ShapeSet convertToTargetShapeSet(const ShapeSet& shapes, const ShapeSet& targetShapeSet);
// Replaces each shape in each rule with the closest shape that occurs in the target shape set.
-ContinuousTimeline convertToTargetShapeSet(const ContinuousTimeline& shapeRules, const ShapeSet& targetShapeSet);
+ContinuousTimeline convertToTargetShapeSet(
+ const ContinuousTimeline& shapeRules,
+ const ShapeSet& targetShapeSet
+);
-// Replaces each shape in the specified animation with the closest shape that occurs in the target shape set.
-JoiningContinuousTimeline convertToTargetShapeSet(const JoiningContinuousTimeline& animation, const ShapeSet& targetShapeSet);
+// Replaces each shape in the specified animation with the closest shape that occurs in the target
+// shape set.
+JoiningContinuousTimeline convertToTargetShapeSet(
+ const JoiningContinuousTimeline& animation,
+ const ShapeSet& targetShapeSet
+);
diff --git a/rhubarb/src/animation/timingOptimization.cpp b/rhubarb/src/animation/timingOptimization.cpp
index b9ccd55..321f6ba 100644
--- a/rhubarb/src/animation/timingOptimization.cpp
+++ b/rhubarb/src/animation/timingOptimization.cpp
@@ -11,7 +11,7 @@ using std::map;
string getShapesString(const JoiningContinuousTimeline& shapes) {
string result;
for (const auto& timedShape : shapes) {
- if (result.size()) {
+ if (!result.empty()) {
result.append(" ");
}
result.append(boost::lexical_cast(timedShape.getValue()));
@@ -44,12 +44,10 @@ Shape getRepresentativeShape(const JoiningTimeline& timeline) {
struct ShapeReduction {
ShapeReduction(const JoiningTimeline& sourceShapes) :
sourceShapes(sourceShapes),
- shape(getRepresentativeShape(sourceShapes))
- {}
+ shape(getRepresentativeShape(sourceShapes)) {}
ShapeReduction(const JoiningTimeline& sourceShapes, TimeRange candidateRange) :
- ShapeReduction(JoiningBoundedTimeline(candidateRange, sourceShapes))
- {}
+ ShapeReduction(JoiningBoundedTimeline(candidateRange, sourceShapes)) {}
JoiningTimeline sourceShapes;
Shape shape;
@@ -57,7 +55,8 @@ struct ShapeReduction {
// Returns a time range of candidate shapes for the next shape to draw.
// Guaranteed to be non-empty.
-TimeRange getNextMinimalCandidateRange(const JoiningContinuousTimeline& sourceShapes, const TimeRange targetRange, const centiseconds writePosition) {
+TimeRange getNextMinimalCandidateRange(const JoiningContinuousTimeline& sourceShapes,
+ const TimeRange targetRange, const centiseconds writePosition) {
if (sourceShapes.empty()) {
throw std::invalid_argument("Cannot determine candidate range for empty source timeline.");
}
@@ -70,12 +69,15 @@ TimeRange getNextMinimalCandidateRange(const JoiningContinuousTimeline& s
const centiseconds remainingTargetDuration = writePosition - targetRange.getStart();
const bool canFitOneOrLess = remainingTargetDuration <= minShapeDuration;
const bool canFitTwo = remainingTargetDuration >= 2 * minShapeDuration;
- const centiseconds duration = canFitOneOrLess || canFitTwo ? minShapeDuration : remainingTargetDuration / 2;
+ const centiseconds duration = canFitOneOrLess || canFitTwo
+ ? minShapeDuration
+ : remainingTargetDuration / 2;
TimeRange candidateRange(writePosition - duration, writePosition);
if (writePosition == targetRange.getEnd()) {
// This is the first iteration.
- // Extend the candidate range to the right in order to consider all source shapes after the target range.
+ // Extend the candidate range to the right in order to consider all source shapes after the
+ // target range.
candidateRange.setEndIfLater(sourceShapes.getRange().getEnd());
}
if (candidateRange.getStart() >= sourceShapes.getRange().getEnd()) {
@@ -92,19 +94,31 @@ TimeRange getNextMinimalCandidateRange(const JoiningContinuousTimeline& s
return candidateRange;
}
-ShapeReduction getNextShapeReduction(const JoiningContinuousTimeline& sourceShapes, const TimeRange targetRange, centiseconds writePosition) {
+ShapeReduction getNextShapeReduction(
+ const JoiningContinuousTimeline& sourceShapes,
+ const TimeRange targetRange,
+ centiseconds writePosition
+) {
// Determine the next time range of candidate shapes. Consider two scenarios:
// ... the shortest-possible candidate range
- const ShapeReduction minReduction(sourceShapes, getNextMinimalCandidateRange(sourceShapes, targetRange, writePosition));
+ const ShapeReduction minReduction(sourceShapes,
+ getNextMinimalCandidateRange(sourceShapes, targetRange, writePosition));
// ... a candidate range extended to the left to fully encompass its left-most shape
const ShapeReduction extendedReduction(sourceShapes,
- {minReduction.sourceShapes.begin()->getStart(), minReduction.sourceShapes.getRange().getEnd()});
+ {
+ minReduction.sourceShapes.begin()->getStart(),
+ minReduction.sourceShapes.getRange().getEnd()
+ }
+ );
- // Determine the shape that might be picked *next* if we choose the shortest-possible candidate range now
- const ShapeReduction nextReduction(sourceShapes,
- getNextMinimalCandidateRange(sourceShapes, targetRange, minReduction.sourceShapes.getRange().getStart()));
+ // Determine the shape that might be picked *next* if we choose the shortest-possible candidate
+ // range now
+ const ShapeReduction nextReduction(
+ sourceShapes,
+ getNextMinimalCandidateRange(sourceShapes, targetRange, minReduction.sourceShapes.getRange().getStart())
+ );
const bool minEqualsExtended = minReduction.shape == extendedReduction.shape;
const bool extendedIsSpecial = extendedReduction.shape != minReduction.shape
@@ -113,8 +127,10 @@ ShapeReduction getNextShapeReduction(const JoiningContinuousTimeline& sou
return minEqualsExtended || extendedIsSpecial ? extendedReduction : minReduction;
}
-// Modifies the timing of the given animation to fit into the specified target time range without jitter.
-JoiningContinuousTimeline retime(const JoiningContinuousTimeline& sourceShapes, const TimeRange targetRange) {
+// Modifies the timing of the given animation to fit into the specified target time range without
+// jitter.
+JoiningContinuousTimeline retime(const JoiningContinuousTimeline& sourceShapes,
+ const TimeRange targetRange) {
logTimedEvent("segment", targetRange, getShapesString(sourceShapes));
JoiningContinuousTimeline result(targetRange, Shape::X);
@@ -125,7 +141,8 @@ JoiningContinuousTimeline retime(const JoiningContinuousTimeline&
while (writePosition > targetRange.getStart()) {
// Decide which shape to show next, possibly discarding short shapes
- const ShapeReduction shapeReduction = getNextShapeReduction(sourceShapes, targetRange, writePosition);
+ const ShapeReduction shapeReduction =
+ getNextShapeReduction(sourceShapes, targetRange, writePosition);
// Determine how long to display the shape
TimeRange targetShapeRange(shapeReduction.sourceShapes.getRange());
@@ -144,7 +161,11 @@ JoiningContinuousTimeline retime(const JoiningContinuousTimeline&
return result;
}
-JoiningContinuousTimeline retime(const JoiningContinuousTimeline& animation, TimeRange sourceRange, TimeRange targetRange) {
+JoiningContinuousTimeline retime(
+ const JoiningContinuousTimeline& animation,
+ TimeRange sourceRange,
+ TimeRange targetRange
+) {
const auto sourceShapes = JoiningContinuousTimeline(sourceRange, Shape::X, animation);
return retime(sourceShapes, targetRange);
}
@@ -160,7 +181,12 @@ JoiningContinuousTimeline optimizeTiming(const JoiningContinuousTimeline<
JoiningContinuousTimeline segments(animation.getRange(), MouthState::Idle);
for (const auto& timedShape : animation) {
const Shape shape = timedShape.getValue();
- const MouthState mouthState = shape == Shape::X ? MouthState::Idle : shape == Shape::A ? MouthState::Closed : MouthState::Open;
+ const MouthState mouthState =
+ shape == Shape::X
+ ? MouthState::Idle
+ : shape == Shape::A
+ ? MouthState::Closed
+ : MouthState::Open;
segments.set(timedShape.getTimeRange(), mouthState);
}
@@ -171,7 +197,8 @@ JoiningContinuousTimeline optimizeTiming(const JoiningContinuousTimeline<
// Make sure all open and closed segments are long enough to register visually.
JoiningContinuousTimeline result(animation.getRange(), Shape::X);
- // ... we're filling the result timeline from right to left, so `resultStart` points to the earliest shape already written
+ // ... we're filling the result timeline from right to left, so `resultStart` points to the
+ // earliest shape already written
centiseconds resultStart = result.getRange().getEnd();
for (auto segmentIt = segments.rbegin(); segmentIt != segments.rend(); ++segmentIt) {
// We don't care about idle shapes at this point.
@@ -188,26 +215,40 @@ JoiningContinuousTimeline optimizeTiming(const JoiningContinuousTimeline<
resultStart = targetRange.getStart();
} else {
// The segment is too short; we have to extend it to the left.
- // Find all adjacent segments to our left that are also too short, then distribute them evenly.
+ // Find all adjacent segments to our left that are also too short, then distribute them
+ // evenly.
const auto begin = segmentIt;
auto end = std::next(begin);
- while (end != segments.rend() && end->getValue() != MouthState::Idle && end->getDuration() < minSegmentDuration) ++end;
+ while (
+ end != segments.rend()
+ && end->getValue() != MouthState::Idle
+ && end->getDuration() < minSegmentDuration
+ ) {
+ ++end;
+ }
// Determine how much we should extend the entire set of short segments to the left
const size_t shortSegmentCount = std::distance(begin, end);
const centiseconds desiredDuration = minSegmentDuration * shortSegmentCount;
const centiseconds currentDuration = begin->getEnd() - std::prev(end)->getStart();
const centiseconds desiredExtensionDuration = desiredDuration - currentDuration;
- const centiseconds availableExtensionDuration = end != segments.rend() ? end->getDuration() - 1_cs : 0_cs;
- const centiseconds extensionDuration = std::min({desiredExtensionDuration, availableExtensionDuration, maxExtensionDuration});
+ const centiseconds availableExtensionDuration = end != segments.rend()
+ ? end->getDuration() - 1_cs
+ : 0_cs;
+ const centiseconds extensionDuration = std::min({
+ desiredExtensionDuration, availableExtensionDuration, maxExtensionDuration
+ });
// Distribute available time range evenly among all short segments
- const centiseconds shortSegmentsTargetStart = std::prev(end)->getStart() - extensionDuration;
+ const centiseconds shortSegmentsTargetStart =
+ std::prev(end)->getStart() - extensionDuration;
for (auto shortSegmentIt = begin; shortSegmentIt != end; ++shortSegmentIt) {
size_t remainingShortSegmentCount = std::distance(shortSegmentIt, end);
- const centiseconds segmentDuration = (resultStart - shortSegmentsTargetStart) / remainingShortSegmentCount;
+ const centiseconds segmentDuration = (resultStart - shortSegmentsTargetStart) /
+ remainingShortSegmentCount;
const TimeRange segmentTargetRange(resultStart - segmentDuration, resultStart);
- const auto retimedSegment = retime(animation, shortSegmentIt->getTimeRange(), segmentTargetRange);
+ const auto retimedSegment =
+ retime(animation, shortSegmentIt->getTimeRange(), segmentTargetRange);
for (const auto& timedShape : retimedSegment) {
result.set(timedShape);
}
diff --git a/rhubarb/src/animation/timingOptimization.h b/rhubarb/src/animation/timingOptimization.h
index e8bb691..27de323 100644
--- a/rhubarb/src/animation/timingOptimization.h
+++ b/rhubarb/src/animation/timingOptimization.h
@@ -3,6 +3,7 @@
#include "core/Shape.h"
#include "time/ContinuousTimeline.h"
-// Changes the timing of an existing animation to reduce jitter and to make sure all shapes register visually.
+// Changes the timing of an existing animation to reduce jitter and to make sure all shapes register
+// visually.
// In some cases, shapes may be omitted.
JoiningContinuousTimeline optimizeTiming(const JoiningContinuousTimeline& animation);
diff --git a/rhubarb/src/animation/tweening.cpp b/rhubarb/src/animation/tweening.cpp
index c487049..6e9a9e7 100644
--- a/rhubarb/src/animation/tweening.cpp
+++ b/rhubarb/src/animation/tweening.cpp
@@ -19,21 +19,30 @@ JoiningContinuousTimeline insertTweens(const JoiningContinuousTimeline= size) {
- throw invalid_argument(fmt::format("Cannot read from sample index {}. Clip size is {}.", index, size));
+ throw invalid_argument(fmt::format(
+ "Cannot read from sample index {}. Clip size is {}.",
+ index,
+ size
+ ));
}
if (index == lastIndex) {
return lastSample;
@@ -51,7 +55,7 @@ AudioClip::iterator AudioClip::end() const {
return SampleIterator(*this, size());
}
-std::unique_ptr operator|(std::unique_ptr clip, AudioEffect effect) {
+std::unique_ptr<AudioClip> operator|(std::unique_ptr<AudioClip> clip, const AudioEffect& effect) {
return effect(std::move(clip));
}
diff --git a/rhubarb/src/audio/AudioClip.h b/rhubarb/src/audio/AudioClip.h
index 4a188b3..e2a235a 100644
--- a/rhubarb/src/audio/AudioClip.h
+++ b/rhubarb/src/audio/AudioClip.h
@@ -30,7 +30,7 @@ private:
using AudioEffect = std::function(std::unique_ptr)>;
-std::unique_ptr operator|(std::unique_ptr clip, AudioEffect effect);
+std::unique_ptr<AudioClip> operator|(std::unique_ptr<AudioClip> clip, const AudioEffect& effect);
using SampleReader = AudioClip::SampleReader;
diff --git a/rhubarb/src/audio/DcOffset.cpp b/rhubarb/src/audio/DcOffset.cpp
index 3a4a09c..afb9e71 100644
--- a/rhubarb/src/audio/DcOffset.cpp
+++ b/rhubarb/src/audio/DcOffset.cpp
@@ -15,15 +15,19 @@ unique_ptr DcOffset::clone() const {
}
SampleReader DcOffset::createUnsafeSampleReader() const {
- return [read = inputClip->createSampleReader(), factor = factor, offset = offset](size_type index) {
- float sample = read(index);
+ return [
+ read = inputClip->createSampleReader(),
+ factor = factor,
+ offset = offset
+ ](size_type index) {
+ const float sample = read(index);
return sample * factor + offset;
};
}
float getDcOffset(const AudioClip& audioClip) {
int flatMeanSampleCount, fadingMeanSampleCount;
- int sampleRate = audioClip.getSampleRate();
+ const int sampleRate = audioClip.getSampleRate();
if (audioClip.size() > 4 * sampleRate) {
// Long audio file. Average over the first 3 seconds, then fade out over the 4th.
flatMeanSampleCount = 3 * sampleRate;
@@ -34,31 +38,32 @@ float getDcOffset(const AudioClip& audioClip) {
fadingMeanSampleCount = 0;
}
- auto read = audioClip.createSampleReader();
+ const auto read = audioClip.createSampleReader();
double sum = 0;
for (int i = 0; i < flatMeanSampleCount; ++i) {
sum += read(i);
}
for (int i = 0; i < fadingMeanSampleCount; ++i) {
- double weight = static_cast(fadingMeanSampleCount - i) / fadingMeanSampleCount;
+ const double weight =
+ static_cast<double>(fadingMeanSampleCount - i) / fadingMeanSampleCount;
sum += read(flatMeanSampleCount + i) * weight;
}
- double totalWeight = flatMeanSampleCount + (fadingMeanSampleCount + 1) / 2.0;
- double offset = sum / totalWeight;
+ const double totalWeight = flatMeanSampleCount + (fadingMeanSampleCount + 1) / 2.0;
+ const double offset = sum / totalWeight;
return static_cast(offset);
}
AudioEffect addDcOffset(float offset, float epsilon) {
return [offset, epsilon](unique_ptr inputClip) -> unique_ptr {
- if (std::abs(offset) < epsilon) return std::move(inputClip);
+ if (std::abs(offset) < epsilon) return inputClip;
return make_unique(std::move(inputClip), offset);
};
}
AudioEffect removeDcOffset(float epsilon) {
return [epsilon](unique_ptr inputClip) {
- float offset = getDcOffset(*inputClip);
+ const float offset = getDcOffset(*inputClip);
return std::move(inputClip) | addDcOffset(-offset, epsilon);
};
}
diff --git a/rhubarb/src/audio/OggVorbisFileReader.cpp b/rhubarb/src/audio/OggVorbisFileReader.cpp
index d3b01e1..f84aaa4 100644
--- a/rhubarb/src/audio/OggVorbisFileReader.cpp
+++ b/rhubarb/src/audio/OggVorbisFileReader.cpp
@@ -14,30 +14,30 @@ using std::ios_base;
std::string vorbisErrorToString(int64_t errorCode) {
switch (errorCode) {
- case OV_EREAD:
- return "Read error while fetching compressed data for decode.";
- case OV_EFAULT:
- return "Internal logic fault; indicates a bug or heap/stack corruption.";
- case OV_EIMPL:
- return "Feature not implemented";
- case OV_EINVAL:
- return "Either an invalid argument, or incompletely initialized argument passed to a call.";
- case OV_ENOTVORBIS:
- return "The given file/data was not recognized as Ogg Vorbis data.";
- case OV_EBADHEADER:
- return "The file/data is apparently an Ogg Vorbis stream, but contains a corrupted or undecipherable header.";
- case OV_EVERSION:
- return "The bitstream format revision of the given Vorbis stream is not supported.";
- case OV_ENOTAUDIO:
- return "Packet is not an audio packet.";
- case OV_EBADPACKET:
- return "Error in packet.";
- case OV_EBADLINK:
- return "The given link exists in the Vorbis data stream, but is not decipherable due to garbacge or corruption.";
- case OV_ENOSEEK:
- return "The given stream is not seekable.";
- default:
- return "An unexpected Vorbis error occurred.";
+ case OV_EREAD:
+ return "Read error while fetching compressed data for decode.";
+ case OV_EFAULT:
+ return "Internal logic fault; indicates a bug or heap/stack corruption.";
+ case OV_EIMPL:
+ return "Feature not implemented";
+ case OV_EINVAL:
+ return "Either an invalid argument, or incompletely initialized argument passed to a call.";
+ case OV_ENOTVORBIS:
+ return "The given file/data was not recognized as Ogg Vorbis data.";
+ case OV_EBADHEADER:
+ return "The file/data is apparently an Ogg Vorbis stream, but contains a corrupted or undecipherable header.";
+ case OV_EVERSION:
+ return "The bitstream format revision of the given Vorbis stream is not supported.";
+ case OV_ENOTAUDIO:
+ return "Packet is not an audio packet.";
+ case OV_EBADPACKET:
+ return "Error in packet.";
+ case OV_EBADLINK:
+ return "The given link exists in the Vorbis data stream, but is not decipherable due to garbage or corruption.";
+ case OV_ENOSEEK:
+ return "The given stream is not seekable.";
+ default:
+ return "An unexpected Vorbis error occurred.";
}
}
@@ -64,13 +64,13 @@ size_t readCallback(void* buffer, size_t elementSize, size_t elementCount, void*
}
int seekCallback(void* dataSource, ogg_int64_t offset, int origin) {
- static const vector seekDirections{
+ static const vector seekDirections {
ios_base::beg, ios_base::cur, ios_base::end
};
ifstream& stream = *static_cast(dataSource);
stream.seekg(offset, seekDirections.at(origin));
- stream.clear(); // In case we seeked to EOF
+ stream.clear(); // In case we sought to EOF
return 0;
}
@@ -82,26 +82,13 @@ long tellCallback(void* dataSource) {
}
// RAII wrapper around OggVorbis_File
-class OggVorbisFile {
+class OggVorbisFile final {
public:
+ OggVorbisFile(const path& filePath);
+
OggVorbisFile(const OggVorbisFile&) = delete;
OggVorbisFile& operator=(const OggVorbisFile&) = delete;
- OggVorbisFile(const path& filePath) :
- stream(openFile(filePath))
- {
- // Throw only on badbit, not on failbit.
- // Ogg Vorbis expects read operations past the end of the file to
- // succeed, not to throw.
- stream.exceptions(ifstream::badbit);
-
- // Ogg Vorbis normally uses the `FILE` API from the C standard library.
- // This doesn't handle Unicode paths on Windows.
- // Use wrapper functions around `ifstream` instead.
- const ov_callbacks callbacks{readCallback, seekCallback, nullptr, tellCallback};
- throwOnError(ov_open_callbacks(&stream, &oggVorbisHandle, nullptr, 0, callbacks));
- }
-
OggVorbis_File* get() {
return &oggVorbisHandle;
}
@@ -115,6 +102,22 @@ private:
ifstream stream;
};
+OggVorbisFile::OggVorbisFile(const path& filePath) :
+ oggVorbisHandle(),
+ stream(openFile(filePath))
+{
+ // Throw only on badbit, not on failbit.
+ // Ogg Vorbis expects read operations past the end of the file to
+ // succeed, not to throw.
+ stream.exceptions(ifstream::badbit);
+
+ // Ogg Vorbis normally uses the `FILE` API from the C standard library.
+ // This doesn't handle Unicode paths on Windows.
+ // Use wrapper functions around `ifstream` instead.
+ const ov_callbacks callbacks { readCallback, seekCallback, nullptr, tellCallback };
+ throwOnError(ov_open_callbacks(&stream, &oggVorbisHandle, nullptr, 0, callbacks));
+}
+
OggVorbisFileReader::OggVorbisFileReader(const path& filePath) :
filePath(filePath)
{
@@ -153,7 +156,7 @@ SampleReader OggVorbisFileReader::createUnsafeSampleReader() const {
}
// Downmix channels
- size_type bufferIndex = index - bufferStart;
+ const size_type bufferIndex = index - bufferStart;
value_type sum = 0.0f;
for (int channel = 0; channel < channelCount; ++channel) {
sum += buffer[channel][bufferIndex];
diff --git a/rhubarb/src/audio/SampleRateConverter.cpp b/rhubarb/src/audio/SampleRateConverter.cpp
index 24fba01..665a7e5 100644
--- a/rhubarb/src/audio/SampleRateConverter.cpp
+++ b/rhubarb/src/audio/SampleRateConverter.cpp
@@ -17,7 +17,10 @@ SampleRateConverter::SampleRateConverter(unique_ptr inputClip, int ou
throw invalid_argument("Sample rate must be positive.");
}
if (this->inputClip->getSampleRate() < outputSampleRate) {
- throw invalid_argument(fmt::format("Upsampling not supported. Input sample rate must not be below {}Hz.", outputSampleRate));
+ throw invalid_argument(fmt::format(
+ "Upsampling not supported. Input sample rate must not be below {}Hz.",
+ outputSampleRate
+ ));
}
}
@@ -30,11 +33,11 @@ float mean(double inputStart, double inputEnd, const SampleReader& read) {
double sum = 0;
// ... first sample (weight <= 1)
- int64_t startIndex = static_cast(inputStart);
+ const int64_t startIndex = static_cast<int64_t>(inputStart);
sum += read(startIndex) * ((startIndex + 1) - inputStart);
// ... middle samples (weight 1 each)
- int64_t endIndex = static_cast(inputEnd);
+ const int64_t endIndex = static_cast<int64_t>(inputEnd);
for (int64_t index = startIndex + 1; index < endIndex; ++index) {
sum += read(index);
}
@@ -48,9 +51,14 @@ float mean(double inputStart, double inputEnd, const SampleReader& read) {
}
SampleReader SampleRateConverter::createUnsafeSampleReader() const {
- return[read = inputClip->createSampleReader(), downscalingFactor = downscalingFactor, size = inputClip->size()](size_type index) {
- double inputStart = index * downscalingFactor;
- double inputEnd = std::min((index + 1) * downscalingFactor, static_cast(size));
+ return [
+ read = inputClip->createSampleReader(),
+ downscalingFactor = downscalingFactor,
+ size = inputClip->size()
+ ](size_type index) {
+ const double inputStart = index * downscalingFactor;
+ const double inputEnd =
+ std::min((index + 1) * downscalingFactor, static_cast<double>(size));
return mean(inputStart, inputEnd, read);
};
}
diff --git a/rhubarb/src/audio/WaveFileReader.cpp b/rhubarb/src/audio/WaveFileReader.cpp
index e910dad..e8aa872 100644
--- a/rhubarb/src/audio/WaveFileReader.cpp
+++ b/rhubarb/src/audio/WaveFileReader.cpp
@@ -1,6 +1,7 @@
#include
#include "WaveFileReader.h"
#include "ioTools.h"
+#include
#include "tools/platformTools.h"
#include "tools/fileTools.h"
@@ -32,9 +33,9 @@ namespace Codec {
string codecToString(int codec);
-WaveFileReader::WaveFileReader(path filePath) :
+WaveFileReader::WaveFileReader(const path& filePath) :
filePath(filePath),
- formatInfo{}
+ formatInfo {}
{
auto file = openFile(filePath);
@@ -43,7 +44,7 @@ WaveFileReader::WaveFileReader(path filePath) :
file.seekg(0);
auto remaining = [&](int byteCount) {
- std::streamoff filePosition = file.tellg();
+ const std::streamoff filePosition = file.tellg();
return byteCount <= fileSize - filePosition;
};
@@ -51,7 +52,7 @@ WaveFileReader::WaveFileReader(path filePath) :
if (!remaining(10)) {
throw runtime_error("WAVE file is corrupt. Header not found.");
}
- uint32_t rootChunkId = read(file);
+ auto rootChunkId = read<uint32_t>(file);
if (rootChunkId != fourcc('R', 'I', 'F', 'F')) {
throw runtime_error("Unknown file format. Only WAVE files are supported.");
}
@@ -67,69 +68,75 @@ WaveFileReader::WaveFileReader(path filePath) :
uint32_t chunkId = read(file);
int chunkSize = read(file);
switch (chunkId) {
- case fourcc('f', 'm', 't', ' '): {
- // Read relevant data
- uint16_t codec = read(file);
- formatInfo.channelCount = read(file);
- formatInfo.frameRate = read(file);
- read(file); // Bytes per second
- int frameSize = read(file);
- int bitsPerSample = read(file);
+ case fourcc('f', 'm', 't', ' '):
+ {
+ // Read relevant data
+ uint16_t codec = read<uint16_t>(file);
+ formatInfo.channelCount = read<uint16_t>(file);
+ formatInfo.frameRate = read<uint32_t>(file);
+ read<uint32_t>(file); // Bytes per second
+ int frameSize = read<uint16_t>(file);
+ int bitsPerSample = read<uint16_t>(file);
- // We've read 16 bytes so far. Skip the remainder.
- file.seekg(roundToEven(chunkSize) - 16, file.cur);
+ // We've read 16 bytes so far. Skip the remainder.
+ file.seekg(roundToEven(chunkSize) - 16, std::ios_base::cur);
- // Determine sample format
- int bytesPerSample;
- switch (codec) {
- case Codec::Pcm:
- // Determine sample size.
- // According to the WAVE standard, sample sizes that are not multiples of 8 bits
- // (e.g. 12 bits) can be treated like the next-larger byte size.
- if (bitsPerSample == 8) {
- formatInfo.sampleFormat = SampleFormat::UInt8;
- bytesPerSample = 1;
- } else if (bitsPerSample <= 16) {
- formatInfo.sampleFormat = SampleFormat::Int16;
- bytesPerSample = 2;
- } else if (bitsPerSample <= 24) {
- formatInfo.sampleFormat = SampleFormat::Int24;
- bytesPerSample = 3;
- } else {
- throw runtime_error(
- format("Unsupported sample format: {}-bit PCM.", bitsPerSample));
- }
- if (bytesPerSample != frameSize / formatInfo.channelCount) {
- throw runtime_error("Unsupported sample organization.");
+ // Determine sample format
+ int bytesPerSample;
+ switch (codec) {
+ case Codec::Pcm:
+ // Determine sample size.
+ // According to the WAVE standard, sample sizes that are not multiples of 8
+ // bits (e.g. 12 bits) can be treated like the next-larger byte size.
+ if (bitsPerSample == 8) {
+ formatInfo.sampleFormat = SampleFormat::UInt8;
+ bytesPerSample = 1;
+ } else if (bitsPerSample <= 16) {
+ formatInfo.sampleFormat = SampleFormat::Int16;
+ bytesPerSample = 2;
+ } else if (bitsPerSample <= 24) {
+ formatInfo.sampleFormat = SampleFormat::Int24;
+ bytesPerSample = 3;
+ } else {
+ throw runtime_error(
+ format("Unsupported sample format: {}-bit PCM.", bitsPerSample));
+ }
+ if (bytesPerSample != frameSize / formatInfo.channelCount) {
+ throw runtime_error("Unsupported sample organization.");
+ }
+ break;
+ case Codec::Float:
+ if (bitsPerSample == 32) {
+ formatInfo.sampleFormat = SampleFormat::Float32;
+ bytesPerSample = 4;
+ } else {
+ throw runtime_error(
+ format("Unsupported sample format: {}-bit IEEE Float.", bitsPerSample)
+ );
+ }
+ break;
+ default:
+ throw runtime_error(format(
+ "Unsupported audio codec: '{}'. Only uncompressed codecs ('{}' and '{}') are supported.",
+ codecToString(codec), codecToString(Codec::Pcm), codecToString(Codec::Float)
+ ));
}
+ formatInfo.bytesPerFrame = bytesPerSample * formatInfo.channelCount;
+ break;
+ }
+ case fourcc('d', 'a', 't', 'a'):
+ {
+ reachedDataChunk = true;
+ formatInfo.dataOffset = file.tellg();
+ formatInfo.frameCount = chunkSize / formatInfo.bytesPerFrame;
+ break;
+ }
+ default:
+ {
+ // Skip unknown chunk
+ file.seekg(roundToEven(chunkSize), std::ios_base::cur);
break;
- case Codec::Float:
- if (bitsPerSample == 32) {
- formatInfo.sampleFormat = SampleFormat::Float32;
- bytesPerSample = 4;
- } else {
- throw runtime_error(format("Unsupported sample format: {}-bit IEEE Float.", bitsPerSample));
- }
- break;
- default:
- throw runtime_error(format(
- "Unsupported audio codec: '{}'. Only uncompressed codecs ('{}' and '{}') are supported.",
- codecToString(codec), codecToString(Codec::Pcm), codecToString(Codec::Float)));
}
- formatInfo.bytesPerFrame = bytesPerSample * formatInfo.channelCount;
- break;
- }
- case fourcc('d', 'a', 't', 'a'): {
- reachedDataChunk = true;
- formatInfo.dataOffset = file.tellg();
- formatInfo.frameCount = chunkSize / formatInfo.bytesPerFrame;
- break;
- }
- default: {
- // Skip unknown chunk
- file.seekg(roundToEven(chunkSize), file.cur);
- break;
- }
}
}
}
@@ -138,30 +145,38 @@ unique_ptr WaveFileReader::clone() const {
return make_unique(*this);
}
-inline AudioClip::value_type readSample(std::ifstream& file, SampleFormat sampleFormat, int channelCount) {
+inline AudioClip::value_type readSample(
+ std::ifstream& file,
+ SampleFormat sampleFormat,
+ int channelCount
+) {
float sum = 0;
for (int channelIndex = 0; channelIndex < channelCount; channelIndex++) {
switch (sampleFormat) {
- case SampleFormat::UInt8: {
- uint8_t raw = read(file);
- sum += toNormalizedFloat(raw, 0, UINT8_MAX);
- break;
- }
- case SampleFormat::Int16: {
- int16_t raw = read(file);
- sum += toNormalizedFloat(raw, INT16_MIN, INT16_MAX);
- break;
- }
- case SampleFormat::Int24: {
- int raw = read(file);
- if (raw & 0x800000) raw |= 0xFF000000; // Fix two's complement
- sum += toNormalizedFloat(raw, INT24_MIN, INT24_MAX);
- break;
- }
- case SampleFormat::Float32: {
- sum += read(file);
- break;
- }
+ case SampleFormat::UInt8:
+ {
+ const uint8_t raw = read(file);
+ sum += toNormalizedFloat(raw, 0, UINT8_MAX);
+ break;
+ }
+ case SampleFormat::Int16:
+ {
+ const int16_t raw = read(file);
+ sum += toNormalizedFloat(raw, INT16_MIN, INT16_MAX);
+ break;
+ }
+ case SampleFormat::Int24:
+ {
+ int raw = read(file);
+ if (raw & 0x800000) raw |= 0xFF000000; // Fix two's complement
+ sum += toNormalizedFloat(raw, INT24_MIN, INT24_MAX);
+ break;
+ }
+ case SampleFormat::Float32:
+ {
+ sum += read(file);
+ break;
+ }
}
}
@@ -169,10 +184,17 @@ inline AudioClip::value_type readSample(std::ifstream& file, SampleFormat sample
}
SampleReader WaveFileReader::createUnsafeSampleReader() const {
- return [formatInfo = formatInfo, file = std::make_shared(openFile(filePath)), filePos = std::streampos(0)](size_type index) mutable {
- std::streampos newFilePos = formatInfo.dataOffset + static_cast(index * formatInfo.bytesPerFrame);
+ return
+ [
+ formatInfo = formatInfo,
+ file = std::make_shared(openFile(filePath)),
+ filePos = std::streampos(0)
+ ](size_type index) mutable {
+ const std::streampos newFilePos = formatInfo.dataOffset
+ + static_cast(index * formatInfo.bytesPerFrame);
file->seekg(newFilePos);
- value_type result = readSample(*file, formatInfo.sampleFormat, formatInfo.channelCount);
+ const value_type result =
+ readSample(*file, formatInfo.sampleFormat, formatInfo.channelCount);
filePos = newFilePos + static_cast(formatInfo.bytesPerFrame);
return result;
};
@@ -180,248 +202,249 @@ SampleReader WaveFileReader::createUnsafeSampleReader() const {
string codecToString(int codec) {
switch (codec) {
- case 0x0001: return "PCM";
- case 0x0002: return "Microsoft ADPCM";
- case 0x0003: return "IEEE Float";
- case 0x0004: return "Compaq VSELP";
- case 0x0005: return "IBM CVSD";
- case 0x0006: return "Microsoft a-Law";
- case 0x0007: return "Microsoft u-Law";
- case 0x0008: return "Microsoft DTS";
- case 0x0009: return "DRM";
- case 0x000a: return "WMA 9 Speech";
- case 0x000b: return "Microsoft Windows Media RT Voice";
- case 0x0010: return "OKI-ADPCM";
- case 0x0011: return "Intel IMA/DVI-ADPCM";
- case 0x0012: return "Videologic Mediaspace ADPCM";
- case 0x0013: return "Sierra ADPCM";
- case 0x0014: return "Antex G.723 ADPCM";
- case 0x0015: return "DSP Solutions DIGISTD";
- case 0x0016: return "DSP Solutions DIGIFIX";
- case 0x0017: return "Dialoic OKI ADPCM";
- case 0x0018: return "Media Vision ADPCM";
- case 0x0019: return "HP CU";
- case 0x001a: return "HP Dynamic Voice";
- case 0x0020: return "Yamaha ADPCM";
- case 0x0021: return "SONARC Speech Compression";
- case 0x0022: return "DSP Group True Speech";
- case 0x0023: return "Echo Speech Corp.";
- case 0x0024: return "Virtual Music Audiofile AF36";
- case 0x0025: return "Audio Processing Tech.";
- case 0x0026: return "Virtual Music Audiofile AF10";
- case 0x0027: return "Aculab Prosody 1612";
- case 0x0028: return "Merging Tech. LRC";
- case 0x0030: return "Dolby AC2";
- case 0x0031: return "Microsoft GSM610";
- case 0x0032: return "MSN Audio";
- case 0x0033: return "Antex ADPCME";
- case 0x0034: return "Control Resources VQLPC";
- case 0x0035: return "DSP Solutions DIGIREAL";
- case 0x0036: return "DSP Solutions DIGIADPCM";
- case 0x0037: return "Control Resources CR10";
- case 0x0038: return "Natural MicroSystems VBX ADPCM";
- case 0x0039: return "Crystal Semiconductor IMA ADPCM";
- case 0x003a: return "Echo Speech ECHOSC3";
- case 0x003b: return "Rockwell ADPCM";
- case 0x003c: return "Rockwell DIGITALK";
- case 0x003d: return "Xebec Multimedia";
- case 0x0040: return "Antex G.721 ADPCM";
- case 0x0041: return "Antex G.728 CELP";
- case 0x0042: return "Microsoft MSG723";
- case 0x0043: return "IBM AVC ADPCM";
- case 0x0045: return "ITU-T G.726";
- case 0x0050: return "Microsoft MPEG";
- case 0x0051: return "RT23 or PAC";
- case 0x0052: return "InSoft RT24";
- case 0x0053: return "InSoft PAC";
- case 0x0055: return "MP3";
- case 0x0059: return "Cirrus";
- case 0x0060: return "Cirrus Logic";
- case 0x0061: return "ESS Tech. PCM";
- case 0x0062: return "Voxware Inc.";
- case 0x0063: return "Canopus ATRAC";
- case 0x0064: return "APICOM G.726 ADPCM";
- case 0x0065: return "APICOM G.722 ADPCM";
- case 0x0066: return "Microsoft DSAT";
- case 0x0067: return "Micorsoft DSAT DISPLAY";
- case 0x0069: return "Voxware Byte Aligned";
- case 0x0070: return "Voxware AC8";
- case 0x0071: return "Voxware AC10";
- case 0x0072: return "Voxware AC16";
- case 0x0073: return "Voxware AC20";
- case 0x0074: return "Voxware MetaVoice";
- case 0x0075: return "Voxware MetaSound";
- case 0x0076: return "Voxware RT29HW";
- case 0x0077: return "Voxware VR12";
- case 0x0078: return "Voxware VR18";
- case 0x0079: return "Voxware TQ40";
- case 0x007a: return "Voxware SC3";
- case 0x007b: return "Voxware SC3";
- case 0x0080: return "Soundsoft";
- case 0x0081: return "Voxware TQ60";
- case 0x0082: return "Microsoft MSRT24";
- case 0x0083: return "AT&T G.729A";
- case 0x0084: return "Motion Pixels MVI MV12";
- case 0x0085: return "DataFusion G.726";
- case 0x0086: return "DataFusion GSM610";
- case 0x0088: return "Iterated Systems Audio";
- case 0x0089: return "Onlive";
- case 0x008a: return "Multitude, Inc. FT SX20";
- case 0x008b: return "Infocom ITS A/S G.721 ADPCM";
- case 0x008c: return "Convedia G729";
- case 0x008d: return "Not specified congruency, Inc.";
- case 0x0091: return "Siemens SBC24";
- case 0x0092: return "Sonic Foundry Dolby AC3 APDIF";
- case 0x0093: return "MediaSonic G.723";
- case 0x0094: return "Aculab Prosody 8kbps";
- case 0x0097: return "ZyXEL ADPCM";
- case 0x0098: return "Philips LPCBB";
- case 0x0099: return "Studer Professional Audio Packed";
- case 0x00a0: return "Malden PhonyTalk";
- case 0x00a1: return "Racal Recorder GSM";
- case 0x00a2: return "Racal Recorder G720.a";
- case 0x00a3: return "Racal G723.1";
- case 0x00a4: return "Racal Tetra ACELP";
- case 0x00b0: return "NEC AAC NEC Corporation";
- case 0x00ff: return "AAC";
- case 0x0100: return "Rhetorex ADPCM";
- case 0x0101: return "IBM u-Law";
- case 0x0102: return "IBM a-Law";
- case 0x0103: return "IBM ADPCM";
- case 0x0111: return "Vivo G.723";
- case 0x0112: return "Vivo Siren";
- case 0x0120: return "Philips Speech Processing CELP";
- case 0x0121: return "Philips Speech Processing GRUNDIG";
- case 0x0123: return "Digital G.723";
- case 0x0125: return "Sanyo LD ADPCM";
- case 0x0130: return "Sipro Lab ACEPLNET";
- case 0x0131: return "Sipro Lab ACELP4800";
- case 0x0132: return "Sipro Lab ACELP8V3";
- case 0x0133: return "Sipro Lab G.729";
- case 0x0134: return "Sipro Lab G.729A";
- case 0x0135: return "Sipro Lab Kelvin";
- case 0x0136: return "VoiceAge AMR";
- case 0x0140: return "Dictaphone G.726 ADPCM";
- case 0x0150: return "Qualcomm PureVoice";
- case 0x0151: return "Qualcomm HalfRate";
- case 0x0155: return "Ring Zero Systems TUBGSM";
- case 0x0160: return "Microsoft Audio1";
- case 0x0161: return "Windows Media Audio V2 V7 V8 V9 / DivX audio (WMA) / Alex AC3 Audio";
- case 0x0162: return "Windows Media Audio Professional V9";
- case 0x0163: return "Windows Media Audio Lossless V9";
- case 0x0164: return "WMA Pro over S/PDIF";
- case 0x0170: return "UNISYS NAP ADPCM";
- case 0x0171: return "UNISYS NAP ULAW";
- case 0x0172: return "UNISYS NAP ALAW";
- case 0x0173: return "UNISYS NAP 16K";
- case 0x0174: return "MM SYCOM ACM SYC008 SyCom Technologies";
- case 0x0175: return "MM SYCOM ACM SYC701 G726L SyCom Technologies";
- case 0x0176: return "MM SYCOM ACM SYC701 CELP54 SyCom Technologies";
- case 0x0177: return "MM SYCOM ACM SYC701 CELP68 SyCom Technologies";
- case 0x0178: return "Knowledge Adventure ADPCM";
- case 0x0180: return "Fraunhofer IIS MPEG2AAC";
- case 0x0190: return "Digital Theater Systems DTS DS";
- case 0x0200: return "Creative Labs ADPCM";
- case 0x0202: return "Creative Labs FASTSPEECH8";
- case 0x0203: return "Creative Labs FASTSPEECH10";
- case 0x0210: return "UHER ADPCM";
- case 0x0215: return "Ulead DV ACM";
- case 0x0216: return "Ulead DV ACM";
- case 0x0220: return "Quarterdeck Corp.";
- case 0x0230: return "I-Link VC";
- case 0x0240: return "Aureal Semiconductor Raw Sport";
- case 0x0241: return "ESST AC3";
- case 0x0250: return "Interactive Products HSX";
- case 0x0251: return "Interactive Products RPELP";
- case 0x0260: return "Consistent CS2";
- case 0x0270: return "Sony SCX";
- case 0x0271: return "Sony SCY";
- case 0x0272: return "Sony ATRAC3";
- case 0x0273: return "Sony SPC";
- case 0x0280: return "TELUM Telum Inc.";
- case 0x0281: return "TELUMIA Telum Inc.";
- case 0x0285: return "Norcom Voice Systems ADPCM";
- case 0x0300: return "Fujitsu FM TOWNS SND";
- case 0x0301:
- case 0x0302:
- case 0x0303:
- case 0x0304:
- case 0x0305:
- case 0x0306:
- case 0x0307:
- case 0x0308: return "Fujitsu (not specified)";
- case 0x0350: return "Micronas Semiconductors, Inc. Development";
- case 0x0351: return "Micronas Semiconductors, Inc. CELP833";
- case 0x0400: return "Brooktree Digital";
- case 0x0401: return "Intel Music Coder (IMC)";
- case 0x0402: return "Ligos Indeo Audio";
- case 0x0450: return "QDesign Music";
- case 0x0500: return "On2 VP7 On2 Technologies";
- case 0x0501: return "On2 VP6 On2 Technologies";
- case 0x0680: return "AT&T VME VMPCM";
- case 0x0681: return "AT&T TCP";
- case 0x0700: return "YMPEG Alpha (dummy for MPEG-2 compressor)";
- case 0x08ae: return "ClearJump LiteWave (lossless)";
- case 0x1000: return "Olivetti GSM";
- case 0x1001: return "Olivetti ADPCM";
- case 0x1002: return "Olivetti CELP";
- case 0x1003: return "Olivetti SBC";
- case 0x1004: return "Olivetti OPR";
- case 0x1100: return "Lernout & Hauspie";
- case 0x1101: return "Lernout & Hauspie CELP codec";
- case 0x1102:
- case 0x1103:
- case 0x1104: return "Lernout & Hauspie SBC codec";
- case 0x1400: return "Norris Comm. Inc.";
- case 0x1401: return "ISIAudio";
- case 0x1500: return "AT&T Soundspace Music Compression";
- case 0x181c: return "VoxWare RT24 speech codec";
- case 0x181e: return "Lucent elemedia AX24000P Music codec";
- case 0x1971: return "Sonic Foundry LOSSLESS";
- case 0x1979: return "Innings Telecom Inc. ADPCM";
- case 0x1c07: return "Lucent SX8300P speech codec";
- case 0x1c0c: return "Lucent SX5363S G.723 compliant codec";
- case 0x1f03: return "CUseeMe DigiTalk (ex-Rocwell)";
- case 0x1fc4: return "NCT Soft ALF2CD ACM";
- case 0x2000: return "FAST Multimedia DVM";
- case 0x2001: return "Dolby DTS (Digital Theater System)";
- case 0x2002: return "RealAudio 1 / 2 14.4";
- case 0x2003: return "RealAudio 1 / 2 28.8";
- case 0x2004: return "RealAudio G2 / 8 Cook (low bitrate)";
- case 0x2005: return "RealAudio 3 / 4 / 5 Music (DNET)";
- case 0x2006: return "RealAudio 10 AAC (RAAC)";
- case 0x2007: return "RealAudio 10 AAC+ (RACP)";
- case 0x2500: return "Reserved range to 0x2600 Microsoft";
- case 0x3313: return "makeAVIS (ffvfw fake AVI sound from AviSynth scripts)";
- case 0x4143: return "Divio MPEG-4 AAC audio";
- case 0x4201: return "Nokia adaptive multirate";
- case 0x4243: return "Divio G726 Divio, Inc.";
- case 0x434c: return "LEAD Speech";
- case 0x564c: return "LEAD Vorbis";
- case 0x5756: return "WavPack Audio";
- case 0x674f: return "Ogg Vorbis (mode 1)";
- case 0x6750: return "Ogg Vorbis (mode 2)";
- case 0x6751: return "Ogg Vorbis (mode 3)";
- case 0x676f: return "Ogg Vorbis (mode 1+)";
- case 0x6770: return "Ogg Vorbis (mode 2+)";
- case 0x6771: return "Ogg Vorbis (mode 3+)";
- case 0x7000: return "3COM NBX 3Com Corporation";
- case 0x706d: return "FAAD AAC";
- case 0x7a21: return "GSM-AMR (CBR, no SID)";
- case 0x7a22: return "GSM-AMR (VBR, including SID)";
- case 0xa100: return "Comverse Infosys Ltd. G723 1";
- case 0xa101: return "Comverse Infosys Ltd. AVQSBC";
- case 0xa102: return "Comverse Infosys Ltd. OLDSBC";
- case 0xa103: return "Symbol Technologies G729A";
- case 0xa104: return "VoiceAge AMR WB VoiceAge Corporation";
- case 0xa105: return "Ingenient Technologies Inc. G726";
- case 0xa106: return "ISO/MPEG-4 advanced audio Coding";
- case 0xa107: return "Encore Software Ltd G726";
- case 0xa109: return "Speex ACM Codec xiph.org";
- case 0xdfac: return "DebugMode SonicFoundry Vegas FrameServer ACM Codec";
- case 0xf1ac: return "Free Lossless Audio Codec FLAC";
- case 0xfffe: return "Extensible";
- case 0xffff: return "Development";
+ case 0x0001: return "PCM";
+ case 0x0002: return "Microsoft ADPCM";
+ case 0x0003: return "IEEE Float";
+ case 0x0004: return "Compaq VSELP";
+ case 0x0005: return "IBM CVSD";
+ case 0x0006: return "Microsoft a-Law";
+ case 0x0007: return "Microsoft u-Law";
+ case 0x0008: return "Microsoft DTS";
+ case 0x0009: return "DRM";
+ case 0x000a: return "WMA 9 Speech";
+ case 0x000b: return "Microsoft Windows Media RT Voice";
+ case 0x0010: return "OKI-ADPCM";
+ case 0x0011: return "Intel IMA/DVI-ADPCM";
+ case 0x0012: return "Videologic Mediaspace ADPCM";
+ case 0x0013: return "Sierra ADPCM";
+ case 0x0014: return "Antex G.723 ADPCM";
+ case 0x0015: return "DSP Solutions DIGISTD";
+ case 0x0016: return "DSP Solutions DIGIFIX";
+ case 0x0017: return "Dialoic OKI ADPCM";
+ case 0x0018: return "Media Vision ADPCM";
+ case 0x0019: return "HP CU";
+ case 0x001a: return "HP Dynamic Voice";
+ case 0x0020: return "Yamaha ADPCM";
+ case 0x0021: return "SONARC Speech Compression";
+ case 0x0022: return "DSP Group True Speech";
+ case 0x0023: return "Echo Speech Corp.";
+ case 0x0024: return "Virtual Music Audiofile AF36";
+ case 0x0025: return "Audio Processing Tech.";
+ case 0x0026: return "Virtual Music Audiofile AF10";
+ case 0x0027: return "Aculab Prosody 1612";
+ case 0x0028: return "Merging Tech. LRC";
+ case 0x0030: return "Dolby AC2";
+ case 0x0031: return "Microsoft GSM610";
+ case 0x0032: return "MSN Audio";
+ case 0x0033: return "Antex ADPCME";
+ case 0x0034: return "Control Resources VQLPC";
+ case 0x0035: return "DSP Solutions DIGIREAL";
+ case 0x0036: return "DSP Solutions DIGIADPCM";
+ case 0x0037: return "Control Resources CR10";
+ case 0x0038: return "Natural MicroSystems VBX ADPCM";
+ case 0x0039: return "Crystal Semiconductor IMA ADPCM";
+ case 0x003a: return "Echo Speech ECHOSC3";
+ case 0x003b: return "Rockwell ADPCM";
+ case 0x003c: return "Rockwell DIGITALK";
+ case 0x003d: return "Xebec Multimedia";
+ case 0x0040: return "Antex G.721 ADPCM";
+ case 0x0041: return "Antex G.728 CELP";
+ case 0x0042: return "Microsoft MSG723";
+ case 0x0043: return "IBM AVC ADPCM";
+ case 0x0045: return "ITU-T G.726";
+ case 0x0050: return "Microsoft MPEG";
+ case 0x0051: return "RT23 or PAC";
+ case 0x0052: return "InSoft RT24";
+ case 0x0053: return "InSoft PAC";
+ case 0x0055: return "MP3";
+ case 0x0059: return "Cirrus";
+ case 0x0060: return "Cirrus Logic";
+ case 0x0061: return "ESS Tech. PCM";
+ case 0x0062: return "Voxware Inc.";
+ case 0x0063: return "Canopus ATRAC";
+ case 0x0064: return "APICOM G.726 ADPCM";
+ case 0x0065: return "APICOM G.722 ADPCM";
+ case 0x0066: return "Microsoft DSAT";
+ case 0x0067: return "Micorsoft DSAT DISPLAY";
+ case 0x0069: return "Voxware Byte Aligned";
+ case 0x0070: return "Voxware AC8";
+ case 0x0071: return "Voxware AC10";
+ case 0x0072: return "Voxware AC16";
+ case 0x0073: return "Voxware AC20";
+ case 0x0074: return "Voxware MetaVoice";
+ case 0x0075: return "Voxware MetaSound";
+ case 0x0076: return "Voxware RT29HW";
+ case 0x0077: return "Voxware VR12";
+ case 0x0078: return "Voxware VR18";
+ case 0x0079: return "Voxware TQ40";
+ case 0x007a: return "Voxware SC3";
+ case 0x007b: return "Voxware SC3";
+ case 0x0080: return "Soundsoft";
+ case 0x0081: return "Voxware TQ60";
+ case 0x0082: return "Microsoft MSRT24";
+ case 0x0083: return "AT&T G.729A";
+ case 0x0084: return "Motion Pixels MVI MV12";
+ case 0x0085: return "DataFusion G.726";
+ case 0x0086: return "DataFusion GSM610";
+ case 0x0088: return "Iterated Systems Audio";
+ case 0x0089: return "Onlive";
+ case 0x008a: return "Multitude, Inc. FT SX20";
+ case 0x008b: return "Infocom ITS A/S G.721 ADPCM";
+ case 0x008c: return "Convedia G729";
+ case 0x008d: return "Not specified congruency, Inc.";
+ case 0x0091: return "Siemens SBC24";
+ case 0x0092: return "Sonic Foundry Dolby AC3 APDIF";
+ case 0x0093: return "MediaSonic G.723";
+ case 0x0094: return "Aculab Prosody 8kbps";
+ case 0x0097: return "ZyXEL ADPCM";
+ case 0x0098: return "Philips LPCBB";
+ case 0x0099: return "Studer Professional Audio Packed";
+ case 0x00a0: return "Malden PhonyTalk";
+ case 0x00a1: return "Racal Recorder GSM";
+ case 0x00a2: return "Racal Recorder G720.a";
+ case 0x00a3: return "Racal G723.1";
+ case 0x00a4: return "Racal Tetra ACELP";
+ case 0x00b0: return "NEC AAC NEC Corporation";
+ case 0x00ff: return "AAC";
+ case 0x0100: return "Rhetorex ADPCM";
+ case 0x0101: return "IBM u-Law";
+ case 0x0102: return "IBM a-Law";
+ case 0x0103: return "IBM ADPCM";
+ case 0x0111: return "Vivo G.723";
+ case 0x0112: return "Vivo Siren";
+ case 0x0120: return "Philips Speech Processing CELP";
+ case 0x0121: return "Philips Speech Processing GRUNDIG";
+ case 0x0123: return "Digital G.723";
+ case 0x0125: return "Sanyo LD ADPCM";
+ case 0x0130: return "Sipro Lab ACEPLNET";
+ case 0x0131: return "Sipro Lab ACELP4800";
+ case 0x0132: return "Sipro Lab ACELP8V3";
+ case 0x0133: return "Sipro Lab G.729";
+ case 0x0134: return "Sipro Lab G.729A";
+ case 0x0135: return "Sipro Lab Kelvin";
+ case 0x0136: return "VoiceAge AMR";
+ case 0x0140: return "Dictaphone G.726 ADPCM";
+ case 0x0150: return "Qualcomm PureVoice";
+ case 0x0151: return "Qualcomm HalfRate";
+ case 0x0155: return "Ring Zero Systems TUBGSM";
+ case 0x0160: return "Microsoft Audio1";
+ case 0x0161: return "Windows Media Audio V2 V7 V8 V9 / DivX audio (WMA) / Alex AC3 Audio";
+ case 0x0162: return "Windows Media Audio Professional V9";
+ case 0x0163: return "Windows Media Audio Lossless V9";
+ case 0x0164: return "WMA Pro over S/PDIF";
+ case 0x0170: return "UNISYS NAP ADPCM";
+ case 0x0171: return "UNISYS NAP ULAW";
+ case 0x0172: return "UNISYS NAP ALAW";
+ case 0x0173: return "UNISYS NAP 16K";
+ case 0x0174: return "MM SYCOM ACM SYC008 SyCom Technologies";
+ case 0x0175: return "MM SYCOM ACM SYC701 G726L SyCom Technologies";
+ case 0x0176: return "MM SYCOM ACM SYC701 CELP54 SyCom Technologies";
+ case 0x0177: return "MM SYCOM ACM SYC701 CELP68 SyCom Technologies";
+ case 0x0178: return "Knowledge Adventure ADPCM";
+ case 0x0180: return "Fraunhofer IIS MPEG2AAC";
+ case 0x0190: return "Digital Theater Systems DTS DS";
+ case 0x0200: return "Creative Labs ADPCM";
+ case 0x0202: return "Creative Labs FASTSPEECH8";
+ case 0x0203: return "Creative Labs FASTSPEECH10";
+ case 0x0210: return "UHER ADPCM";
+ case 0x0215: return "Ulead DV ACM";
+ case 0x0216: return "Ulead DV ACM";
+ case 0x0220: return "Quarterdeck Corp.";
+ case 0x0230: return "I-Link VC";
+ case 0x0240: return "Aureal Semiconductor Raw Sport";
+ case 0x0241: return "ESST AC3";
+ case 0x0250: return "Interactive Products HSX";
+ case 0x0251: return "Interactive Products RPELP";
+ case 0x0260: return "Consistent CS2";
+ case 0x0270: return "Sony SCX";
+ case 0x0271: return "Sony SCY";
+ case 0x0272: return "Sony ATRAC3";
+ case 0x0273: return "Sony SPC";
+ case 0x0280: return "TELUM Telum Inc.";
+ case 0x0281: return "TELUMIA Telum Inc.";
+ case 0x0285: return "Norcom Voice Systems ADPCM";
+ case 0x0300: return "Fujitsu FM TOWNS SND";
+ case 0x0301:
+ case 0x0302:
+ case 0x0303:
+ case 0x0304:
+ case 0x0305:
+ case 0x0306:
+ case 0x0307:
+ case 0x0308: return "Fujitsu (not specified)";
+ case 0x0350: return "Micronas Semiconductors, Inc. Development";
+ case 0x0351: return "Micronas Semiconductors, Inc. CELP833";
+ case 0x0400: return "Brooktree Digital";
+ case 0x0401: return "Intel Music Coder (IMC)";
+ case 0x0402: return "Ligos Indeo Audio";
+ case 0x0450: return "QDesign Music";
+ case 0x0500: return "On2 VP7 On2 Technologies";
+ case 0x0501: return "On2 VP6 On2 Technologies";
+ case 0x0680: return "AT&T VME VMPCM";
+ case 0x0681: return "AT&T TCP";
+ case 0x0700: return "YMPEG Alpha (dummy for MPEG-2 compressor)";
+ case 0x08ae: return "ClearJump LiteWave (lossless)";
+ case 0x1000: return "Olivetti GSM";
+ case 0x1001: return "Olivetti ADPCM";
+ case 0x1002: return "Olivetti CELP";
+ case 0x1003: return "Olivetti SBC";
+ case 0x1004: return "Olivetti OPR";
+ case 0x1100: return "Lernout & Hauspie";
+ case 0x1101: return "Lernout & Hauspie CELP codec";
+ case 0x1102:
+ case 0x1103:
+ case 0x1104: return "Lernout & Hauspie SBC codec";
+ case 0x1400: return "Norris Comm. Inc.";
+ case 0x1401: return "ISIAudio";
+ case 0x1500: return "AT&T Soundspace Music Compression";
+ case 0x181c: return "VoxWare RT24 speech codec";
+ case 0x181e: return "Lucent elemedia AX24000P Music codec";
+ case 0x1971: return "Sonic Foundry LOSSLESS";
+ case 0x1979: return "Innings Telecom Inc. ADPCM";
+ case 0x1c07: return "Lucent SX8300P speech codec";
+ case 0x1c0c: return "Lucent SX5363S G.723 compliant codec";
+ case 0x1f03: return "CUseeMe DigiTalk (ex-Rocwell)";
+ case 0x1fc4: return "NCT Soft ALF2CD ACM";
+ case 0x2000: return "FAST Multimedia DVM";
+ case 0x2001: return "Dolby DTS (Digital Theater System)";
+ case 0x2002: return "RealAudio 1 / 2 14.4";
+ case 0x2003: return "RealAudio 1 / 2 28.8";
+ case 0x2004: return "RealAudio G2 / 8 Cook (low bitrate)";
+ case 0x2005: return "RealAudio 3 / 4 / 5 Music (DNET)";
+ case 0x2006: return "RealAudio 10 AAC (RAAC)";
+ case 0x2007: return "RealAudio 10 AAC+ (RACP)";
+ case 0x2500: return "Reserved range to 0x2600 Microsoft";
+ case 0x3313: return "makeAVIS (ffvfw fake AVI sound from AviSynth scripts)";
+ case 0x4143: return "Divio MPEG-4 AAC audio";
+ case 0x4201: return "Nokia adaptive multirate";
+ case 0x4243: return "Divio G726 Divio, Inc.";
+ case 0x434c: return "LEAD Speech";
+ case 0x564c: return "LEAD Vorbis";
+ case 0x5756: return "WavPack Audio";
+ case 0x674f: return "Ogg Vorbis (mode 1)";
+ case 0x6750: return "Ogg Vorbis (mode 2)";
+ case 0x6751: return "Ogg Vorbis (mode 3)";
+ case 0x676f: return "Ogg Vorbis (mode 1+)";
+ case 0x6770: return "Ogg Vorbis (mode 2+)";
+ case 0x6771: return "Ogg Vorbis (mode 3+)";
+ case 0x7000: return "3COM NBX 3Com Corporation";
+ case 0x706d: return "FAAD AAC";
+ case 0x7a21: return "GSM-AMR (CBR, no SID)";
+ case 0x7a22: return "GSM-AMR (VBR, including SID)";
+ case 0xa100: return "Comverse Infosys Ltd. G723 1";
+ case 0xa101: return "Comverse Infosys Ltd. AVQSBC";
+ case 0xa102: return "Comverse Infosys Ltd. OLDSBC";
+ case 0xa103: return "Symbol Technologies G729A";
+ case 0xa104: return "VoiceAge AMR WB VoiceAge Corporation";
+ case 0xa105: return "Ingenient Technologies Inc. G726";
+ case 0xa106: return "ISO/MPEG-4 advanced audio Coding";
+ case 0xa107: return "Encore Software Ltd G726";
+ case 0xa109: return "Speex ACM Codec xiph.org";
+ case 0xdfac: return "DebugMode SonicFoundry Vegas FrameServer ACM Codec";
+ case 0xf1ac: return "Free Lossless Audio Codec FLAC";
+ case 0xfffe: return "Extensible";
+ case 0xffff: return "Development";
+ default:
+ return format("{0:#x}", codec);
}
- return format("{0:#x}", codec);
}
\ No newline at end of file
diff --git a/rhubarb/src/audio/WaveFileReader.h b/rhubarb/src/audio/WaveFileReader.h
index d4c080c..df68f19 100644
--- a/rhubarb/src/audio/WaveFileReader.h
+++ b/rhubarb/src/audio/WaveFileReader.h
@@ -12,7 +12,7 @@ enum class SampleFormat {
class WaveFileReader : public AudioClip {
public:
- WaveFileReader(boost::filesystem::path filePath);
+ WaveFileReader(const boost::filesystem::path& filePath);
std::unique_ptr clone() const override;
int getSampleRate() const override;
size_type size() const override;
diff --git a/rhubarb/src/audio/audioFileReading.cpp b/rhubarb/src/audio/audioFileReading.cpp
index 8c83aa5..f5b2d4c 100644
--- a/rhubarb/src/audio/audioFileReading.cpp
+++ b/rhubarb/src/audio/audioFileReading.cpp
@@ -20,7 +20,9 @@ std::unique_ptr createAudioFileClip(path filePath) {
return std::make_unique(filePath);
}
throw runtime_error(format(
- "Unsupported file extension '{}'. Supported extensions are '.wav' and '.ogg'.", extension));
+ "Unsupported file extension '{}'. Supported extensions are '.wav' and '.ogg'.",
+ extension
+ ));
} catch (...) {
std::throw_with_nested(runtime_error(format("Could not open sound file {}.", filePath)));
}
diff --git a/rhubarb/src/audio/ioTools.h b/rhubarb/src/audio/ioTools.h
index 7568462..b74d215 100644
--- a/rhubarb/src/audio/ioTools.h
+++ b/rhubarb/src/audio/ioTools.h
@@ -4,33 +4,38 @@
namespace little_endian {
- template
- Type read(std::istream &stream) {
+ template
+ Type read(std::istream& stream) {
static_assert(bitsToRead % 8 == 0, "Cannot read fractional bytes.");
static_assert(bitsToRead <= sizeof(Type) * 8, "Bits to read exceed target type size.");
Type result = 0;
- char *p = reinterpret_cast(&result);
- int bytesToRead = bitsToRead / 8;
+ char* p = reinterpret_cast(&result);
+ const int bytesToRead = bitsToRead / 8;
for (int byteIndex = 0; byteIndex < bytesToRead; byteIndex++) {
*(p + byteIndex) = static_cast(stream.get());
}
return result;
}
- template
- void write(Type value, std::ostream &stream) {
+ template