diff --git a/rhubarb/resharper.DotSettings b/rhubarb/resharper.DotSettings
index 168efbe..103065f 100644
--- a/rhubarb/resharper.DotSettings
+++ b/rhubarb/resharper.DotSettings
[Settings hunks not reproduced: the XML keys of this DotSettings file were stripped during extraction, leaving only bare values — added inspection severities (WARNING/HINT/DO_NOT_SHOW/ERROR), formatter and solution toggles (True/False, END_OF_LINE, CHOP_ALWAYS, USE_TABS_ONLY, LIVE_MONITOR, DO_NOTHING), and the local SolutionCaches path. The one change that survives intact replaces the C++ "template parameter" naming rule with a rule for type aliases and typedefs:]
-	<NamingElement Priority="4"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="template parameter" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></NamingElement>
+	<NamingElement Priority="17"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="type alias" /><type Name="typedef" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></NamingElement>
diff --git a/rhubarb/src/animation/ShapeRule.cpp b/rhubarb/src/animation/ShapeRule.cpp
index e9b98c0..ad82900 100644
--- a/rhubarb/src/animation/ShapeRule.cpp
+++ b/rhubarb/src/animation/ShapeRule.cpp
@@ -1,26 +1,36 @@
 #include "ShapeRule.h"
 #include
+#include
 #include "time/ContinuousTimeline.h"

 using boost::optional;
 using boost::adaptors::transformed;

 template
-ContinuousTimeline, AutoJoin> boundedTimelinetoContinuousOptional(const BoundedTimeline& timeline) {
-	return{
-		timeline.getRange(), boost::none,
-		timeline | transformed([](const Timed& timedValue) { return Timed>(timedValue.getTimeRange(), timedValue.getValue()); })
+ContinuousTimeline, AutoJoin> boundedTimelinetoContinuousOptional(
+	const BoundedTimeline& timeline
+) {
+	return {
+		timeline.getRange(),
+		boost::none,
+		timeline | transformed([](const Timed& timedValue) {
+			return Timed>(timedValue.getTimeRange(), timedValue.getValue());
+		})
 	};
 }

-ShapeRule::ShapeRule(const ShapeSet& shapeSet, const optional& phone, TimeRange phoneTiming) :
-	shapeSet(shapeSet),
-	phone(phone),
+ShapeRule::ShapeRule(
+	ShapeSet shapeSet,
+	optional phone,
+	TimeRange phoneTiming
+) :
+	shapeSet(std::move(shapeSet)),
+	phone(std::move(phone)),
 	phoneTiming(phoneTiming) {}

 ShapeRule ShapeRule::getInvalid() {
-	return {{}, boost::none,{0_cs, 0_cs}};
+	return { {}, boost::none, { 0_cs, 0_cs } };
 }

 bool ShapeRule::operator==(const ShapeRule& rhs) const {
@@ -43,11 +53,14 @@ ContinuousTimeline getShapeRules(const BoundedTimeline& phones
 	auto continuousPhones = boundedTimelinetoContinuousOptional(phones);

 	// Create timeline of shape rules
-	ContinuousTimeline shapeRules(phones.getRange(), {{Shape::X}, boost::none, {0_cs, 0_cs}});
+	ContinuousTimeline shapeRules(
+		phones.getRange(),
+		{ { Shape::X }, boost::none, { 0_cs, 0_cs } }
+	);
 	centiseconds previousDuration = 0_cs;
 	for (const auto& timedPhone : continuousPhones) {
 		optional phone = timedPhone.getValue();
-		centiseconds duration = timedPhone.getDuration();
+		const centiseconds duration = timedPhone.getDuration();

 		if (phone) {
 			// Animate one phone
@@ -59,7 +72,10 @@ ContinuousTimeline getShapeRules(const BoundedTimeline& phones
 			// Copy to timeline.
 			// Later shape sets may overwrite earlier ones if overlapping.
for (const auto& timedShapeSet : phoneShapeSets) { - shapeRules.set(timedShapeSet.getTimeRange(), ShapeRule(timedShapeSet.getValue(), phone, timedPhone.getTimeRange())); + shapeRules.set( + timedShapeSet.getTimeRange(), + ShapeRule(timedShapeSet.getValue(), phone, timedPhone.getTimeRange()) + ); } } diff --git a/rhubarb/src/animation/ShapeRule.h b/rhubarb/src/animation/ShapeRule.h index acb0a08..887dff6 100644 --- a/rhubarb/src/animation/ShapeRule.h +++ b/rhubarb/src/animation/ShapeRule.h @@ -11,7 +11,7 @@ struct ShapeRule { boost::optional phone; TimeRange phoneTiming; - ShapeRule(const ShapeSet& shapeSet, const boost::optional& phone, TimeRange phoneTiming); + ShapeRule(ShapeSet shapeSet, boost::optional phone, TimeRange phoneTiming); static ShapeRule getInvalid(); diff --git a/rhubarb/src/animation/animationRules.cpp b/rhubarb/src/animation/animationRules.cpp index 4bef2e5..5b2b203 100644 --- a/rhubarb/src/animation/animationRules.cpp +++ b/rhubarb/src/animation/animationRules.cpp @@ -14,12 +14,14 @@ using std::map; constexpr size_t shapeValueCount = static_cast(Shape::EndSentinel); Shape getBasicShape(Shape shape) { - static constexpr array basicShapes = make_array(A, B, C, D, E, F, B, C, A); + static constexpr array basicShapes = + make_array(A, B, C, D, E, F, B, C, A); return basicShapes[static_cast(shape)]; } Shape relax(Shape shape) { - static constexpr array relaxedShapes = make_array(A, B, B, C, C, B, X, B, X); + static constexpr array relaxedShapes = + make_array(A, B, B, C, C, B, X, B, X); return relaxedShapes[static_cast(shape)]; } @@ -28,7 +30,8 @@ Shape getClosestShape(Shape reference, ShapeSet shapes) { throw std::invalid_argument("Cannot select from empty set of shapes."); } - // A matrix that for each shape contains all shapes in ascending order of effort required to move to them + // A matrix that for each shape contains all shapes in ascending order of effort required to + // move to them constexpr static array, shapeValueCount> effortMatrix = make_array( /* A */ make_array(A, X, G, B, C, H, E, D, F), /* B */ make_array(B, G, A, X, C, H, E, D, F), @@ -38,7 +41,7 @@ Shape getClosestShape(Shape reference, ShapeSet shapes) { /* F */ make_array(F, B, G, A, X, C, H, E, D), /* G */ make_array(G, B, C, H, A, X, E, D, F), /* H */ make_array(H, C, B, G, D, A, X, E, F), // Like C - /* X */ make_array(X, A, G, B, C, H, E, D, F) // Like A + /* X */ make_array(X, A, G, B, C, H, E, D, F) // Like A ); auto& closestShapes = effortMatrix.at(static_cast(reference)); @@ -55,107 +58,109 @@ optional> getTween(Shape first, Shape second) { // Note that most of the following rules work in one direction only. // That's because in animation, the mouth should usually "pop" open without inbetweens, // then close slowly. 
- static const map, pair> lookup{ - {{D, A}, {C, TweenTiming::Early}}, - {{D, B}, {C, TweenTiming::Centered}}, - {{D, G}, {C, TweenTiming::Early}}, - {{D, X}, {C, TweenTiming::Late}}, - {{C, F}, {E, TweenTiming::Centered}}, {{F, C}, {E, TweenTiming::Centered}}, - {{D, F}, {E, TweenTiming::Centered}}, - {{H, F}, {E, TweenTiming::Late}}, {{F, H}, {E, TweenTiming::Early}} + static const map, pair> lookup { + { { D, A }, { C, TweenTiming::Early } }, + { { D, B }, { C, TweenTiming::Centered } }, + { { D, G }, { C, TweenTiming::Early } }, + { { D, X }, { C, TweenTiming::Late } }, + { { C, F }, { E, TweenTiming::Centered } }, { { F, C }, { E, TweenTiming::Centered } }, + { { D, F }, { E, TweenTiming::Centered } }, + { { H, F }, { E, TweenTiming::Late } }, { { F, H }, { E, TweenTiming::Early } } }; - auto it = lookup.find({first, second}); + const auto it = lookup.find({ first, second }); return it != lookup.end() ? it->second : optional>(); } Timeline getShapeSets(Phone phone, centiseconds duration, centiseconds previousDuration) { // Returns a timeline with a single shape set - auto single = [duration](ShapeSet value) { - return Timeline {{0_cs, duration, value}}; + const auto single = [duration](ShapeSet value) { + return Timeline { { 0_cs, duration, value } }; }; // Returns a timeline with two shape sets, timed as a diphthong - auto diphthong = [duration](ShapeSet first, ShapeSet second) { - centiseconds firstDuration = duration_cast(duration * 0.6); + const auto diphthong = [duration](ShapeSet first, ShapeSet second) { + const centiseconds firstDuration = duration_cast(duration * 0.6); return Timeline { - {0_cs, firstDuration, first}, - {firstDuration, duration, second} + { 0_cs, firstDuration, first }, + { firstDuration, duration, second } }; }; // Returns a timeline with two shape sets, timed as a plosive - auto plosive = [duration, previousDuration](ShapeSet first, ShapeSet second) { - centiseconds minOcclusionDuration = 4_cs; - centiseconds maxOcclusionDuration = 12_cs; - centiseconds occlusionDuration = clamp(previousDuration / 2, minOcclusionDuration, maxOcclusionDuration); + const auto plosive = [duration, previousDuration](ShapeSet first, ShapeSet second) { + const centiseconds minOcclusionDuration = 4_cs; + const centiseconds maxOcclusionDuration = 12_cs; + const centiseconds occlusionDuration = + clamp(previousDuration / 2, minOcclusionDuration, maxOcclusionDuration); return Timeline { - {-occlusionDuration, 0_cs, first}, - {0_cs, duration, second} + { -occlusionDuration, 0_cs, first }, + { 0_cs, duration, second } }; }; // Returns the result of `getShapeSets` when called with identical arguments // except for a different phone. - auto like = [duration, previousDuration](Phone referencePhone) { + const auto like = [duration, previousDuration](Phone referencePhone) { return getShapeSets(referencePhone, duration, previousDuration); }; - static const ShapeSet any{A, B, C, D, E, F, G, H, X}; - static const ShapeSet anyOpen{B, C, D, E, F, G, H}; + static const ShapeSet any { A, B, C, D, E, F, G, H, X }; + static const ShapeSet anyOpen { B, C, D, E, F, G, H }; // Note: - // The shapes {A, B, G, X} are very similar. You should avoid regular shape sets containing more than one of these shapes. + // The shapes {A, B, G, X} are very similar. You should avoid regular shape sets containing more + // than one of these shapes. // Otherwise, the resulting shape may be more or less random and might not be a good fit. // As an exception, a very flexible rule may contain *all* these shapes. 
switch (phone) { - case Phone::AO: return single({E}); - case Phone::AA: return single({D}); - case Phone::IY: return single({B}); - case Phone::UW: return single({F}); - case Phone::EH: return single({C}); - case Phone::IH: return single({B}); - case Phone::UH: return single({F}); - case Phone::AH: return duration < 20_cs ? single({C}) : single({D}); - case Phone::Schwa: return single({B, C}); - case Phone::AE: return single({C}); - case Phone::EY: return diphthong({C}, {B}); - case Phone::AY: return duration < 20_cs ? diphthong({C}, {B}) : diphthong({D}, {B}); - case Phone::OW: return single({F}); - case Phone::AW: return duration < 30_cs ? diphthong({C}, {E}) : diphthong({D}, {E}); - case Phone::OY: return diphthong({E}, {B}); - case Phone::ER: return duration < 7_cs ? like(Phone::Schwa) : single({E}); + case Phone::AO: return single({ E }); + case Phone::AA: return single({ D }); + case Phone::IY: return single({ B }); + case Phone::UW: return single({ F }); + case Phone::EH: return single({ C }); + case Phone::IH: return single({ B }); + case Phone::UH: return single({ F }); + case Phone::AH: return duration < 20_cs ? single({ C }) : single({ D }); + case Phone::Schwa: return single({ B, C }); + case Phone::AE: return single({ C }); + case Phone::EY: return diphthong({ C }, { B }); + case Phone::AY: return duration < 20_cs ? diphthong({ C }, { B }) : diphthong({ D }, { B }); + case Phone::OW: return single({ F }); + case Phone::AW: return duration < 30_cs ? diphthong({ C }, { E }) : diphthong({ D }, { E }); + case Phone::OY: return diphthong({ E }, { B }); + case Phone::ER: return duration < 7_cs ? like(Phone::Schwa) : single({ E }); - case Phone::P: - case Phone::B: return plosive({A}, any); - case Phone::T: - case Phone::D: return plosive({B, F}, anyOpen); - case Phone::K: - case Phone::G: return plosive({B, C, E, F, H}, anyOpen); - case Phone::CH: - case Phone::JH: return single({B, F}); - case Phone::F: - case Phone::V: return single({G}); - case Phone::TH: - case Phone::DH: - case Phone::S: - case Phone::Z: - case Phone::SH: - case Phone::ZH: return single({B, F}); - case Phone::HH: return single(any); // think "m-hm" - case Phone::M: return single({A}); - case Phone::N: return single({B, C, F, H}); - case Phone::NG: return single({B, C, E, F}); - case Phone::L: return duration < 20_cs ? single({B, E, F, H}) : single({H}); - case Phone::R: return single({B, E, F}); - case Phone::Y: return single({B, C, F}); - case Phone::W: return single({F}); + case Phone::P: + case Phone::B: return plosive({ A }, any); + case Phone::T: + case Phone::D: return plosive({ B, F }, anyOpen); + case Phone::K: + case Phone::G: return plosive({ B, C, E, F, H }, anyOpen); + case Phone::CH: + case Phone::JH: return single({ B, F }); + case Phone::F: + case Phone::V: return single({ G }); + case Phone::TH: + case Phone::DH: + case Phone::S: + case Phone::Z: + case Phone::SH: + case Phone::ZH: return single({ B, F }); + case Phone::HH: return single(any); // think "m-hm" + case Phone::M: return single({ A }); + case Phone::N: return single({ B, C, F, H }); + case Phone::NG: return single({ B, C, E, F }); + case Phone::L: return duration < 20_cs ? 
single({ B, E, F, H }) : single({ H }); + case Phone::R: return single({ B, E, F }); + case Phone::Y: return single({ B, C, F }); + case Phone::W: return single({ F }); - case Phone::Breath: - case Phone::Cough: - case Phone::Smack: return single({C}); - case Phone::Noise: return single({B}); + case Phone::Breath: + case Phone::Cough: + case Phone::Smack: return single({ C }); + case Phone::Noise: return single({ B }); - default: throw std::invalid_argument("Unexpected phone."); + default: throw std::invalid_argument("Unexpected phone."); } } diff --git a/rhubarb/src/animation/animationRules.h b/rhubarb/src/animation/animationRules.h index e3a3b36..46ae857 100644 --- a/rhubarb/src/animation/animationRules.h +++ b/rhubarb/src/animation/animationRules.h @@ -31,5 +31,6 @@ boost::optional> getTween(Shape first, Shape secon // Returns the shape set(s) to use for a given phone. // The resulting timeline will always cover the entire duration of the phone (starting at 0 cs). -// It may extend into the negative time range if animation is required prior to the sound being heard. +// It may extend into the negative time range if animation is required prior to the sound being +// heard. Timeline getShapeSets(Phone phone, centiseconds duration, centiseconds previousDuration); diff --git a/rhubarb/src/animation/mouthAnimation.cpp b/rhubarb/src/animation/mouthAnimation.cpp index 30add4c..6d8d21d 100644 --- a/rhubarb/src/animation/mouthAnimation.cpp +++ b/rhubarb/src/animation/mouthAnimation.cpp @@ -8,17 +8,21 @@ #include "targetShapeSet.h" #include "staticSegments.h" -JoiningContinuousTimeline animate(const BoundedTimeline &phones, const ShapeSet& targetShapeSet) { +JoiningContinuousTimeline animate( + const BoundedTimeline& phones, + const ShapeSet& targetShapeSet +) { // Create timeline of shape rules ContinuousTimeline shapeRules = getShapeRules(phones); - // Modify shape rules to only contain allowed shapes -- plus X, which is needed for pauses and will be replaced later + // Modify shape rules to only contain allowed shapes -- plus X, which is needed for pauses and + // will be replaced later ShapeSet targetShapeSetPlusX = targetShapeSet; targetShapeSetPlusX.insert(Shape::X); shapeRules = convertToTargetShapeSet(shapeRules, targetShapeSetPlusX); // Animate in multiple steps - auto performMainAnimationSteps = [&targetShapeSet](const auto& shapeRules) { + const auto performMainAnimationSteps = [&targetShapeSet](const auto& shapeRules) { JoiningContinuousTimeline animation = animateRough(shapeRules); animation = optimizeTiming(animation); animation = animatePauses(animation); @@ -26,7 +30,8 @@ JoiningContinuousTimeline animate(const BoundedTimeline &phones, c animation = convertToTargetShapeSet(animation, targetShapeSet); return animation; }; - const JoiningContinuousTimeline result = avoidStaticSegments(shapeRules, performMainAnimationSteps); + const JoiningContinuousTimeline result = + avoidStaticSegments(shapeRules, performMainAnimationSteps); for (const auto& timedShape : result) { logTimedEvent("shape", timedShape); diff --git a/rhubarb/src/animation/mouthAnimation.h b/rhubarb/src/animation/mouthAnimation.h index f001d4a..1c04d0a 100644 --- a/rhubarb/src/animation/mouthAnimation.h +++ b/rhubarb/src/animation/mouthAnimation.h @@ -5,4 +5,7 @@ #include "time/ContinuousTimeline.h" #include "targetShapeSet.h" -JoiningContinuousTimeline animate(const BoundedTimeline& phones, const ShapeSet& targetShapeSet); +JoiningContinuousTimeline animate( + const BoundedTimeline& phones, + const ShapeSet& 
targetShapeSet +); diff --git a/rhubarb/src/animation/pauseAnimation.cpp b/rhubarb/src/animation/pauseAnimation.cpp index 4f1281e..f7529c4 100644 --- a/rhubarb/src/animation/pauseAnimation.cpp +++ b/rhubarb/src/animation/pauseAnimation.cpp @@ -12,7 +12,7 @@ Shape getPauseShape(Shape previous, Shape next, centiseconds duration) { // It looks odd if the pause shape is identical to the next shape. // Make sure we find a relaxed shape that's different from the next one. for (Shape currentRelaxedShape = previous;;) { - Shape nextRelaxedShape = relax(currentRelaxedShape); + const Shape nextRelaxedShape = relax(currentRelaxedShape); if (nextRelaxedShape != next) { return nextRelaxedShape; } @@ -31,11 +31,18 @@ Shape getPauseShape(Shape previous, Shape next, centiseconds duration) { JoiningContinuousTimeline animatePauses(const JoiningContinuousTimeline& animation) { JoiningContinuousTimeline result(animation); - for_each_adjacent(animation.begin(), animation.end(), [&](const Timed& previous, const Timed& pause, const Timed& next) { - if (pause.getValue() != Shape::X) return; + for_each_adjacent( + animation.begin(), + animation.end(), + [&](const Timed& previous, const Timed& pause, const Timed& next) { + if (pause.getValue() != Shape::X) return; - result.set(pause.getTimeRange(), getPauseShape(previous.getValue(), next.getValue(), pause.getDuration())); - }); + result.set( + pause.getTimeRange(), + getPauseShape(previous.getValue(), next.getValue(), pause.getDuration()) + ); + } + ); return result; } diff --git a/rhubarb/src/animation/roughAnimation.cpp b/rhubarb/src/animation/roughAnimation.cpp index 1d1d672..af1f462 100644 --- a/rhubarb/src/animation/roughAnimation.cpp +++ b/rhubarb/src/animation/roughAnimation.cpp @@ -1,16 +1,17 @@ #include "roughAnimation.h" #include -using boost::optional; - // Create timeline of shapes using a bidirectional algorithm. // Here's a rough sketch: // -// * Most consonants result in shape sets with multiple options; most vowels have only one shape option. +// * Most consonants result in shape sets with multiple options; most vowels have only one shape +// option. // * When speaking, we tend to slur mouth shapes into each other. So we animate from start to end, -// always choosing a shape from the current set that resembles the last shape and is somewhat relaxed. +// always choosing a shape from the current set that resembles the last shape and is somewhat +// relaxed. // * When speaking, we anticipate vowels, trying to form their shape before the actual vowel. -// So whenever we come across a one-shape vowel, we backtrack a little, spreating that shape to the left. +// So whenever we come across a one-shape vowel, we backtrack a little, spreading that shape to +// the left. 
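(Aside — a minimal standalone sketch of the bidirectional pass described in the comment above, for intuition only. `SketchShape`, `SketchShapeSet`, `closest`, and the flat rule vector are simplified stand-ins, not the `ContinuousTimeline`/`getClosestShape` API used in the hunk that follows; the sketch also omits the relaxation preference and the 20 cs anticipation cap the real code applies.)

	#include <cstddef>
	#include <set>
	#include <utility>
	#include <vector>

	using SketchShape = char;                 // e.g. 'A'..'H', 'X'
	using SketchShapeSet = std::set<SketchShape>;

	// Stand-in for getClosestShape: keep the reference shape if the rule allows it,
	// otherwise fall back to the first candidate. Assumes a non-empty set.
	SketchShape closest(SketchShape reference, const SketchShapeSet& candidates) {
		return candidates.count(reference) ? reference : *candidates.begin();
	}

	// rules[i]: { candidate shapes, is this a one-shape vowel? }
	std::vector<SketchShape> animateRoughSketch(
		const std::vector<std::pair<SketchShapeSet, bool>>& rules
	) {
		std::vector<SketchShape> result(rules.size(), 'X');
		SketchShape reference = 'X';
		for (std::size_t i = 0; i < rules.size(); ++i) {
			// Forward pass: pick a shape resembling the previously drawn one
			result[i] = closest(reference, rules[i].first);
			reference = result[i];
			if (rules[i].second) {
				// Anticipation: spread the vowel shape backwards over flexible rules
				for (std::size_t j = i; j-- > 0 && rules[j].first.size() > 1;) {
					result[j] = closest(result[i], rules[j].first);
				}
			}
		}
		return result;
	}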
JoiningContinuousTimeline animateRough(const ContinuousTimeline& shapeRules) { JoiningContinuousTimeline animation(shapeRules.getRange(), Shape::X); @@ -21,24 +22,28 @@ JoiningContinuousTimeline animateRough(const ContinuousTimelinegetValue(); const Shape shape = getClosestShape(referenceShape, shapeRule.shapeSet); animation.set(it->getTimeRange(), shape); - const bool anticipateShape = shapeRule.phone && isVowel(*shapeRule.phone) && shapeRule.shapeSet.size() == 1; + const bool anticipateShape = shapeRule.phone + && isVowel(*shapeRule.phone) + && shapeRule.shapeSet.size() == 1; if (anticipateShape) { // Animate backwards a little const Shape anticipatedShape = shape; const centiseconds anticipatedShapeStart = it->getStart(); referenceShape = anticipatedShape; - for (auto reverseIt = it; reverseIt != shapeRules.begin(); ) { + for (auto reverseIt = it; reverseIt != shapeRules.begin();) { --reverseIt; // Make sure we haven't animated too far back centiseconds anticipatingShapeStart = reverseIt->getStart(); if (anticipatingShapeStart == lastAnticipatedShapeStart) break; const centiseconds maxAnticipationDuration = 20_cs; - const centiseconds anticipationDuration = anticipatedShapeStart - anticipatingShapeStart; + const centiseconds anticipationDuration = + anticipatedShapeStart - anticipatingShapeStart; if (anticipationDuration > maxAnticipationDuration) break; // Overwrite forward-animated shape with backwards-animated, anticipating shape - const Shape anticipatingShape = getClosestShape(referenceShape, reverseIt->getValue().shapeSet); + const Shape anticipatingShape = + getClosestShape(referenceShape, reverseIt->getValue().shapeSet); animation.set(reverseIt->getTimeRange(), anticipatingShape); // Make sure the new, backwards-animated shape still resembles the anticipated shape diff --git a/rhubarb/src/animation/roughAnimation.h b/rhubarb/src/animation/roughAnimation.h index 83e8567..cb009e6 100644 --- a/rhubarb/src/animation/roughAnimation.h +++ b/rhubarb/src/animation/roughAnimation.h @@ -2,5 +2,6 @@ #include "ShapeRule.h" -// Does a rough animation (no tweening, special pause animation, etc.) using a bidirectional algorithm. +// Does a rough animation (no tweening, special pause animation, etc.) using a bidirectional +// algorithm. JoiningContinuousTimeline animateRough(const ContinuousTimeline& shapeRules); diff --git a/rhubarb/src/animation/staticSegments.cpp b/rhubarb/src/animation/staticSegments.cpp index 7c4a7ec..3cc093a 100644 --- a/rhubarb/src/animation/staticSegments.cpp +++ b/rhubarb/src/animation/staticSegments.cpp @@ -4,7 +4,6 @@ #include "tools/nextCombination.h" using std::vector; -using boost::optional; int getSyllableCount(const ContinuousTimeline& shapeRules, TimeRange timeRange) { if (timeRange.empty()) return 0; @@ -31,16 +30,22 @@ int getSyllableCount(const ContinuousTimeline& shapeRules, TimeRange } // A static segment is a prolonged period during which the mouth shape doesn't change -vector getStaticSegments(const ContinuousTimeline& shapeRules, const JoiningContinuousTimeline& animation) { +vector getStaticSegments( + const ContinuousTimeline& shapeRules, + const JoiningContinuousTimeline& animation +) { // A static segment must contain a certain number of syllables to look distractingly static const int minSyllableCount = 3; - // It must also have a minimum duration. The same number of syllables in fast speech usually looks good. + // It must also have a minimum duration. The same number of syllables in fast speech usually + // looks good. 
const centiseconds minDuration = 75_cs; vector result; for (const auto& timedShape : animation) { const TimeRange timeRange = timedShape.getTimeRange(); - if (timeRange.getDuration() >= minDuration && getSyllableCount(shapeRules, timeRange) >= minSyllableCount) { + const bool isStatic = timeRange.getDuration() >= minDuration + && getSyllableCount(shapeRules, timeRange) >= minSyllableCount; + if (isStatic) { result.push_back(timeRange); } } @@ -48,20 +53,22 @@ vector getStaticSegments(const ContinuousTimeline& shapeRu return result; } -// Indicates whether this shape rule can potentially be replaced by a modified version that breaks up long static segments +// Indicates whether this shape rule can potentially be replaced by a modified version that breaks +// up long static segments bool canChange(const ShapeRule& rule) { return rule.phone && isVowel(*rule.phone) && rule.shapeSet.size() == 1; } -// Returns a new shape rule that is identical to the specified one, except that it leads to a slightly different visualization +// Returns a new shape rule that is identical to the specified one, except that it leads to a +// slightly different visualization ShapeRule getChangedShapeRule(const ShapeRule& rule) { assert(canChange(rule)); ShapeRule result(rule); // So far, I've only encountered B as a static shape. // If there is ever a problem with another static shape, this function can easily be extended. - if (rule.shapeSet == ShapeSet{Shape::B}) { - result.shapeSet = {Shape::C}; + if (rule.shapeSet == ShapeSet { Shape::B }) { + result.shapeSet = { Shape::C }; } return result; } @@ -70,7 +77,10 @@ ShapeRule getChangedShapeRule(const ShapeRule& rule) { using RuleChanges = vector; // Replaces the indicated shape rules with slightly different ones, breaking up long static segments -ContinuousTimeline applyChanges(const ContinuousTimeline& shapeRules, const RuleChanges& changes) { +ContinuousTimeline applyChanges( + const ContinuousTimeline& shapeRules, + const RuleChanges& changes +) { ContinuousTimeline result(shapeRules); for (centiseconds changedRuleStart : changes) { const Timed timedOriginalRule = *shapeRules.get(changedRuleStart); @@ -85,14 +95,16 @@ public: RuleChangeScenario( const ContinuousTimeline& originalRules, const RuleChanges& changes, - AnimationFunction animate) : + const AnimationFunction& animate + ) : changedRules(applyChanges(originalRules, changes)), animation(animate(changedRules)), - staticSegments(getStaticSegments(changedRules, animation)) {} + staticSegments(getStaticSegments(changedRules, animation)) + {} bool isBetterThan(const RuleChangeScenario& rhs) const { // We want zero static segments - if (staticSegments.size() == 0 && rhs.staticSegments.size() > 0) return true; + if (staticSegments.empty() && !rhs.staticSegments.empty()) return true; // Short shapes are better than long ones. Minimize sum-of-squares. 
if (getSumOfShapeDurationSquares() < rhs.getSumOfShapeDurationSquares()) return true; @@ -114,10 +126,17 @@ private: vector staticSegments; double getSumOfShapeDurationSquares() const { - return std::accumulate(animation.begin(), animation.end(), 0.0, [](const double sum, const Timed& timedShape) { - const double duration = std::chrono::duration_cast>(timedShape.getDuration()).count(); - return sum + duration * duration; - }); + return std::accumulate( + animation.begin(), + animation.end(), + 0.0, + [](const double sum, const Timed& timedShape) { + const double duration = std::chrono::duration_cast>( + timedShape.getDuration() + ).count(); + return sum + duration * duration; + } + ); } }; @@ -132,8 +151,12 @@ RuleChanges getPossibleRuleChanges(const ContinuousTimeline& shapeRul return result; } -ContinuousTimeline fixStaticSegmentRules(const ContinuousTimeline& shapeRules, AnimationFunction animate) { - // The complexity of this function is exponential with the number of replacements. So let's cap that value. +ContinuousTimeline fixStaticSegmentRules( + const ContinuousTimeline& shapeRules, + const AnimationFunction& animate +) { + // The complexity of this function is exponential with the number of replacements. + // So let's cap that value. const int maxReplacementCount = 3; // All potential changes @@ -142,14 +165,18 @@ ContinuousTimeline fixStaticSegmentRules(const ContinuousTimeline 0 && replacementCount <= std::min(static_cast(possibleRuleChanges.size()), maxReplacementCount); - ++replacementCount - ) { + int replacementCount = 1; + bestScenario.getStaticSegmentCount() > 0 && replacementCount <= std::min(static_cast(possibleRuleChanges.size()), maxReplacementCount); + ++replacementCount + ) { // Only the first elements of `currentRuleChanges` count auto currentRuleChanges(possibleRuleChanges); do { - RuleChangeScenario currentScenario(shapeRules, {currentRuleChanges.begin(), currentRuleChanges.begin() + replacementCount}, animate); + RuleChangeScenario currentScenario( + shapeRules, + { currentRuleChanges.begin(), currentRuleChanges.begin() + replacementCount }, + animate + ); if (currentScenario.isBetterThan(bestScenario)) { bestScenario = currentScenario; } @@ -164,8 +191,12 @@ bool isFlexible(const ShapeRule& rule) { return rule.shapeSet.size() > 1; } -// Extends the specified time range until it starts and ends with a non-flexible shape rule, if possible -TimeRange extendToFixedRules(const TimeRange& timeRange, const ContinuousTimeline& shapeRules) { +// Extends the specified time range until it starts and ends with a non-flexible shape rule, if +// possible +TimeRange extendToFixedRules( + const TimeRange& timeRange, + const ContinuousTimeline& shapeRules +) { auto first = shapeRules.find(timeRange.getStart()); while (first != shapeRules.begin() && isFlexible(first->getValue())) { --first; @@ -174,10 +205,13 @@ TimeRange extendToFixedRules(const TimeRange& timeRange, const ContinuousTimelin while (std::next(last) != shapeRules.end() && isFlexible(last->getValue())) { ++last; } - return TimeRange(first->getStart(), last->getEnd()); + return { first->getStart(), last->getEnd() }; } -JoiningContinuousTimeline avoidStaticSegments(const ContinuousTimeline& shapeRules, AnimationFunction animate) { +JoiningContinuousTimeline avoidStaticSegments( + const ContinuousTimeline& shapeRules, + const AnimationFunction& animate +) { const auto animation = animate(shapeRules); const vector staticSegments = getStaticSegments(shapeRules, animation); if (staticSegments.empty()) { @@ -187,11 
+221,15 @@ JoiningContinuousTimeline avoidStaticSegments(const ContinuousTimeline fixedShapeRules(shapeRules); for (const TimeRange& staticSegment : staticSegments) { - // Extend time range to the left and right so we don't lose adjacent rules that might influence the animation + // Extend time range to the left and right so we don't lose adjacent rules that might + // influence the animation const TimeRange extendedStaticSegment = extendToFixedRules(staticSegment, shapeRules); // Fix shape rules within the static segment - const auto fixedSegmentShapeRules = fixStaticSegmentRules({extendedStaticSegment, ShapeRule::getInvalid(), fixedShapeRules}, animate); + const auto fixedSegmentShapeRules = fixStaticSegmentRules( + { extendedStaticSegment, ShapeRule::getInvalid(), fixedShapeRules }, + animate + ); for (const auto& timedShapeRule : fixedSegmentShapeRules) { fixedShapeRules.set(timedShapeRule); } diff --git a/rhubarb/src/animation/staticSegments.h b/rhubarb/src/animation/staticSegments.h index 5b8ff68..843996e 100644 --- a/rhubarb/src/animation/staticSegments.h +++ b/rhubarb/src/animation/staticSegments.h @@ -8,7 +8,11 @@ using AnimationFunction = std::function(const ContinuousTimeline&)>; // Calls the specified animation function with the specified shape rules. -// If the resulting animation contains long static segments, the shape rules are tweaked and animated again. +// If the resulting animation contains long static segments, the shape rules are tweaked and +// animated again. // Static segments happen rather often. // See http://animateducated.blogspot.de/2016/10/lip-sync-animation-2.html?showComment=1478861729702#c2940729096183546458. -JoiningContinuousTimeline avoidStaticSegments(const ContinuousTimeline& shapeRules, AnimationFunction animate); +JoiningContinuousTimeline avoidStaticSegments( + const ContinuousTimeline& shapeRules, + const AnimationFunction& animate +); diff --git a/rhubarb/src/animation/targetShapeSet.cpp b/rhubarb/src/animation/targetShapeSet.cpp index dc9807b..da32ef7 100644 --- a/rhubarb/src/animation/targetShapeSet.cpp +++ b/rhubarb/src/animation/targetShapeSet.cpp @@ -4,9 +4,10 @@ Shape convertToTargetShapeSet(Shape shape, const ShapeSet& targetShapeSet) { if (targetShapeSet.find(shape) != targetShapeSet.end()) { return shape; } - Shape basicShape = getBasicShape(shape); + const Shape basicShape = getBasicShape(shape); if (targetShapeSet.find(basicShape) == targetShapeSet.end()) { - throw std::invalid_argument(fmt::format("Target shape set must contain basic shape {}.", basicShape)); + throw std::invalid_argument( + fmt::format("Target shape set must contain basic shape {}.", basicShape)); } return basicShape; } @@ -19,7 +20,10 @@ ShapeSet convertToTargetShapeSet(const ShapeSet& shapes, const ShapeSet& targetS return result; } -ContinuousTimeline convertToTargetShapeSet(const ContinuousTimeline& shapeRules, const ShapeSet& targetShapeSet) { +ContinuousTimeline convertToTargetShapeSet( + const ContinuousTimeline& shapeRules, + const ShapeSet& targetShapeSet +) { ContinuousTimeline result(shapeRules); for (const auto& timedShapeRule : shapeRules) { ShapeRule rule = timedShapeRule.getValue(); @@ -29,10 +33,16 @@ ContinuousTimeline convertToTargetShapeSet(const ContinuousTimeline convertToTargetShapeSet(const JoiningContinuousTimeline& animation, const ShapeSet& targetShapeSet) { +JoiningContinuousTimeline convertToTargetShapeSet( + const JoiningContinuousTimeline& animation, + const ShapeSet& targetShapeSet +) { JoiningContinuousTimeline result(animation); for 
(const auto& timedShape : animation) { - result.set(timedShape.getTimeRange(), convertToTargetShapeSet(timedShape.getValue(), targetShapeSet)); + result.set( + timedShape.getTimeRange(), + convertToTargetShapeSet(timedShape.getValue(), targetShapeSet) + ); } return result; } diff --git a/rhubarb/src/animation/targetShapeSet.h b/rhubarb/src/animation/targetShapeSet.h index 0725114..f60379b 100644 --- a/rhubarb/src/animation/targetShapeSet.h +++ b/rhubarb/src/animation/targetShapeSet.h @@ -6,11 +6,19 @@ // Returns the closest shape to the specified one that occurs in the target shape set. Shape convertToTargetShapeSet(Shape shape, const ShapeSet& targetShapeSet); -// Replaces each shape in the specified set with the closest shape that occurs in the target shape set. +// Replaces each shape in the specified set with the closest shape that occurs in the target shape +// set. ShapeSet convertToTargetShapeSet(const ShapeSet& shapes, const ShapeSet& targetShapeSet); // Replaces each shape in each rule with the closest shape that occurs in the target shape set. -ContinuousTimeline convertToTargetShapeSet(const ContinuousTimeline& shapeRules, const ShapeSet& targetShapeSet); +ContinuousTimeline convertToTargetShapeSet( + const ContinuousTimeline& shapeRules, + const ShapeSet& targetShapeSet +); -// Replaces each shape in the specified animation with the closest shape that occurs in the target shape set. -JoiningContinuousTimeline convertToTargetShapeSet(const JoiningContinuousTimeline& animation, const ShapeSet& targetShapeSet); +// Replaces each shape in the specified animation with the closest shape that occurs in the target +// shape set. +JoiningContinuousTimeline convertToTargetShapeSet( + const JoiningContinuousTimeline& animation, + const ShapeSet& targetShapeSet +); diff --git a/rhubarb/src/animation/timingOptimization.cpp b/rhubarb/src/animation/timingOptimization.cpp index b9ccd55..321f6ba 100644 --- a/rhubarb/src/animation/timingOptimization.cpp +++ b/rhubarb/src/animation/timingOptimization.cpp @@ -11,7 +11,7 @@ using std::map; string getShapesString(const JoiningContinuousTimeline& shapes) { string result; for (const auto& timedShape : shapes) { - if (result.size()) { + if (!result.empty()) { result.append(" "); } result.append(boost::lexical_cast(timedShape.getValue())); @@ -44,12 +44,10 @@ Shape getRepresentativeShape(const JoiningTimeline& timeline) { struct ShapeReduction { ShapeReduction(const JoiningTimeline& sourceShapes) : sourceShapes(sourceShapes), - shape(getRepresentativeShape(sourceShapes)) - {} + shape(getRepresentativeShape(sourceShapes)) {} ShapeReduction(const JoiningTimeline& sourceShapes, TimeRange candidateRange) : - ShapeReduction(JoiningBoundedTimeline(candidateRange, sourceShapes)) - {} + ShapeReduction(JoiningBoundedTimeline(candidateRange, sourceShapes)) {} JoiningTimeline sourceShapes; Shape shape; @@ -57,7 +55,8 @@ struct ShapeReduction { // Returns a time range of candidate shapes for the next shape to draw. // Guaranteed to be non-empty. 
-TimeRange getNextMinimalCandidateRange(const JoiningContinuousTimeline& sourceShapes, const TimeRange targetRange, const centiseconds writePosition) { +TimeRange getNextMinimalCandidateRange(const JoiningContinuousTimeline& sourceShapes, + const TimeRange targetRange, const centiseconds writePosition) { if (sourceShapes.empty()) { throw std::invalid_argument("Cannot determine candidate range for empty source timeline."); } @@ -70,12 +69,15 @@ TimeRange getNextMinimalCandidateRange(const JoiningContinuousTimeline& s const centiseconds remainingTargetDuration = writePosition - targetRange.getStart(); const bool canFitOneOrLess = remainingTargetDuration <= minShapeDuration; const bool canFitTwo = remainingTargetDuration >= 2 * minShapeDuration; - const centiseconds duration = canFitOneOrLess || canFitTwo ? minShapeDuration : remainingTargetDuration / 2; + const centiseconds duration = canFitOneOrLess || canFitTwo + ? minShapeDuration + : remainingTargetDuration / 2; TimeRange candidateRange(writePosition - duration, writePosition); if (writePosition == targetRange.getEnd()) { // This is the first iteration. - // Extend the candidate range to the right in order to consider all source shapes after the target range. + // Extend the candidate range to the right in order to consider all source shapes after the + // target range. candidateRange.setEndIfLater(sourceShapes.getRange().getEnd()); } if (candidateRange.getStart() >= sourceShapes.getRange().getEnd()) { @@ -92,19 +94,31 @@ TimeRange getNextMinimalCandidateRange(const JoiningContinuousTimeline& s return candidateRange; } -ShapeReduction getNextShapeReduction(const JoiningContinuousTimeline& sourceShapes, const TimeRange targetRange, centiseconds writePosition) { +ShapeReduction getNextShapeReduction( + const JoiningContinuousTimeline& sourceShapes, + const TimeRange targetRange, + centiseconds writePosition +) { // Determine the next time range of candidate shapes. Consider two scenarios: // ... the shortest-possible candidate range - const ShapeReduction minReduction(sourceShapes, getNextMinimalCandidateRange(sourceShapes, targetRange, writePosition)); + const ShapeReduction minReduction(sourceShapes, + getNextMinimalCandidateRange(sourceShapes, targetRange, writePosition)); // ... a candidate range extended to the left to fully encompass its left-most shape const ShapeReduction extendedReduction(sourceShapes, - {minReduction.sourceShapes.begin()->getStart(), minReduction.sourceShapes.getRange().getEnd()}); + { + minReduction.sourceShapes.begin()->getStart(), + minReduction.sourceShapes.getRange().getEnd() + } + ); - // Determine the shape that might be picked *next* if we choose the shortest-possible candidate range now - const ShapeReduction nextReduction(sourceShapes, - getNextMinimalCandidateRange(sourceShapes, targetRange, minReduction.sourceShapes.getRange().getStart())); + // Determine the shape that might be picked *next* if we choose the shortest-possible candidate + // range now + const ShapeReduction nextReduction( + sourceShapes, + getNextMinimalCandidateRange(sourceShapes, targetRange, minReduction.sourceShapes.getRange().getStart()) + ); const bool minEqualsExtended = minReduction.shape == extendedReduction.shape; const bool extendedIsSpecial = extendedReduction.shape != minReduction.shape @@ -113,8 +127,10 @@ ShapeReduction getNextShapeReduction(const JoiningContinuousTimeline& sou return minEqualsExtended || extendedIsSpecial ? 
extendedReduction : minReduction; } -// Modifies the timing of the given animation to fit into the specified target time range without jitter. -JoiningContinuousTimeline retime(const JoiningContinuousTimeline& sourceShapes, const TimeRange targetRange) { +// Modifies the timing of the given animation to fit into the specified target time range without +// jitter. +JoiningContinuousTimeline retime(const JoiningContinuousTimeline& sourceShapes, + const TimeRange targetRange) { logTimedEvent("segment", targetRange, getShapesString(sourceShapes)); JoiningContinuousTimeline result(targetRange, Shape::X); @@ -125,7 +141,8 @@ JoiningContinuousTimeline retime(const JoiningContinuousTimeline& while (writePosition > targetRange.getStart()) { // Decide which shape to show next, possibly discarding short shapes - const ShapeReduction shapeReduction = getNextShapeReduction(sourceShapes, targetRange, writePosition); + const ShapeReduction shapeReduction = + getNextShapeReduction(sourceShapes, targetRange, writePosition); // Determine how long to display the shape TimeRange targetShapeRange(shapeReduction.sourceShapes.getRange()); @@ -144,7 +161,11 @@ JoiningContinuousTimeline retime(const JoiningContinuousTimeline& return result; } -JoiningContinuousTimeline retime(const JoiningContinuousTimeline& animation, TimeRange sourceRange, TimeRange targetRange) { +JoiningContinuousTimeline retime( + const JoiningContinuousTimeline& animation, + TimeRange sourceRange, + TimeRange targetRange +) { const auto sourceShapes = JoiningContinuousTimeline(sourceRange, Shape::X, animation); return retime(sourceShapes, targetRange); } @@ -160,7 +181,12 @@ JoiningContinuousTimeline optimizeTiming(const JoiningContinuousTimeline< JoiningContinuousTimeline segments(animation.getRange(), MouthState::Idle); for (const auto& timedShape : animation) { const Shape shape = timedShape.getValue(); - const MouthState mouthState = shape == Shape::X ? MouthState::Idle : shape == Shape::A ? MouthState::Closed : MouthState::Open; + const MouthState mouthState = + shape == Shape::X + ? MouthState::Idle + : shape == Shape::A + ? MouthState::Closed + : MouthState::Open; segments.set(timedShape.getTimeRange(), mouthState); } @@ -171,7 +197,8 @@ JoiningContinuousTimeline optimizeTiming(const JoiningContinuousTimeline< // Make sure all open and closed segments are long enough to register visually. JoiningContinuousTimeline result(animation.getRange(), Shape::X); - // ... we're filling the result timeline from right to left, so `resultStart` points to the earliest shape already written + // ... we're filling the result timeline from right to left, so `resultStart` points to the + // earliest shape already written centiseconds resultStart = result.getRange().getEnd(); for (auto segmentIt = segments.rbegin(); segmentIt != segments.rend(); ++segmentIt) { // We don't care about idle shapes at this point. @@ -188,26 +215,40 @@ JoiningContinuousTimeline optimizeTiming(const JoiningContinuousTimeline< resultStart = targetRange.getStart(); } else { // The segment is too short; we have to extend it to the left. - // Find all adjacent segments to our left that are also too short, then distribute them evenly. + // Find all adjacent segments to our left that are also too short, then distribute them + // evenly. 
const auto begin = segmentIt; auto end = std::next(begin); - while (end != segments.rend() && end->getValue() != MouthState::Idle && end->getDuration() < minSegmentDuration) ++end; + while ( + end != segments.rend() + && end->getValue() != MouthState::Idle + && end->getDuration() < minSegmentDuration + ) { + ++end; + } // Determine how much we should extend the entire set of short segments to the left const size_t shortSegmentCount = std::distance(begin, end); const centiseconds desiredDuration = minSegmentDuration * shortSegmentCount; const centiseconds currentDuration = begin->getEnd() - std::prev(end)->getStart(); const centiseconds desiredExtensionDuration = desiredDuration - currentDuration; - const centiseconds availableExtensionDuration = end != segments.rend() ? end->getDuration() - 1_cs : 0_cs; - const centiseconds extensionDuration = std::min({desiredExtensionDuration, availableExtensionDuration, maxExtensionDuration}); + const centiseconds availableExtensionDuration = end != segments.rend() + ? end->getDuration() - 1_cs + : 0_cs; + const centiseconds extensionDuration = std::min({ + desiredExtensionDuration, availableExtensionDuration, maxExtensionDuration + }); // Distribute available time range evenly among all short segments - const centiseconds shortSegmentsTargetStart = std::prev(end)->getStart() - extensionDuration; + const centiseconds shortSegmentsTargetStart = + std::prev(end)->getStart() - extensionDuration; for (auto shortSegmentIt = begin; shortSegmentIt != end; ++shortSegmentIt) { size_t remainingShortSegmentCount = std::distance(shortSegmentIt, end); - const centiseconds segmentDuration = (resultStart - shortSegmentsTargetStart) / remainingShortSegmentCount; + const centiseconds segmentDuration = (resultStart - shortSegmentsTargetStart) / + remainingShortSegmentCount; const TimeRange segmentTargetRange(resultStart - segmentDuration, resultStart); - const auto retimedSegment = retime(animation, shortSegmentIt->getTimeRange(), segmentTargetRange); + const auto retimedSegment = + retime(animation, shortSegmentIt->getTimeRange(), segmentTargetRange); for (const auto& timedShape : retimedSegment) { result.set(timedShape); } diff --git a/rhubarb/src/animation/timingOptimization.h b/rhubarb/src/animation/timingOptimization.h index e8bb691..27de323 100644 --- a/rhubarb/src/animation/timingOptimization.h +++ b/rhubarb/src/animation/timingOptimization.h @@ -3,6 +3,7 @@ #include "core/Shape.h" #include "time/ContinuousTimeline.h" -// Changes the timing of an existing animation to reduce jitter and to make sure all shapes register visually. +// Changes the timing of an existing animation to reduce jitter and to make sure all shapes register +// visually. // In some cases, shapes may be omitted. JoiningContinuousTimeline optimizeTiming(const JoiningContinuousTimeline& animation); diff --git a/rhubarb/src/animation/tweening.cpp b/rhubarb/src/animation/tweening.cpp index c487049..6e9a9e7 100644 --- a/rhubarb/src/animation/tweening.cpp +++ b/rhubarb/src/animation/tweening.cpp @@ -19,21 +19,30 @@ JoiningContinuousTimeline insertTweens(const JoiningContinuousTimeline= size) { - throw invalid_argument(fmt::format("Cannot read from sample index {}. Clip size is {}.", index, size)); + throw invalid_argument(fmt::format( + "Cannot read from sample index {}. 
Clip size is {}.", + index, + size + )); } if (index == lastIndex) { return lastSample; @@ -51,7 +55,7 @@ AudioClip::iterator AudioClip::end() const { return SampleIterator(*this, size()); } -std::unique_ptr operator|(std::unique_ptr clip, AudioEffect effect) { +std::unique_ptr operator|(std::unique_ptr clip, const AudioEffect& effect) { return effect(std::move(clip)); } diff --git a/rhubarb/src/audio/AudioClip.h b/rhubarb/src/audio/AudioClip.h index 4a188b3..e2a235a 100644 --- a/rhubarb/src/audio/AudioClip.h +++ b/rhubarb/src/audio/AudioClip.h @@ -30,7 +30,7 @@ private: using AudioEffect = std::function(std::unique_ptr)>; -std::unique_ptr operator|(std::unique_ptr clip, AudioEffect effect); +std::unique_ptr operator|(std::unique_ptr clip, const AudioEffect& effect); using SampleReader = AudioClip::SampleReader; diff --git a/rhubarb/src/audio/DcOffset.cpp b/rhubarb/src/audio/DcOffset.cpp index 3a4a09c..afb9e71 100644 --- a/rhubarb/src/audio/DcOffset.cpp +++ b/rhubarb/src/audio/DcOffset.cpp @@ -15,15 +15,19 @@ unique_ptr DcOffset::clone() const { } SampleReader DcOffset::createUnsafeSampleReader() const { - return [read = inputClip->createSampleReader(), factor = factor, offset = offset](size_type index) { - float sample = read(index); + return [ + read = inputClip->createSampleReader(), + factor = factor, + offset = offset + ](size_type index) { + const float sample = read(index); return sample * factor + offset; }; } float getDcOffset(const AudioClip& audioClip) { int flatMeanSampleCount, fadingMeanSampleCount; - int sampleRate = audioClip.getSampleRate(); + const int sampleRate = audioClip.getSampleRate(); if (audioClip.size() > 4 * sampleRate) { // Long audio file. Average over the first 3 seconds, then fade out over the 4th. flatMeanSampleCount = 3 * sampleRate; @@ -34,31 +38,32 @@ float getDcOffset(const AudioClip& audioClip) { fadingMeanSampleCount = 0; } - auto read = audioClip.createSampleReader(); + const auto read = audioClip.createSampleReader(); double sum = 0; for (int i = 0; i < flatMeanSampleCount; ++i) { sum += read(i); } for (int i = 0; i < fadingMeanSampleCount; ++i) { - double weight = static_cast(fadingMeanSampleCount - i) / fadingMeanSampleCount; + const double weight = + static_cast(fadingMeanSampleCount - i) / fadingMeanSampleCount; sum += read(flatMeanSampleCount + i) * weight; } - double totalWeight = flatMeanSampleCount + (fadingMeanSampleCount + 1) / 2.0; - double offset = sum / totalWeight; + const double totalWeight = flatMeanSampleCount + (fadingMeanSampleCount + 1) / 2.0; + const double offset = sum / totalWeight; return static_cast(offset); } AudioEffect addDcOffset(float offset, float epsilon) { return [offset, epsilon](unique_ptr inputClip) -> unique_ptr { - if (std::abs(offset) < epsilon) return std::move(inputClip); + if (std::abs(offset) < epsilon) return inputClip; return make_unique(std::move(inputClip), offset); }; } AudioEffect removeDcOffset(float epsilon) { return [epsilon](unique_ptr inputClip) { - float offset = getDcOffset(*inputClip); + const float offset = getDcOffset(*inputClip); return std::move(inputClip) | addDcOffset(-offset, epsilon); }; } diff --git a/rhubarb/src/audio/OggVorbisFileReader.cpp b/rhubarb/src/audio/OggVorbisFileReader.cpp index d3b01e1..f84aaa4 100644 --- a/rhubarb/src/audio/OggVorbisFileReader.cpp +++ b/rhubarb/src/audio/OggVorbisFileReader.cpp @@ -14,30 +14,30 @@ using std::ios_base; std::string vorbisErrorToString(int64_t errorCode) { switch (errorCode) { - case OV_EREAD: - return "Read error while fetching compressed 
data for decode."; - case OV_EFAULT: - return "Internal logic fault; indicates a bug or heap/stack corruption."; - case OV_EIMPL: - return "Feature not implemented"; - case OV_EINVAL: - return "Either an invalid argument, or incompletely initialized argument passed to a call."; - case OV_ENOTVORBIS: - return "The given file/data was not recognized as Ogg Vorbis data."; - case OV_EBADHEADER: - return "The file/data is apparently an Ogg Vorbis stream, but contains a corrupted or undecipherable header."; - case OV_EVERSION: - return "The bitstream format revision of the given Vorbis stream is not supported."; - case OV_ENOTAUDIO: - return "Packet is not an audio packet."; - case OV_EBADPACKET: - return "Error in packet."; - case OV_EBADLINK: - return "The given link exists in the Vorbis data stream, but is not decipherable due to garbacge or corruption."; - case OV_ENOSEEK: - return "The given stream is not seekable."; - default: - return "An unexpected Vorbis error occurred."; + case OV_EREAD: + return "Read error while fetching compressed data for decode."; + case OV_EFAULT: + return "Internal logic fault; indicates a bug or heap/stack corruption."; + case OV_EIMPL: + return "Feature not implemented"; + case OV_EINVAL: + return "Either an invalid argument, or incompletely initialized argument passed to a call."; + case OV_ENOTVORBIS: + return "The given file/data was not recognized as Ogg Vorbis data."; + case OV_EBADHEADER: + return "The file/data is apparently an Ogg Vorbis stream, but contains a corrupted or undecipherable header."; + case OV_EVERSION: + return "The bitstream format revision of the given Vorbis stream is not supported."; + case OV_ENOTAUDIO: + return "Packet is not an audio packet."; + case OV_EBADPACKET: + return "Error in packet."; + case OV_EBADLINK: + return "The given link exists in the Vorbis data stream, but is not decipherable due to garbage or corruption."; + case OV_ENOSEEK: + return "The given stream is not seekable."; + default: + return "An unexpected Vorbis error occurred."; } } @@ -64,13 +64,13 @@ size_t readCallback(void* buffer, size_t elementSize, size_t elementCount, void* } int seekCallback(void* dataSource, ogg_int64_t offset, int origin) { - static const vector seekDirections{ + static const vector seekDirections { ios_base::beg, ios_base::cur, ios_base::end }; ifstream& stream = *static_cast(dataSource); stream.seekg(offset, seekDirections.at(origin)); - stream.clear(); // In case we seeked to EOF + stream.clear(); // In case we sought to EOF return 0; } @@ -82,26 +82,13 @@ long tellCallback(void* dataSource) { } // RAII wrapper around OggVorbis_File -class OggVorbisFile { +class OggVorbisFile final { public: + OggVorbisFile(const path& filePath); + OggVorbisFile(const OggVorbisFile&) = delete; OggVorbisFile& operator=(const OggVorbisFile&) = delete; - OggVorbisFile(const path& filePath) : - stream(openFile(filePath)) - { - // Throw only on badbit, not on failbit. - // Ogg Vorbis expects read operations past the end of the file to - // succeed, not to throw. - stream.exceptions(ifstream::badbit); - - // Ogg Vorbis normally uses the `FILE` API from the C standard library. - // This doesn't handle Unicode paths on Windows. - // Use wrapper functions around `ifstream` instead. 
- const ov_callbacks callbacks{readCallback, seekCallback, nullptr, tellCallback}; - throwOnError(ov_open_callbacks(&stream, &oggVorbisHandle, nullptr, 0, callbacks)); - } - OggVorbis_File* get() { return &oggVorbisHandle; } @@ -115,6 +102,22 @@ private: ifstream stream; }; +OggVorbisFile::OggVorbisFile(const path& filePath) : + oggVorbisHandle(), + stream(openFile(filePath)) +{ + // Throw only on badbit, not on failbit. + // Ogg Vorbis expects read operations past the end of the file to + // succeed, not to throw. + stream.exceptions(ifstream::badbit); + + // Ogg Vorbis normally uses the `FILE` API from the C standard library. + // This doesn't handle Unicode paths on Windows. + // Use wrapper functions around `ifstream` instead. + const ov_callbacks callbacks { readCallback, seekCallback, nullptr, tellCallback }; + throwOnError(ov_open_callbacks(&stream, &oggVorbisHandle, nullptr, 0, callbacks)); +} + OggVorbisFileReader::OggVorbisFileReader(const path& filePath) : filePath(filePath) { @@ -153,7 +156,7 @@ SampleReader OggVorbisFileReader::createUnsafeSampleReader() const { } // Downmix channels - size_type bufferIndex = index - bufferStart; + const size_type bufferIndex = index - bufferStart; value_type sum = 0.0f; for (int channel = 0; channel < channelCount; ++channel) { sum += buffer[channel][bufferIndex]; diff --git a/rhubarb/src/audio/SampleRateConverter.cpp b/rhubarb/src/audio/SampleRateConverter.cpp index 24fba01..665a7e5 100644 --- a/rhubarb/src/audio/SampleRateConverter.cpp +++ b/rhubarb/src/audio/SampleRateConverter.cpp @@ -17,7 +17,10 @@ SampleRateConverter::SampleRateConverter(unique_ptr inputClip, int ou throw invalid_argument("Sample rate must be positive."); } if (this->inputClip->getSampleRate() < outputSampleRate) { - throw invalid_argument(fmt::format("Upsampling not supported. Input sample rate must not be below {}Hz.", outputSampleRate)); + throw invalid_argument(fmt::format( + "Upsampling not supported. Input sample rate must not be below {}Hz.", + outputSampleRate + )); } } @@ -30,11 +33,11 @@ float mean(double inputStart, double inputEnd, const SampleReader& read) { double sum = 0; // ... first sample (weight <= 1) - int64_t startIndex = static_cast(inputStart); + const int64_t startIndex = static_cast(inputStart); sum += read(startIndex) * ((startIndex + 1) - inputStart); // ... 
middle samples (weight 1 each) - int64_t endIndex = static_cast(inputEnd); + const int64_t endIndex = static_cast(inputEnd); for (int64_t index = startIndex + 1; index < endIndex; ++index) { sum += read(index); } @@ -48,9 +51,14 @@ float mean(double inputStart, double inputEnd, const SampleReader& read) { } SampleReader SampleRateConverter::createUnsafeSampleReader() const { - return[read = inputClip->createSampleReader(), downscalingFactor = downscalingFactor, size = inputClip->size()](size_type index) { - double inputStart = index * downscalingFactor; - double inputEnd = std::min((index + 1) * downscalingFactor, static_cast(size)); + return [ + read = inputClip->createSampleReader(), + downscalingFactor = downscalingFactor, + size = inputClip->size() + ](size_type index) { + const double inputStart = index * downscalingFactor; + const double inputEnd = + std::min((index + 1) * downscalingFactor, static_cast(size)); return mean(inputStart, inputEnd, read); }; } diff --git a/rhubarb/src/audio/WaveFileReader.cpp b/rhubarb/src/audio/WaveFileReader.cpp index e910dad..e8aa872 100644 --- a/rhubarb/src/audio/WaveFileReader.cpp +++ b/rhubarb/src/audio/WaveFileReader.cpp @@ -1,6 +1,7 @@ #include #include "WaveFileReader.h" #include "ioTools.h" +#include #include "tools/platformTools.h" #include "tools/fileTools.h" @@ -32,9 +33,9 @@ namespace Codec { string codecToString(int codec); -WaveFileReader::WaveFileReader(path filePath) : +WaveFileReader::WaveFileReader(const path& filePath) : filePath(filePath), - formatInfo{} + formatInfo {} { auto file = openFile(filePath); @@ -43,7 +44,7 @@ WaveFileReader::WaveFileReader(path filePath) : file.seekg(0); auto remaining = [&](int byteCount) { - std::streamoff filePosition = file.tellg(); + const std::streamoff filePosition = file.tellg(); return byteCount <= fileSize - filePosition; }; @@ -51,7 +52,7 @@ WaveFileReader::WaveFileReader(path filePath) : if (!remaining(10)) { throw runtime_error("WAVE file is corrupt. Header not found."); } - uint32_t rootChunkId = read(file); + auto rootChunkId = read(file); if (rootChunkId != fourcc('R', 'I', 'F', 'F')) { throw runtime_error("Unknown file format. Only WAVE files are supported."); } @@ -67,69 +68,75 @@ WaveFileReader::WaveFileReader(path filePath) : uint32_t chunkId = read(file); int chunkSize = read(file); switch (chunkId) { - case fourcc('f', 'm', 't', ' '): { - // Read relevant data - uint16_t codec = read(file); - formatInfo.channelCount = read(file); - formatInfo.frameRate = read(file); - read(file); // Bytes per second - int frameSize = read(file); - int bitsPerSample = read(file); + case fourcc('f', 'm', 't', ' '): + { + // Read relevant data + uint16_t codec = read(file); + formatInfo.channelCount = read(file); + formatInfo.frameRate = read(file); + read(file); // Bytes per second + int frameSize = read(file); + int bitsPerSample = read(file); - // We've read 16 bytes so far. Skip the remainder. - file.seekg(roundToEven(chunkSize) - 16, file.cur); + // We've read 16 bytes so far. Skip the remainder. + file.seekg(roundToEven(chunkSize) - 16, std::ios_base::cur); - // Determine sample format - int bytesPerSample; - switch (codec) { - case Codec::Pcm: - // Determine sample size. - // According to the WAVE standard, sample sizes that are not multiples of 8 bits - // (e.g. 12 bits) can be treated like the next-larger byte size. 
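As an aside, the roundToEven helper used when skipping chunk data reflects the RIFF rule that chunk contents are padded to an even byte count. Its implementation is not part of this diff; a plausible sketch:

// Round a RIFF chunk size up to the next even byte count (word alignment).
constexpr int roundToEven(int i) {
	return i % 2 == 0 ? i : i + 1;
}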
- if (bitsPerSample == 8) { - formatInfo.sampleFormat = SampleFormat::UInt8; - bytesPerSample = 1; - } else if (bitsPerSample <= 16) { - formatInfo.sampleFormat = SampleFormat::Int16; - bytesPerSample = 2; - } else if (bitsPerSample <= 24) { - formatInfo.sampleFormat = SampleFormat::Int24; - bytesPerSample = 3; - } else { - throw runtime_error( - format("Unsupported sample format: {}-bit PCM.", bitsPerSample)); - } - if (bytesPerSample != frameSize / formatInfo.channelCount) { - throw runtime_error("Unsupported sample organization."); + // Determine sample format + int bytesPerSample; + switch (codec) { + case Codec::Pcm: + // Determine sample size. + // According to the WAVE standard, sample sizes that are not multiples of 8 + // bits (e.g. 12 bits) can be treated like the next-larger byte size. + if (bitsPerSample == 8) { + formatInfo.sampleFormat = SampleFormat::UInt8; + bytesPerSample = 1; + } else if (bitsPerSample <= 16) { + formatInfo.sampleFormat = SampleFormat::Int16; + bytesPerSample = 2; + } else if (bitsPerSample <= 24) { + formatInfo.sampleFormat = SampleFormat::Int24; + bytesPerSample = 3; + } else { + throw runtime_error( + format("Unsupported sample format: {}-bit PCM.", bitsPerSample)); + } + if (bytesPerSample != frameSize / formatInfo.channelCount) { + throw runtime_error("Unsupported sample organization."); + } + break; + case Codec::Float: + if (bitsPerSample == 32) { + formatInfo.sampleFormat = SampleFormat::Float32; + bytesPerSample = 4; + } else { + throw runtime_error( + format("Unsupported sample format: {}-bit IEEE Float.", bitsPerSample) + ); + } + break; + default: + throw runtime_error(format( + "Unsupported audio codec: '{}'. Only uncompressed codecs ('{}' and '{}') are supported.", + codecToString(codec), codecToString(Codec::Pcm), codecToString(Codec::Float) + )); } + formatInfo.bytesPerFrame = bytesPerSample * formatInfo.channelCount; + break; + } + case fourcc('d', 'a', 't', 'a'): + { + reachedDataChunk = true; + formatInfo.dataOffset = file.tellg(); + formatInfo.frameCount = chunkSize / formatInfo.bytesPerFrame; + break; + } + default: + { + // Skip unknown chunk + file.seekg(roundToEven(chunkSize), std::ios_base::cur); break; - case Codec::Float: - if (bitsPerSample == 32) { - formatInfo.sampleFormat = SampleFormat::Float32; - bytesPerSample = 4; - } else { - throw runtime_error(format("Unsupported sample format: {}-bit IEEE Float.", bitsPerSample)); - } - break; - default: - throw runtime_error(format( - "Unsupported audio codec: '{}'. 
Only uncompressed codecs ('{}' and '{}') are supported.", - codecToString(codec), codecToString(Codec::Pcm), codecToString(Codec::Float))); } - formatInfo.bytesPerFrame = bytesPerSample * formatInfo.channelCount; - break; - } - case fourcc('d', 'a', 't', 'a'): { - reachedDataChunk = true; - formatInfo.dataOffset = file.tellg(); - formatInfo.frameCount = chunkSize / formatInfo.bytesPerFrame; - break; - } - default: { - // Skip unknown chunk - file.seekg(roundToEven(chunkSize), file.cur); - break; - } } } } @@ -138,30 +145,38 @@ unique_ptr WaveFileReader::clone() const { return make_unique(*this); } -inline AudioClip::value_type readSample(std::ifstream& file, SampleFormat sampleFormat, int channelCount) { +inline AudioClip::value_type readSample( + std::ifstream& file, + SampleFormat sampleFormat, + int channelCount +) { float sum = 0; for (int channelIndex = 0; channelIndex < channelCount; channelIndex++) { switch (sampleFormat) { - case SampleFormat::UInt8: { - uint8_t raw = read(file); - sum += toNormalizedFloat(raw, 0, UINT8_MAX); - break; - } - case SampleFormat::Int16: { - int16_t raw = read(file); - sum += toNormalizedFloat(raw, INT16_MIN, INT16_MAX); - break; - } - case SampleFormat::Int24: { - int raw = read(file); - if (raw & 0x800000) raw |= 0xFF000000; // Fix two's complement - sum += toNormalizedFloat(raw, INT24_MIN, INT24_MAX); - break; - } - case SampleFormat::Float32: { - sum += read(file); - break; - } + case SampleFormat::UInt8: + { + const uint8_t raw = read(file); + sum += toNormalizedFloat(raw, 0, UINT8_MAX); + break; + } + case SampleFormat::Int16: + { + const int16_t raw = read(file); + sum += toNormalizedFloat(raw, INT16_MIN, INT16_MAX); + break; + } + case SampleFormat::Int24: + { + int raw = read(file); + if (raw & 0x800000) raw |= 0xFF000000; // Fix two's complement + sum += toNormalizedFloat(raw, INT24_MIN, INT24_MAX); + break; + } + case SampleFormat::Float32: + { + sum += read(file); + break; + } } } @@ -169,10 +184,17 @@ inline AudioClip::value_type readSample(std::ifstream& file, SampleFormat sample } SampleReader WaveFileReader::createUnsafeSampleReader() const { - return [formatInfo = formatInfo, file = std::make_shared(openFile(filePath)), filePos = std::streampos(0)](size_type index) mutable { - std::streampos newFilePos = formatInfo.dataOffset + static_cast(index * formatInfo.bytesPerFrame); + return + [ + formatInfo = formatInfo, + file = std::make_shared(openFile(filePath)), + filePos = std::streampos(0) + ](size_type index) mutable { + const std::streampos newFilePos = formatInfo.dataOffset + + static_cast(index * formatInfo.bytesPerFrame); file->seekg(newFilePos); - value_type result = readSample(*file, formatInfo.sampleFormat, formatInfo.channelCount); + const value_type result = + readSample(*file, formatInfo.sampleFormat, formatInfo.channelCount); filePos = newFilePos + static_cast(formatInfo.bytesPerFrame); return result; }; @@ -180,248 +202,249 @@ SampleReader WaveFileReader::createUnsafeSampleReader() const { string codecToString(int codec) { switch (codec) { - case 0x0001: return "PCM"; - case 0x0002: return "Microsoft ADPCM"; - case 0x0003: return "IEEE Float"; - case 0x0004: return "Compaq VSELP"; - case 0x0005: return "IBM CVSD"; - case 0x0006: return "Microsoft a-Law"; - case 0x0007: return "Microsoft u-Law"; - case 0x0008: return "Microsoft DTS"; - case 0x0009: return "DRM"; - case 0x000a: return "WMA 9 Speech"; - case 0x000b: return "Microsoft Windows Media RT Voice"; - case 0x0010: return "OKI-ADPCM"; - case 0x0011: return "Intel 
IMA/DVI-ADPCM"; - case 0x0012: return "Videologic Mediaspace ADPCM"; - case 0x0013: return "Sierra ADPCM"; - case 0x0014: return "Antex G.723 ADPCM"; - case 0x0015: return "DSP Solutions DIGISTD"; - case 0x0016: return "DSP Solutions DIGIFIX"; - case 0x0017: return "Dialoic OKI ADPCM"; - case 0x0018: return "Media Vision ADPCM"; - case 0x0019: return "HP CU"; - case 0x001a: return "HP Dynamic Voice"; - case 0x0020: return "Yamaha ADPCM"; - case 0x0021: return "SONARC Speech Compression"; - case 0x0022: return "DSP Group True Speech"; - case 0x0023: return "Echo Speech Corp."; - case 0x0024: return "Virtual Music Audiofile AF36"; - case 0x0025: return "Audio Processing Tech."; - case 0x0026: return "Virtual Music Audiofile AF10"; - case 0x0027: return "Aculab Prosody 1612"; - case 0x0028: return "Merging Tech. LRC"; - case 0x0030: return "Dolby AC2"; - case 0x0031: return "Microsoft GSM610"; - case 0x0032: return "MSN Audio"; - case 0x0033: return "Antex ADPCME"; - case 0x0034: return "Control Resources VQLPC"; - case 0x0035: return "DSP Solutions DIGIREAL"; - case 0x0036: return "DSP Solutions DIGIADPCM"; - case 0x0037: return "Control Resources CR10"; - case 0x0038: return "Natural MicroSystems VBX ADPCM"; - case 0x0039: return "Crystal Semiconductor IMA ADPCM"; - case 0x003a: return "Echo Speech ECHOSC3"; - case 0x003b: return "Rockwell ADPCM"; - case 0x003c: return "Rockwell DIGITALK"; - case 0x003d: return "Xebec Multimedia"; - case 0x0040: return "Antex G.721 ADPCM"; - case 0x0041: return "Antex G.728 CELP"; - case 0x0042: return "Microsoft MSG723"; - case 0x0043: return "IBM AVC ADPCM"; - case 0x0045: return "ITU-T G.726"; - case 0x0050: return "Microsoft MPEG"; - case 0x0051: return "RT23 or PAC"; - case 0x0052: return "InSoft RT24"; - case 0x0053: return "InSoft PAC"; - case 0x0055: return "MP3"; - case 0x0059: return "Cirrus"; - case 0x0060: return "Cirrus Logic"; - case 0x0061: return "ESS Tech. PCM"; - case 0x0062: return "Voxware Inc."; - case 0x0063: return "Canopus ATRAC"; - case 0x0064: return "APICOM G.726 ADPCM"; - case 0x0065: return "APICOM G.722 ADPCM"; - case 0x0066: return "Microsoft DSAT"; - case 0x0067: return "Micorsoft DSAT DISPLAY"; - case 0x0069: return "Voxware Byte Aligned"; - case 0x0070: return "Voxware AC8"; - case 0x0071: return "Voxware AC10"; - case 0x0072: return "Voxware AC16"; - case 0x0073: return "Voxware AC20"; - case 0x0074: return "Voxware MetaVoice"; - case 0x0075: return "Voxware MetaSound"; - case 0x0076: return "Voxware RT29HW"; - case 0x0077: return "Voxware VR12"; - case 0x0078: return "Voxware VR18"; - case 0x0079: return "Voxware TQ40"; - case 0x007a: return "Voxware SC3"; - case 0x007b: return "Voxware SC3"; - case 0x0080: return "Soundsoft"; - case 0x0081: return "Voxware TQ60"; - case 0x0082: return "Microsoft MSRT24"; - case 0x0083: return "AT&T G.729A"; - case 0x0084: return "Motion Pixels MVI MV12"; - case 0x0085: return "DataFusion G.726"; - case 0x0086: return "DataFusion GSM610"; - case 0x0088: return "Iterated Systems Audio"; - case 0x0089: return "Onlive"; - case 0x008a: return "Multitude, Inc. 
FT SX20"; - case 0x008b: return "Infocom ITS A/S G.721 ADPCM"; - case 0x008c: return "Convedia G729"; - case 0x008d: return "Not specified congruency, Inc."; - case 0x0091: return "Siemens SBC24"; - case 0x0092: return "Sonic Foundry Dolby AC3 APDIF"; - case 0x0093: return "MediaSonic G.723"; - case 0x0094: return "Aculab Prosody 8kbps"; - case 0x0097: return "ZyXEL ADPCM"; - case 0x0098: return "Philips LPCBB"; - case 0x0099: return "Studer Professional Audio Packed"; - case 0x00a0: return "Malden PhonyTalk"; - case 0x00a1: return "Racal Recorder GSM"; - case 0x00a2: return "Racal Recorder G720.a"; - case 0x00a3: return "Racal G723.1"; - case 0x00a4: return "Racal Tetra ACELP"; - case 0x00b0: return "NEC AAC NEC Corporation"; - case 0x00ff: return "AAC"; - case 0x0100: return "Rhetorex ADPCM"; - case 0x0101: return "IBM u-Law"; - case 0x0102: return "IBM a-Law"; - case 0x0103: return "IBM ADPCM"; - case 0x0111: return "Vivo G.723"; - case 0x0112: return "Vivo Siren"; - case 0x0120: return "Philips Speech Processing CELP"; - case 0x0121: return "Philips Speech Processing GRUNDIG"; - case 0x0123: return "Digital G.723"; - case 0x0125: return "Sanyo LD ADPCM"; - case 0x0130: return "Sipro Lab ACEPLNET"; - case 0x0131: return "Sipro Lab ACELP4800"; - case 0x0132: return "Sipro Lab ACELP8V3"; - case 0x0133: return "Sipro Lab G.729"; - case 0x0134: return "Sipro Lab G.729A"; - case 0x0135: return "Sipro Lab Kelvin"; - case 0x0136: return "VoiceAge AMR"; - case 0x0140: return "Dictaphone G.726 ADPCM"; - case 0x0150: return "Qualcomm PureVoice"; - case 0x0151: return "Qualcomm HalfRate"; - case 0x0155: return "Ring Zero Systems TUBGSM"; - case 0x0160: return "Microsoft Audio1"; - case 0x0161: return "Windows Media Audio V2 V7 V8 V9 / DivX audio (WMA) / Alex AC3 Audio"; - case 0x0162: return "Windows Media Audio Professional V9"; - case 0x0163: return "Windows Media Audio Lossless V9"; - case 0x0164: return "WMA Pro over S/PDIF"; - case 0x0170: return "UNISYS NAP ADPCM"; - case 0x0171: return "UNISYS NAP ULAW"; - case 0x0172: return "UNISYS NAP ALAW"; - case 0x0173: return "UNISYS NAP 16K"; - case 0x0174: return "MM SYCOM ACM SYC008 SyCom Technologies"; - case 0x0175: return "MM SYCOM ACM SYC701 G726L SyCom Technologies"; - case 0x0176: return "MM SYCOM ACM SYC701 CELP54 SyCom Technologies"; - case 0x0177: return "MM SYCOM ACM SYC701 CELP68 SyCom Technologies"; - case 0x0178: return "Knowledge Adventure ADPCM"; - case 0x0180: return "Fraunhofer IIS MPEG2AAC"; - case 0x0190: return "Digital Theater Systems DTS DS"; - case 0x0200: return "Creative Labs ADPCM"; - case 0x0202: return "Creative Labs FASTSPEECH8"; - case 0x0203: return "Creative Labs FASTSPEECH10"; - case 0x0210: return "UHER ADPCM"; - case 0x0215: return "Ulead DV ACM"; - case 0x0216: return "Ulead DV ACM"; - case 0x0220: return "Quarterdeck Corp."; - case 0x0230: return "I-Link VC"; - case 0x0240: return "Aureal Semiconductor Raw Sport"; - case 0x0241: return "ESST AC3"; - case 0x0250: return "Interactive Products HSX"; - case 0x0251: return "Interactive Products RPELP"; - case 0x0260: return "Consistent CS2"; - case 0x0270: return "Sony SCX"; - case 0x0271: return "Sony SCY"; - case 0x0272: return "Sony ATRAC3"; - case 0x0273: return "Sony SPC"; - case 0x0280: return "TELUM Telum Inc."; - case 0x0281: return "TELUMIA Telum Inc."; - case 0x0285: return "Norcom Voice Systems ADPCM"; - case 0x0300: return "Fujitsu FM TOWNS SND"; - case 0x0301: - case 0x0302: - case 0x0303: - case 0x0304: - case 0x0305: - case 0x0306: - case 0x0307: - 
case 0x0308: return "Fujitsu (not specified)"; - case 0x0350: return "Micronas Semiconductors, Inc. Development"; - case 0x0351: return "Micronas Semiconductors, Inc. CELP833"; - case 0x0400: return "Brooktree Digital"; - case 0x0401: return "Intel Music Coder (IMC)"; - case 0x0402: return "Ligos Indeo Audio"; - case 0x0450: return "QDesign Music"; - case 0x0500: return "On2 VP7 On2 Technologies"; - case 0x0501: return "On2 VP6 On2 Technologies"; - case 0x0680: return "AT&T VME VMPCM"; - case 0x0681: return "AT&T TCP"; - case 0x0700: return "YMPEG Alpha (dummy for MPEG-2 compressor)"; - case 0x08ae: return "ClearJump LiteWave (lossless)"; - case 0x1000: return "Olivetti GSM"; - case 0x1001: return "Olivetti ADPCM"; - case 0x1002: return "Olivetti CELP"; - case 0x1003: return "Olivetti SBC"; - case 0x1004: return "Olivetti OPR"; - case 0x1100: return "Lernout & Hauspie"; - case 0x1101: return "Lernout & Hauspie CELP codec"; - case 0x1102: - case 0x1103: - case 0x1104: return "Lernout & Hauspie SBC codec"; - case 0x1400: return "Norris Comm. Inc."; - case 0x1401: return "ISIAudio"; - case 0x1500: return "AT&T Soundspace Music Compression"; - case 0x181c: return "VoxWare RT24 speech codec"; - case 0x181e: return "Lucent elemedia AX24000P Music codec"; - case 0x1971: return "Sonic Foundry LOSSLESS"; - case 0x1979: return "Innings Telecom Inc. ADPCM"; - case 0x1c07: return "Lucent SX8300P speech codec"; - case 0x1c0c: return "Lucent SX5363S G.723 compliant codec"; - case 0x1f03: return "CUseeMe DigiTalk (ex-Rocwell)"; - case 0x1fc4: return "NCT Soft ALF2CD ACM"; - case 0x2000: return "FAST Multimedia DVM"; - case 0x2001: return "Dolby DTS (Digital Theater System)"; - case 0x2002: return "RealAudio 1 / 2 14.4"; - case 0x2003: return "RealAudio 1 / 2 28.8"; - case 0x2004: return "RealAudio G2 / 8 Cook (low bitrate)"; - case 0x2005: return "RealAudio 3 / 4 / 5 Music (DNET)"; - case 0x2006: return "RealAudio 10 AAC (RAAC)"; - case 0x2007: return "RealAudio 10 AAC+ (RACP)"; - case 0x2500: return "Reserved range to 0x2600 Microsoft"; - case 0x3313: return "makeAVIS (ffvfw fake AVI sound from AviSynth scripts)"; - case 0x4143: return "Divio MPEG-4 AAC audio"; - case 0x4201: return "Nokia adaptive multirate"; - case 0x4243: return "Divio G726 Divio, Inc."; - case 0x434c: return "LEAD Speech"; - case 0x564c: return "LEAD Vorbis"; - case 0x5756: return "WavPack Audio"; - case 0x674f: return "Ogg Vorbis (mode 1)"; - case 0x6750: return "Ogg Vorbis (mode 2)"; - case 0x6751: return "Ogg Vorbis (mode 3)"; - case 0x676f: return "Ogg Vorbis (mode 1+)"; - case 0x6770: return "Ogg Vorbis (mode 2+)"; - case 0x6771: return "Ogg Vorbis (mode 3+)"; - case 0x7000: return "3COM NBX 3Com Corporation"; - case 0x706d: return "FAAD AAC"; - case 0x7a21: return "GSM-AMR (CBR, no SID)"; - case 0x7a22: return "GSM-AMR (VBR, including SID)"; - case 0xa100: return "Comverse Infosys Ltd. G723 1"; - case 0xa101: return "Comverse Infosys Ltd. AVQSBC"; - case 0xa102: return "Comverse Infosys Ltd. OLDSBC"; - case 0xa103: return "Symbol Technologies G729A"; - case 0xa104: return "VoiceAge AMR WB VoiceAge Corporation"; - case 0xa105: return "Ingenient Technologies Inc. 
G726"; - case 0xa106: return "ISO/MPEG-4 advanced audio Coding"; - case 0xa107: return "Encore Software Ltd G726"; - case 0xa109: return "Speex ACM Codec xiph.org"; - case 0xdfac: return "DebugMode SonicFoundry Vegas FrameServer ACM Codec"; - case 0xf1ac: return "Free Lossless Audio Codec FLAC"; - case 0xfffe: return "Extensible"; - case 0xffff: return "Development"; + case 0x0001: return "PCM"; + case 0x0002: return "Microsoft ADPCM"; + case 0x0003: return "IEEE Float"; + case 0x0004: return "Compaq VSELP"; + case 0x0005: return "IBM CVSD"; + case 0x0006: return "Microsoft a-Law"; + case 0x0007: return "Microsoft u-Law"; + case 0x0008: return "Microsoft DTS"; + case 0x0009: return "DRM"; + case 0x000a: return "WMA 9 Speech"; + case 0x000b: return "Microsoft Windows Media RT Voice"; + case 0x0010: return "OKI-ADPCM"; + case 0x0011: return "Intel IMA/DVI-ADPCM"; + case 0x0012: return "Videologic Mediaspace ADPCM"; + case 0x0013: return "Sierra ADPCM"; + case 0x0014: return "Antex G.723 ADPCM"; + case 0x0015: return "DSP Solutions DIGISTD"; + case 0x0016: return "DSP Solutions DIGIFIX"; + case 0x0017: return "Dialoic OKI ADPCM"; + case 0x0018: return "Media Vision ADPCM"; + case 0x0019: return "HP CU"; + case 0x001a: return "HP Dynamic Voice"; + case 0x0020: return "Yamaha ADPCM"; + case 0x0021: return "SONARC Speech Compression"; + case 0x0022: return "DSP Group True Speech"; + case 0x0023: return "Echo Speech Corp."; + case 0x0024: return "Virtual Music Audiofile AF36"; + case 0x0025: return "Audio Processing Tech."; + case 0x0026: return "Virtual Music Audiofile AF10"; + case 0x0027: return "Aculab Prosody 1612"; + case 0x0028: return "Merging Tech. LRC"; + case 0x0030: return "Dolby AC2"; + case 0x0031: return "Microsoft GSM610"; + case 0x0032: return "MSN Audio"; + case 0x0033: return "Antex ADPCME"; + case 0x0034: return "Control Resources VQLPC"; + case 0x0035: return "DSP Solutions DIGIREAL"; + case 0x0036: return "DSP Solutions DIGIADPCM"; + case 0x0037: return "Control Resources CR10"; + case 0x0038: return "Natural MicroSystems VBX ADPCM"; + case 0x0039: return "Crystal Semiconductor IMA ADPCM"; + case 0x003a: return "Echo Speech ECHOSC3"; + case 0x003b: return "Rockwell ADPCM"; + case 0x003c: return "Rockwell DIGITALK"; + case 0x003d: return "Xebec Multimedia"; + case 0x0040: return "Antex G.721 ADPCM"; + case 0x0041: return "Antex G.728 CELP"; + case 0x0042: return "Microsoft MSG723"; + case 0x0043: return "IBM AVC ADPCM"; + case 0x0045: return "ITU-T G.726"; + case 0x0050: return "Microsoft MPEG"; + case 0x0051: return "RT23 or PAC"; + case 0x0052: return "InSoft RT24"; + case 0x0053: return "InSoft PAC"; + case 0x0055: return "MP3"; + case 0x0059: return "Cirrus"; + case 0x0060: return "Cirrus Logic"; + case 0x0061: return "ESS Tech. 
PCM"; + case 0x0062: return "Voxware Inc."; + case 0x0063: return "Canopus ATRAC"; + case 0x0064: return "APICOM G.726 ADPCM"; + case 0x0065: return "APICOM G.722 ADPCM"; + case 0x0066: return "Microsoft DSAT"; + case 0x0067: return "Micorsoft DSAT DISPLAY"; + case 0x0069: return "Voxware Byte Aligned"; + case 0x0070: return "Voxware AC8"; + case 0x0071: return "Voxware AC10"; + case 0x0072: return "Voxware AC16"; + case 0x0073: return "Voxware AC20"; + case 0x0074: return "Voxware MetaVoice"; + case 0x0075: return "Voxware MetaSound"; + case 0x0076: return "Voxware RT29HW"; + case 0x0077: return "Voxware VR12"; + case 0x0078: return "Voxware VR18"; + case 0x0079: return "Voxware TQ40"; + case 0x007a: return "Voxware SC3"; + case 0x007b: return "Voxware SC3"; + case 0x0080: return "Soundsoft"; + case 0x0081: return "Voxware TQ60"; + case 0x0082: return "Microsoft MSRT24"; + case 0x0083: return "AT&T G.729A"; + case 0x0084: return "Motion Pixels MVI MV12"; + case 0x0085: return "DataFusion G.726"; + case 0x0086: return "DataFusion GSM610"; + case 0x0088: return "Iterated Systems Audio"; + case 0x0089: return "Onlive"; + case 0x008a: return "Multitude, Inc. FT SX20"; + case 0x008b: return "Infocom ITS A/S G.721 ADPCM"; + case 0x008c: return "Convedia G729"; + case 0x008d: return "Not specified congruency, Inc."; + case 0x0091: return "Siemens SBC24"; + case 0x0092: return "Sonic Foundry Dolby AC3 APDIF"; + case 0x0093: return "MediaSonic G.723"; + case 0x0094: return "Aculab Prosody 8kbps"; + case 0x0097: return "ZyXEL ADPCM"; + case 0x0098: return "Philips LPCBB"; + case 0x0099: return "Studer Professional Audio Packed"; + case 0x00a0: return "Malden PhonyTalk"; + case 0x00a1: return "Racal Recorder GSM"; + case 0x00a2: return "Racal Recorder G720.a"; + case 0x00a3: return "Racal G723.1"; + case 0x00a4: return "Racal Tetra ACELP"; + case 0x00b0: return "NEC AAC NEC Corporation"; + case 0x00ff: return "AAC"; + case 0x0100: return "Rhetorex ADPCM"; + case 0x0101: return "IBM u-Law"; + case 0x0102: return "IBM a-Law"; + case 0x0103: return "IBM ADPCM"; + case 0x0111: return "Vivo G.723"; + case 0x0112: return "Vivo Siren"; + case 0x0120: return "Philips Speech Processing CELP"; + case 0x0121: return "Philips Speech Processing GRUNDIG"; + case 0x0123: return "Digital G.723"; + case 0x0125: return "Sanyo LD ADPCM"; + case 0x0130: return "Sipro Lab ACEPLNET"; + case 0x0131: return "Sipro Lab ACELP4800"; + case 0x0132: return "Sipro Lab ACELP8V3"; + case 0x0133: return "Sipro Lab G.729"; + case 0x0134: return "Sipro Lab G.729A"; + case 0x0135: return "Sipro Lab Kelvin"; + case 0x0136: return "VoiceAge AMR"; + case 0x0140: return "Dictaphone G.726 ADPCM"; + case 0x0150: return "Qualcomm PureVoice"; + case 0x0151: return "Qualcomm HalfRate"; + case 0x0155: return "Ring Zero Systems TUBGSM"; + case 0x0160: return "Microsoft Audio1"; + case 0x0161: return "Windows Media Audio V2 V7 V8 V9 / DivX audio (WMA) / Alex AC3 Audio"; + case 0x0162: return "Windows Media Audio Professional V9"; + case 0x0163: return "Windows Media Audio Lossless V9"; + case 0x0164: return "WMA Pro over S/PDIF"; + case 0x0170: return "UNISYS NAP ADPCM"; + case 0x0171: return "UNISYS NAP ULAW"; + case 0x0172: return "UNISYS NAP ALAW"; + case 0x0173: return "UNISYS NAP 16K"; + case 0x0174: return "MM SYCOM ACM SYC008 SyCom Technologies"; + case 0x0175: return "MM SYCOM ACM SYC701 G726L SyCom Technologies"; + case 0x0176: return "MM SYCOM ACM SYC701 CELP54 SyCom Technologies"; + case 0x0177: return "MM SYCOM ACM SYC701 CELP68 SyCom 
Technologies"; + case 0x0178: return "Knowledge Adventure ADPCM"; + case 0x0180: return "Fraunhofer IIS MPEG2AAC"; + case 0x0190: return "Digital Theater Systems DTS DS"; + case 0x0200: return "Creative Labs ADPCM"; + case 0x0202: return "Creative Labs FASTSPEECH8"; + case 0x0203: return "Creative Labs FASTSPEECH10"; + case 0x0210: return "UHER ADPCM"; + case 0x0215: return "Ulead DV ACM"; + case 0x0216: return "Ulead DV ACM"; + case 0x0220: return "Quarterdeck Corp."; + case 0x0230: return "I-Link VC"; + case 0x0240: return "Aureal Semiconductor Raw Sport"; + case 0x0241: return "ESST AC3"; + case 0x0250: return "Interactive Products HSX"; + case 0x0251: return "Interactive Products RPELP"; + case 0x0260: return "Consistent CS2"; + case 0x0270: return "Sony SCX"; + case 0x0271: return "Sony SCY"; + case 0x0272: return "Sony ATRAC3"; + case 0x0273: return "Sony SPC"; + case 0x0280: return "TELUM Telum Inc."; + case 0x0281: return "TELUMIA Telum Inc."; + case 0x0285: return "Norcom Voice Systems ADPCM"; + case 0x0300: return "Fujitsu FM TOWNS SND"; + case 0x0301: + case 0x0302: + case 0x0303: + case 0x0304: + case 0x0305: + case 0x0306: + case 0x0307: + case 0x0308: return "Fujitsu (not specified)"; + case 0x0350: return "Micronas Semiconductors, Inc. Development"; + case 0x0351: return "Micronas Semiconductors, Inc. CELP833"; + case 0x0400: return "Brooktree Digital"; + case 0x0401: return "Intel Music Coder (IMC)"; + case 0x0402: return "Ligos Indeo Audio"; + case 0x0450: return "QDesign Music"; + case 0x0500: return "On2 VP7 On2 Technologies"; + case 0x0501: return "On2 VP6 On2 Technologies"; + case 0x0680: return "AT&T VME VMPCM"; + case 0x0681: return "AT&T TCP"; + case 0x0700: return "YMPEG Alpha (dummy for MPEG-2 compressor)"; + case 0x08ae: return "ClearJump LiteWave (lossless)"; + case 0x1000: return "Olivetti GSM"; + case 0x1001: return "Olivetti ADPCM"; + case 0x1002: return "Olivetti CELP"; + case 0x1003: return "Olivetti SBC"; + case 0x1004: return "Olivetti OPR"; + case 0x1100: return "Lernout & Hauspie"; + case 0x1101: return "Lernout & Hauspie CELP codec"; + case 0x1102: + case 0x1103: + case 0x1104: return "Lernout & Hauspie SBC codec"; + case 0x1400: return "Norris Comm. Inc."; + case 0x1401: return "ISIAudio"; + case 0x1500: return "AT&T Soundspace Music Compression"; + case 0x181c: return "VoxWare RT24 speech codec"; + case 0x181e: return "Lucent elemedia AX24000P Music codec"; + case 0x1971: return "Sonic Foundry LOSSLESS"; + case 0x1979: return "Innings Telecom Inc. 
ADPCM"; + case 0x1c07: return "Lucent SX8300P speech codec"; + case 0x1c0c: return "Lucent SX5363S G.723 compliant codec"; + case 0x1f03: return "CUseeMe DigiTalk (ex-Rocwell)"; + case 0x1fc4: return "NCT Soft ALF2CD ACM"; + case 0x2000: return "FAST Multimedia DVM"; + case 0x2001: return "Dolby DTS (Digital Theater System)"; + case 0x2002: return "RealAudio 1 / 2 14.4"; + case 0x2003: return "RealAudio 1 / 2 28.8"; + case 0x2004: return "RealAudio G2 / 8 Cook (low bitrate)"; + case 0x2005: return "RealAudio 3 / 4 / 5 Music (DNET)"; + case 0x2006: return "RealAudio 10 AAC (RAAC)"; + case 0x2007: return "RealAudio 10 AAC+ (RACP)"; + case 0x2500: return "Reserved range to 0x2600 Microsoft"; + case 0x3313: return "makeAVIS (ffvfw fake AVI sound from AviSynth scripts)"; + case 0x4143: return "Divio MPEG-4 AAC audio"; + case 0x4201: return "Nokia adaptive multirate"; + case 0x4243: return "Divio G726 Divio, Inc."; + case 0x434c: return "LEAD Speech"; + case 0x564c: return "LEAD Vorbis"; + case 0x5756: return "WavPack Audio"; + case 0x674f: return "Ogg Vorbis (mode 1)"; + case 0x6750: return "Ogg Vorbis (mode 2)"; + case 0x6751: return "Ogg Vorbis (mode 3)"; + case 0x676f: return "Ogg Vorbis (mode 1+)"; + case 0x6770: return "Ogg Vorbis (mode 2+)"; + case 0x6771: return "Ogg Vorbis (mode 3+)"; + case 0x7000: return "3COM NBX 3Com Corporation"; + case 0x706d: return "FAAD AAC"; + case 0x7a21: return "GSM-AMR (CBR, no SID)"; + case 0x7a22: return "GSM-AMR (VBR, including SID)"; + case 0xa100: return "Comverse Infosys Ltd. G723 1"; + case 0xa101: return "Comverse Infosys Ltd. AVQSBC"; + case 0xa102: return "Comverse Infosys Ltd. OLDSBC"; + case 0xa103: return "Symbol Technologies G729A"; + case 0xa104: return "VoiceAge AMR WB VoiceAge Corporation"; + case 0xa105: return "Ingenient Technologies Inc. G726"; + case 0xa106: return "ISO/MPEG-4 advanced audio Coding"; + case 0xa107: return "Encore Software Ltd G726"; + case 0xa109: return "Speex ACM Codec xiph.org"; + case 0xdfac: return "DebugMode SonicFoundry Vegas FrameServer ACM Codec"; + case 0xf1ac: return "Free Lossless Audio Codec FLAC"; + case 0xfffe: return "Extensible"; + case 0xffff: return "Development"; + default: + return format("{0:#x}", codec); } - return format("{0:#x}", codec); } \ No newline at end of file diff --git a/rhubarb/src/audio/WaveFileReader.h b/rhubarb/src/audio/WaveFileReader.h index d4c080c..df68f19 100644 --- a/rhubarb/src/audio/WaveFileReader.h +++ b/rhubarb/src/audio/WaveFileReader.h @@ -12,7 +12,7 @@ enum class SampleFormat { class WaveFileReader : public AudioClip { public: - WaveFileReader(boost::filesystem::path filePath); + WaveFileReader(const boost::filesystem::path& filePath); std::unique_ptr clone() const override; int getSampleRate() const override; size_type size() const override; diff --git a/rhubarb/src/audio/audioFileReading.cpp b/rhubarb/src/audio/audioFileReading.cpp index 8c83aa5..f5b2d4c 100644 --- a/rhubarb/src/audio/audioFileReading.cpp +++ b/rhubarb/src/audio/audioFileReading.cpp @@ -20,7 +20,9 @@ std::unique_ptr createAudioFileClip(path filePath) { return std::make_unique(filePath); } throw runtime_error(format( - "Unsupported file extension '{}'. Supported extensions are '.wav' and '.ogg'.", extension)); + "Unsupported file extension '{}'. Supported extensions are '.wav' and '.ogg'.", + extension + )); } catch (...) 
{ std::throw_with_nested(runtime_error(format("Could not open sound file {}.", filePath))); } diff --git a/rhubarb/src/audio/ioTools.h b/rhubarb/src/audio/ioTools.h index 7568462..b74d215 100644 --- a/rhubarb/src/audio/ioTools.h +++ b/rhubarb/src/audio/ioTools.h @@ -4,33 +4,38 @@ namespace little_endian { - template - Type read(std::istream &stream) { + template + Type read(std::istream& stream) { static_assert(bitsToRead % 8 == 0, "Cannot read fractional bytes."); static_assert(bitsToRead <= sizeof(Type) * 8, "Bits to read exceed target type size."); Type result = 0; - char *p = reinterpret_cast(&result); - int bytesToRead = bitsToRead / 8; + char* p = reinterpret_cast(&result); + const int bytesToRead = bitsToRead / 8; for (int byteIndex = 0; byteIndex < bytesToRead; byteIndex++) { *(p + byteIndex) = static_cast(stream.get()); } return result; } - template - void write(Type value, std::ostream &stream) { + template + void write(Type value, std::ostream& stream) { static_assert(bitsToWrite % 8 == 0, "Cannot write fractional bytes."); static_assert(bitsToWrite <= sizeof(Type) * 8, "Bits to write exceed target type size."); - char *p = reinterpret_cast(&value); - int bytesToWrite = bitsToWrite / 8; + char* p = reinterpret_cast(&value); + const int bytesToWrite = bitsToWrite / 8; for (int byteIndex = 0; byteIndex < bytesToWrite; byteIndex++) { stream.put(*(p + byteIndex)); } } - constexpr uint32_t fourcc(unsigned char c0, unsigned char c1, unsigned char c2, unsigned char c3) { + constexpr uint32_t fourcc( + unsigned char c0, + unsigned char c1, + unsigned char c2, + unsigned char c3 + ) { return c0 | (c1 << 8) | (c2 << 16) | (c3 << 24); } diff --git a/rhubarb/src/audio/processing.cpp b/rhubarb/src/audio/processing.cpp index 316efd7..7667445 100644 --- a/rhubarb/src/audio/processing.cpp +++ b/rhubarb/src/audio/processing.cpp @@ -3,7 +3,6 @@ using std::function; using std::vector; -using std::unique_ptr; // Converts a float in the range -1..1 to a signed 16-bit int inline int16_t floatSampleToInt16(float sample) { @@ -12,13 +11,18 @@ inline int16_t floatSampleToInt16(float sample) { return static_cast(((sample + 1) / 2) * (INT16_MAX - INT16_MIN) + INT16_MIN); } -void process16bitAudioClip(const AudioClip& audioClip, function&)> processBuffer, size_t bufferCapacity, ProgressSink& progressSink) { +void process16bitAudioClip( + const AudioClip& audioClip, + const function&)>& processBuffer, + size_t bufferCapacity, + ProgressSink& progressSink +) { // Process entire sound stream vector buffer; buffer.reserve(bufferCapacity); int sampleCount = 0; auto it = audioClip.begin(); - auto end = audioClip.end(); + const auto end = audioClip.end(); do { // Read to buffer buffer.clear(); @@ -32,10 +36,14 @@ void process16bitAudioClip(const AudioClip& audioClip, function(sampleCount) / audioClip.size()); - } while (buffer.size()); + } while (!buffer.empty()); } -void process16bitAudioClip(const AudioClip& audioClip, function&)> processBuffer, ProgressSink& progressSink) { +void process16bitAudioClip( + const AudioClip& audioClip, + const function&)>& processBuffer, + ProgressSink& progressSink +) { const size_t capacity = 1600; // 0.1 second capacity process16bitAudioClip(audioClip, processBuffer, capacity, progressSink); } @@ -46,5 +54,5 @@ vector copyTo16bitBuffer(const AudioClip& audioClip) { for (float sample : audioClip) { result[index++] = floatSampleToInt16(sample); } - return std::move(result); + return result; } diff --git a/rhubarb/src/audio/processing.h b/rhubarb/src/audio/processing.h index 
dc09c56..f4378bf 100644 --- a/rhubarb/src/audio/processing.h +++ b/rhubarb/src/audio/processing.h @@ -5,6 +5,17 @@ #include "AudioClip.h" #include "tools/progress.h" -void process16bitAudioClip(const AudioClip& audioClip, std::function&)> processBuffer, size_t bufferCapacity, ProgressSink& progressSink); -void process16bitAudioClip(const AudioClip& audioClip, std::function&)> processBuffer, ProgressSink& progressSink); +void process16bitAudioClip( + const AudioClip& audioClip, + const std::function&)>& processBuffer, + size_t bufferCapacity, + ProgressSink& progressSink +); + +void process16bitAudioClip( + const AudioClip& audioClip, + const std::function&)>& processBuffer, + ProgressSink& progressSink +); + std::vector copyTo16bitBuffer(const AudioClip& audioClip); \ No newline at end of file diff --git a/rhubarb/src/audio/voiceActivityDetection.cpp b/rhubarb/src/audio/voiceActivityDetection.cpp index f801960..b89b0a7 100644 --- a/rhubarb/src/audio/voiceActivityDetection.cpp +++ b/rhubarb/src/audio/voiceActivityDetection.cpp @@ -9,7 +9,6 @@ #include #include "tools/parallel.h" #include "AudioSegment.h" -#include "tools/stringTools.h" using std::vector; using boost::adaptors::transformed; @@ -17,7 +16,10 @@ using fmt::format; using std::runtime_error; using std::unique_ptr; -JoiningBoundedTimeline webRtcDetectVoiceActivity(const AudioClip& audioClip, ProgressSink& progressSink) { +JoiningBoundedTimeline webRtcDetectVoiceActivity( + const AudioClip& audioClip, + ProgressSink& progressSink +) { VadInst* vadHandle = WebRtcVad_Create(); if (!vadHandle) throw runtime_error("Error creating WebRTC VAD handle."); @@ -38,14 +40,19 @@ JoiningBoundedTimeline webRtcDetectVoiceActivity(const AudioClip& audioCli JoiningBoundedTimeline activity(audioClip.getTruncatedRange()); centiseconds time = 0_cs; const size_t bufferCapacity = audioClip.getSampleRate() / 100; - auto processBuffer = [&](const vector& buffer) { + const auto processBuffer = [&](const vector& buffer) { // WebRTC is picky regarding buffer size if (buffer.size() < bufferCapacity) return; - int result = WebRtcVad_Process(vadHandle, audioClip.getSampleRate(), buffer.data(), buffer.size()) == 1; + const int result = WebRtcVad_Process( + vadHandle, + audioClip.getSampleRate(), + buffer.data(), + buffer.size() + ) == 1; if (result == -1) throw runtime_error("Error processing audio buffer using WebRTC VAD."); - bool isActive = result != 0; + const bool isActive = result != 0; if (isActive) { activity.set(time, time + 1_cs); } @@ -54,12 +61,14 @@ JoiningBoundedTimeline webRtcDetectVoiceActivity(const AudioClip& audioCli process16bitAudioClip(audioClip, processBuffer, bufferCapacity, pass1ProgressSink); // WebRTC adapts to the audio. This means results may not be correct at the very beginning. - // It sometimes returns false activity at the very beginning, mistaking the background noise for speech. + // It sometimes returns false activity at the very beginning, mistaking the background noise for + // speech. // So we delete the first recognized utterance and re-process the corresponding audio segment. 
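Note that in the buffer callback above, applying == 1 to the WebRtcVad_Process return value before the error check means the result == -1 branch can never fire. A sketch of the usual pattern, assuming the standard contract of -1 = error, 0 = no voice, 1 = voice detected:

const int vadResult = WebRtcVad_Process(
	vadHandle,
	audioClip.getSampleRate(),
	buffer.data(),
	buffer.size()
);
if (vadResult == -1) throw runtime_error("Error processing audio buffer using WebRTC VAD.");
const bool isActive = vadResult == 1;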
if (!activity.empty()) { TimeRange firstActivity = activity.begin()->getTimeRange(); activity.clear(firstActivity); - unique_ptr streamStart = audioClip.clone() | segment(TimeRange(0_cs, firstActivity.getEnd())); + const unique_ptr streamStart = audioClip.clone() + | segment(TimeRange(0_cs, firstActivity.getEnd())); time = 0_cs; process16bitAudioClip(*streamStart, processBuffer, bufferCapacity, pass2ProgressSink); } @@ -67,24 +76,34 @@ JoiningBoundedTimeline webRtcDetectVoiceActivity(const AudioClip& audioCli return activity; } -JoiningBoundedTimeline detectVoiceActivity(const AudioClip& inputAudioClip, int maxThreadCount, ProgressSink& progressSink) { +JoiningBoundedTimeline detectVoiceActivity( + const AudioClip& inputAudioClip, + int maxThreadCount, + ProgressSink& progressSink +) { // Prepare audio for VAD - const unique_ptr audioClip = inputAudioClip.clone() | resample(16000) | removeDcOffset(); + const unique_ptr audioClip = inputAudioClip.clone() + | resample(16000) + | removeDcOffset(); JoiningBoundedTimeline activity(audioClip->getTruncatedRange()); std::mutex activityMutex; // Split audio into segments and perform parallel VAD const int segmentCount = maxThreadCount; - centiseconds audioDuration = audioClip->getTruncatedRange().getDuration(); + const centiseconds audioDuration = audioClip->getTruncatedRange().getDuration(); vector audioSegments; for (int i = 0; i < segmentCount; ++i) { - TimeRange segmentRange = TimeRange(i * audioDuration / segmentCount, (i + 1) * audioDuration / segmentCount); + TimeRange segmentRange = TimeRange( + i * audioDuration / segmentCount, + (i + 1) * audioDuration / segmentCount + ); audioSegments.push_back(segmentRange); } runParallel([&](const TimeRange& segmentRange, ProgressSink& segmentProgressSink) { - unique_ptr audioSegment = audioClip->clone() | segment(segmentRange); - JoiningBoundedTimeline activitySegment = webRtcDetectVoiceActivity(*audioSegment, segmentProgressSink); + const unique_ptr audioSegment = audioClip->clone() | segment(segmentRange); + JoiningBoundedTimeline activitySegment = + webRtcDetectVoiceActivity(*audioSegment, segmentProgressSink); std::lock_guard lock(activityMutex); for (auto activityRange : activitySegment) { @@ -109,8 +128,13 @@ JoiningBoundedTimeline detectVoiceActivity(const AudioClip& inputAudioClip } } - logging::debugFormat("Found {} sections of voice activity: {}", activity.size(), - join(activity | transformed([](const Timed& t) { return format("{0}-{1}", t.getStart(), t.getEnd()); }), ", ")); + logging::debugFormat( + "Found {} sections of voice activity: {}", + activity.size(), + join(activity | transformed([](const Timed& t) { + return format("{0}-{1}", t.getStart(), t.getEnd()); + }), ", ") + ); return activity; } diff --git a/rhubarb/src/audio/voiceActivityDetection.h b/rhubarb/src/audio/voiceActivityDetection.h index 0e5bcb9..02658e0 100644 --- a/rhubarb/src/audio/voiceActivityDetection.h +++ b/rhubarb/src/audio/voiceActivityDetection.h @@ -3,4 +3,8 @@ #include "time/BoundedTimeline.h" #include "tools/progress.h" -JoiningBoundedTimeline detectVoiceActivity(const AudioClip& audioClip, int maxThreadCount, ProgressSink& progressSink); +JoiningBoundedTimeline detectVoiceActivity( + const AudioClip& audioClip, + int maxThreadCount, + ProgressSink& progressSink +); diff --git a/rhubarb/src/audio/waveFileWriting.cpp b/rhubarb/src/audio/waveFileWriting.cpp index 9feb6c7..ca6776d 100644 --- a/rhubarb/src/audio/waveFileWriting.cpp +++ b/rhubarb/src/audio/waveFileWriting.cpp @@ -12,26 +12,26 @@ void 
createWaveFile(const AudioClip& audioClip, std::string fileName) { // Write RIFF chunk write(fourcc('R', 'I', 'F', 'F'), file); - uint32_t formatChunkSize = 16; - uint16_t channelCount = 1; - uint16_t frameSize = static_cast(channelCount * sizeof(float)); - uint32_t dataChunkSize = static_cast(audioClip.size() * frameSize); - uint32_t riffChunkSize = 4 + (8 + formatChunkSize) + (8 + dataChunkSize); + const uint32_t formatChunkSize = 16; + const uint16_t channelCount = 1; + const uint16_t frameSize = static_cast(channelCount * sizeof(float)); + const uint32_t dataChunkSize = static_cast(audioClip.size() * frameSize); + const uint32_t riffChunkSize = 4 + (8 + formatChunkSize) + (8 + dataChunkSize); write(riffChunkSize, file); write(fourcc('W', 'A', 'V', 'E'), file); // Write format chunk write(fourcc('f', 'm', 't', ' '), file); write(formatChunkSize, file); - uint16_t codec = 0x03; // 32-bit float + const uint16_t codec = 0x03; // 32-bit float write(codec, file); write(channelCount, file); - uint32_t frameRate = static_cast(audioClip.getSampleRate()); + const uint32_t frameRate = static_cast(audioClip.getSampleRate()); write(frameRate, file); - uint32_t bytesPerSecond = frameRate * frameSize; + const uint32_t bytesPerSecond = frameRate * frameSize; write(bytesPerSecond, file); write(frameSize, file); - uint16_t bitsPerSample = 8 * sizeof(float); + const uint16_t bitsPerSample = 8 * sizeof(float); write(bitsPerSample, file); // Write data chunk diff --git a/rhubarb/src/core/Phone.cpp b/rhubarb/src/core/Phone.cpp index 1d564f7..9ed9274 100644 --- a/rhubarb/src/core/Phone.cpp +++ b/rhubarb/src/core/Phone.cpp @@ -13,7 +13,7 @@ string PhoneConverter::getTypeName() { } EnumConverter::member_data PhoneConverter::getMemberData() { - return member_data{ + return member_data { { Phone::AO, "AO" }, { Phone::AA, "AA" }, { Phone::IY, "IY" }, diff --git a/rhubarb/src/core/Shape.cpp b/rhubarb/src/core/Shape.cpp index 38c4390..626a7ec 100644 --- a/rhubarb/src/core/Shape.cpp +++ b/rhubarb/src/core/Shape.cpp @@ -35,7 +35,7 @@ string ShapeConverter::getTypeName() { } EnumConverter::member_data ShapeConverter::getMemberData() { - return member_data{ + return member_data { { Shape::A, "A" }, { Shape::B, "B" }, { Shape::C, "C" }, diff --git a/rhubarb/src/core/Shape.h b/rhubarb/src/core/Shape.h index 0ee34c5..7594da4 100644 --- a/rhubarb/src/core/Shape.h +++ b/rhubarb/src/core/Shape.h @@ -29,8 +29,8 @@ enum class Shape { class ShapeConverter : public EnumConverter { public: static ShapeConverter& get(); - std::set getBasicShapes(); - std::set getExtendedShapes(); + static std::set getBasicShapes(); + static std::set getExtendedShapes(); protected: std::string getTypeName() override; member_data getMemberData() override; diff --git a/rhubarb/src/exporters/JsonExporter.cpp b/rhubarb/src/exporters/JsonExporter.cpp index ddd3149..208ee9b 100644 --- a/rhubarb/src/exporters/JsonExporter.cpp +++ b/rhubarb/src/exporters/JsonExporter.cpp @@ -6,7 +6,8 @@ using std::string; void JsonExporter::exportAnimation(const ExporterInput& input, std::ostream& outputStream) { // Export as JSON. - // I'm not using a library because the code is short enough without one and it lets me control the formatting. + // I'm not using a library because the code is short enough without one and it lets me control + // the formatting. 
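Hand-rolled JSON output makes escapeJsonString (called just below) responsible for producing valid string literals. A minimal sketch of the kind of escaping needed, assuming the project helper behaves roughly like this:

string escapeJsonString(const string& s) {
	string result;
	for (const char c : s) {
		switch (c) {
			case '"': result += "\\\""; break;
			case '\\': result += "\\\\"; break;
			case '\n': result += "\\n"; break;
			case '\r': result += "\\r"; break;
			case '\t': result += "\\t"; break;
			// Control characters below 0x20 would additionally need \uXXXX escapes.
			default: result += c;
		}
	}
	return result;
}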
outputStream << "{\n"; outputStream << " \"metadata\": {\n"; outputStream << " \"soundFile\": \"" << escapeJsonString(input.inputFilePath.string()) << "\",\n"; diff --git a/rhubarb/src/exporters/TsvExporter.cpp b/rhubarb/src/exporters/TsvExporter.cpp index 6cdaaae..9ce1748 100644 --- a/rhubarb/src/exporters/TsvExporter.cpp +++ b/rhubarb/src/exporters/TsvExporter.cpp @@ -4,7 +4,11 @@ void TsvExporter::exportAnimation(const ExporterInput& input, std::ostream& outputStream) { // Output shapes with start times for (auto& timedShape : input.animation) { - outputStream << formatDuration(timedShape.getStart()) << "\t" << timedShape.getValue() << "\n"; + outputStream + << formatDuration(timedShape.getStart()) + << "\t" + << timedShape.getValue() + << "\n"; } // Output closed mouth with end time diff --git a/rhubarb/src/exporters/XmlExporter.cpp b/rhubarb/src/exporters/XmlExporter.cpp index 73b172c..479b401 100644 --- a/rhubarb/src/exporters/XmlExporter.cpp +++ b/rhubarb/src/exporters/XmlExporter.cpp @@ -12,11 +12,17 @@ void XmlExporter::exportAnimation(const ExporterInput& input, std::ostream& outp // Add metadata tree.put("rhubarbResult.metadata.soundFile", input.inputFilePath.string()); - tree.put("rhubarbResult.metadata.duration", formatDuration(input.animation.getRange().getDuration())); + tree.put( + "rhubarbResult.metadata.duration", + formatDuration(input.animation.getRange().getDuration()) + ); // Add mouth cues for (auto& timedShape : dummyShapeIfEmpty(input.animation, input.targetShapeSet)) { - ptree& mouthCueElement = tree.add("rhubarbResult.mouthCues.mouthCue", timedShape.getValue()); + ptree& mouthCueElement = tree.add( + "rhubarbResult.mouthCues.mouthCue", + timedShape.getValue() + ); mouthCueElement.put(".start", formatDuration(timedShape.getStart())); mouthCueElement.put(".end", formatDuration(timedShape.getEnd())); } diff --git a/rhubarb/src/exporters/exporterTools.cpp b/rhubarb/src/exporters/exporterTools.cpp index 51f09b4..4b96ec5 100644 --- a/rhubarb/src/exporters/exporterTools.cpp +++ b/rhubarb/src/exporters/exporterTools.cpp @@ -2,12 +2,15 @@ #include "animation/targetShapeSet.h" // Makes sure there is at least one mouth shape -std::vector> dummyShapeIfEmpty(const JoiningTimeline& animation, const ShapeSet& targetShapeSet) { +std::vector> dummyShapeIfEmpty( + const JoiningTimeline& animation, + const ShapeSet& targetShapeSet +) { std::vector> result; std::copy(animation.begin(), animation.end(), std::back_inserter(result)); if (result.empty()) { // Add zero-length empty mouth - result.push_back(Timed(0_cs, 0_cs, convertToTargetShapeSet(Shape::X, targetShapeSet))); + result.emplace_back(0_cs, 0_cs, convertToTargetShapeSet(Shape::X, targetShapeSet)); } return result; } diff --git a/rhubarb/src/exporters/exporterTools.h b/rhubarb/src/exporters/exporterTools.h index c121f98..79ae3da 100644 --- a/rhubarb/src/exporters/exporterTools.h +++ b/rhubarb/src/exporters/exporterTools.h @@ -4,4 +4,7 @@ #include "time/Timeline.h" // Makes sure there is at least one mouth shape -std::vector> dummyShapeIfEmpty(const JoiningTimeline& animation, const ShapeSet& targetShapeSet); +std::vector> dummyShapeIfEmpty( + const JoiningTimeline& animation, + const ShapeSet& targetShapeSet +); diff --git a/rhubarb/src/logging/Entry.cpp b/rhubarb/src/logging/Entry.cpp index 8772c7b..8bda3f4 100644 --- a/rhubarb/src/logging/Entry.cpp +++ b/rhubarb/src/logging/Entry.cpp @@ -22,12 +22,13 @@ namespace logging { static int lastThreadId = 0; thread_id threadId = std::this_thread::get_id(); if 
(threadCounters.find(threadId) == threadCounters.end()) { - threadCounters.insert({threadId, ++lastThreadId}); + threadCounters.insert({ threadId, ++lastThreadId }); } return threadCounters.find(threadId)->second; } Entry::Entry(Level level, const string& message) : + timestamp(), level(level), message(message) { diff --git a/rhubarb/src/logging/Level.cpp b/rhubarb/src/logging/Level.cpp index ce4dfff..1e9a18b 100644 --- a/rhubarb/src/logging/Level.cpp +++ b/rhubarb/src/logging/Level.cpp @@ -14,13 +14,13 @@ namespace logging { } EnumConverter::member_data LevelConverter::getMemberData() { - return member_data{ - {Level::Trace, "Trace"}, - {Level::Debug, "Debug"}, - {Level::Info, "Info"}, - {Level::Warn, "Warn"}, - {Level::Error, "Error"}, - {Level::Fatal, "Fatal"} + return member_data { + { Level::Trace, "Trace" }, + { Level::Debug, "Debug" }, + { Level::Info, "Info" }, + { Level::Warn, "Warn" }, + { Level::Error, "Error" }, + { Level::Fatal, "Fatal" } }; } @@ -28,7 +28,7 @@ namespace logging { return LevelConverter::get().write(stream, value); } - std::istream& operator >> (std::istream& stream, Level& value) { + std::istream& operator >>(std::istream& stream, Level& value) { return LevelConverter::get().read(stream, value); } diff --git a/rhubarb/src/logging/Level.h b/rhubarb/src/logging/Level.h index ebec696..f0fa413 100644 --- a/rhubarb/src/logging/Level.h +++ b/rhubarb/src/logging/Level.h @@ -24,6 +24,6 @@ namespace logging { std::ostream& operator<<(std::ostream& stream, Level value); - std::istream& operator >> (std::istream& stream, Level& value); + std::istream& operator >>(std::istream& stream, Level& value); } diff --git a/rhubarb/src/logging/formatters.cpp b/rhubarb/src/logging/formatters.cpp index 3ccfbcc..eabf572 100644 --- a/rhubarb/src/logging/formatters.cpp +++ b/rhubarb/src/logging/formatters.cpp @@ -12,7 +12,12 @@ namespace logging { } string SimpleFileFormatter::format(const Entry& entry) { - return fmt::format("[{0}] {1} {2}", formatTime(entry.timestamp, "%F %H:%M:%S"), entry.threadCounter, consoleFormatter.format(entry)); + return fmt::format( + "[{0}] {1} {2}", + formatTime(entry.timestamp, "%F %H:%M:%S"), + entry.threadCounter, + consoleFormatter.format(entry) + ); } } diff --git a/rhubarb/src/logging/logging.h b/rhubarb/src/logging/logging.h index e13d6e4..0e88e73 100644 --- a/rhubarb/src/logging/logging.h +++ b/rhubarb/src/logging/logging.h @@ -14,7 +14,7 @@ namespace logging { void log(Level level, const std::string& message); - template + template void logFormat(Level level, fmt::CStringRef format, const Args&... 
args) { log(level, fmt::format(format, args...)); } diff --git a/rhubarb/src/logging/sinks.cpp b/rhubarb/src/logging/sinks.cpp index ce468a0..9c2d25d 100644 --- a/rhubarb/src/logging/sinks.cpp +++ b/rhubarb/src/logging/sinks.cpp @@ -3,7 +3,6 @@ #include "Entry.h" using std::string; -using std::lock_guard; using std::shared_ptr; namespace logging { @@ -25,7 +24,7 @@ namespace logging { {} void StreamSink::receive(const Entry& entry) { - string line = formatter->format(entry); + const string line = formatter->format(entry); *stream << line << std::endl; } diff --git a/rhubarb/src/logging/sinks.h b/rhubarb/src/logging/sinks.h index 5e204e4..7c14872 100644 --- a/rhubarb/src/logging/sinks.h +++ b/rhubarb/src/logging/sinks.h @@ -3,7 +3,6 @@ #include "Sink.h" #include #include "Formatter.h" -#include namespace logging { enum class Level; diff --git a/rhubarb/src/recognition/PhoneticRecognizer.cpp b/rhubarb/src/recognition/PhoneticRecognizer.cpp index bd9c9ac..dff1956 100644 --- a/rhubarb/src/recognition/PhoneticRecognizer.cpp +++ b/rhubarb/src/recognition/PhoneticRecognizer.cpp @@ -26,7 +26,8 @@ static lambda_unique_ptr createDecoder(optional dialo // High values (>= 1.0) can lead to imprecise or freezing animation. "-lw", "0.8", - // The following settings are recommended at http://cmusphinx.sourceforge.net/wiki/phonemerecognition + // The following settings are recommended at + // http://cmusphinx.sourceforge.net/wiki/phonemerecognition // Set beam width applied to every frame in Viterbi search "-beam", "1e-20", @@ -56,7 +57,9 @@ static Timeline utteranceToPhones( paddedTimeRange.grow(padding); paddedTimeRange.trim(audioClip.getTruncatedRange()); - const unique_ptr clipSegment = audioClip.clone() | segment(paddedTimeRange) | resample(sphinxSampleRate); + const unique_ptr clipSegment = audioClip.clone() + | segment(paddedTimeRange) + | resample(sphinxSampleRate); const auto audioBuffer = copyTo16bitBuffer(*clipSegment); // Detect phones (returned as words) diff --git a/rhubarb/src/recognition/PocketSphinxRecognizer.cpp b/rhubarb/src/recognition/PocketSphinxRecognizer.cpp index b97c0b7..c44e465 100644 --- a/rhubarb/src/recognition/PocketSphinxRecognizer.cpp +++ b/rhubarb/src/recognition/PocketSphinxRecognizer.cpp @@ -67,9 +67,15 @@ lambda_unique_ptr createDefaultLanguageModel(ps_decoder_t& decode return result; } -lambda_unique_ptr createDialogLanguageModel(ps_decoder_t& decoder, const string& dialog) { +lambda_unique_ptr createDialogLanguageModel( + ps_decoder_t& decoder, + const string& dialog +) { // Split dialog into normalized words - vector words = tokenizeText(dialog, [&](const string& word) { return dictionaryContains(*decoder.dict, word); }); + vector words = tokenizeText( + dialog, + [&](const string& word) { return dictionaryContains(*decoder.dict, word); } + ); // Add dialog-specific words to the dictionary addMissingDictionaryWords(words, decoder); @@ -80,15 +86,27 @@ lambda_unique_ptr createDialogLanguageModel(ps_decoder_t& decoder return createLanguageModel(words, decoder); } -lambda_unique_ptr createBiasedLanguageModel(ps_decoder_t& decoder, const string& dialog) { +lambda_unique_ptr createBiasedLanguageModel( + ps_decoder_t& decoder, + const string& dialog +) { auto defaultLanguageModel = createDefaultLanguageModel(decoder); auto dialogLanguageModel = createDialogLanguageModel(decoder, dialog); constexpr int modelCount = 2; - array languageModels{ defaultLanguageModel.get(), dialogLanguageModel.get() }; - array modelNames{ "defaultLM", "dialogLM" }; - array modelWeights{ 0.1f, 
0.9f }; + array languageModels { + defaultLanguageModel.get(), + dialogLanguageModel.get() + }; + array modelNames { "defaultLM", "dialogLM" }; + array modelWeights { 0.1f, 0.9f }; lambda_unique_ptr result( - ngram_model_set_init(nullptr, languageModels.data(), const_cast(modelNames.data()), modelWeights.data(), modelCount), + ngram_model_set_init( + nullptr, + languageModels.data(), + const_cast(modelNames.data()), + modelWeights.data(), + modelCount + ), [](ngram_model_t* lm) { ngram_model_free(lm); }); if (!result) { throw runtime_error("Error creating biased language model."); @@ -105,7 +123,8 @@ static lambda_unique_ptr createDecoder(optional dialo "-hmm", (getSphinxModelDirectory() / "acoustic-model").string().c_str(), // Set pronunciation dictionary "-dict", (getSphinxModelDirectory() / "cmudict-en-us.dict").string().c_str(), - // Add noise against zero silence (see http://cmusphinx.sourceforge.net/wiki/faq#qwhy_my_accuracy_is_poor) + // Add noise against zero silence + // (see http://cmusphinx.sourceforge.net/wiki/faq#qwhy_my_accuracy_is_poor) "-dither", "yes", // Disable VAD -- we're doing that ourselves "-remove_silence", "no", @@ -184,7 +203,11 @@ optional> getPhoneAlignment( // Extract phones with timestamps char** phoneNames = decoder.dict->mdef->ciname; Timeline result; - for (ps_alignment_iter_t* it = ps_alignment_phones(alignment.get()); it; it = ps_alignment_iter_next(it)) { + for ( + ps_alignment_iter_t* it = ps_alignment_phones(alignment.get()); + it; + it = ps_alignment_iter_next(it) + ) { // Get phone ps_alignment_entry_t* phoneEntry = ps_alignment_iter_get(it); const s3cipid_t phoneId = phoneEntry->id.pid.cipid; @@ -209,7 +232,7 @@ optional> getPhoneAlignment( // Some words have multiple pronunciations, one of which results in better animation than the others. // This function returns the optimal pronunciation for a select set of these words. 
string fixPronunciation(const string& word) { - const static map replacements{ + const static map replacements { { "into(2)", "into" }, { "to(2)", "to" }, { "to(3)", "to" }, @@ -238,7 +261,9 @@ static Timeline utteranceToPhones( paddedTimeRange.grow(padding); paddedTimeRange.trim(audioClip.getTruncatedRange()); - const unique_ptr clipSegment = audioClip.clone() | segment(paddedTimeRange) | resample(sphinxSampleRate); + const unique_ptr clipSegment = audioClip.clone() + | segment(paddedTimeRange) + | resample(sphinxSampleRate); const auto audioBuffer = copyTo16bitBuffer(*clipSegment); // Get words @@ -273,7 +298,7 @@ static Timeline utteranceToPhones( const string fixedWord = fixPronunciation(timedWord.getValue()); wordIds.push_back(getWordId(fixedWord, *decoder.dict)); } - if (wordIds.empty()) return{}; + if (wordIds.empty()) return {}; // Align the words' phones with speech #if BOOST_VERSION < 105600 // Support legacy syntax @@ -309,5 +334,6 @@ BoundedTimeline PocketSphinxRecognizer::recognizePhones( int maxThreadCount, ProgressSink& progressSink ) const { - return ::recognizePhones(inputAudioClip, dialog, &createDecoder, &utteranceToPhones, maxThreadCount, progressSink); + return ::recognizePhones( + inputAudioClip, dialog, &createDecoder, &utteranceToPhones, maxThreadCount, progressSink); } diff --git a/rhubarb/src/recognition/Recognizer.h b/rhubarb/src/recognition/Recognizer.h index 0903680..6995f1f 100644 --- a/rhubarb/src/recognition/Recognizer.h +++ b/rhubarb/src/recognition/Recognizer.h @@ -9,7 +9,7 @@ class Recognizer { public: virtual ~Recognizer() = default; - virtual BoundedTimelinerecognizePhones( + virtual BoundedTimeline recognizePhones( const AudioClip& audioClip, boost::optional dialog, int maxThreadCount, diff --git a/rhubarb/src/recognition/g2p.cpp b/rhubarb/src/recognition/g2p.cpp index 6299061..e74ad4f 100644 --- a/rhubarb/src/recognition/g2p.cpp +++ b/rhubarb/src/recognition/g2p.cpp @@ -64,8 +64,9 @@ Phone charToPhone(wchar_t c) { case L'r': return Phone::R; case L'l': return Phone::L; case L'h': return Phone::HH; + default: + return Phone::Noise; } - return Phone::Noise; } vector wordToPhones(const std::string& word) { @@ -94,8 +95,11 @@ vector wordToPhones(const std::string& word) { for (wchar_t c : wideWord) { Phone phone = charToPhone(c); if (phone == Phone::Noise) { - logging::errorFormat("G2P error determining pronunciation for '{}': Character '{}' is not a recognized phone shorthand.", - word, static_cast(c)); + logging::errorFormat( + "G2P error determining pronunciation for '{}': Character '{}' is not a recognized phone shorthand.", + word, + static_cast(c) + ); } if (phone != lastPhone) { diff --git a/rhubarb/src/recognition/languageModels.cpp b/rhubarb/src/recognition/languageModels.cpp index 6ccb303..f46d2d6 100644 --- a/rhubarb/src/recognition/languageModels.cpp +++ b/rhubarb/src/recognition/languageModels.cpp @@ -15,83 +15,94 @@ using std::vector; using std::regex; using std::map; using std::tuple; -using std::make_tuple; using std::get; using std::endl; using boost::filesystem::path; -using unigram_t = string; -using bigram_t = tuple; -using trigram_t = tuple; +using Unigram = string; +using Bigram = tuple; +using Trigram = tuple; -map getUnigramCounts(const vector& words) { - map unigramCounts; - for (const unigram_t& unigram : words) { +map getUnigramCounts(const vector& words) { + map unigramCounts; + for (const Unigram& unigram : words) { ++unigramCounts[unigram]; } return unigramCounts; } -map getBigramCounts(const vector& words) { - map 
bigramCounts; +map getBigramCounts(const vector& words) { + map bigramCounts; for (auto it = words.begin(); it < words.end() - 1; ++it) { - ++bigramCounts[bigram_t(*it, *(it + 1))]; + ++bigramCounts[Bigram(*it, *(it + 1))]; } return bigramCounts; } -map getTrigramCounts(const vector& words) { - map trigramCounts; +map getTrigramCounts(const vector& words) { + map trigramCounts; if (words.size() >= 3) { for (auto it = words.begin(); it < words.end() - 2; ++it) { - ++trigramCounts[trigram_t(*it, *(it + 1), *(it + 2))]; + ++trigramCounts[Trigram(*it, *(it + 1), *(it + 2))]; } } return trigramCounts; } -map getUnigramProbabilities(const vector& words, const map& unigramCounts, const double deflator) { - map unigramProbabilities; +map getUnigramProbabilities( + const vector& words, + const map& unigramCounts, + const double deflator +) { + map unigramProbabilities; for (const auto& pair : unigramCounts) { - unigram_t unigram = get<0>(pair); - int unigramCount = get<1>(pair); + const Unigram& unigram = get<0>(pair); + const int unigramCount = get<1>(pair); unigramProbabilities[unigram] = double(unigramCount) / words.size() * deflator; } return unigramProbabilities; } -map getBigramProbabilities(const map& unigramCounts, const map& bigramCounts, const double deflator) { - map bigramProbabilities; +map getBigramProbabilities( + const map& unigramCounts, + const map& bigramCounts, + const double deflator +) { + map bigramProbabilities; for (const auto& pair : bigramCounts) { - bigram_t bigram = get<0>(pair); - int bigramCount = get<1>(pair); - int unigramPrefixCount = unigramCounts.at(get<0>(bigram)); + Bigram bigram = get<0>(pair); + const int bigramCount = get<1>(pair); + const int unigramPrefixCount = unigramCounts.at(get<0>(bigram)); bigramProbabilities[bigram] = double(bigramCount) / unigramPrefixCount * deflator; } return bigramProbabilities; } -map getTrigramProbabilities(const map& bigramCounts, const map& trigramCounts, const double deflator) { - map trigramProbabilities; +map getTrigramProbabilities( + const map& bigramCounts, + const map& trigramCounts, + const double deflator +) { + map trigramProbabilities; for (const auto& pair : trigramCounts) { - trigram_t trigram = get<0>(pair); - int trigramCount = get<1>(pair); - int bigramPrefixCount = bigramCounts.at(bigram_t(get<0>(trigram), get<1>(trigram))); + Trigram trigram = get<0>(pair); + const int trigramCount = get<1>(pair); + const int bigramPrefixCount = bigramCounts.at(Bigram(get<0>(trigram), get<1>(trigram))); trigramProbabilities[trigram] = double(trigramCount) / bigramPrefixCount * deflator; } return trigramProbabilities; } -map getUnigramBackoffWeights( - const map& unigramCounts, - const map& unigramProbabilities, - const map& bigramCounts, +map getUnigramBackoffWeights( + const map& unigramCounts, + const map& unigramProbabilities, + const map& bigramCounts, const double discountMass) { - map unigramBackoffWeights; - for (const unigram_t& unigram : unigramCounts | boost::adaptors::map_keys) { + map unigramBackoffWeights; + for (const Unigram& unigram : unigramCounts | boost::adaptors::map_keys) { double denominator = 1; - for (const bigram_t& bigram : bigramCounts | boost::adaptors::map_keys) { + for (const Bigram& bigram : bigramCounts | boost::adaptors::map_keys) { if (get<0>(bigram) == unigram) { denominator -= unigramProbabilities.at(get<1>(bigram)); } @@ -101,18 +112,18 @@ map getUnigramBackoffWeights( return unigramBackoffWeights; } -map getBigramBackoffWeights( - const map& bigramCounts, - const map& 
bigramProbabilities, - const map& trigramCounts, +map getBigramBackoffWeights( + const map& bigramCounts, + const map& bigramProbabilities, + const map& trigramCounts, const double discountMass) { - map bigramBackoffWeights; - for (const bigram_t& bigram : bigramCounts | boost::adaptors::map_keys) { + map bigramBackoffWeights; + for (const Bigram& bigram : bigramCounts | boost::adaptors::map_keys) { double denominator = 1; - for (const trigram_t& trigram : trigramCounts | boost::adaptors::map_keys) { - if (bigram_t(get<0>(trigram), get<1>(trigram)) == bigram) { - denominator -= bigramProbabilities.at(bigram_t(get<1>(trigram), get<2>(trigram))); + for (const Trigram& trigram : trigramCounts | boost::adaptors::map_keys) { + if (Bigram(get<0>(trigram), get<1>(trigram)) == bigram) { + denominator -= bigramProbabilities.at(Bigram(get<1>(trigram), get<2>(trigram))); } } bigramBackoffWeights[bigram] = discountMass / denominator; @@ -120,20 +131,25 @@ map getBigramBackoffWeights( return bigramBackoffWeights; } -void createLanguageModelFile(const vector& words, path filePath) { +void createLanguageModelFile(const vector& words, const path& filePath) { const double discountMass = 0.5; const double deflator = 1.0 - discountMass; - map unigramCounts = getUnigramCounts(words); - map bigramCounts = getBigramCounts(words); - map trigramCounts = getTrigramCounts(words); + map unigramCounts = getUnigramCounts(words); + map bigramCounts = getBigramCounts(words); + map trigramCounts = getTrigramCounts(words); - map unigramProbabilities = getUnigramProbabilities(words, unigramCounts, deflator); - map bigramProbabilities = getBigramProbabilities(unigramCounts, bigramCounts, deflator); - map trigramProbabilities = getTrigramProbabilities(bigramCounts, trigramCounts, deflator); + map unigramProbabilities = + getUnigramProbabilities(words, unigramCounts, deflator); + map bigramProbabilities = + getBigramProbabilities(unigramCounts, bigramCounts, deflator); + map trigramProbabilities = + getTrigramProbabilities(bigramCounts, trigramCounts, deflator); - map unigramBackoffWeights = getUnigramBackoffWeights(unigramCounts, unigramProbabilities, bigramCounts, discountMass); - map bigramBackoffWeights = getBigramBackoffWeights(bigramCounts, bigramProbabilities, trigramCounts, discountMass); + map unigramBackoffWeights = + getUnigramBackoffWeights(unigramCounts, unigramProbabilities, bigramCounts, discountMass); + map bigramBackoffWeights = + getBigramBackoffWeights(bigramCounts, bigramProbabilities, trigramCounts, discountMass); boost::filesystem::ofstream file(filePath); file << "Generated by " << appName << " " << appVersion << endl << endl; @@ -146,7 +162,7 @@ void createLanguageModelFile(const vector& words, path filePath) { file.setf(std::ios::fixed, std::ios::floatfield); file.precision(4); file << "\\1-grams:" << endl; - for (const unigram_t& unigram : unigramCounts | boost::adaptors::map_keys) { + for (const Unigram& unigram : unigramCounts | boost::adaptors::map_keys) { file << log10(unigramProbabilities.at(unigram)) << " " << unigram << " " << log10(unigramBackoffWeights.at(unigram)) << endl; @@ -154,7 +170,7 @@ void createLanguageModelFile(const vector& words, path filePath) { file << endl; file << "\\2-grams:" << endl; - for (const bigram_t& bigram : bigramCounts | boost::adaptors::map_keys) { + for (const Bigram& bigram : bigramCounts | boost::adaptors::map_keys) { file << log10(bigramProbabilities.at(bigram)) << " " << get<0>(bigram) << " " << get<1>(bigram) << " " << log10(bigramBackoffWeights.at(bigram)) 
<< endl; @@ -162,7 +178,7 @@ void createLanguageModelFile(const vector& words, path filePath) { file << endl; file << "\\3-grams:" << endl; - for (const trigram_t& trigram : trigramCounts | boost::adaptors::map_keys) { + for (const Trigram& trigram : trigramCounts | boost::adaptors::map_keys) { file << log10(trigramProbabilities.at(trigram)) << " " << get<0>(trigram) << " " << get<1>(trigram) << " " << get<2>(trigram) << endl; } @@ -171,7 +187,10 @@ void createLanguageModelFile(const vector& words, path filePath) { file << "\\end\\" << endl; } -lambda_unique_ptr createLanguageModel(const vector& words, ps_decoder_t& decoder) { +lambda_unique_ptr createLanguageModel( + const vector& words, + ps_decoder_t& decoder +) { path tempFilePath = getTempFilePath(); createLanguageModelFile(words, tempFilePath); auto deleteTempFile = gsl::finally([&]() { boost::filesystem::remove(tempFilePath); }); diff --git a/rhubarb/src/recognition/languageModels.h b/rhubarb/src/recognition/languageModels.h index 18df80c..5d791c7 100644 --- a/rhubarb/src/recognition/languageModels.h +++ b/rhubarb/src/recognition/languageModels.h @@ -8,4 +8,7 @@ extern "C" { #include } -lambda_unique_ptr createLanguageModel(const std::vector& words, ps_decoder_t& decoder); +lambda_unique_ptr createLanguageModel( + const std::vector& words, + ps_decoder_t& decoder +); diff --git a/rhubarb/src/recognition/pocketSphinxTools.cpp b/rhubarb/src/recognition/pocketSphinxTools.cpp index 87a13ea..27c6ee1 100644 --- a/rhubarb/src/recognition/pocketSphinxTools.cpp +++ b/rhubarb/src/recognition/pocketSphinxTools.cpp @@ -26,18 +26,18 @@ using std::chrono::duration_cast; logging::Level convertSphinxErrorLevel(err_lvl_t errorLevel) { switch (errorLevel) { - case ERR_DEBUG: - case ERR_INFO: - case ERR_INFOCONT: - return logging::Level::Trace; - case ERR_WARN: - return logging::Level::Warn; - case ERR_ERROR: - return logging::Level::Error; - case ERR_FATAL: - return logging::Level::Fatal; - default: - throw invalid_argument("Unknown log level."); + case ERR_DEBUG: + case ERR_INFO: + case ERR_INFOCONT: + return logging::Level::Trace; + case ERR_WARN: + return logging::Level::Warn; + case ERR_ERROR: + return logging::Level::Error; + case ERR_FATAL: + return logging::Level::Fatal; + default: + throw invalid_argument("Unknown log level."); } } @@ -61,7 +61,8 @@ void sphinxLogCallback(void* user_data, err_lvl_t errorLevel, const char* format if (!success) chars.resize(chars.size() * 2); } const regex waste("^(DEBUG|INFO|INFOCONT|WARN|ERROR|FATAL): "); - string message = std::regex_replace(chars.data(), waste, "", std::regex_constants::format_first_only); + string message = + std::regex_replace(chars.data(), waste, "", std::regex_constants::format_first_only); boost::algorithm::trim(message); const logging::Level logLevel = convertSphinxErrorLevel(errorLevel); @@ -115,8 +116,12 @@ BoundedTimeline recognizePhones( const auto processUtterance = [&](Timed timedUtterance, ProgressSink& utteranceProgressSink) { // Detect phones for utterance const auto decoder = decoderPool.acquire(); - Timeline utterancePhones = - utteranceToPhones(*audioClip, timedUtterance.getTimeRange(), *decoder, utteranceProgressSink); + Timeline utterancePhones = utteranceToPhones( + *audioClip, + timedUtterance.getTimeRange(), + *decoder, + utteranceProgressSink + ); // Copy phones to result timeline std::lock_guard lock(resultMutex); @@ -137,13 +142,21 @@ BoundedTimeline recognizePhones( // Don't use more threads than there are utterances to be processed static_cast(utterances.size()), 
// Don't waste time creating additional threads (and decoders!) if the recording is short - static_cast(duration_cast(audioClip->getTruncatedRange().getDuration()).count() / 5) + static_cast( + duration_cast(audioClip->getTruncatedRange().getDuration()).count() / 5 + ) }); if (threadCount < 1) { threadCount = 1; } logging::debugFormat("Speech recognition using {} threads -- start", threadCount); - runParallel(processUtterance, utterances, threadCount, dialogProgressSink, getUtteranceProgressWeight); + runParallel( + processUtterance, + utterances, + threadCount, + dialogProgressSink, + getUtteranceProgressWeight + ); logging::debug("Speech recognition -- end"); } catch (...) { std::throw_with_nested(runtime_error("Error performing speech recognition via PocketSphinx.")); @@ -200,7 +213,9 @@ BoundedTimeline recognizeWords(const vector& audioBuffer, ps_de error = ps_end_utt(&decoder); if (error) throw runtime_error("Error ending utterance processing for word recognition."); - BoundedTimeline result(TimeRange(0_cs, centiseconds(100 * audioBuffer.size() / sphinxSampleRate))); + BoundedTimeline result( + TimeRange(0_cs, centiseconds(100 * audioBuffer.size() / sphinxSampleRate)) + ); const bool noWordsRecognized = reinterpret_cast(decoder.search)->bpidx == 0; if (noWordsRecognized) { return result; diff --git a/rhubarb/src/recognition/pocketSphinxTools.h b/rhubarb/src/recognition/pocketSphinxTools.h index 9a72199..bd47a80 100644 --- a/rhubarb/src/recognition/pocketSphinxTools.h +++ b/rhubarb/src/recognition/pocketSphinxTools.h @@ -36,4 +36,7 @@ const boost::filesystem::path& getSphinxModelDirectory(); JoiningTimeline getNoiseSounds(TimeRange utteranceTimeRange, const Timeline& phones); -BoundedTimeline recognizeWords(const std::vector& audioBuffer, ps_decoder_t& decoder); +BoundedTimeline recognizeWords( + const std::vector& audioBuffer, + ps_decoder_t& decoder +); diff --git a/rhubarb/src/recognition/tokenization.cpp b/rhubarb/src/recognition/tokenization.cpp index edef618..79db0fe 100644 --- a/rhubarb/src/recognition/tokenization.cpp +++ b/rhubarb/src/recognition/tokenization.cpp @@ -2,6 +2,7 @@ #include "tools/tools.h" #include "tools/stringTools.h" #include +#include extern "C" { #include @@ -21,7 +22,7 @@ lambda_unique_ptr createDummyVoice() { lambda_unique_ptr voice(new_voice(), [](cst_voice* voice) { delete_voice(voice); }); voice->name = "dummy_voice"; usenglish_init(voice.get()); - cst_lexicon *lexicon = cmu_lex_init(); + cst_lexicon* lexicon = cmu_lex_init(); feat_set(voice->features, "lexicon", lexicon_val(lexicon)); return voice; } @@ -37,7 +38,10 @@ vector tokenizeViaFlite(const string& text) { const string asciiText = utf8ToAscii(text); // Create utterance object with text - lambda_unique_ptr utterance(new_utterance(), [](cst_utterance* utterance) { delete_utterance(utterance); }); + lambda_unique_ptr utterance( + new_utterance(), + [](cst_utterance* utterance) { delete_utterance(utterance); } + ); utt_set_input_text(utterance.get(), asciiText.c_str()); lambda_unique_ptr voice = createDummyVoice(); utt_init(utterance.get(), voice.get()); @@ -48,14 +52,21 @@ vector tokenizeViaFlite(const string& text) { } vector result; - for (cst_item* item = relation_head(utt_relation(utterance.get(), "Word")); item; item = item_next(item)) { + for ( + cst_item* item = relation_head(utt_relation(utterance.get(), "Word")); + item; + item = item_next(item) + ) { const char* word = item_feat_string(item, "name"); - result.push_back(word); + result.emplace_back(word); } return result; } -optional 
findSimilarDictionaryWord(const string& word, function dictionaryContains) { +optional findSimilarDictionaryWord( + const string& word, + const function& dictionaryContains +) { for (bool addPeriod : { false, true }) { for (int apostropheIndex = -1; apostropheIndex <= static_cast(word.size()); ++apostropheIndex) { string modified = word; @@ -75,12 +86,15 @@ optional findSimilarDictionaryWord(const string& word, function tokenizeText(const string& text, function dictionaryContains) { +vector tokenizeText( + const string& text, + const function& dictionaryContains +) { vector words = tokenizeViaFlite(text); - // Join words separated by apostophes + // Join words separated by apostrophes for (int i = words.size() - 1; i > 0; --i) { - if (words[i].size() > 0 && words[i][0] == '\'') { + if (!words[i].empty() && words[i][0] == '\'') { words[i - 1].append(words[i]); words.erase(words.begin() + i); } @@ -95,21 +109,24 @@ vector tokenizeText(const string& text, function di { regex("@"), "at" }, { regex("[^a-z']"), "" } }; - for (size_t i = 0; i < words.size(); ++i) { + for (auto& word : words) { for (const auto& replacement : replacements) { - words[i] = regex_replace(words[i], replacement.first, replacement.second); + word = regex_replace(word, replacement.first, replacement.second); } } // Remove empty words - words.erase(std::remove_if(words.begin(), words.end(), [](const string& s) { return s.empty(); }), words.end()); + words.erase( + std::remove_if(words.begin(), words.end(), [](const string& s) { return s.empty(); }), + words.end() + ); // Try to replace words that are not in the dictionary with similar ones that are - for (size_t i = 0; i < words.size(); ++i) { - if (!dictionaryContains(words[i])) { - optional modifiedWord = findSimilarDictionaryWord(words[i], dictionaryContains); + for (auto& word : words) { + if (!dictionaryContains(word)) { + optional modifiedWord = findSimilarDictionaryWord(word, dictionaryContains); if (modifiedWord) { - words[i] = *modifiedWord; + word = *modifiedWord; } } } diff --git a/rhubarb/src/recognition/tokenization.h b/rhubarb/src/recognition/tokenization.h index c990501..0e506a6 100644 --- a/rhubarb/src/recognition/tokenization.h +++ b/rhubarb/src/recognition/tokenization.h @@ -4,4 +4,7 @@ #include #include -std::vector tokenizeText(const std::string& text, std::function dictionaryContains); +std::vector tokenizeText( + const std::string& text, + const std::function& dictionaryContains +); diff --git a/rhubarb/src/rhubarb/ExportFormat.cpp b/rhubarb/src/rhubarb/ExportFormat.cpp index 5735bed..6619f3f 100644 --- a/rhubarb/src/rhubarb/ExportFormat.cpp +++ b/rhubarb/src/rhubarb/ExportFormat.cpp @@ -12,7 +12,7 @@ string ExportFormatConverter::getTypeName() { } EnumConverter::member_data ExportFormatConverter::getMemberData() { - return member_data{ + return member_data { { ExportFormat::Tsv, "tsv" }, { ExportFormat::Xml, "xml" }, { ExportFormat::Json, "json" } diff --git a/rhubarb/src/rhubarb/RecognizerType.cpp b/rhubarb/src/rhubarb/RecognizerType.cpp index 86f0837..e3633bf 100644 --- a/rhubarb/src/rhubarb/RecognizerType.cpp +++ b/rhubarb/src/rhubarb/RecognizerType.cpp @@ -12,7 +12,7 @@ string RecognizerTypeConverter::getTypeName() { } EnumConverter::member_data RecognizerTypeConverter::getMemberData() { - return member_data{ + return member_data { { RecognizerType::PocketSphinx, "pocketSphinx" }, { RecognizerType::Phonetic, "phonetic" } }; diff --git a/rhubarb/src/rhubarb/main.cpp b/rhubarb/src/rhubarb/main.cpp index 703dd67..3f1f38e 100644 --- 
a/rhubarb/src/rhubarb/main.cpp +++ b/rhubarb/src/rhubarb/main.cpp @@ -3,7 +3,6 @@ #include #include "core/appInfo.h" #include "tools/NiceCmdLineOutput.h" -#include "tools/ProgressBar.h" #include "logging/logging.h" #include "logging/sinks.h" #include "logging/formatters.h" @@ -52,45 +51,48 @@ namespace TCLAP { struct ArgTraits { typedef ValueLike ValueCategory; }; + template<> struct ArgTraits { typedef ValueLike ValueCategory; }; + template<> struct ArgTraits { typedef ValueLike ValueCategory; }; } -shared_ptr createFileSink(path path, logging::Level minLevel) { +shared_ptr createFileSink(const path& path, logging::Level minLevel) { auto file = make_shared(); file->exceptions(std::ifstream::failbit | std::ifstream::badbit); file->open(path); - auto FileSink = make_shared(file, make_shared()); + auto FileSink = + make_shared(file, make_shared()); return make_shared(FileSink, minLevel); } unique_ptr createRecognizer(RecognizerType recognizerType) { switch (recognizerType) { - case RecognizerType::PocketSphinx: - return make_unique(); - case RecognizerType::Phonetic: - return make_unique(); - default: - throw std::runtime_error("Unknown recognizer."); + case RecognizerType::PocketSphinx: + return make_unique(); + case RecognizerType::Phonetic: + return make_unique(); + default: + throw std::runtime_error("Unknown recognizer."); } } unique_ptr createExporter(ExportFormat exportFormat) { switch (exportFormat) { - case ExportFormat::Tsv: - return make_unique(); - case ExportFormat::Xml: - return make_unique(); - case ExportFormat::Json: - return make_unique(); - default: - throw std::runtime_error("Unknown export format."); + case ExportFormat::Tsv: + return make_unique(); + case ExportFormat::Xml: + return make_unique(); + case ExportFormat::Json: + return make_unique(); + default: + throw std::runtime_error("Unknown export format."); } } @@ -106,7 +108,7 @@ ShapeSet getTargetShapeSet(const string& extendedShapesString) { return result; } -int main(int platformArgc, char *platformArgv[]) { +int main(int platformArgc, char* platformArgv[]) { // Set up default logging so early errors are printed to stdout const logging::Level defaultMinStderrLevel = logging::Level::Error; shared_ptr defaultSink = make_shared(defaultMinStderrLevel); @@ -124,24 +126,71 @@ int main(int platformArgc, char *platformArgv[]) { tclap::CmdLine cmd(appName, argumentValueSeparator, appVersion); cmd.setExceptionHandling(false); cmd.setOutput(new NiceCmdLineOutput()); - tclap::ValueArg outputFileName("o", "output", "The output file path.", false, string(), "string", cmd); + + tclap::ValueArg outputFileName( + "o", "output", "The output file path.", + false, string(), "string", cmd + ); + auto logLevels = vector(logging::LevelConverter::get().getValues()); tclap::ValuesConstraint logLevelConstraint(logLevels); - tclap::ValueArg logLevel("", "logLevel", "The minimum log level that will be written to the log file", false, logging::Level::Debug, &logLevelConstraint, cmd); - tclap::ValueArg logFileName("", "logFile", "The log file path.", false, string(), "string", cmd); - tclap::ValueArg consoleLevel("", "consoleLevel", "The minimum log level that will be printed on the console (stderr)", false, defaultMinStderrLevel, &logLevelConstraint, cmd); - tclap::SwitchArg machineReadableMode("", "machineReadable", "Formats all output to stderr in a structured JSON format.", cmd, false); - tclap::SwitchArg quietMode("q", "quiet", "Suppresses all output to stderr except for warnings and error messages.", cmd, false); - tclap::ValueArg 
maxThreadCount("", "threads", "The maximum number of worker threads to use.", false, getProcessorCoreCount(), "number", cmd); - tclap::ValueArg extendedShapes("", "extendedShapes", "All extended, optional shapes to use.", false, "GHX", "string", cmd); - tclap::ValueArg dialogFile("d", "dialogFile", "A file containing the text of the dialog.", false, string(), "string", cmd); + tclap::ValueArg logLevel( + "", "logLevel", "The minimum log level that will be written to the log file", + false, logging::Level::Debug, &logLevelConstraint, cmd + ); + + tclap::ValueArg logFileName( + "", "logFile", "The log file path.", + false, string(), "string", cmd + ); + tclap::ValueArg consoleLevel( + "", "consoleLevel", "The minimum log level that will be printed on the console (stderr)", + false, defaultMinStderrLevel, &logLevelConstraint, cmd + ); + + tclap::SwitchArg machineReadableMode( + "", "machineReadable", "Formats all output to stderr in a structured JSON format.", + cmd, false + ); + + tclap::SwitchArg quietMode( + "q", "quiet", "Suppresses all output to stderr except for warnings and error messages.", + cmd, false + ); + + tclap::ValueArg maxThreadCount( + "", "threads", "The maximum number of worker threads to use.", + false, getProcessorCoreCount(), "number", cmd + ); + + tclap::ValueArg extendedShapes( + "", "extendedShapes", "All extended, optional shapes to use.", + false, "GHX", "string", cmd + ); + + tclap::ValueArg dialogFile( + "d", "dialogFile", "A file containing the text of the dialog.", + false, string(), "string", cmd + ); + auto exportFormats = vector(ExportFormatConverter::get().getValues()); tclap::ValuesConstraint exportFormatConstraint(exportFormats); - tclap::ValueArg exportFormat("f", "exportFormat", "The export format.", false, ExportFormat::Tsv, &exportFormatConstraint, cmd); + tclap::ValueArg exportFormat( + "f", "exportFormat", "The export format.", + false, ExportFormat::Tsv, &exportFormatConstraint, cmd + ); + auto recognizerTypes = vector(RecognizerTypeConverter::get().getValues()); tclap::ValuesConstraint recognizerConstraint(recognizerTypes); - tclap::ValueArg recognizerType("r", "recognizer", "The dialog recognizer.", false, RecognizerType::PocketSphinx, &recognizerConstraint, cmd); - tclap::UnlabeledValueArg inputFileName("inputFile", "The input file. Must be a sound file in WAVE format.", true, "", "string", cmd); + tclap::ValueArg recognizerType( + "r", "recognizer", "The dialog recognizer.", + false, RecognizerType::PocketSphinx, &recognizerConstraint, cmd + ); + + tclap::UnlabeledValueArg inputFileName( + "inputFile", "The input file. Must be a sound file in WAVE format.", + true, "", "string", cmd + ); try { // Parse command line @@ -180,13 +229,17 @@ int main(int platformArgc, char *platformArgv[]) { try { // On progress change: Create log message - ProgressForwarder progressSink([](double progress) { logging::log(ProgressEntry(progress)); }); + ProgressForwarder progressSink([](double progress) { + logging::log(ProgressEntry(progress)); + }); // Animate the recording logging::info("Starting animation."); JoiningContinuousTimeline animation = animateWaveFile( inputFilePath, - dialogFile.isSet() ? readUtf8File(path(dialogFile.getValue())) : boost::optional(), + dialogFile.isSet() + ? readUtf8File(path(dialogFile.getValue())) + : boost::optional(), *createRecognizer(recognizerType.getValue()), targetShapeSet, maxThreadCount.getValue(), @@ -207,7 +260,9 @@ int main(int platformArgc, char *platformArgv[]) { logging::log(SuccessEntry()); } catch (...) 
{ - std::throw_with_nested(std::runtime_error(fmt::format("Error processing file {}.", inputFilePath))); + std::throw_with_nested( + std::runtime_error(fmt::format("Error processing file {}.", inputFilePath)) + ); } return 0; diff --git a/rhubarb/src/rhubarb/sinks.cpp b/rhubarb/src/rhubarb/sinks.cpp index ef62778..487377e 100644 --- a/rhubarb/src/rhubarb/sinks.cpp +++ b/rhubarb/src/rhubarb/sinks.cpp @@ -9,7 +9,6 @@ using std::string; using std::make_shared; using logging::Level; -using logging::LevelFilter; using logging::StdErrSink; using logging::SimpleConsoleFormatter; using boost::optional; @@ -21,11 +20,14 @@ NiceStderrSink::NiceStderrSink(Level minLevel) : {} void NiceStderrSink::receive(const logging::Entry& entry) { - // For selected semantic entries, print a user-friendly message instead of the technical log message. - if (const StartEntry* startEntry = dynamic_cast(&entry)) { - std::cerr << fmt::format("Generating lip sync data for {}.", startEntry->getInputFilePath()) << std::endl; + // For selected semantic entries, print a user-friendly message instead of + // the technical log message. + if (const auto* startEntry = dynamic_cast(&entry)) { + std::cerr + << fmt::format("Generating lip sync data for {}.", startEntry->getInputFilePath()) + << std::endl; startProgressIndication(); - } else if (const ProgressEntry* progressEntry = dynamic_cast(&entry)) { + } else if (const auto* progressEntry = dynamic_cast(&entry)) { assert(progressBar); progress = progressEntry->getProgress(); progressBar->reportProgress(progress); @@ -65,7 +67,7 @@ QuietStderrSink::QuietStderrSink(Level minLevel) : void QuietStderrSink::receive(const logging::Entry& entry) { // Set inputFilePath as soon as we get it - if (const StartEntry* startEntry = dynamic_cast(&entry)) { + if (const auto* startEntry = dynamic_cast(&entry)) { inputFilePath = startEntry->getInputFilePath(); } @@ -87,26 +89,42 @@ MachineReadableStderrSink::MachineReadableStderrSink(Level minLevel) : {} string formatLogProperty(const logging::Entry& entry) { - return fmt::format(R"("log": {{ "level": "{}", "message": "{}" }})", entry.level, escapeJsonString(entry.message)); + return fmt::format( + R"("log": {{ "level": "{}", "message": "{}" }})", + entry.level, + escapeJsonString(entry.message) + ); } void MachineReadableStderrSink::receive(const logging::Entry& entry) { optional line; if (dynamic_cast(&entry)) { - if (const StartEntry* startEntry = dynamic_cast(&entry)) { + if (const auto* startEntry = dynamic_cast(&entry)) { const string file = escapeJsonString(startEntry->getInputFilePath().string()); - line = fmt::format(R"({{ "type": "start", "file": "{}", {} }})", file, formatLogProperty(entry)); - } else if (const ProgressEntry* progressEntry = dynamic_cast(&entry)) { + line = fmt::format( + R"({{ "type": "start", "file": "{}", {} }})", + file, + formatLogProperty(entry) + ); + } else if (const auto* progressEntry = dynamic_cast(&entry)) { const int progressPercent = static_cast(progressEntry->getProgress() * 100); if (progressPercent > lastProgressPercent) { - line = fmt::format(R"({{ "type": "progress", "value": {:.2f}, {} }})", progressEntry->getProgress(), formatLogProperty(entry)); + line = fmt::format( + R"({{ "type": "progress", "value": {:.2f}, {} }})", + progressEntry->getProgress(), + formatLogProperty(entry) + ); lastProgressPercent = progressPercent; } } else if (dynamic_cast(&entry)) { line = fmt::format(R"({{ "type": "success", {} }})", formatLogProperty(entry)); - } else if (const FailureEntry* failureEntry = 
dynamic_cast(&entry)) { + } else if (const auto* failureEntry = dynamic_cast(&entry)) { const string reason = escapeJsonString(failureEntry->getReason()); - line = fmt::format(R"({{ "type": "failure", "reason": "{}", {} }})", reason, formatLogProperty(entry)); + line = fmt::format( + R"({{ "type": "failure", "reason": "{}", {} }})", + reason, + formatLogProperty(entry) + ); } else { throw std::runtime_error("Unsupported type of semantic entry."); } diff --git a/rhubarb/src/time/BoundedTimeline.h b/rhubarb/src/time/BoundedTimeline.h index b84bd0d..a9e9859 100644 --- a/rhubarb/src/time/BoundedTimeline.h +++ b/rhubarb/src/time/BoundedTimeline.h @@ -52,7 +52,10 @@ public: // Clip the value's range to bounds TimeRange& valueRange = timedValue.getTimeRange(); - valueRange.resize(max(range.getStart(), valueRange.getStart()), min(range.getEnd(), valueRange.getEnd())); + valueRange.resize( + max(range.getStart(), valueRange.getStart()), + min(range.getEnd(), valueRange.getEnd()) + ); return Timeline::set(timedValue); } diff --git a/rhubarb/src/time/ContinuousTimeline.h b/rhubarb/src/time/ContinuousTimeline.h index 0935166..a6a1ae5 100644 --- a/rhubarb/src/time/ContinuousTimeline.h +++ b/rhubarb/src/time/ContinuousTimeline.h @@ -29,7 +29,11 @@ public: ContinuousTimeline(range, defaultValue, collection.begin(), collection.end()) {} - ContinuousTimeline(TimeRange range, T defaultValue, std::initializer_list> initializerList) : + ContinuousTimeline( + TimeRange range, + T defaultValue, + std::initializer_list> initializerList + ) : ContinuousTimeline(range, defaultValue, initializerList.begin(), initializerList.end()) {} diff --git a/rhubarb/src/time/TimeRange.cpp b/rhubarb/src/time/TimeRange.cpp index 3835d6f..58273bd 100644 --- a/rhubarb/src/time/TimeRange.cpp +++ b/rhubarb/src/time/TimeRange.cpp @@ -20,7 +20,11 @@ TimeRange::TimeRange(time_type start, time_type end) : end(end) { if (start > end) { - throw std::invalid_argument(fmt::format("Time range start must not be less than end. Start: {0}, end: {1}", start, end)); + throw std::invalid_argument(fmt::format( + "Time range start must not be less than end. 
Start: {0}, end: {1}", + start, + end + )); } } @@ -88,16 +92,16 @@ void TimeRange::shrink(time_type value) { } void TimeRange::trim(const TimeRange& limits) { - TimeRange newRange(std::max(start, limits.start), std::min(end, limits.end)); + const TimeRange newRange(std::max(start, limits.start), std::min(end, limits.end)); resize(newRange); } void TimeRange::trimLeft(time_type value) { - trim({value, end}); + trim({ value, end }); } void TimeRange::trimRight(time_type value) { - trim({start, value}); + trim({ start, value }); } bool TimeRange::operator==(const TimeRange& rhs) const { diff --git a/rhubarb/src/time/Timed.h b/rhubarb/src/time/Timed.h index 04add08..171aa78 100644 --- a/rhubarb/src/time/Timed.h +++ b/rhubarb/src/time/Timed.h @@ -72,7 +72,12 @@ private: template std::ostream& operator<<(std::ostream& stream, const Timed& timedValue) { - return stream << "Timed(" << timedValue.getStart() << ", " << timedValue.getEnd() << ", " << timedValue.getValue() << ")"; + return stream + << "Timed(" + << timedValue.getStart() << ", " + << timedValue.getEnd() << ", " + << timedValue.getValue() + << ")"; } template<> @@ -130,5 +135,9 @@ private: template<> inline std::ostream& operator<<(std::ostream& stream, const Timed& timedValue) { - return stream << "Timed(" << timedValue.getTimeRange().getStart() << ", " << timedValue.getTimeRange().getEnd() << ")"; + return stream + << "Timed(" + << timedValue.getTimeRange().getStart() << ", " + << timedValue.getTimeRange().getEnd() + << ")"; } diff --git a/rhubarb/src/time/Timeline.h b/rhubarb/src/time/Timeline.h index 88b9179..18eb1e1 100644 --- a/rhubarb/src/time/Timeline.h +++ b/rhubarb/src/time/Timeline.h @@ -36,12 +36,15 @@ private: bool operator()(const Timed& lhs, const Timed& rhs) const { return lhs.getStart() < rhs.getStart(); } + bool operator()(const time_type& lhs, const Timed& rhs) const { return lhs < rhs.getStart(); } + bool operator()(const Timed& lhs, const time_type& rhs) const { return lhs.getStart() < rhs; } + using is_transparent = int; }; @@ -88,7 +91,7 @@ public: time_type time; }; - Timeline() {} + Timeline() = default; template Timeline(InputIterator first, InputIterator last) { @@ -107,7 +110,7 @@ public: Timeline(initializerList.begin(), initializerList.end()) {} - virtual ~Timeline() {} + virtual ~Timeline() = default; bool empty() const { return elements.empty(); @@ -141,35 +144,39 @@ public: iterator find(time_type time, FindMode findMode = FindMode::SampleRight) const { switch (findMode) { - case FindMode::SampleLeft: { - iterator left = find(time, FindMode::SearchLeft); - return left != end() && left->getEnd() >= time ? left : end(); - } - case FindMode::SampleRight: { - iterator right = find(time, FindMode::SearchRight); - return right != end() && right->getStart() <= time ? right : end(); - } - case FindMode::SearchLeft: { - // Get first element starting >= time - iterator it = elements.lower_bound(time); - - // Go one element back - return it != begin() ? --it : end(); - } - case FindMode::SearchRight: { - // Get first element starting > time - iterator it = elements.upper_bound(time); - - // Go one element back - if (it != begin()) { - iterator left = it; - --left; - if (left->getEnd() > time) return left; + case FindMode::SampleLeft: + { + iterator left = find(time, FindMode::SearchLeft); + return left != end() && left->getEnd() >= time ? 
left : end(); } - return it; - } - default: - throw std::invalid_argument("Unexpected find mode."); + case FindMode::SampleRight: + { + iterator right = find(time, FindMode::SearchRight); + return right != end() && right->getStart() <= time ? right : end(); + } + case FindMode::SearchLeft: + { + // Get first element starting >= time + iterator it = elements.lower_bound(time); + + // Go one element back + return it != begin() ? --it : end(); + } + case FindMode::SearchRight: + { + // Get first element starting > time + iterator it = elements.upper_bound(time); + + // Go one element back + if (it != begin()) { + iterator left = it; + --left; + if (left->getEnd() > time) return left; + } + return it; + } + default: + throw std::invalid_argument("Unexpected find mode."); } } @@ -187,7 +194,10 @@ public: splitAt(range.getEnd()); // Erase overlapping elements - elements.erase(find(range.getStart(), FindMode::SearchRight), find(range.getEnd(), FindMode::SearchRight)); + elements.erase( + find(range.getStart(), FindMode::SearchRight), + find(range.getEnd(), FindMode::SearchRight) + ); } void clear(time_type start, time_type end) { @@ -220,12 +230,19 @@ public: } template - iterator set(const TimeRange& timeRange, const std::enable_if_t::value, T>& value) { + iterator set( + const TimeRange& timeRange, + const std::enable_if_t::value, T>& value + ) { return set(Timed(timeRange, value)); } template - iterator set(time_type start, time_type end, const std::enable_if_t::value, T>& value) { + iterator set( + time_type start, + time_type end, + const std::enable_if_t::value, T>& value + ) { return set(Timed(start, end, value)); } @@ -251,13 +268,16 @@ public: for (auto it = copy.begin(); it != copy.end(); ++it) { const auto rangeBegin = it; auto rangeEnd = std::next(rangeBegin); - while (rangeEnd != copy.end() && rangeEnd->getStart() == rangeBegin->getEnd() && ::internal::valueEquals(*rangeEnd, *rangeBegin)) { + while (rangeEnd != copy.end() + && rangeEnd->getStart() == rangeBegin->getEnd() + && ::internal::valueEquals(*rangeEnd, *rangeBegin) + ) { ++rangeEnd; } if (rangeEnd != std::next(rangeBegin)) { Timed combined = *rangeBegin; - combined.setTimeRange({rangeBegin->getStart(), rangeEnd->getEnd()}); + combined.setTimeRange({ rangeBegin->getStart(), rangeEnd->getEnd() }); set(combined); it = rangeEnd; } diff --git a/rhubarb/src/time/centiseconds.h b/rhubarb/src/time/centiseconds.h index 2f05c46..b03b20b 100644 --- a/rhubarb/src/time/centiseconds.h +++ b/rhubarb/src/time/centiseconds.h @@ -3,9 +3,9 @@ #include #include -typedef std::chrono::duration centiseconds; +using centiseconds = std::chrono::duration; -std::ostream& operator <<(std::ostream& stream, const centiseconds cs); +std::ostream& operator <<(std::ostream& stream, centiseconds cs); #pragma warning(push) #pragma warning(disable: 4455) diff --git a/rhubarb/src/time/timedLogging.h b/rhubarb/src/time/timedLogging.h index 90407af..a2ddf04 100644 --- a/rhubarb/src/time/timedLogging.h +++ b/rhubarb/src/time/timedLogging.h @@ -7,8 +7,13 @@ template void logTimedEvent(const std::string& eventName, const Timed timedValue) { - logging::debugFormat("##{0}[{1}-{2}]: {3}", - eventName, formatDuration(timedValue.getStart()), formatDuration(timedValue.getEnd()), timedValue.getValue()); + logging::debugFormat( + "##{0}[{1}-{2}]: {3}", + eventName, + formatDuration(timedValue.getStart()), + formatDuration(timedValue.getEnd()), + timedValue.getValue() + ); } template @@ -17,6 +22,11 @@ void logTimedEvent(const std::string& eventName, const TimeRange& 
timeRange, con } template -void logTimedEvent(const std::string& eventName, centiseconds start, centiseconds end, const TValue& value) { +void logTimedEvent( + const std::string& eventName, + centiseconds start, + centiseconds end, + const TValue& value +) { logTimedEvent(eventName, Timed(start, end, value)); } \ No newline at end of file diff --git a/rhubarb/src/tools/EnumConverter.h b/rhubarb/src/tools/EnumConverter.h index 53ce691..85e07b1 100644 --- a/rhubarb/src/tools/EnumConverter.h +++ b/rhubarb/src/tools/EnumConverter.h @@ -1,6 +1,5 @@ #pragma once -#include #include #include #include @@ -30,7 +29,9 @@ public: auto result = tryToString(value); if (!result) { auto numericValue = static_cast::type>(value); - throw std::invalid_argument(fmt::format("{} is not a valid {} value.", numericValue, typeName)); + throw std::invalid_argument( + fmt::format("{} is not a valid {} value.", numericValue, typeName) + ); } return *result; diff --git a/rhubarb/src/tools/Lazy.h b/rhubarb/src/tools/Lazy.h index 14c5d09..e34f91b 100644 --- a/rhubarb/src/tools/Lazy.h +++ b/rhubarb/src/tools/Lazy.h @@ -55,7 +55,10 @@ public: private: void init() const { - std::call_once(state->initialized, [&] { state->value = std::make_unique(state->createValue()); }); + std::call_once( + state->initialized, + [&] { state->value = std::make_unique(state->createValue()); } + ); } std::shared_ptr state = std::make_shared(); diff --git a/rhubarb/src/tools/NiceCmdLineOutput.cpp b/rhubarb/src/tools/NiceCmdLineOutput.cpp index 44de9c2..cb1a700 100644 --- a/rhubarb/src/tools/NiceCmdLineOutput.cpp +++ b/rhubarb/src/tools/NiceCmdLineOutput.cpp @@ -36,7 +36,10 @@ void NiceCmdLineOutput::failure(CmdLineInterface& cli, TCLAP::ArgException& e) { std::cerr << "Short usage:" << endl; printShortUsage(cli, std::cerr); - std::cerr << endl << "For complete usage and help, type `" << getBinaryName() << " --help`" << endl << endl; + std::cerr + << endl + << "For complete usage and help, type `" << getBinaryName() << " --help`" << endl + << endl; } else { usage(cli); } @@ -76,8 +79,9 @@ void NiceCmdLineOutput::printLongUsage(CmdLineInterface& cli, std::ostream& outS const vector> xorArgGroups = xorHandler.getXorList(); for (const vector& xorArgGroup : xorArgGroups) { for (auto arg : xorArgGroup) { - if (arg != xorArgGroup[0]) + if (arg != xorArgGroup[0]) { outStream << "-- or --" << endl; + } tablePrinter.printRow({ arg->longID(), arg->getDescription() }); } diff --git a/rhubarb/src/tools/ObjectPool.h b/rhubarb/src/tools/ObjectPool.h index 6e769da..13c1cad 100644 --- a/rhubarb/src/tools/ObjectPool.h +++ b/rhubarb/src/tools/ObjectPool.h @@ -4,7 +4,7 @@ #include #include -template > +template> class ObjectPool { public: using wrapper_type = lambda_unique_ptr; diff --git a/rhubarb/src/tools/ProgressBar.cpp b/rhubarb/src/tools/ProgressBar.cpp index 4f913d7..462fd09 100644 --- a/rhubarb/src/tools/ProgressBar.cpp +++ b/rhubarb/src/tools/ProgressBar.cpp @@ -54,13 +54,13 @@ void ProgressBar::update(bool showSpinner) { const int blockCount = 20; const string animation = "|/-\\"; - int progressBlockCount = static_cast(currentProgress * blockCount); + const int progressBlockCount = static_cast(currentProgress * blockCount); const double epsilon = 0.0001; - int percent = static_cast(currentProgress * 100 + epsilon); + const int percent = static_cast(currentProgress * 100 + epsilon); const string spinner = showSpinner ? 
 		string(1, animation[animationIndex++ % animation.size()]) : "";
-	string text = fmt::format("[{0}{1}] {2:3}% {3}",
+	const string text = fmt::format("[{0}{1}] {2:3}% {3}",
 		string(progressBlockCount, '#'),
 		string(blockCount - progressBlockCount, '-'),
 		percent,
 		spinner
@@ -71,7 +71,7 @@ void ProgressBar::updateText(const string& text) {
 	// Get length of common portion
 	int commonPrefixLength = 0;
-	int commonLength = std::min(currentText.size(), text.size());
+	const int commonLength = std::min(currentText.size(), text.size());
 	while (commonPrefixLength < commonLength && text[commonPrefixLength] == currentText[commonPrefixLength]) {
 		commonPrefixLength++;
 	}
 
@@ -86,7 +86,7 @@ void ProgressBar::updateText(const string& text) {
 	output.append(text, commonPrefixLength, text.size() - commonPrefixLength);
 
 	// ... if the new text is shorter than the old one: delete overlapping characters
-	int overlapCount = currentText.size() - text.size();
+	const int overlapCount = currentText.size() - text.size();
 	if (overlapCount > 0) {
 		output.append(overlapCount, ' ');
 		output.append(overlapCount, '\b');
diff --git a/rhubarb/src/tools/TablePrinter.cpp b/rhubarb/src/tools/TablePrinter.cpp
index bb2c4d4..f60c5e0 100644
--- a/rhubarb/src/tools/TablePrinter.cpp
+++ b/rhubarb/src/tools/TablePrinter.cpp
@@ -10,21 +10,23 @@ using std::invalid_argument;
 using std::vector;
 using std::string;
 
-TablePrinter::TablePrinter(ostream *stream, initializer_list columnWidths, int columnSpacing) :
+TablePrinter::TablePrinter(ostream* stream, initializer_list columnWidths, int columnSpacing) :
 	stream(stream),
 	columnWidths(columnWidths.begin(), columnWidths.end()),
 	columnSpacing(columnSpacing)
 {
 	if (stream == nullptr) throw invalid_argument("stream is null.");
 	if (columnWidths.size() < 1) throw invalid_argument("No columns defined.");
-	if (std::any_of(columnWidths.begin(), columnWidths.end(), [](int width){ return width <= 1; })) {
+	if (std::any_of(columnWidths.begin(), columnWidths.end(), [](int width) { return width <= 1; })) {
 		throw invalid_argument("All columns must have a width of at least 1.");
 	}
 	if (columnSpacing < 0) throw invalid_argument("columnSpacing must not be negative.");
 }
 
 void TablePrinter::printRow(initializer_list columns) const {
-	if (columns.size() != columnWidths.size()) throw invalid_argument("Number of specified strings does not match number of defined columns.");
+	if (columns.size() != columnWidths.size()) {
+		throw invalid_argument("Number of specified strings does not match number of defined columns.");
+	}
 
 	// Some cells may span multiple lines.
 	// Create matrix of text lines in columns.
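// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch, not part of the patch. The comment
// "Create matrix of text lines in columns" above refers to wrapping each cell
// to its column width before printing. A minimal sketch of that idea follows,
// assuming the wrapString helper declared in tools/stringTools.h; the names
// buildLineMatrix, lineMatrix and cellLines are made up for this example and
// do not appear in the repository.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <string>
#include <vector>

// Declared in tools/stringTools.h (see the diff for that file further down)
std::vector<std::string> wrapString(const std::string& s, int lineLength, int hangingIndent = 0);

// Wrap every cell to its column width, then pad all columns to the same height
// so the row can be printed one text line at a time.
std::vector<std::vector<std::string>> buildLineMatrix(
	const std::vector<std::string>& columns,
	const std::vector<int>& columnWidths
) {
	std::vector<std::vector<std::string>> lineMatrix;
	size_t lineCount = 0;
	for (size_t i = 0; i < columns.size(); ++i) {
		std::vector<std::string> cellLines = wrapString(columns[i], columnWidths[i]);
		lineCount = std::max(lineCount, cellLines.size());
		lineMatrix.push_back(std::move(cellLines));
	}
	for (auto& cellLines : lineMatrix) {
		cellLines.resize(lineCount); // shorter cells get trailing empty lines
	}
	return lineMatrix;
}
// ---------------------------------------------------------------------------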
@@ -50,7 +52,7 @@ void TablePrinter::printRow(initializer_list columns) const { // Print lines *stream << std::left; - string spacer(columnSpacing, ' '); + const string spacer(columnSpacing, ' '); for (size_t rowIndex = 0; rowIndex < lineCount; rowIndex++) { for (size_t columnIndex = 0; columnIndex < columns.size(); columnIndex++) { if (columnIndex != 0) { diff --git a/rhubarb/src/tools/TablePrinter.h b/rhubarb/src/tools/TablePrinter.h index 576d354..16c6f39 100644 --- a/rhubarb/src/tools/TablePrinter.h +++ b/rhubarb/src/tools/TablePrinter.h @@ -6,7 +6,11 @@ class TablePrinter { public: - TablePrinter(std::ostream* stream, std::initializer_list columnWidths, int columnSpacing = 2); + TablePrinter( + std::ostream* stream, + std::initializer_list columnWidths, + int columnSpacing = 2 + ); void printRow(std::initializer_list columns) const; private: std::ostream* const stream; diff --git a/rhubarb/src/tools/array.h b/rhubarb/src/tools/array.h index 91bba56..8fba6c2 100644 --- a/rhubarb/src/tools/array.h +++ b/rhubarb/src/tools/array.h @@ -9,13 +9,16 @@ namespace details { struct negation : std::integral_constant {}; template struct is_ref_wrapper : std::false_type {}; + template struct is_ref_wrapper> : std::true_type {}; template using not_ref_wrapper = negation>>; template struct conjunction : std::true_type { }; + template struct conjunction : B1 { }; + template struct conjunction : std::conditional_t, B1> {}; @@ -23,19 +26,20 @@ namespace details { template constexpr bool conjunction_v = conjunction::value; - template struct return_type_helper { using type = D; }; - template + template struct return_type_helper { using type = D; }; + + template struct return_type_helper : std::common_type { static_assert(conjunction_v...>, "Types cannot contain reference_wrappers when D is void"); }; - template + template using return_type = std::array::type, sizeof...(Types)>; } -template < class D = void, class... Types> +template constexpr details::return_type make_array(Types&&... t) { - return {std::forward(t)...}; + return { std::forward(t)... }; } \ No newline at end of file diff --git a/rhubarb/src/tools/fileTools.cpp b/rhubarb/src/tools/fileTools.cpp index 82a539f..f08eb0d 100644 --- a/rhubarb/src/tools/fileTools.cpp +++ b/rhubarb/src/tools/fileTools.cpp @@ -10,7 +10,8 @@ std::ifstream openFile(path filePath) { file.exceptions(std::ifstream::failbit | std::ifstream::badbit); file.open(filePath.c_str(), std::ios::binary); - // Read some dummy data so that we can throw a decent exception in case the file is missing, locked, etc. + // Read some dummy data so that we can throw a decent exception in case the file is missing, + // locked, etc. file.seekg(0, std::ios_base::end); if (file.tellg()) { file.seekg(0); @@ -18,7 +19,7 @@ std::ifstream openFile(path filePath) { file.seekg(0); } - return std::move(file); + return file; } catch (const std::ifstream::failure&) { // Error messages on stream exceptions are mostly useless. throw std::runtime_error(errorNumberToString(errno)); diff --git a/rhubarb/src/tools/nextCombination.h b/rhubarb/src/tools/nextCombination.h index ca39ae7..28cb017 100644 --- a/rhubarb/src/tools/nextCombination.h +++ b/rhubarb/src/tools/nextCombination.h @@ -12,7 +12,7 @@ // After each iteration, the first k elements of the container will be // a combination. When there are no more combinations, the container // will return to the original sorted order. 
-template +template inline bool next_combination(const Iterator first, Iterator k, const Iterator last) { // Handle degenerate cases if (first == last || std::next(first) == last || first == k || k == last) { diff --git a/rhubarb/src/tools/pairs.h b/rhubarb/src/tools/pairs.h index 8fe45bf..d2015b7 100644 --- a/rhubarb/src/tools/pairs.h +++ b/rhubarb/src/tools/pairs.h @@ -2,7 +2,9 @@ #include template -std::vector> getPairs(const TCollection& collection) { +std::vector> getPairs( + const TCollection& collection +) { using TElement = typename TCollection::value_type; using TPair = std::pair; using TIterator = typename TCollection::const_iterator; diff --git a/rhubarb/src/tools/parallel.h b/rhubarb/src/tools/parallel.h index de8c5d5..1caef68 100644 --- a/rhubarb/src/tools/parallel.h +++ b/rhubarb/src/tools/parallel.h @@ -32,12 +32,12 @@ void runParallel( // Before exiting, wait for all running tasks to finish, but don't re-throw exceptions. // This only applies if one task already failed with an exception. - auto finishRunning = gsl::finally([&]{ + auto finishRunning = gsl::finally([&] { std::unique_lock lock(mutex); elementFinished.wait(lock, [&] { return currentThreadCount == 0; }); }); - // Asyncronously run all elements + // Asynchronously run all elements for (auto it = collection.begin(); it != collection.end(); ++it) { // This variable will later hold the future, but can be value-captured right now auto future = std::make_shared(); @@ -66,7 +66,7 @@ void runParallel( // Wait for threads to finish, if necessary { std::unique_lock lock(mutex); - int targetThreadCount = it == collection.end() ? 0 : maxThreadCount - 1; + const int targetThreadCount = it == collection.end() ? 0 : maxThreadCount - 1; while (currentThreadCount > targetThreadCount) { elementFinished.wait(lock); if (finishedElement.valid()) { @@ -86,7 +86,8 @@ void runParallel( TCollection& collection, int maxThreadCount, ProgressSink& progressSink, - std::function getElementProgressWeight = [](typename TCollection::reference) { return 1.0; }) + std::function getElementProgressWeight = + [](typename TCollection::reference) { return 1.0; }) { // Create a collection of wrapper functions that take care of progress handling ProgressMerger progressMerger(progressSink); @@ -101,7 +102,7 @@ void runParallel( } inline int getProcessorCoreCount() { - int coreCount = std::thread::hardware_concurrency(); + const int coreCount = std::thread::hardware_concurrency(); // If the number of cores cannot be determined, use a reasonable default return coreCount != 0 ? coreCount : 4; diff --git a/rhubarb/src/tools/platformTools.cpp b/rhubarb/src/tools/platformTools.cpp index efeec20..4199b85 100644 --- a/rhubarb/src/tools/platformTools.cpp +++ b/rhubarb/src/tools/platformTools.cpp @@ -14,8 +14,6 @@ #ifdef _WIN32 #include - #include - #include #endif using boost::filesystem::path; @@ -26,13 +24,14 @@ path getBinPath() { static const path binPath = [] { try { // Determine path length - int pathLength = wai_getExecutablePath(nullptr, 0, nullptr); + const int pathLength = wai_getExecutablePath(nullptr, 0, nullptr); if (pathLength == -1) { throw std::runtime_error("Error determining path length."); } // Get path - // Note: According to documentation, pathLength does *not* include the trailing zero. Actually, it does. + // Note: According to documentation, pathLength does *not* include the trailing zero. + // Actually, it does. // In case there are situations where it doesn't, we allocate one character more. 
std::vector buffer(pathLength + 1); if (wai_getExecutablePath(buffer.data(), buffer.size(), nullptr) == -1) { @@ -41,7 +40,7 @@ path getBinPath() { buffer[pathLength] = 0; // Convert to boost::filesystem::path - string pathString(buffer.data()); + const string pathString(buffer.data()); path result(boost::filesystem::canonical(pathString).make_preferred()); return result; } catch (...) { @@ -56,14 +55,14 @@ path getBinDirectory() { } path getTempFilePath() { - path tempDirectory = boost::filesystem::temp_directory_path(); + const path tempDirectory = boost::filesystem::temp_directory_path(); static boost::uuids::random_generator generateUuid; - string fileName = to_string(generateUuid()); + const string fileName = to_string(generateUuid()); return tempDirectory / fileName; } std::tm getLocalTime(const time_t& time) { - tm timeInfo; + tm timeInfo {}; #if (__unix || __linux || __APPLE__) localtime_r(&time, &timeInfo); #else @@ -92,7 +91,8 @@ vector argsToUtf8(int argc, char* argv[]) { // Get command-line arguments as UTF16 strings int argumentCount; static_assert(sizeof(wchar_t) == sizeof(char16_t), "Expected wchar_t to be a 16-bit type."); - char16_t** args = reinterpret_cast(CommandLineToArgvW(GetCommandLineW(), &argumentCount)); + char16_t** args = + reinterpret_cast(CommandLineToArgvW(GetCommandLineW(), &argumentCount)); if (!args) { throw std::runtime_error("Error splitting the UTF-16 command line arguments."); } @@ -134,7 +134,7 @@ private: }; void useUtf8ForConsole() { -// Unix systems already expect UTF-8-encoded data + // Unix systems already expect UTF-8-encoded data #ifdef _WIN32 // Set console code page to UTF-8 so the console knows how to interpret string data SetConsoleOutputCP(CP_UTF8); @@ -147,7 +147,7 @@ void useUtf8ForConsole() { } void useUtf8ForBoostFilesystem() { - std::locale globalLocale = std::locale(); - std::locale utf8Locale(globalLocale, new boost::filesystem::detail::utf8_codecvt_facet); + const std::locale globalLocale = std::locale(); + const std::locale utf8Locale(globalLocale, new boost::filesystem::detail::utf8_codecvt_facet); path::imbue(utf8Locale); } diff --git a/rhubarb/src/tools/platformTools.h b/rhubarb/src/tools/platformTools.h index 2fec1db..34a0e35 100644 --- a/rhubarb/src/tools/platformTools.h +++ b/rhubarb/src/tools/platformTools.h @@ -11,7 +11,7 @@ boost::filesystem::path getTempFilePath(); std::tm getLocalTime(const time_t& time); std::string errorNumberToString(int errorNumber); -std::vector argsToUtf8(int argc, char *argv[]); +std::vector argsToUtf8(int argc, char* argv[]); void useUtf8ForConsole(); void useUtf8ForBoostFilesystem(); \ No newline at end of file diff --git a/rhubarb/src/tools/progress.cpp b/rhubarb/src/tools/progress.cpp index 34c5848..81e54a7 100644 --- a/rhubarb/src/tools/progress.cpp +++ b/rhubarb/src/tools/progress.cpp @@ -22,10 +22,10 @@ ProgressSink& ProgressMerger::addSink(double weight) { totalWeight += weight; int sinkIndex = weightedValues.size(); weightedValues.push_back(0); - forwarders.push_back(ProgressForwarder([weight, sinkIndex, this](double progress) { + forwarders.emplace_back([weight, sinkIndex, this](double progress) { weightedValues[sinkIndex] = progress * weight; report(); - })); + }); return forwarders.back(); } @@ -37,7 +37,7 @@ void ProgressMerger::report() { for (double weightedValue : weightedValues) { weightedSum += weightedValue; } - double progress = weightedSum / totalWeight; + const double progress = weightedSum / totalWeight; sink.reportProgress(progress); } else { sink.reportProgress(0); diff 
--git a/rhubarb/src/tools/stringTools.cpp b/rhubarb/src/tools/stringTools.cpp index a223d4e..9517706 100644 --- a/rhubarb/src/tools/stringTools.cpp +++ b/rhubarb/src/tools/stringTools.cpp @@ -9,7 +9,6 @@ using std::string; using std::wstring; using std::u32string; using std::vector; -using boost::optional; using std::regex; using std::regex_replace; @@ -17,7 +16,7 @@ vector splitIntoLines(const string& s) { vector lines; auto p = &s[0]; auto lineBegin = p; - auto end = p + s.size(); + const auto end = p + s.size(); // Iterate over input string while (p <= end) { // Add a new result line when we hit a \n character or the end of the string @@ -45,7 +44,7 @@ vector wrapSingleLineString(const string& s, int lineLength, int hanging auto p = &s[0]; auto lineBegin = p; auto lineEnd = p; - auto end = p + s.size(); + const auto end = p + s.size(); // Iterate over input string while (p <= end) { // If we're at a word boundary: update lineEnd @@ -54,7 +53,7 @@ vector wrapSingleLineString(const string& s, int lineLength, int hanging } // If we've hit lineLength or the end of the string: add a new result line - int currentIndent = lines.empty() ? 0 : hangingIndent; + const int currentIndent = lines.empty() ? 0 : hangingIndent; if (p == end || p - lineBegin == lineLength - currentIndent) { if (lineEnd == lineBegin) { // The line contains a single word, which is too long. Split mid-word. @@ -80,7 +79,7 @@ vector wrapSingleLineString(const string& s, int lineLength, int hanging vector wrapString(const string& s, int lineLength, int hangingIndent) { vector lines; - for (string paragraph : splitIntoLines(s)) { + for (const string& paragraph : splitIntoLines(s)) { auto paragraphLines = wrapSingleLineString(paragraph, lineLength, hangingIndent); copy(paragraphLines.cbegin(), paragraphLines.cend(), back_inserter(lines)); } @@ -100,7 +99,7 @@ wstring latin1ToWide(const string& s) { return result; } -string utf8ToAscii(const string s) { +string utf8ToAscii(const string& s) { // Normalize string, simplifying it as much as possible const NormalizationOptions options = NormalizationOptions::CompatibilityMode | NormalizationOptions::Decompose @@ -111,15 +110,15 @@ string utf8ToAscii(const string s) { string simplified = normalizeUnicode(s, options); // Replace common Unicode characters with ASCII equivalents - static const vector> replacements{ - {regex("«|»|“|”|„|‟"), "\""}, - {regex("‘|’|‚|‛|‹|›"), "'"}, - {regex("‐|‑|‒|⁃|⁻|₋|−|➖|–|—|―|﹘|﹣|-"), "-"}, - {regex("…|⋯"), "..."}, - {regex("•"), "*"}, - {regex("†|+"), "+"}, - {regex("⁄|∕|⧸|/|/"), "/"}, - {regex("×"), "x"}, + static const vector> replacements { + { regex("«|»|“|”|„|‟"), "\"" }, + { regex("‘|’|‚|‛|‹|›"), "'" }, + { regex("‐|‑|‒|⁃|⁻|₋|−|➖|–|—|―|﹘|﹣|-"), "-" }, + { regex("…|⋯"), "..." 
}, + { regex("•"), "*" }, + { regex("†|+"), "+" }, + { regex("⁄|∕|⧸|/|/"), "/" }, + { regex("×"), "x" }, }; for (const auto& replacement : replacements) { simplified = regex_replace(simplified, replacement.first, replacement.second); @@ -137,7 +136,7 @@ string utf8ToAscii(const string s) { return result; } -string normalizeUnicode(const string s, NormalizationOptions options) { +string normalizeUnicode(const string& s, NormalizationOptions options) { char* result; const utf8proc_ssize_t charCount = utf8proc_map( reinterpret_cast(s.data()), @@ -168,23 +167,23 @@ string escapeJsonString(const string& s) { string result; for (char16_t c : utf16String) { switch (c) { - case '"': result += "\\\""; break; - case '\\': result += "\\\\"; break; - case '\b': result += "\\b"; break; - case '\f': result += "\\f"; break; - case '\n': result += "\\n"; break; - case '\r': result += "\\r"; break; - case '\t': result += "\\t"; break; - default: - { - bool needsEscaping = c < '\x20' || c >= 0x80; - if (needsEscaping) { - result += fmt::format("\\u{0:04x}", c); - } else { - result += static_cast(c); + case '"': result += "\\\""; break; + case '\\': result += "\\\\"; break; + case '\b': result += "\\b"; break; + case '\f': result += "\\f"; break; + case '\n': result += "\\n"; break; + case '\r': result += "\\r"; break; + case '\t': result += "\\t"; break; + default: + { + const bool needsEscaping = c < '\x20' || c >= 0x80; + if (needsEscaping) { + result += fmt::format("\\u{0:04x}", c); + } else { + result += static_cast(c); + } } } - } } return result; } diff --git a/rhubarb/src/tools/stringTools.h b/rhubarb/src/tools/stringTools.h index f1bd933..cf23bbf 100644 --- a/rhubarb/src/tools/stringTools.h +++ b/rhubarb/src/tools/stringTools.h @@ -1,13 +1,16 @@ #pragma once #include -#include #include #include std::vector splitIntoLines(const std::string& s); -std::vector wrapSingleLineString(const std::string& s, int lineLength, int hangingIndent = 0); +std::vector wrapSingleLineString( + const std::string& s, + int lineLength, + int hangingIndent = 0 +); std::vector wrapString(const std::string& s, int lineLength, int hangingIndent = 0); @@ -15,9 +18,7 @@ bool isValidUtf8(const std::string& s); std::wstring latin1ToWide(const std::string& s); -boost::optional toAscii(char32_t ch); - -std::string utf8ToAscii(const std::string s); +std::string utf8ToAscii(const std::string& s); enum class NormalizationOptions : int { CompatibilityMode = UTF8PROC_COMPAT, @@ -35,7 +36,7 @@ operator|(NormalizationOptions a, NormalizationOptions b) { return static_cast(static_cast(a) | static_cast(b)); } -std::string normalizeUnicode(const std::string s, NormalizationOptions options); +std::string normalizeUnicode(const std::string& s, NormalizationOptions options); template std::string join(T range, const std::string separator) { diff --git a/rhubarb/src/tools/tools.h b/rhubarb/src/tools/tools.h index 056eee0..52d8d29 100644 --- a/rhubarb/src/tools/tools.h +++ b/rhubarb/src/tools/tools.h @@ -18,8 +18,8 @@ template void for_each_adjacent( iterator_type begin, iterator_type end, - std::function>&)> f) -{ + std::function>&)> f +) { // Get the first n values iterator_type it = begin; using element_type = std::reference_wrapper; @@ -42,20 +42,28 @@ template void for_each_adjacent( iterator_type begin, iterator_type end, - std::function f) -{ - for_each_adjacent<2>(begin, end, [&](const std::deque>& args) { - f(args[0], args[1]); - }); + std::function f +) { + for_each_adjacent<2>( + begin, + end, + [&](const std::deque>& args) { + 
diff --git a/rhubarb/src/tools/tools.h b/rhubarb/src/tools/tools.h
index 056eee0..52d8d29 100644
--- a/rhubarb/src/tools/tools.h
+++ b/rhubarb/src/tools/tools.h
@@ -18,8 +18,8 @@ template
void for_each_adjacent(
    iterator_type begin,
    iterator_type end,
-    std::function>&)> f)
-{
+    std::function>&)> f
+) {
    // Get the first n values
    iterator_type it = begin;
    using element_type = std::reference_wrapper;
@@ -42,20 +42,28 @@ template
void for_each_adjacent(
    iterator_type begin,
    iterator_type end,
-    std::function f)
-{
-    for_each_adjacent<2>(begin, end, [&](const std::deque>& args) {
-        f(args[0], args[1]);
-    });
+    std::function f
+) {
+    for_each_adjacent<2>(
+        begin,
+        end,
+        [&](const std::deque>& args) {
+            f(args[0], args[1]);
+        }
+    );
}

template
void for_each_adjacent(
    iterator_type begin,
    iterator_type end,
-    std::function f)
-{
-    for_each_adjacent<3>(begin, end, [&](const std::deque>& args) {
-        f(args[0], args[1], args[2]);
-    });
+    std::function f
+) {
+    for_each_adjacent<3>(
+        begin,
+        end,
+        [&](const std::deque>& args) {
+            f(args[0], args[1], args[2]);
+        }
+    );
}
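for_each_adjacent feeds a sliding window of neighboring elements to a callback via a deque of reference_wrappers. As a rough standalone illustration of the underlying idea (not the project's template above), here is a plain pair-wise visitor; forEachPair is a hypothetical name.

#include <iostream>
#include <iterator>
#include <vector>

template<typename Iterator, typename Callback>
void forEachPair(Iterator begin, Iterator end, Callback callback) {
    if (begin == end) return;
    Iterator previous = begin;
    // Visits (0,1), (1,2), (2,3), ...
    for (Iterator current = std::next(previous); current != end; ++previous, ++current) {
        callback(*previous, *current);
    }
}

int main() {
    const std::vector<int> values { 1, 2, 3, 4 };
    forEachPair(values.begin(), values.end(), [](int a, int b) {
        std::cout << a << " -> " << b << "\n";
    });
}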
diff --git a/rhubarb/src/tools/tupleHash.h b/rhubarb/src/tools/tupleHash.h
index 36d7a05..1b27a57 100644
--- a/rhubarb/src/tools/tupleHash.h
+++ b/rhubarb/src/tools/tupleHash.h
@@ -6,13 +6,13 @@ namespace std {
    namespace {
-        template
+        template
        void hash_combine(size_t& seed, const T& value) {
            seed ^= std::hash()(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
        }

        // Recursive template code derived from Matthieu M.
-        template ::value - 1>
+        template::value - 1>
        struct HashValueImpl {
            static void apply(size_t& seed, const Tuple& tuple) {
                HashValueImpl::apply(seed, tuple);
@@ -20,7 +20,7 @@ namespace std {
            }
        };

-        template
+        template
        struct HashValueImpl {
            static void apply(size_t& seed, const Tuple& tuple) {
                hash_combine(seed, std::get<0>(tuple));
@@ -28,11 +28,11 @@ namespace std {
        };
    }

-    template
+    template
    struct hash> {
        size_t operator()(const tuple& tt) const {
            size_t seed = 0;
-            HashValueImpl >::apply(seed, tt);
+            HashValueImpl>::apply(seed, tt);
            return seed;
        }
    };
}
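tupleHash.h exists so that std::hash is specialized for std::tuple via the usual hash_combine fold (seed ^= hash + 0x9e3779b9 + (seed << 6) + (seed >> 2)), which lets tuples act as keys in unordered containers. A usage sketch, assuming the header is included as "tools/tupleHash.h" in the same style as the project's other headers:

#include "tools/tupleHash.h"

#include <string>
#include <tuple>
#include <unordered_map>

int main() {
    using Key = std::tuple<std::string, int>;
    // Without the std::hash specialization, unordered_map<Key, ...> would not compile
    std::unordered_map<Key, double> scores;
    scores[Key("alpha", 1)] = 0.5;
    scores[Key("alpha", 2)] = 0.75;
    return scores.count(Key("alpha", 1)) == 1 ? 0 : 1;
}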
diff --git a/rhubarb/tests/BoundedTimelineTests.cpp b/rhubarb/tests/BoundedTimelineTests.cpp
index 3a038de..d533c64 100644
--- a/rhubarb/tests/BoundedTimelineTests.cpp
+++ b/rhubarb/tests/BoundedTimelineTests.cpp
@@ -7,7 +7,7 @@ using boost::optional;
using std::initializer_list;

TEST(BoundedTimeline, constructors_initializeState) {
-    TimeRange range(-5_cs, 55_cs);
+    const TimeRange range(-5_cs, 55_cs);
    auto args = {
        Timed(-10_cs, 30_cs, 1),
        Timed(10_cs, 40_cs, 2),
@@ -52,7 +52,7 @@ TEST(BoundedTimeline, getRange) {
}

TEST(BoundedTimeline, setAndClear) {
-    TimeRange range(0_cs, 10_cs);
+    const TimeRange range(0_cs, 10_cs);
    BoundedTimeline timeline(range);

    // Out of range
@@ -83,8 +83,14 @@ TEST(BoundedTimeline, setAndClear) {
}

TEST(BoundedTimeline, shift) {
-    BoundedTimeline timeline(TimeRange(0_cs, 10_cs), { { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 7_cs, 9_cs, 3 } });
-    BoundedTimeline expected(TimeRange(2_cs, 12_cs), { { 3_cs, 4_cs, 1 }, { 4_cs, 7_cs, 2 }, { 9_cs, 11_cs, 3 } });
+    BoundedTimeline timeline(
+        TimeRange(0_cs, 10_cs),
+        { { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 7_cs, 9_cs, 3 } }
+    );
+    BoundedTimeline expected(
+        TimeRange(2_cs, 12_cs),
+        { { 3_cs, 4_cs, 1 }, { 4_cs, 7_cs, 2 }, { 9_cs, 11_cs, 3 } }
+    );
    timeline.shift(2_cs);
    EXPECT_EQ(expected, timeline);
}
@@ -99,9 +105,11 @@ TEST(BoundedTimeline, equality) {
    for (size_t i = 0; i < timelines.size(); ++i) {
        for (size_t j = 0; j < timelines.size(); ++j) {
            if (i == j) {
-                EXPECT_EQ(timelines[i], BoundedTimeline(timelines[j])) << "i: " << i << ", j: " << j;
+                EXPECT_EQ(timelines[i], BoundedTimeline(timelines[j]))
+                    << "i: " << i << ", j: " << j;
            } else {
-                EXPECT_NE(timelines[i], timelines[j]) << "i: " << i << ", j: " << j;
+                EXPECT_NE(timelines[i], timelines[j])
+                    << "i: " << i << ", j: " << j;
            }
        }
    }
diff --git a/rhubarb/tests/ContinuousTimelineTests.cpp b/rhubarb/tests/ContinuousTimelineTests.cpp
index e1d4806..51d484f 100644
--- a/rhubarb/tests/ContinuousTimelineTests.cpp
+++ b/rhubarb/tests/ContinuousTimelineTests.cpp
@@ -7,8 +7,8 @@ using boost::optional;
using std::initializer_list;

TEST(ContinuousTimeline, constructors_initializeState) {
-    TimeRange range(-5_cs, 55_cs);
-    int defaultValue = -1;
+    const TimeRange range(-5_cs, 55_cs);
+    const int defaultValue = -1;
    auto args = {
        Timed(-10_cs, 30_cs, 1),
        Timed(10_cs, 40_cs, 2),
@@ -49,8 +49,8 @@ TEST(ContinuousTimeline, empty) {
}

TEST(ContinuousTimeline, setAndClear) {
-    TimeRange range(0_cs, 10_cs);
-    int defaultValue = -1;
+    const TimeRange range(0_cs, 10_cs);
+    const int defaultValue = -1;
    ContinuousTimeline timeline(range, defaultValue);

    // Out of range
@@ -82,8 +82,16 @@ TEST(ContinuousTimeline, setAndClear) {
}

TEST(ContinuousTimeline, shift) {
-    ContinuousTimeline timeline(TimeRange(0_cs, 10_cs), -1, { { 1_cs, 2_cs, 1 },{ 2_cs, 5_cs, 2 },{ 7_cs, 9_cs, 3 } });
-    ContinuousTimeline expected(TimeRange(2_cs, 12_cs), -1, { { 3_cs, 4_cs, 1 },{ 4_cs, 7_cs, 2 },{ 9_cs, 11_cs, 3 } });
+    ContinuousTimeline timeline(
+        TimeRange(0_cs, 10_cs),
+        -1,
+        { { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 7_cs, 9_cs, 3 } }
+    );
+    ContinuousTimeline expected(
+        TimeRange(2_cs, 12_cs),
+        -1,
+        { { 3_cs, 4_cs, 1 }, { 4_cs, 7_cs, 2 }, { 9_cs, 11_cs, 3 } }
+    );
    timeline.shift(2_cs);
    EXPECT_EQ(expected, timeline);
}
@@ -99,7 +107,8 @@ TEST(ContinuousTimeline, equality) {
    for (size_t i = 0; i < timelines.size(); ++i) {
        for (size_t j = 0; j < timelines.size(); ++j) {
            if (i == j) {
-                EXPECT_EQ(timelines[i], ContinuousTimeline(timelines[j])) << "i: " << i << ", j: " << j;
+                EXPECT_EQ(timelines[i], ContinuousTimeline(timelines[j]))
+                    << "i: " << i << ", j: " << j;
            } else {
                EXPECT_NE(timelines[i], timelines[j]) << "i: " << i << ", j: " << j;
            }
        }
diff --git a/rhubarb/tests/LazyTests.cpp b/rhubarb/tests/LazyTests.cpp
index dd17023..cbde8fa 100644
--- a/rhubarb/tests/LazyTests.cpp
+++ b/rhubarb/tests/LazyTests.cpp
@@ -2,16 +2,15 @@
#include "tools/Lazy.h"

using namespace testing;
-using std::make_unique;

-// Not copyable, no default constrctor, movable
+// Not copyable, no default constructor, movable
struct Foo {
    const int value;

    Foo(int value) : value(value) {}
    Foo() = delete;
    Foo(const Foo&) = delete;
-    Foo& operator=(const Foo &) = delete;
+    Foo& operator=(const Foo&) = delete;
    Foo(Foo&&) = default;
    Foo& operator=(Foo&&) = default;

@@ -44,7 +43,7 @@ TEST(Lazy, constUsage) {
TEST(Lazy, copying) {
    Lazy a;
    int counter = 0;
-    auto createValue = [&] { return counter++; };
+    const auto createValue = [&] { return counter++; };
    Lazy b(createValue);
    a = b;
    EXPECT_EQ(0, counter);
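The Lazy test above pins down one property: constructing a Lazy value and copying it must not invoke the factory (the counter stays at 0). A rough sketch of such a wrapper follows; it is not the implementation in tools/Lazy.h, and the shared-cache design is just one possible way to keep copies cheap. LazySketch is a hypothetical name.

#include <functional>
#include <memory>

template<typename T>
class LazySketch {
public:
    LazySketch() = default;
    explicit LazySketch(std::function<T()> createValue) :
        createValue(std::move(createValue)) {}

    // Creates the value on first access only; assumes a factory was supplied
    T& value() {
        if (!*cache) {
            *cache = std::make_unique<T>(createValue());
        }
        return **cache;
    }

    // True once the value has been created
    explicit operator bool() const {
        return *cache != nullptr;
    }

private:
    std::function<T()> createValue;
    // shared_ptr so that copies of the same lazy object share one cached value
    std::shared_ptr<std::unique_ptr<T>> cache = std::make_shared<std::unique_ptr<T>>();
};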
" << "newElementIndex: " << newElementIndex; if (expectedValue) { EXPECT_EQ(*expectedValue, element.getValue()); @@ -261,8 +269,8 @@ TEST(Timeline, set) { } TEST(Timeline, indexer_get) { - Timeline timeline{ { 1_cs, 2_cs, 1 }, { 2_cs, 4_cs, 2 }, { 6_cs, 9_cs, 3 } }; - vector> expectedValues{ none, 1, 2, 2, none, none, 3, 3, 3 }; + Timeline timeline { { 1_cs, 2_cs, 1 }, { 2_cs, 4_cs, 2 }, { 6_cs, 9_cs, 3 } }; + vector> expectedValues { none, 1, 2, 2, none, none, 3, 3, 3 }; for (centiseconds t = 0_cs; t < 9_cs; ++t) { { optional actual = timeline[t]; @@ -294,63 +302,63 @@ TEST(Timeline, indexer_set) { } TEST(Timeline, joinAdjacent) { - Timeline timeline{ - {1_cs, 2_cs, 1}, - {2_cs, 4_cs, 2}, - {3_cs, 6_cs, 2}, - {6_cs, 7_cs, 2}, + Timeline timeline { + { 1_cs, 2_cs, 1 }, + { 2_cs, 4_cs, 2 }, + { 3_cs, 6_cs, 2 }, + { 6_cs, 7_cs, 2 }, // Gap - {8_cs, 10_cs, 2}, - {11_cs, 12_cs, 3} + { 8_cs, 10_cs, 2 }, + { 11_cs, 12_cs, 3 } }; EXPECT_EQ(6, timeline.size()); timeline.joinAdjacent(); EXPECT_EQ(4, timeline.size()); Timed expectedJoined[] = { - {1_cs, 2_cs, 1}, - {2_cs, 7_cs, 2}, + { 1_cs, 2_cs, 1 }, + { 2_cs, 7_cs, 2 }, // Gap - {8_cs, 10_cs, 2}, - {11_cs, 12_cs, 3} + { 8_cs, 10_cs, 2 }, + { 11_cs, 12_cs, 3 } }; EXPECT_THAT(timeline, ElementsAreArray(expectedJoined)); } TEST(Timeline, autoJoin) { - JoiningTimeline timeline{ - {1_cs, 2_cs, 1}, - {2_cs, 4_cs, 2}, - {3_cs, 6_cs, 2}, - {6_cs, 7_cs, 2}, + JoiningTimeline timeline { + { 1_cs, 2_cs, 1 }, + { 2_cs, 4_cs, 2 }, + { 3_cs, 6_cs, 2 }, + { 6_cs, 7_cs, 2 }, // Gap - {8_cs, 10_cs, 2}, - {11_cs, 12_cs, 3} + { 8_cs, 10_cs, 2 }, + { 11_cs, 12_cs, 3 } }; Timed expectedJoined[] = { - {1_cs, 2_cs, 1}, - {2_cs, 7_cs, 2}, + { 1_cs, 2_cs, 1 }, + { 2_cs, 7_cs, 2 }, // Gap - {8_cs, 10_cs, 2}, - {11_cs, 12_cs, 3} + { 8_cs, 10_cs, 2 }, + { 11_cs, 12_cs, 3 } }; EXPECT_EQ(4, timeline.size()); EXPECT_THAT(timeline, ElementsAreArray(expectedJoined)); } TEST(Timeline, shift) { - Timeline timeline{ { 1_cs, 2_cs, 1 },{ 2_cs, 5_cs, 2 },{ 7_cs, 9_cs, 3 } }; - Timeline expected{ { 3_cs, 4_cs, 1 },{ 4_cs, 7_cs, 2 },{ 9_cs, 11_cs, 3 } }; + Timeline timeline { { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 7_cs, 9_cs, 3 } }; + Timeline expected { { 3_cs, 4_cs, 1 }, { 4_cs, 7_cs, 2 }, { 9_cs, 11_cs, 3 } }; timeline.shift(2_cs); EXPECT_EQ(expected, timeline); } TEST(Timeline, equality) { vector> timelines = { - Timeline{}, - Timeline{ { 1_cs, 2_cs, 0 } }, - Timeline{ { 1_cs, 2_cs, 1 } }, - Timeline{ { -10_cs, 0_cs, 0 } } + Timeline {}, + Timeline { { 1_cs, 2_cs, 0 } }, + Timeline { { 1_cs, 2_cs, 1 } }, + Timeline { { -10_cs, 0_cs, 0 } } }; for (size_t i = 0; i < timelines.size(); ++i) { diff --git a/rhubarb/tests/g2pTests.cpp b/rhubarb/tests/g2pTests.cpp index e246785..68e1652 100644 --- a/rhubarb/tests/g2pTests.cpp +++ b/rhubarb/tests/g2pTests.cpp @@ -13,19 +13,20 @@ TEST(wordToPhones, basic) { // The following phones are based on actual output, *not* ideal output. 
diff --git a/rhubarb/tests/g2pTests.cpp b/rhubarb/tests/g2pTests.cpp
index e246785..68e1652 100644
--- a/rhubarb/tests/g2pTests.cpp
+++ b/rhubarb/tests/g2pTests.cpp
@@ -13,19 +13,20 @@ TEST(wordToPhones, basic) {
    // The following phones are based on actual output, *not* ideal output.
    vector>> words {
-        { "once", { Phone::AA, Phone::N, Phone::S }},
-        { "upon", { Phone::UW, Phone::P, Phone::AH, Phone::N }},
-        { "a", { Phone::AH }},
-        { "midnight", { Phone::M, Phone::IH, Phone::D, Phone::N, Phone::AY, Phone::T }},
-        { "dreary", { Phone::D, Phone::R, Phone::IY, Phone::R, Phone::IY }},
-        { "while", { Phone::W, Phone::AY, Phone::L }},
-        { "i", { Phone::IY }},
-        { "pondered", { Phone::P, Phone::AA, Phone::N, Phone::D, Phone::IY, Phone::R, Phone::EH, Phone::D }},
-        { "weak", { Phone::W, Phone::IY, Phone::K }},
-        { "and", { Phone::AE, Phone::N, Phone::D }},
-        { "weary", { Phone::W, Phone::IY, Phone::R, Phone::IY }}
+        { "once", { Phone::AA, Phone::N, Phone::S } },
+        { "upon", { Phone::UW, Phone::P, Phone::AH, Phone::N } },
+        { "a", { Phone::AH } },
+        { "midnight", { Phone::M, Phone::IH, Phone::D, Phone::N, Phone::AY, Phone::T } },
+        { "dreary", { Phone::D, Phone::R, Phone::IY, Phone::R, Phone::IY } },
+        { "while", { Phone::W, Phone::AY, Phone::L } },
+        { "i", { Phone::IY } },
+        { "pondered", { Phone::P, Phone::AA, Phone::N, Phone::D, Phone::IY, Phone::R, Phone::EH, Phone::D } },
+        { "weak", { Phone::W, Phone::IY, Phone::K } },
+        { "and", { Phone::AE, Phone::N, Phone::D } },
+        { "weary", { Phone::W, Phone::IY, Phone::R, Phone::IY } }
    };
    for (const auto& word : words) {
-        EXPECT_THAT(wordToPhones(word.first), ElementsAreArray(word.second)) << "Original word: '" << word.first << "'";
+        EXPECT_THAT(wordToPhones(word.first), ElementsAreArray(word.second))
+            << "Original word: '" << word.first << "'";
    }
}
\ No newline at end of file
diff --git a/rhubarb/tests/pairsTests.cpp b/rhubarb/tests/pairsTests.cpp
index b2eb27e..6240dad 100644
--- a/rhubarb/tests/pairsTests.cpp
+++ b/rhubarb/tests/pairsTests.cpp
@@ -3,7 +3,6 @@
using namespace testing;
using std::vector;
-using std::initializer_list;
using std::pair;

TEST(getPairs, emptyCollection) {
@@ -16,18 +15,18 @@ TEST(getPairs, oneElementCollection) {
TEST(getPairs, validCollection) {
    {
-        auto actual = getPairs(vector{ 1, 2 });
-        vector> expected{ {1, 2} };
+        const auto actual = getPairs(vector { 1, 2 });
+        const vector> expected { { 1, 2 } };
        EXPECT_THAT(actual, ElementsAreArray(expected));
    }
    {
-        auto actual = getPairs(vector{ 1, 2, 3 });
-        vector> expected{ {1, 2}, {2, 3} };
+        const auto actual = getPairs(vector { 1, 2, 3 });
+        const vector> expected { { 1, 2 }, { 2, 3 } };
        EXPECT_THAT(actual, ElementsAreArray(expected));
    }
    {
-        auto actual = getPairs(vector{ 1, 2, 3, 4 });
-        vector> expected{ {1, 2}, {2, 3}, {3, 4} };
+        const auto actual = getPairs(vector { 1, 2, 3, 4 });
+        const vector> expected { { 1, 2 }, { 2, 3 }, { 3, 4 } };
        EXPECT_THAT(actual, ElementsAreArray(expected));
    }
}
diff --git a/rhubarb/tests/stringToolsTests.cpp b/rhubarb/tests/stringToolsTests.cpp
index 722de8b..670a6f7 100644
--- a/rhubarb/tests/stringToolsTests.cpp
+++ b/rhubarb/tests/stringToolsTests.cpp
@@ -20,7 +20,8 @@ TEST(splitIntoLines, handlesEmptyElements) {
// wrapSingleLineString

TEST(wrapSingleLineString, basic) {
-    const char* lipsum = "Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua.";
+    const char* lipsum =
+        "Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua.";
    EXPECT_THAT(wrapSingleLineString(lipsum, 30), ElementsAre("Lorem ipsum dolor sit amet,", "consectetur adipisici elit,", "sed eiusmod tempor incidunt ut", "labore et dolore magna aliqua."));
}
@@ -76,8 +77,10 @@ TEST(wrapString, basic) {
// latin1ToWide

TEST(latin1ToWide, basic) {
-    string pangramLatin1 = "D\350s No\353l o\371 un z\351phyr ha\357 me v\352t de gla\347ons w\374rmiens, je d\356ne d'exquis r\364tis de boeuf au kir \340 l'a\377 d'\342ge m\373r & c\346tera!";
-    wstring pangramWide = L"Dès Noël où un zéphyr haï me vêt de glaçons würmiens, je dîne d'exquis rôtis de boeuf au kir à l'aÿ d'âge mûr & cætera!";
+    const string pangramLatin1 =
+        "D\350s No\353l o\371 un z\351phyr ha\357 me v\352t de gla\347ons w\374rmiens, je d\356ne d'exquis r\364tis de boeuf au kir \340 l'a\377 d'\342ge m\373r & c\346tera!";
+    wstring pangramWide =
+        L"Dès Noël où un zéphyr haï me vêt de glaçons würmiens, je dîne d'exquis rôtis de boeuf au kir à l'aÿ d'âge mûr & cætera!";
    EXPECT_EQ(pangramWide, latin1ToWide(pangramLatin1));
}
diff --git a/rhubarb/tests/tokenizationTests.cpp b/rhubarb/tests/tokenizationTests.cpp
index 5714706..7963066 100644
--- a/rhubarb/tests/tokenizationTests.cpp
+++ b/rhubarb/tests/tokenizationTests.cpp
@@ -40,15 +40,22 @@ TEST(tokenizeText, numbers) {
TEST(tokenizeText, abbreviations) {
    EXPECT_THAT(
-        tokenizeText("Prof. Foo lives on Dr. Dolittle Dr.", [](const string& word) { return word == "prof."; }),
+        tokenizeText(
+            "Prof. Foo lives on Dr. Dolittle Dr.",
+            [](const string& word) { return word == "prof."; }
+        ),
        ElementsAre("prof.", "foo", "lives", "on", "doctor", "dolittle", "drive")
    );
}

TEST(tokenizeText, apostrophes) {
    EXPECT_THAT(
-        tokenizeText("'Tis said he'd wish'd for a 'bus 'cause he wouldn't walk.", [](const string& word) { return word == "wouldn't"; }),
-        ElementsAreArray(vector{ "tis", "said", "he'd", "wish'd", "for", "a", "bus", "cause", "he", "wouldn't", "walk" })
+        tokenizeText(
+            "'Tis said he'd wish'd for a 'bus 'cause he wouldn't walk.",
+            [](const string& word) { return word == "wouldn't"; }
+        ),
+        ElementsAreArray(
+            vector{ "tis", "said", "he'd", "wish'd", "for", "a", "bus", "cause", "he", "wouldn't", "walk" })
    );
}
@@ -75,7 +82,7 @@ TEST(tokenizeText, wordsUseLimitedCharacters) {
        utf8::append(c, back_inserter(input));
    }

-    regex legal("^[a-z']+$");
+    const regex legal("^[a-z']+$");
    auto words = tokenizeText(input, returnTrue);
    for (const string& word : words) {
        EXPECT_TRUE(std::regex_match(word, legal)) << word;
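tokenizeText itself is not part of this diff; the last test only asserts that every emitted token matches ^[a-z']+$. As a simplified illustration of that invariant, here is a sketch that keeps ASCII letters and apostrophes, lowercases them, and splits on everything else. It ignores the dictionary callback, abbreviation expansion, and number handling of the real tokenizer; simpleTokenize and allTokensLegal are hypothetical helpers.

#include <cctype>
#include <regex>
#include <string>
#include <vector>

std::vector<std::string> simpleTokenize(const std::string& text) {
    std::vector<std::string> tokens;
    std::string current;
    for (const char c : text) {
        if (std::isalpha(static_cast<unsigned char>(c)) || c == '\'') {
            current += static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
        } else if (!current.empty()) {
            tokens.push_back(current);
            current.clear();
        }
    }
    if (!current.empty()) tokens.push_back(current);
    return tokens;
}

// Mirrors the regex check in the last test above
bool allTokensLegal(const std::vector<std::string>& tokens) {
    static const std::regex legal("^[a-z']+$");
    for (const std::string& token : tokens) {
        if (!std::regex_match(token, legal)) return false;
    }
    return true;
}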