Code cleanup

* Fix linter warnings
* Unify code formatting (illustrated below)
* Fix typos
parent 238687e33b
commit 367c645bb3
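For illustration, the "unify code formatting" part of this commit mostly wraps declarations and calls that run past the line-length limit (apparently around 100 columns; the exact limit is an assumption inferred from where lines are wrapped) so that each parameter sits on its own line and the closing parenthesis gets a line of its own. A representative before/after, taken verbatim from the animate() declaration further down in this diff:

// Before (single line, as it appears in the old version):
JoiningContinuousTimeline<Shape> animate(const BoundedTimeline<Phone>& phones, const ShapeSet& targetShapeSet);

// After (one parameter per line, closing parenthesis on its own line):
JoiningContinuousTimeline<Shape> animate(
    const BoundedTimeline<Phone>& phones,
    const ShapeSet& targetShapeSet
);

Other recurring changes visible in the diff: local variables and lambdas are marked const where possible, constructor parameters are taken by value and std::move'd, and brace-initializer lists get inner spaces ({ A, B } instead of {A, B}).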
@@ -1,7 +1,50 @@
<wpf:ResourceDictionary xml:space="preserve" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:s="clr-namespace:System;assembly=mscorlib" xmlns:ss="urn:shemas-jetbrains-com:settings-storage-xaml" xmlns:wpf="http://schemas.microsoft.com/winfx/2006/xaml/presentation">
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadAngleBracketsSpaces/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadBracesSpaces/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadChildStatementIndent/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadColonSpaces/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadCommaSpaces/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadControlBracesIndent/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadControlBracesLineBreaks/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadDeclarationBracesIndent/@EntryIndexedValue">WARNING</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadEmptyBracesLineBreaks/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadExpressionBracesIndent/@EntryIndexedValue">DO_NOT_SHOW</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadExpressionBracesLineBreaks/@EntryIndexedValue">WARNING</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadListLineBreaks/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadMemberAccessSpaces/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadNamespaceBracesIndent/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadParensLineBreaks/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadParensSpaces/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadSemicolonSpaces/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadSpacesAfterKeyword/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadSquareBracketsSpaces/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadSwitchBracesIndent/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppBadSymbolSpaces/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppClangTidyHicppUseAuto/@EntryIndexedValue">HINT</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppClangTidyMiscUnusedUsingDecls/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppClangTidyModernizePassByValue/@EntryIndexedValue">HINT</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppClangTidyModernizeRawStringLiteral/@EntryIndexedValue">HINT</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppFunctionDoesntReturnValue/@EntryIndexedValue">ERROR</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppIncorrectBlankLinesNearBraces/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppLocalVariableMayBeConst/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppMissingBlankLines/@EntryIndexedValue">WARNING</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppMissingIndent/@EntryIndexedValue">DO_NOT_SHOW</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppMissingLinebreak/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppMissingSpace/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppMultipleSpaces/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppOutdentIsOffPrevLevel/@EntryIndexedValue">DO_NOT_SHOW</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppRedundantBlankLines/@EntryIndexedValue">WARNING</s:String>
|
||||
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppRedundantLinebreak/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppRedundantSpace/@EntryIndexedValue">WARNING</s:String>
|
||||
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=CppTabsAndSpacesMismatch/@EntryIndexedValue">WARNING</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=LocalizableElement/@EntryIndexedValue">DO_NOT_SHOW</s:String>
@@ -29,9 +72,12 @@
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CppFormatting/PLACE_WHILE_ON_NEW_LINE/@EntryValue">False</s:Boolean>
|
||||
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CppFormatting/SIMPLE_EMBEDDED_STATEMENT_STYLE/@EntryValue">ON_SINGLE_LINE</s:String>
|
||||
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CppFormatting/SPACE_AFTER_CAST_EXPRESSION_PARENTHESES/@EntryValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CppFormatting/SPACE_BEFORE_INITIALIZER_BRACES/@EntryValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CppFormatting/SPACE_BEFORE_TEMPLATE_PARAMS/@EntryValue">False</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CppFormatting/SPACE_WITHIN_INITIALIZER_BRACES/@EntryValue">True</s:Boolean>
|
||||
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CppFormatting/TYPE_DECLARATION_BRACES/@EntryValue">END_OF_LINE</s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CppFormatting/WRAP_ENUMERATION_STYLE/@EntryValue">CHOP_ALWAYS</s:String>
|
||||
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CppFormatting/WRAP_LINES/@EntryValue">False</s:Boolean>
|
||||
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/ACCESSOR_DECLARATION_BRACES/@EntryValue">END_OF_LINE</s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/ACCESSOR_OWNER_DECLARATION_BRACES/@EntryValue">END_OF_LINE</s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/ALIGNMENT_TAB_FILL_STYLE/@EntryValue">USE_TABS_ONLY</s:String>
|
||||
@@ -58,6 +104,7 @@
<s:String x:Key="/Default/CodeStyle/CodeFormatting/VBFormat/ALIGNMENT_TAB_FILL_STYLE/@EntryValue">USE_TABS_ONLY</s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/CodeFormatting/XmlDocFormatter/ALIGNMENT_TAB_FILL_STYLE/@EntryValue">USE_TABS_ONLY</s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/CodeFormatting/XmlFormatter/ALIGNMENT_TAB_FILL_STYLE/@EntryValue">USE_TABS_ONLY</s:String>
|
||||
<s:Boolean x:Key="/Default/CodeStyle/CppIntroduceType/InsertTypeAlias/@EntryValue">True</s:Boolean>
|
||||
<s:String x:Key="/Default/CodeStyle/CSharpVarKeywordUsage/ForBuiltInTypes/@EntryValue">UseExplicitType</s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/CSharpVarKeywordUsage/ForSimpleTypes/@EntryValue">UseVarWhenEvident</s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Class_0020and_0020struct_0020fields/@EntryIndexedValue"><NamingElement Priority="10"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="class field" /><type Name="struct field" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="_" Style="aaBb" /></NamingElement></s:String>
|
||||
@@ -73,7 +120,7 @@
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Namespaces/@EntryIndexedValue"><NamingElement Priority="16"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="namespace" /><type Name="namespace alias" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /></NamingElement></s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Other_0020constants/@EntryIndexedValue"><NamingElement Priority="14"><Descriptor Static="True" Constexpr="Indeterminate" Const="True" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="class field" /><type Name="local variable" /><type Name="struct field" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /></NamingElement></s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Parameters/@EntryIndexedValue"><NamingElement Priority="5"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="parameter" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /></NamingElement></s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Template_0020parameters/@EntryIndexedValue"><NamingElement Priority="4"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="template parameter" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></NamingElement></s:String>
|
||||
|
||||
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Typedefs/@EntryIndexedValue"><NamingElement Priority="17"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="type alias" /><type Name="typedef" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></NamingElement></s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Union_0020members/@EntryIndexedValue"><NamingElement Priority="12"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="union member" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /></NamingElement></s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Unions/@EntryIndexedValue"><NamingElement Priority="3"><Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"><type Name="union" /></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></NamingElement></s:String>
|
||||
@@ -120,18 +167,59 @@
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=XAML_005FFIELD/@EntryIndexedValue"><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=XAML_005FRESOURCE/@EntryIndexedValue"><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></s:String>
|
||||
<s:String x:Key="/Default/Environment/Hierarchy/PsiConfigurationSettingsKey/CustomLocation/@EntryValue">C:\Users\Daniel\AppData\Local\JetBrains\Transient\ReSharperPlatformVs14\v09\SolutionCaches</s:String>
|
||||
<s:String x:Key="/Default/Environment/PerformanceGuide/SwitchBehaviour/=AutoRecoverer/@EntryIndexedValue">LIVE_MONITOR</s:String>
|
||||
<s:String x:Key="/Default/Environment/PerformanceGuide/SwitchBehaviour/=Format/@EntryIndexedValue">LIVE_MONITOR</s:String>
|
||||
<s:String x:Key="/Default/Environment/PerformanceGuide/SwitchBehaviour/=ShowAnnotations/@EntryIndexedValue">LIVE_MONITOR</s:String>
|
||||
<s:String x:Key="/Default/Environment/PerformanceGuide/SwitchBehaviour/=StartPage_002DIsDownloadRefreshEnabled/@EntryIndexedValue">LIVE_MONITOR</s:String>
|
||||
<s:String x:Key="/Default/Environment/PerformanceGuide/SwitchBehaviour/=StartPage_002DOnEnvironmentStatup/@EntryIndexedValue">LIVE_MONITOR</s:String>
|
||||
<s:String x:Key="/Default/Environment/PerformanceGuide/SwitchBehaviour/=SyncSettings/@EntryIndexedValue">LIVE_MONITOR</s:String>
|
||||
<s:String x:Key="/Default/Environment/PerformanceGuide/SwitchBehaviour/=TextEditor_002DCodeLens/@EntryIndexedValue">LIVE_MONITOR</s:String>
|
||||
<s:String x:Key="/Default/Environment/PerformanceGuide/SwitchBehaviour/=TextEditor_002DTrackChanges_002D2/@EntryIndexedValue">LIVE_MONITOR</s:String>
|
||||
<s:String x:Key="/Default/Environment/PerformanceGuide/SwitchBehaviour/=VCS/@EntryIndexedValue">LIVE_MONITOR</s:String>
|
||||
<s:String x:Key="/Default/Environment/PerformanceGuide/SwitchBehaviour/=VsBulb/@EntryIndexedValue">DO_NOTHING</s:String>
|
||||
<s:String x:Key="/Default/Environment/PerformanceGuide/SwitchBehaviour/=XAML_0020Designer/@EntryIndexedValue">LIVE_MONITOR</s:String>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EFeature_002EServices_002ECpp_002ECodeStyle_002ESettingsUpgrade_002EFunctionReturnStyleSettingsUpgrader/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EFeature_002EServices_002ECpp_002ECodeStyle_002ESettingsUpgrade_002ENamespaceIndentationSettingsUpgrader/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpKeepExistingMigration/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpPlaceEmbeddedOnSameLineMigration/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpUseContinuousIndentInsideBracesMigration/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EAddAccessorOwnerDeclarationBracesMigration/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EMigrateBlankLinesAroundFieldToBlankLinesAroundProperty/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EMigrateThisQualifierSettings/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002EFormat_002ESettingsUpgrade_002EAlignmentTabFillStyleMigration/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=allphone/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Backoff/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=badbit/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Bigram/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=bigrams/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=bitstream/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=centiseconds/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=cepstral/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=cmudict/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Codepoints/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=cont_0027d/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=deflator/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Downmix/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=downscaling/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=endian/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=failbit/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Flite/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=fourcc/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=inbetween/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=inbetweens/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Matthieu/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=pbeam/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=qwhy/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=rbegin/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=resample/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=retime/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=retimed/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=synth/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=tclap/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=timelines/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Tweens/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Unigram/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=unigrams/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Upsampling/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Viterbi/@EntryIndexedValue">True</s:Boolean>
|
||||
</wpf:ResourceDictionary>
@@ -1,26 +1,36 @@
#include "ShapeRule.h"
|
||||
#include <boost/range/adaptor/transformed.hpp>
|
||||
#include <utility>
|
||||
#include "time/ContinuousTimeline.h"
|
||||
|
||||
using boost::optional;
|
||||
using boost::adaptors::transformed;
|
||||
|
||||
template<typename T, bool AutoJoin>
|
||||
ContinuousTimeline<optional<T>, AutoJoin> boundedTimelinetoContinuousOptional(const BoundedTimeline<T, AutoJoin>& timeline) {
|
||||
return{
|
||||
timeline.getRange(), boost::none,
|
||||
timeline | transformed([](const Timed<T>& timedValue) { return Timed<optional<T>>(timedValue.getTimeRange(), timedValue.getValue()); })
|
||||
ContinuousTimeline<optional<T>, AutoJoin> boundedTimelinetoContinuousOptional(
|
||||
const BoundedTimeline<T, AutoJoin>& timeline
|
||||
) {
|
||||
return {
|
||||
timeline.getRange(),
|
||||
boost::none,
|
||||
timeline | transformed([](const Timed<T>& timedValue) {
|
||||
return Timed<optional<T>>(timedValue.getTimeRange(), timedValue.getValue());
|
||||
})
|
||||
};
|
||||
}
|
||||
|
||||
ShapeRule::ShapeRule(const ShapeSet& shapeSet, const optional<Phone>& phone, TimeRange phoneTiming) :
|
||||
shapeSet(shapeSet),
|
||||
phone(phone),
|
||||
ShapeRule::ShapeRule(
|
||||
ShapeSet shapeSet,
|
||||
optional<Phone> phone,
|
||||
TimeRange phoneTiming
|
||||
) :
|
||||
shapeSet(std::move(shapeSet)),
|
||||
phone(std::move(phone)),
|
||||
phoneTiming(phoneTiming)
|
||||
{}
|
||||
|
||||
ShapeRule ShapeRule::getInvalid() {
|
||||
return {{}, boost::none,{0_cs, 0_cs}};
|
||||
return { {}, boost::none, { 0_cs, 0_cs } };
|
||||
}
|
||||
|
||||
bool ShapeRule::operator==(const ShapeRule& rhs) const {
|
||||
@@ -43,11 +53,14 @@ ContinuousTimeline<ShapeRule> getShapeRules(const BoundedTimeline<Phone>& phones
auto continuousPhones = boundedTimelinetoContinuousOptional(phones);
|
||||
|
||||
// Create timeline of shape rules
|
||||
ContinuousTimeline<ShapeRule> shapeRules(phones.getRange(), {{Shape::X}, boost::none, {0_cs, 0_cs}});
|
||||
ContinuousTimeline<ShapeRule> shapeRules(
|
||||
phones.getRange(),
|
||||
{ { Shape::X }, boost::none, { 0_cs, 0_cs } }
|
||||
);
|
||||
centiseconds previousDuration = 0_cs;
|
||||
for (const auto& timedPhone : continuousPhones) {
|
||||
optional<Phone> phone = timedPhone.getValue();
|
||||
centiseconds duration = timedPhone.getDuration();
|
||||
const centiseconds duration = timedPhone.getDuration();
|
||||
|
||||
if (phone) {
|
||||
// Animate one phone
|
||||
@@ -59,7 +72,10 @@ ContinuousTimeline<ShapeRule> getShapeRules(const BoundedTimeline<Phone>& phones
// Copy to timeline.
|
||||
// Later shape sets may overwrite earlier ones if overlapping.
|
||||
for (const auto& timedShapeSet : phoneShapeSets) {
|
||||
shapeRules.set(timedShapeSet.getTimeRange(), ShapeRule(timedShapeSet.getValue(), phone, timedPhone.getTimeRange()));
|
||||
shapeRules.set(
|
||||
timedShapeSet.getTimeRange(),
|
||||
ShapeRule(timedShapeSet.getValue(), phone, timedPhone.getTimeRange())
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ struct ShapeRule {
boost::optional<Phone> phone;
|
||||
TimeRange phoneTiming;
|
||||
|
||||
ShapeRule(const ShapeSet& shapeSet, const boost::optional<Phone>& phone, TimeRange phoneTiming);
|
||||
ShapeRule(ShapeSet shapeSet, boost::optional<Phone> phone, TimeRange phoneTiming);
|
||||
|
||||
static ShapeRule getInvalid();
|
||||
|
||||
@@ -14,12 +14,14 @@ using std::map;
constexpr size_t shapeValueCount = static_cast<size_t>(Shape::EndSentinel);
|
||||
|
||||
Shape getBasicShape(Shape shape) {
|
||||
static constexpr array<Shape, shapeValueCount> basicShapes = make_array(A, B, C, D, E, F, B, C, A);
|
||||
static constexpr array<Shape, shapeValueCount> basicShapes =
|
||||
make_array(A, B, C, D, E, F, B, C, A);
|
||||
return basicShapes[static_cast<size_t>(shape)];
|
||||
}
|
||||
|
||||
Shape relax(Shape shape) {
|
||||
static constexpr array<Shape, shapeValueCount> relaxedShapes = make_array(A, B, B, C, C, B, X, B, X);
|
||||
static constexpr array<Shape, shapeValueCount> relaxedShapes =
|
||||
make_array(A, B, B, C, C, B, X, B, X);
|
||||
return relaxedShapes[static_cast<size_t>(shape)];
|
||||
}
|
||||
|
||||
@@ -28,7 +30,8 @@ Shape getClosestShape(Shape reference, ShapeSet shapes) {
throw std::invalid_argument("Cannot select from empty set of shapes.");
|
||||
}
|
||||
|
||||
// A matrix that for each shape contains all shapes in ascending order of effort required to move to them
|
||||
// A matrix that for each shape contains all shapes in ascending order of effort required to
|
||||
// move to them
|
||||
constexpr static array<array<Shape, shapeValueCount>, shapeValueCount> effortMatrix = make_array(
|
||||
/* A */ make_array(A, X, G, B, C, H, E, D, F),
|
||||
/* B */ make_array(B, G, A, X, C, H, E, D, F),
|
||||
@@ -38,7 +41,7 @@ Shape getClosestShape(Shape reference, ShapeSet shapes) {
/* F */ make_array(F, B, G, A, X, C, H, E, D),
|
||||
/* G */ make_array(G, B, C, H, A, X, E, D, F),
|
||||
/* H */ make_array(H, C, B, G, D, A, X, E, F), // Like C
|
||||
/* X */ make_array(X, A, G, B, C, H, E, D, F) // Like A
|
||||
/* X */ make_array(X, A, G, B, C, H, E, D, F) // Like A
|
||||
);
|
||||
|
||||
auto& closestShapes = effortMatrix.at(static_cast<size_t>(reference));
|
||||
@@ -55,107 +58,109 @@ optional<pair<Shape, TweenTiming>> getTween(Shape first, Shape second) {
// Note that most of the following rules work in one direction only.
|
||||
// That's because in animation, the mouth should usually "pop" open without inbetweens,
|
||||
// then close slowly.
|
||||
static const map<pair<Shape, Shape>, pair<Shape, TweenTiming>> lookup{
|
||||
{{D, A}, {C, TweenTiming::Early}},
|
||||
{{D, B}, {C, TweenTiming::Centered}},
|
||||
{{D, G}, {C, TweenTiming::Early}},
|
||||
{{D, X}, {C, TweenTiming::Late}},
|
||||
{{C, F}, {E, TweenTiming::Centered}}, {{F, C}, {E, TweenTiming::Centered}},
|
||||
{{D, F}, {E, TweenTiming::Centered}},
|
||||
{{H, F}, {E, TweenTiming::Late}}, {{F, H}, {E, TweenTiming::Early}}
|
||||
static const map<pair<Shape, Shape>, pair<Shape, TweenTiming>> lookup {
|
||||
{ { D, A }, { C, TweenTiming::Early } },
|
||||
{ { D, B }, { C, TweenTiming::Centered } },
|
||||
{ { D, G }, { C, TweenTiming::Early } },
|
||||
{ { D, X }, { C, TweenTiming::Late } },
|
||||
{ { C, F }, { E, TweenTiming::Centered } }, { { F, C }, { E, TweenTiming::Centered } },
|
||||
{ { D, F }, { E, TweenTiming::Centered } },
|
||||
{ { H, F }, { E, TweenTiming::Late } }, { { F, H }, { E, TweenTiming::Early } }
|
||||
};
|
||||
auto it = lookup.find({first, second});
|
||||
const auto it = lookup.find({ first, second });
|
||||
return it != lookup.end() ? it->second : optional<pair<Shape, TweenTiming>>();
|
||||
}
|
||||
|
||||
Timeline<ShapeSet> getShapeSets(Phone phone, centiseconds duration, centiseconds previousDuration) {
|
||||
// Returns a timeline with a single shape set
|
||||
auto single = [duration](ShapeSet value) {
|
||||
return Timeline<ShapeSet> {{0_cs, duration, value}};
|
||||
const auto single = [duration](ShapeSet value) {
|
||||
return Timeline<ShapeSet> { { 0_cs, duration, value } };
|
||||
};
|
||||
|
||||
// Returns a timeline with two shape sets, timed as a diphthong
|
||||
auto diphthong = [duration](ShapeSet first, ShapeSet second) {
|
||||
centiseconds firstDuration = duration_cast<centiseconds>(duration * 0.6);
|
||||
const auto diphthong = [duration](ShapeSet first, ShapeSet second) {
|
||||
const centiseconds firstDuration = duration_cast<centiseconds>(duration * 0.6);
|
||||
return Timeline<ShapeSet> {
|
||||
{0_cs, firstDuration, first},
|
||||
{firstDuration, duration, second}
|
||||
{ 0_cs, firstDuration, first },
|
||||
{ firstDuration, duration, second }
|
||||
};
|
||||
};
|
||||
|
||||
// Returns a timeline with two shape sets, timed as a plosive
|
||||
auto plosive = [duration, previousDuration](ShapeSet first, ShapeSet second) {
|
||||
centiseconds minOcclusionDuration = 4_cs;
|
||||
centiseconds maxOcclusionDuration = 12_cs;
|
||||
centiseconds occlusionDuration = clamp(previousDuration / 2, minOcclusionDuration, maxOcclusionDuration);
|
||||
const auto plosive = [duration, previousDuration](ShapeSet first, ShapeSet second) {
|
||||
const centiseconds minOcclusionDuration = 4_cs;
|
||||
const centiseconds maxOcclusionDuration = 12_cs;
|
||||
const centiseconds occlusionDuration =
|
||||
clamp(previousDuration / 2, minOcclusionDuration, maxOcclusionDuration);
|
||||
return Timeline<ShapeSet> {
|
||||
{-occlusionDuration, 0_cs, first},
|
||||
{0_cs, duration, second}
|
||||
{ -occlusionDuration, 0_cs, first },
|
||||
{ 0_cs, duration, second }
|
||||
};
|
||||
};
|
||||
|
||||
// Returns the result of `getShapeSets` when called with identical arguments
|
||||
// except for a different phone.
|
||||
auto like = [duration, previousDuration](Phone referencePhone) {
|
||||
const auto like = [duration, previousDuration](Phone referencePhone) {
|
||||
return getShapeSets(referencePhone, duration, previousDuration);
|
||||
};
|
||||
|
||||
static const ShapeSet any{A, B, C, D, E, F, G, H, X};
|
||||
static const ShapeSet anyOpen{B, C, D, E, F, G, H};
|
||||
static const ShapeSet any { A, B, C, D, E, F, G, H, X };
|
||||
static const ShapeSet anyOpen { B, C, D, E, F, G, H };
|
||||
|
||||
// Note:
|
||||
// The shapes {A, B, G, X} are very similar. You should avoid regular shape sets containing more than one of these shapes.
|
||||
// The shapes {A, B, G, X} are very similar. You should avoid regular shape sets containing more
|
||||
// than one of these shapes.
|
||||
// Otherwise, the resulting shape may be more or less random and might not be a good fit.
|
||||
// As an exception, a very flexible rule may contain *all* these shapes.
|
||||
|
||||
switch (phone) {
|
||||
case Phone::AO: return single({E});
|
||||
case Phone::AA: return single({D});
|
||||
case Phone::IY: return single({B});
|
||||
case Phone::UW: return single({F});
|
||||
case Phone::EH: return single({C});
|
||||
case Phone::IH: return single({B});
|
||||
case Phone::UH: return single({F});
|
||||
case Phone::AH: return duration < 20_cs ? single({C}) : single({D});
|
||||
case Phone::Schwa: return single({B, C});
|
||||
case Phone::AE: return single({C});
|
||||
case Phone::EY: return diphthong({C}, {B});
|
||||
case Phone::AY: return duration < 20_cs ? diphthong({C}, {B}) : diphthong({D}, {B});
|
||||
case Phone::OW: return single({F});
|
||||
case Phone::AW: return duration < 30_cs ? diphthong({C}, {E}) : diphthong({D}, {E});
|
||||
case Phone::OY: return diphthong({E}, {B});
|
||||
case Phone::ER: return duration < 7_cs ? like(Phone::Schwa) : single({E});
|
||||
case Phone::AO: return single({ E });
|
||||
case Phone::AA: return single({ D });
|
||||
case Phone::IY: return single({ B });
|
||||
case Phone::UW: return single({ F });
|
||||
case Phone::EH: return single({ C });
|
||||
case Phone::IH: return single({ B });
|
||||
case Phone::UH: return single({ F });
|
||||
case Phone::AH: return duration < 20_cs ? single({ C }) : single({ D });
|
||||
case Phone::Schwa: return single({ B, C });
|
||||
case Phone::AE: return single({ C });
|
||||
case Phone::EY: return diphthong({ C }, { B });
|
||||
case Phone::AY: return duration < 20_cs ? diphthong({ C }, { B }) : diphthong({ D }, { B });
|
||||
case Phone::OW: return single({ F });
|
||||
case Phone::AW: return duration < 30_cs ? diphthong({ C }, { E }) : diphthong({ D }, { E });
|
||||
case Phone::OY: return diphthong({ E }, { B });
|
||||
case Phone::ER: return duration < 7_cs ? like(Phone::Schwa) : single({ E });
|
||||
|
||||
case Phone::P:
|
||||
case Phone::B: return plosive({A}, any);
|
||||
case Phone::T:
|
||||
case Phone::D: return plosive({B, F}, anyOpen);
|
||||
case Phone::K:
|
||||
case Phone::G: return plosive({B, C, E, F, H}, anyOpen);
|
||||
case Phone::CH:
|
||||
case Phone::JH: return single({B, F});
|
||||
case Phone::F:
|
||||
case Phone::V: return single({G});
|
||||
case Phone::TH:
|
||||
case Phone::DH:
|
||||
case Phone::S:
|
||||
case Phone::Z:
|
||||
case Phone::SH:
|
||||
case Phone::ZH: return single({B, F});
|
||||
case Phone::HH: return single(any); // think "m-hm"
|
||||
case Phone::M: return single({A});
|
||||
case Phone::N: return single({B, C, F, H});
|
||||
case Phone::NG: return single({B, C, E, F});
|
||||
case Phone::L: return duration < 20_cs ? single({B, E, F, H}) : single({H});
|
||||
case Phone::R: return single({B, E, F});
|
||||
case Phone::Y: return single({B, C, F});
|
||||
case Phone::W: return single({F});
|
||||
case Phone::P:
|
||||
case Phone::B: return plosive({ A }, any);
|
||||
case Phone::T:
|
||||
case Phone::D: return plosive({ B, F }, anyOpen);
|
||||
case Phone::K:
|
||||
case Phone::G: return plosive({ B, C, E, F, H }, anyOpen);
|
||||
case Phone::CH:
|
||||
case Phone::JH: return single({ B, F });
|
||||
case Phone::F:
|
||||
case Phone::V: return single({ G });
|
||||
case Phone::TH:
|
||||
case Phone::DH:
|
||||
case Phone::S:
|
||||
case Phone::Z:
|
||||
case Phone::SH:
|
||||
case Phone::ZH: return single({ B, F });
|
||||
case Phone::HH: return single(any); // think "m-hm"
|
||||
case Phone::M: return single({ A });
|
||||
case Phone::N: return single({ B, C, F, H });
|
||||
case Phone::NG: return single({ B, C, E, F });
|
||||
case Phone::L: return duration < 20_cs ? single({ B, E, F, H }) : single({ H });
|
||||
case Phone::R: return single({ B, E, F });
|
||||
case Phone::Y: return single({ B, C, F });
|
||||
case Phone::W: return single({ F });
|
||||
|
||||
case Phone::Breath:
|
||||
case Phone::Cough:
|
||||
case Phone::Smack: return single({C});
|
||||
case Phone::Noise: return single({B});
|
||||
case Phone::Breath:
|
||||
case Phone::Cough:
|
||||
case Phone::Smack: return single({ C });
|
||||
case Phone::Noise: return single({ B });
|
||||
|
||||
default: throw std::invalid_argument("Unexpected phone.");
|
||||
default: throw std::invalid_argument("Unexpected phone.");
|
||||
}
|
||||
}
|
||||
@@ -31,5 +31,6 @@ boost::optional<std::pair<Shape, TweenTiming>> getTween(Shape first, Shape secon
|
||||
// Returns the shape set(s) to use for a given phone.
|
||||
// The resulting timeline will always cover the entire duration of the phone (starting at 0 cs).
|
||||
// It may extend into the negative time range if animation is required prior to the sound being heard.
|
||||
// It may extend into the negative time range if animation is required prior to the sound being
|
||||
// heard.
|
||||
Timeline<ShapeSet> getShapeSets(Phone phone, centiseconds duration, centiseconds previousDuration);
|
||||
@@ -8,17 +8,21 @@
#include "targetShapeSet.h"
|
||||
#include "staticSegments.h"
|
||||
|
||||
JoiningContinuousTimeline<Shape> animate(const BoundedTimeline<Phone> &phones, const ShapeSet& targetShapeSet) {
|
||||
JoiningContinuousTimeline<Shape> animate(
|
||||
const BoundedTimeline<Phone>& phones,
|
||||
const ShapeSet& targetShapeSet
|
||||
) {
|
||||
// Create timeline of shape rules
|
||||
ContinuousTimeline<ShapeRule> shapeRules = getShapeRules(phones);
|
||||
|
||||
// Modify shape rules to only contain allowed shapes -- plus X, which is needed for pauses and will be replaced later
|
||||
// Modify shape rules to only contain allowed shapes -- plus X, which is needed for pauses and
|
||||
// will be replaced later
|
||||
ShapeSet targetShapeSetPlusX = targetShapeSet;
|
||||
targetShapeSetPlusX.insert(Shape::X);
|
||||
shapeRules = convertToTargetShapeSet(shapeRules, targetShapeSetPlusX);
|
||||
|
||||
// Animate in multiple steps
|
||||
auto performMainAnimationSteps = [&targetShapeSet](const auto& shapeRules) {
|
||||
const auto performMainAnimationSteps = [&targetShapeSet](const auto& shapeRules) {
|
||||
JoiningContinuousTimeline<Shape> animation = animateRough(shapeRules);
|
||||
animation = optimizeTiming(animation);
|
||||
animation = animatePauses(animation);
|
||||
@@ -26,7 +30,8 @@ JoiningContinuousTimeline<Shape> animate(const BoundedTimeline<Phone> &phones, c
animation = convertToTargetShapeSet(animation, targetShapeSet);
|
||||
return animation;
|
||||
};
|
||||
const JoiningContinuousTimeline<Shape> result = avoidStaticSegments(shapeRules, performMainAnimationSteps);
|
||||
const JoiningContinuousTimeline<Shape> result =
|
||||
avoidStaticSegments(shapeRules, performMainAnimationSteps);
|
||||
|
||||
for (const auto& timedShape : result) {
|
||||
logTimedEvent("shape", timedShape);
|
||||
@@ -5,4 +5,7 @@
#include "time/ContinuousTimeline.h"
|
||||
#include "targetShapeSet.h"
|
||||
|
||||
JoiningContinuousTimeline<Shape> animate(const BoundedTimeline<Phone>& phones, const ShapeSet& targetShapeSet);
|
||||
JoiningContinuousTimeline<Shape> animate(
|
||||
const BoundedTimeline<Phone>& phones,
|
||||
const ShapeSet& targetShapeSet
|
||||
);
|
||||
@@ -12,7 +12,7 @@ Shape getPauseShape(Shape previous, Shape next, centiseconds duration) {
// It looks odd if the pause shape is identical to the next shape.
|
||||
// Make sure we find a relaxed shape that's different from the next one.
|
||||
for (Shape currentRelaxedShape = previous;;) {
|
||||
Shape nextRelaxedShape = relax(currentRelaxedShape);
|
||||
const Shape nextRelaxedShape = relax(currentRelaxedShape);
|
||||
if (nextRelaxedShape != next) {
|
||||
return nextRelaxedShape;
|
||||
}
|
||||
@@ -31,11 +31,18 @@ Shape getPauseShape(Shape previous, Shape next, centiseconds duration) {
JoiningContinuousTimeline<Shape> animatePauses(const JoiningContinuousTimeline<Shape>& animation) {
|
||||
JoiningContinuousTimeline<Shape> result(animation);
|
||||
|
||||
for_each_adjacent(animation.begin(), animation.end(), [&](const Timed<Shape>& previous, const Timed<Shape>& pause, const Timed<Shape>& next) {
|
||||
if (pause.getValue() != Shape::X) return;
|
||||
for_each_adjacent(
|
||||
animation.begin(),
|
||||
animation.end(),
|
||||
[&](const Timed<Shape>& previous, const Timed<Shape>& pause, const Timed<Shape>& next) {
|
||||
if (pause.getValue() != Shape::X) return;
|
||||
|
||||
result.set(pause.getTimeRange(), getPauseShape(previous.getValue(), next.getValue(), pause.getDuration()));
|
||||
});
|
||||
result.set(
|
||||
pause.getTimeRange(),
|
||||
getPauseShape(previous.getValue(), next.getValue(), pause.getDuration())
|
||||
);
|
||||
}
|
||||
);
|
||||
|
||||
return result;
|
||||
}
|
||||
@@ -1,16 +1,17 @@
#include "roughAnimation.h"
|
||||
#include <boost/optional.hpp>
|
||||
|
||||
using boost::optional;
|
||||
|
||||
// Create timeline of shapes using a bidirectional algorithm.
|
||||
// Here's a rough sketch:
|
||||
//
|
||||
// * Most consonants result in shape sets with multiple options; most vowels have only one shape option.
|
||||
// * Most consonants result in shape sets with multiple options; most vowels have only one shape
|
||||
// option.
|
||||
// * When speaking, we tend to slur mouth shapes into each other. So we animate from start to end,
|
||||
// always choosing a shape from the current set that resembles the last shape and is somewhat relaxed.
|
||||
// always choosing a shape from the current set that resembles the last shape and is somewhat
|
||||
// relaxed.
|
||||
// * When speaking, we anticipate vowels, trying to form their shape before the actual vowel.
|
||||
// So whenever we come across a one-shape vowel, we backtrack a little, spreating that shape to the left.
|
||||
// So whenever we come across a one-shape vowel, we backtrack a little, spreading that shape to
|
||||
// the left.
|
||||
JoiningContinuousTimeline<Shape> animateRough(const ContinuousTimeline<ShapeRule>& shapeRules) {
|
||||
JoiningContinuousTimeline<Shape> animation(shapeRules.getRange(), Shape::X);
|
||||
|
||||
@@ -21,24 +22,28 @@ JoiningContinuousTimeline<Shape> animateRough(const ContinuousTimeline<ShapeRule
const ShapeRule shapeRule = it->getValue();
|
||||
const Shape shape = getClosestShape(referenceShape, shapeRule.shapeSet);
|
||||
animation.set(it->getTimeRange(), shape);
|
||||
const bool anticipateShape = shapeRule.phone && isVowel(*shapeRule.phone) && shapeRule.shapeSet.size() == 1;
|
||||
const bool anticipateShape = shapeRule.phone
|
||||
&& isVowel(*shapeRule.phone)
|
||||
&& shapeRule.shapeSet.size() == 1;
|
||||
if (anticipateShape) {
|
||||
// Animate backwards a little
|
||||
const Shape anticipatedShape = shape;
|
||||
const centiseconds anticipatedShapeStart = it->getStart();
|
||||
referenceShape = anticipatedShape;
|
||||
for (auto reverseIt = it; reverseIt != shapeRules.begin(); ) {
|
||||
for (auto reverseIt = it; reverseIt != shapeRules.begin();) {
|
||||
--reverseIt;
|
||||
|
||||
// Make sure we haven't animated too far back
|
||||
centiseconds anticipatingShapeStart = reverseIt->getStart();
|
||||
if (anticipatingShapeStart == lastAnticipatedShapeStart) break;
|
||||
const centiseconds maxAnticipationDuration = 20_cs;
|
||||
const centiseconds anticipationDuration = anticipatedShapeStart - anticipatingShapeStart;
|
||||
const centiseconds anticipationDuration =
|
||||
anticipatedShapeStart - anticipatingShapeStart;
|
||||
if (anticipationDuration > maxAnticipationDuration) break;
|
||||
|
||||
// Overwrite forward-animated shape with backwards-animated, anticipating shape
|
||||
const Shape anticipatingShape = getClosestShape(referenceShape, reverseIt->getValue().shapeSet);
|
||||
const Shape anticipatingShape =
|
||||
getClosestShape(referenceShape, reverseIt->getValue().shapeSet);
|
||||
animation.set(reverseIt->getTimeRange(), anticipatingShape);
|
||||
|
||||
// Make sure the new, backwards-animated shape still resembles the anticipated shape
|
||||
@@ -2,5 +2,6 @@
|
||||
#include "ShapeRule.h"
|
||||
|
||||
// Does a rough animation (no tweening, special pause animation, etc.) using a bidirectional algorithm.
|
||||
// Does a rough animation (no tweening, special pause animation, etc.) using a bidirectional
|
||||
// algorithm.
|
||||
JoiningContinuousTimeline<Shape> animateRough(const ContinuousTimeline<ShapeRule>& shapeRules);
|
||||
@@ -4,7 +4,6 @@
#include "tools/nextCombination.h"
|
||||
|
||||
using std::vector;
|
||||
using boost::optional;
|
||||
|
||||
int getSyllableCount(const ContinuousTimeline<ShapeRule>& shapeRules, TimeRange timeRange) {
|
||||
if (timeRange.empty()) return 0;
|
||||
@@ -31,16 +30,22 @@ int getSyllableCount(const ContinuousTimeline<ShapeRule>& shapeRules, TimeRange
}
|
||||
|
||||
// A static segment is a prolonged period during which the mouth shape doesn't change
|
||||
vector<TimeRange> getStaticSegments(const ContinuousTimeline<ShapeRule>& shapeRules, const JoiningContinuousTimeline<Shape>& animation) {
|
||||
vector<TimeRange> getStaticSegments(
|
||||
const ContinuousTimeline<ShapeRule>& shapeRules,
|
||||
const JoiningContinuousTimeline<Shape>& animation
|
||||
) {
|
||||
// A static segment must contain a certain number of syllables to look distractingly static
|
||||
const int minSyllableCount = 3;
|
||||
// It must also have a minimum duration. The same number of syllables in fast speech usually looks good.
|
||||
// It must also have a minimum duration. The same number of syllables in fast speech usually
|
||||
// looks good.
|
||||
const centiseconds minDuration = 75_cs;
|
||||
|
||||
vector<TimeRange> result;
|
||||
for (const auto& timedShape : animation) {
|
||||
const TimeRange timeRange = timedShape.getTimeRange();
|
||||
if (timeRange.getDuration() >= minDuration && getSyllableCount(shapeRules, timeRange) >= minSyllableCount) {
|
||||
const bool isStatic = timeRange.getDuration() >= minDuration
|
||||
&& getSyllableCount(shapeRules, timeRange) >= minSyllableCount;
|
||||
if (isStatic) {
|
||||
result.push_back(timeRange);
|
||||
}
|
||||
}
|
||||
@@ -48,20 +53,22 @@ vector<TimeRange> getStaticSegments(const ContinuousTimeline<ShapeRule>& shapeRu
return result;
|
||||
}
|
||||
|
||||
// Indicates whether this shape rule can potentially be replaced by a modified version that breaks up long static segments
|
||||
// Indicates whether this shape rule can potentially be replaced by a modified version that breaks
|
||||
// up long static segments
|
||||
bool canChange(const ShapeRule& rule) {
|
||||
return rule.phone && isVowel(*rule.phone) && rule.shapeSet.size() == 1;
|
||||
}
|
||||
|
||||
// Returns a new shape rule that is identical to the specified one, except that it leads to a slightly different visualization
|
||||
// Returns a new shape rule that is identical to the specified one, except that it leads to a
|
||||
// slightly different visualization
|
||||
ShapeRule getChangedShapeRule(const ShapeRule& rule) {
|
||||
assert(canChange(rule));
|
||||
|
||||
ShapeRule result(rule);
|
||||
// So far, I've only encountered B as a static shape.
|
||||
// If there is ever a problem with another static shape, this function can easily be extended.
|
||||
if (rule.shapeSet == ShapeSet{Shape::B}) {
|
||||
result.shapeSet = {Shape::C};
|
||||
if (rule.shapeSet == ShapeSet { Shape::B }) {
|
||||
result.shapeSet = { Shape::C };
|
||||
}
|
||||
return result;
|
||||
}
|
||||
@@ -70,7 +77,10 @@ ShapeRule getChangedShapeRule(const ShapeRule& rule) {
using RuleChanges = vector<centiseconds>;
|
||||
|
||||
// Replaces the indicated shape rules with slightly different ones, breaking up long static segments
|
||||
ContinuousTimeline<ShapeRule> applyChanges(const ContinuousTimeline<ShapeRule>& shapeRules, const RuleChanges& changes) {
|
||||
ContinuousTimeline<ShapeRule> applyChanges(
|
||||
const ContinuousTimeline<ShapeRule>& shapeRules,
|
||||
const RuleChanges& changes
|
||||
) {
|
||||
ContinuousTimeline<ShapeRule> result(shapeRules);
|
||||
for (centiseconds changedRuleStart : changes) {
|
||||
const Timed<ShapeRule> timedOriginalRule = *shapeRules.get(changedRuleStart);
|
||||
@@ -85,14 +95,16 @@ public:
RuleChangeScenario(
|
||||
const ContinuousTimeline<ShapeRule>& originalRules,
|
||||
const RuleChanges& changes,
|
||||
AnimationFunction animate) :
|
||||
const AnimationFunction& animate
|
||||
) :
|
||||
changedRules(applyChanges(originalRules, changes)),
|
||||
animation(animate(changedRules)),
|
||||
staticSegments(getStaticSegments(changedRules, animation)) {}
|
||||
staticSegments(getStaticSegments(changedRules, animation))
|
||||
{}
|
||||
|
||||
bool isBetterThan(const RuleChangeScenario& rhs) const {
|
||||
// We want zero static segments
|
||||
if (staticSegments.size() == 0 && rhs.staticSegments.size() > 0) return true;
|
||||
if (staticSegments.empty() && !rhs.staticSegments.empty()) return true;
|
||||
|
||||
// Short shapes are better than long ones. Minimize sum-of-squares.
|
||||
if (getSumOfShapeDurationSquares() < rhs.getSumOfShapeDurationSquares()) return true;
|
||||
@@ -114,10 +126,17 @@ private:
vector<TimeRange> staticSegments;
|
||||
|
||||
double getSumOfShapeDurationSquares() const {
|
||||
return std::accumulate(animation.begin(), animation.end(), 0.0, [](const double sum, const Timed<Shape>& timedShape) {
|
||||
const double duration = std::chrono::duration_cast<std::chrono::duration<double>>(timedShape.getDuration()).count();
|
||||
return sum + duration * duration;
|
||||
});
|
||||
return std::accumulate(
|
||||
animation.begin(),
|
||||
animation.end(),
|
||||
0.0,
|
||||
[](const double sum, const Timed<Shape>& timedShape) {
|
||||
const double duration = std::chrono::duration_cast<std::chrono::duration<double>>(
|
||||
timedShape.getDuration()
|
||||
).count();
|
||||
return sum + duration * duration;
|
||||
}
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -132,8 +151,12 @@ RuleChanges getPossibleRuleChanges(const ContinuousTimeline<ShapeRule>& shapeRul
return result;
|
||||
}
|
||||
|
||||
ContinuousTimeline<ShapeRule> fixStaticSegmentRules(const ContinuousTimeline<ShapeRule>& shapeRules, AnimationFunction animate) {
|
||||
// The complexity of this function is exponential with the number of replacements. So let's cap that value.
|
||||
ContinuousTimeline<ShapeRule> fixStaticSegmentRules(
|
||||
const ContinuousTimeline<ShapeRule>& shapeRules,
|
||||
const AnimationFunction& animate
|
||||
) {
|
||||
// The complexity of this function is exponential with the number of replacements.
|
||||
// So let's cap that value.
|
||||
const int maxReplacementCount = 3;
|
||||
|
||||
// All potential changes
|
||||
@@ -142,14 +165,18 @@ ContinuousTimeline<ShapeRule> fixStaticSegmentRules(const ContinuousTimeline<Sha
// Find best solution. Start with a single replacement, then increase as necessary.
|
||||
RuleChangeScenario bestScenario(shapeRules, {}, animate);
|
||||
for (
|
||||
int replacementCount = 1;
|
||||
bestScenario.getStaticSegmentCount() > 0 && replacementCount <= std::min(static_cast<int>(possibleRuleChanges.size()), maxReplacementCount);
|
||||
++replacementCount
|
||||
) {
|
||||
int replacementCount = 1;
|
||||
bestScenario.getStaticSegmentCount() > 0 && replacementCount <= std::min(static_cast<int>(possibleRuleChanges.size()), maxReplacementCount);
|
||||
++replacementCount
|
||||
) {
|
||||
// Only the first <replacementCount> elements of `currentRuleChanges` count
|
||||
auto currentRuleChanges(possibleRuleChanges);
|
||||
do {
|
||||
RuleChangeScenario currentScenario(shapeRules, {currentRuleChanges.begin(), currentRuleChanges.begin() + replacementCount}, animate);
|
||||
RuleChangeScenario currentScenario(
|
||||
shapeRules,
|
||||
{ currentRuleChanges.begin(), currentRuleChanges.begin() + replacementCount },
|
||||
animate
|
||||
);
|
||||
if (currentScenario.isBetterThan(bestScenario)) {
|
||||
bestScenario = currentScenario;
|
||||
}
|
||||
@@ -164,8 +191,12 @@ bool isFlexible(const ShapeRule& rule) {
return rule.shapeSet.size() > 1;
|
||||
}
|
||||
|
||||
// Extends the specified time range until it starts and ends with a non-flexible shape rule, if possible
|
||||
TimeRange extendToFixedRules(const TimeRange& timeRange, const ContinuousTimeline<ShapeRule>& shapeRules) {
|
||||
// Extends the specified time range until it starts and ends with a non-flexible shape rule, if
|
||||
// possible
|
||||
TimeRange extendToFixedRules(
|
||||
const TimeRange& timeRange,
|
||||
const ContinuousTimeline<ShapeRule>& shapeRules
|
||||
) {
|
||||
auto first = shapeRules.find(timeRange.getStart());
|
||||
while (first != shapeRules.begin() && isFlexible(first->getValue())) {
|
||||
--first;
|
||||
@@ -174,10 +205,13 @@ TimeRange extendToFixedRules(const TimeRange& timeRange, const ContinuousTimelin
while (std::next(last) != shapeRules.end() && isFlexible(last->getValue())) {
|
||||
++last;
|
||||
}
|
||||
return TimeRange(first->getStart(), last->getEnd());
|
||||
return { first->getStart(), last->getEnd() };
|
||||
}
|
||||
|
||||
JoiningContinuousTimeline<Shape> avoidStaticSegments(const ContinuousTimeline<ShapeRule>& shapeRules, AnimationFunction animate) {
|
||||
JoiningContinuousTimeline<Shape> avoidStaticSegments(
|
||||
const ContinuousTimeline<ShapeRule>& shapeRules,
|
||||
const AnimationFunction& animate
|
||||
) {
|
||||
const auto animation = animate(shapeRules);
|
||||
const vector<TimeRange> staticSegments = getStaticSegments(shapeRules, animation);
|
||||
if (staticSegments.empty()) {
|
||||
@@ -187,11 +221,15 @@ JoiningContinuousTimeline<Shape> avoidStaticSegments(const ContinuousTimeline<Sh
// Modify shape rules to eliminate static segments
|
||||
ContinuousTimeline<ShapeRule> fixedShapeRules(shapeRules);
|
||||
for (const TimeRange& staticSegment : staticSegments) {
|
||||
// Extend time range to the left and right so we don't lose adjacent rules that might influence the animation
|
||||
// Extend time range to the left and right so we don't lose adjacent rules that might
|
||||
// influence the animation
|
||||
const TimeRange extendedStaticSegment = extendToFixedRules(staticSegment, shapeRules);
|
||||
|
||||
// Fix shape rules within the static segment
|
||||
const auto fixedSegmentShapeRules = fixStaticSegmentRules({extendedStaticSegment, ShapeRule::getInvalid(), fixedShapeRules}, animate);
|
||||
const auto fixedSegmentShapeRules = fixStaticSegmentRules(
|
||||
{ extendedStaticSegment, ShapeRule::getInvalid(), fixedShapeRules },
|
||||
animate
|
||||
);
|
||||
for (const auto& timedShapeRule : fixedSegmentShapeRules) {
|
||||
fixedShapeRules.set(timedShapeRule);
|
||||
}
|
||||
@@ -8,7 +8,11 @@
using AnimationFunction = std::function<JoiningContinuousTimeline<Shape>(const ContinuousTimeline<ShapeRule>&)>;
|
||||
|
||||
// Calls the specified animation function with the specified shape rules.
|
||||
// If the resulting animation contains long static segments, the shape rules are tweaked and animated again.
|
||||
// If the resulting animation contains long static segments, the shape rules are tweaked and
|
||||
// animated again.
|
||||
// Static segments happen rather often.
|
||||
// See http://animateducated.blogspot.de/2016/10/lip-sync-animation-2.html?showComment=1478861729702#c2940729096183546458.
|
||||
JoiningContinuousTimeline<Shape> avoidStaticSegments(const ContinuousTimeline<ShapeRule>& shapeRules, AnimationFunction animate);
|
||||
JoiningContinuousTimeline<Shape> avoidStaticSegments(
|
||||
const ContinuousTimeline<ShapeRule>& shapeRules,
|
||||
const AnimationFunction& animate
|
||||
);
|
||||
@@ -4,9 +4,10 @@ Shape convertToTargetShapeSet(Shape shape, const ShapeSet& targetShapeSet) {
if (targetShapeSet.find(shape) != targetShapeSet.end()) {
|
||||
return shape;
|
||||
}
|
||||
Shape basicShape = getBasicShape(shape);
|
||||
const Shape basicShape = getBasicShape(shape);
|
||||
if (targetShapeSet.find(basicShape) == targetShapeSet.end()) {
|
||||
throw std::invalid_argument(fmt::format("Target shape set must contain basic shape {}.", basicShape));
|
||||
throw std::invalid_argument(
|
||||
fmt::format("Target shape set must contain basic shape {}.", basicShape));
|
||||
}
|
||||
return basicShape;
|
||||
}
|
||||
@@ -19,7 +20,10 @@ ShapeSet convertToTargetShapeSet(const ShapeSet& shapes, const ShapeSet& targetS
return result;
|
||||
}
|
||||
|
||||
ContinuousTimeline<ShapeRule> convertToTargetShapeSet(const ContinuousTimeline<ShapeRule>& shapeRules, const ShapeSet& targetShapeSet) {
|
||||
ContinuousTimeline<ShapeRule> convertToTargetShapeSet(
|
||||
const ContinuousTimeline<ShapeRule>& shapeRules,
|
||||
const ShapeSet& targetShapeSet
|
||||
) {
|
||||
ContinuousTimeline<ShapeRule> result(shapeRules);
|
||||
for (const auto& timedShapeRule : shapeRules) {
|
||||
ShapeRule rule = timedShapeRule.getValue();
|
||||
@@ -29,10 +33,16 @@ ContinuousTimeline<ShapeRule> convertToTargetShapeSet(const ContinuousTimeline<S
return result;
|
||||
}
|
||||
|
||||
JoiningContinuousTimeline<Shape> convertToTargetShapeSet(const JoiningContinuousTimeline<Shape>& animation, const ShapeSet& targetShapeSet) {
|
||||
JoiningContinuousTimeline<Shape> convertToTargetShapeSet(
|
||||
const JoiningContinuousTimeline<Shape>& animation,
|
||||
const ShapeSet& targetShapeSet
|
||||
) {
|
||||
JoiningContinuousTimeline<Shape> result(animation);
|
||||
for (const auto& timedShape : animation) {
|
||||
result.set(timedShape.getTimeRange(), convertToTargetShapeSet(timedShape.getValue(), targetShapeSet));
|
||||
result.set(
|
||||
timedShape.getTimeRange(),
|
||||
convertToTargetShapeSet(timedShape.getValue(), targetShapeSet)
|
||||
);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
@@ -6,11 +6,19 @@
// Returns the closest shape to the specified one that occurs in the target shape set.
|
||||
Shape convertToTargetShapeSet(Shape shape, const ShapeSet& targetShapeSet);
|
||||
|
||||
// Replaces each shape in the specified set with the closest shape that occurs in the target shape set.
|
||||
// Replaces each shape in the specified set with the closest shape that occurs in the target shape
|
||||
// set.
|
||||
ShapeSet convertToTargetShapeSet(const ShapeSet& shapes, const ShapeSet& targetShapeSet);
|
||||
|
||||
// Replaces each shape in each rule with the closest shape that occurs in the target shape set.
|
||||
ContinuousTimeline<ShapeRule> convertToTargetShapeSet(const ContinuousTimeline<ShapeRule>& shapeRules, const ShapeSet& targetShapeSet);
|
||||
ContinuousTimeline<ShapeRule> convertToTargetShapeSet(
|
||||
const ContinuousTimeline<ShapeRule>& shapeRules,
|
||||
const ShapeSet& targetShapeSet
|
||||
);
|
||||
|
||||
// Replaces each shape in the specified animation with the closest shape that occurs in the target shape set.
|
||||
JoiningContinuousTimeline<Shape> convertToTargetShapeSet(const JoiningContinuousTimeline<Shape>& animation, const ShapeSet& targetShapeSet);
|
||||
// Replaces each shape in the specified animation with the closest shape that occurs in the target
|
||||
// shape set.
|
||||
JoiningContinuousTimeline<Shape> convertToTargetShapeSet(
|
||||
const JoiningContinuousTimeline<Shape>& animation,
|
||||
const ShapeSet& targetShapeSet
|
||||
);
|
||||
@@ -11,7 +11,7 @@ using std::map;
string getShapesString(const JoiningContinuousTimeline<Shape>& shapes) {
|
||||
string result;
|
||||
for (const auto& timedShape : shapes) {
|
||||
if (result.size()) {
|
||||
if (!result.empty()) {
|
||||
result.append(" ");
|
||||
}
|
||||
result.append(boost::lexical_cast<std::string>(timedShape.getValue()));
|
||||
|
@ -44,12 +44,10 @@ Shape getRepresentativeShape(const JoiningTimeline<Shape>& timeline) {
|
|||
struct ShapeReduction {
|
||||
ShapeReduction(const JoiningTimeline<Shape>& sourceShapes) :
|
||||
sourceShapes(sourceShapes),
|
||||
shape(getRepresentativeShape(sourceShapes))
|
||||
{}
|
||||
shape(getRepresentativeShape(sourceShapes)) {}
|
||||
|
||||
ShapeReduction(const JoiningTimeline<Shape>& sourceShapes, TimeRange candidateRange) :
|
||||
ShapeReduction(JoiningBoundedTimeline<Shape>(candidateRange, sourceShapes))
|
||||
{}
|
||||
ShapeReduction(JoiningBoundedTimeline<Shape>(candidateRange, sourceShapes)) {}
|
||||
|
||||
JoiningTimeline<Shape> sourceShapes;
|
||||
Shape shape;
|
||||
|
@ -57,7 +55,8 @@ struct ShapeReduction {
|
|||
|
||||
// Returns a time range of candidate shapes for the next shape to draw.
|
||||
// Guaranteed to be non-empty.
|
||||
TimeRange getNextMinimalCandidateRange(const JoiningContinuousTimeline<Shape>& sourceShapes, const TimeRange targetRange, const centiseconds writePosition) {
|
||||
TimeRange getNextMinimalCandidateRange(const JoiningContinuousTimeline<Shape>& sourceShapes,
|
||||
const TimeRange targetRange, const centiseconds writePosition) {
|
||||
if (sourceShapes.empty()) {
|
||||
throw std::invalid_argument("Cannot determine candidate range for empty source timeline.");
|
||||
}
|
||||
|
@ -70,12 +69,15 @@ TimeRange getNextMinimalCandidateRange(const JoiningContinuousTimeline<Shape>& s
|
|||
const centiseconds remainingTargetDuration = writePosition - targetRange.getStart();
|
||||
const bool canFitOneOrLess = remainingTargetDuration <= minShapeDuration;
|
||||
const bool canFitTwo = remainingTargetDuration >= 2 * minShapeDuration;
|
||||
const centiseconds duration = canFitOneOrLess || canFitTwo ? minShapeDuration : remainingTargetDuration / 2;
|
||||
const centiseconds duration = canFitOneOrLess || canFitTwo
|
||||
? minShapeDuration
|
||||
: remainingTargetDuration / 2;
|
||||
|
||||
TimeRange candidateRange(writePosition - duration, writePosition);
|
||||
if (writePosition == targetRange.getEnd()) {
|
||||
// This is the first iteration.
|
||||
// Extend the candidate range to the right in order to consider all source shapes after the target range.
|
||||
// Extend the candidate range to the right in order to consider all source shapes after the
|
||||
// target range.
|
||||
candidateRange.setEndIfLater(sourceShapes.getRange().getEnd());
|
||||
}
|
||||
if (candidateRange.getStart() >= sourceShapes.getRange().getEnd()) {
|
||||
|
@ -92,19 +94,31 @@ TimeRange getNextMinimalCandidateRange(const JoiningContinuousTimeline<Shape>& s
|
|||
return candidateRange;
|
||||
}
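The duration chosen above follows a simple rule: if at most one minimal shape fits, or at least two fit, use the minimal duration; otherwise split the awkward remainder in half. A self-contained sketch (not part of the commit) with plain integer centiseconds; the values in the asserts are illustrative only:

#include <cassert>

int nextCandidateDuration(int remainingTargetDuration, int minShapeDuration) {
    const bool canFitOneOrLess = remainingTargetDuration <= minShapeDuration;
    const bool canFitTwo = remainingTargetDuration >= 2 * minShapeDuration;
    // One shape (or less) fits, or at least two full shapes fit: use the minimum
    // duration. Otherwise split the in-between remainder evenly in two.
    return canFitOneOrLess || canFitTwo ? minShapeDuration : remainingTargetDuration / 2;
}

int main() {
    assert(nextCandidateDuration(5, 8) == 8);   // less than one shape fits
    assert(nextCandidateDuration(20, 8) == 8);  // two or more shapes fit
    assert(nextCandidateDuration(12, 8) == 6);  // in between: split evenly
}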
ShapeReduction getNextShapeReduction(const JoiningContinuousTimeline<Shape>& sourceShapes, const TimeRange targetRange, centiseconds writePosition) {
|
||||
ShapeReduction getNextShapeReduction(
|
||||
const JoiningContinuousTimeline<Shape>& sourceShapes,
|
||||
const TimeRange targetRange,
|
||||
centiseconds writePosition
|
||||
) {
|
||||
// Determine the next time range of candidate shapes. Consider two scenarios:
|
||||
|
||||
// ... the shortest-possible candidate range
|
||||
const ShapeReduction minReduction(sourceShapes, getNextMinimalCandidateRange(sourceShapes, targetRange, writePosition));
|
||||
const ShapeReduction minReduction(sourceShapes,
|
||||
getNextMinimalCandidateRange(sourceShapes, targetRange, writePosition));
|
||||
|
||||
// ... a candidate range extended to the left to fully encompass its left-most shape
|
||||
const ShapeReduction extendedReduction(sourceShapes,
|
||||
{minReduction.sourceShapes.begin()->getStart(), minReduction.sourceShapes.getRange().getEnd()});
|
||||
{
|
||||
minReduction.sourceShapes.begin()->getStart(),
|
||||
minReduction.sourceShapes.getRange().getEnd()
|
||||
}
|
||||
);
|
||||
|
||||
// Determine the shape that might be picked *next* if we choose the shortest-possible candidate range now
|
||||
const ShapeReduction nextReduction(sourceShapes,
|
||||
getNextMinimalCandidateRange(sourceShapes, targetRange, minReduction.sourceShapes.getRange().getStart()));
|
||||
// Determine the shape that might be picked *next* if we choose the shortest-possible candidate
|
||||
// range now
|
||||
const ShapeReduction nextReduction(
|
||||
sourceShapes,
|
||||
getNextMinimalCandidateRange(sourceShapes, targetRange, minReduction.sourceShapes.getRange().getStart())
|
||||
);
|
||||
|
||||
const bool minEqualsExtended = minReduction.shape == extendedReduction.shape;
|
||||
const bool extendedIsSpecial = extendedReduction.shape != minReduction.shape
|
||||
|
@ -113,8 +127,10 @@ ShapeReduction getNextShapeReduction(const JoiningContinuousTimeline<Shape>& sou
|
|||
return minEqualsExtended || extendedIsSpecial ? extendedReduction : minReduction;
|
||||
}
|
||||
|
||||
// Modifies the timing of the given animation to fit into the specified target time range without jitter.
|
||||
JoiningContinuousTimeline<Shape> retime(const JoiningContinuousTimeline<Shape>& sourceShapes, const TimeRange targetRange) {
|
||||
// Modifies the timing of the given animation to fit into the specified target time range without
|
||||
// jitter.
|
||||
JoiningContinuousTimeline<Shape> retime(const JoiningContinuousTimeline<Shape>& sourceShapes,
|
||||
const TimeRange targetRange) {
|
||||
logTimedEvent("segment", targetRange, getShapesString(sourceShapes));
|
||||
|
||||
JoiningContinuousTimeline<Shape> result(targetRange, Shape::X);
|
||||
|
@ -125,7 +141,8 @@ JoiningContinuousTimeline<Shape> retime(const JoiningContinuousTimeline<Shape>&
|
|||
while (writePosition > targetRange.getStart()) {
|
||||
|
||||
// Decide which shape to show next, possibly discarding short shapes
|
||||
const ShapeReduction shapeReduction = getNextShapeReduction(sourceShapes, targetRange, writePosition);
|
||||
const ShapeReduction shapeReduction =
|
||||
getNextShapeReduction(sourceShapes, targetRange, writePosition);
|
||||
|
||||
// Determine how long to display the shape
|
||||
TimeRange targetShapeRange(shapeReduction.sourceShapes.getRange());
|
||||
|
@ -144,7 +161,11 @@ JoiningContinuousTimeline<Shape> retime(const JoiningContinuousTimeline<Shape>&
|
|||
return result;
|
||||
}
|
||||
|
||||
JoiningContinuousTimeline<Shape> retime(const JoiningContinuousTimeline<Shape>& animation, TimeRange sourceRange, TimeRange targetRange) {
|
||||
JoiningContinuousTimeline<Shape> retime(
|
||||
const JoiningContinuousTimeline<Shape>& animation,
|
||||
TimeRange sourceRange,
|
||||
TimeRange targetRange
|
||||
) {
|
||||
const auto sourceShapes = JoiningContinuousTimeline<Shape>(sourceRange, Shape::X, animation);
|
||||
return retime(sourceShapes, targetRange);
|
||||
}
|
||||
|
@ -160,7 +181,12 @@ JoiningContinuousTimeline<Shape> optimizeTiming(const JoiningContinuousTimeline<
|
|||
JoiningContinuousTimeline<MouthState> segments(animation.getRange(), MouthState::Idle);
|
||||
for (const auto& timedShape : animation) {
|
||||
const Shape shape = timedShape.getValue();
|
||||
const MouthState mouthState = shape == Shape::X ? MouthState::Idle : shape == Shape::A ? MouthState::Closed : MouthState::Open;
|
||||
const MouthState mouthState =
|
||||
shape == Shape::X
|
||||
? MouthState::Idle
|
||||
: shape == Shape::A
|
||||
? MouthState::Closed
|
||||
: MouthState::Open;
|
||||
segments.set(timedShape.getTimeRange(), mouthState);
|
||||
}
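The nested conditional above classifies each shape into a coarse mouth state. A self-contained sketch (not part of the commit) of the same three-way mapping, with Shape and MouthState reduced to minimal stand-ins for the project's enums:

enum class Shape { A, B, C, D, E, F, G, H, X };
enum class MouthState { Idle, Closed, Open };

MouthState classify(Shape shape) {
    if (shape == Shape::X) return MouthState::Idle;    // X: rest position
    if (shape == Shape::A) return MouthState::Closed;  // A: closed mouth
    return MouthState::Open;                           // all other shapes
}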
@@ -171,7 +197,8 @@ JoiningContinuousTimeline<Shape> optimizeTiming(const JoiningContinuousTimeline<
|
|||
|
||||
// Make sure all open and closed segments are long enough to register visually.
|
||||
JoiningContinuousTimeline<Shape> result(animation.getRange(), Shape::X);
|
||||
// ... we're filling the result timeline from right to left, so `resultStart` points to the earliest shape already written
|
||||
// ... we're filling the result timeline from right to left, so `resultStart` points to the
|
||||
// earliest shape already written
|
||||
centiseconds resultStart = result.getRange().getEnd();
|
||||
for (auto segmentIt = segments.rbegin(); segmentIt != segments.rend(); ++segmentIt) {
|
||||
// We don't care about idle shapes at this point.
|
||||
|
@ -188,26 +215,40 @@ JoiningContinuousTimeline<Shape> optimizeTiming(const JoiningContinuousTimeline<
|
|||
resultStart = targetRange.getStart();
|
||||
} else {
|
||||
// The segment is too short; we have to extend it to the left.
|
||||
// Find all adjacent segments to our left that are also too short, then distribute them evenly.
|
||||
// Find all adjacent segments to our left that are also too short, then distribute them
|
||||
// evenly.
|
||||
const auto begin = segmentIt;
|
||||
auto end = std::next(begin);
|
||||
while (end != segments.rend() && end->getValue() != MouthState::Idle && end->getDuration() < minSegmentDuration) ++end;
|
||||
while (
|
||||
end != segments.rend()
|
||||
&& end->getValue() != MouthState::Idle
|
||||
&& end->getDuration() < minSegmentDuration
|
||||
) {
|
||||
++end;
|
||||
}
|
||||
|
||||
// Determine how much we should extend the entire set of short segments to the left
|
||||
const size_t shortSegmentCount = std::distance(begin, end);
|
||||
const centiseconds desiredDuration = minSegmentDuration * shortSegmentCount;
|
||||
const centiseconds currentDuration = begin->getEnd() - std::prev(end)->getStart();
|
||||
const centiseconds desiredExtensionDuration = desiredDuration - currentDuration;
|
||||
const centiseconds availableExtensionDuration = end != segments.rend() ? end->getDuration() - 1_cs : 0_cs;
|
||||
const centiseconds extensionDuration = std::min({desiredExtensionDuration, availableExtensionDuration, maxExtensionDuration});
|
||||
const centiseconds availableExtensionDuration = end != segments.rend()
|
||||
? end->getDuration() - 1_cs
|
||||
: 0_cs;
|
||||
const centiseconds extensionDuration = std::min({
|
||||
desiredExtensionDuration, availableExtensionDuration, maxExtensionDuration
|
||||
});
|
||||
|
||||
// Distribute available time range evenly among all short segments
|
||||
const centiseconds shortSegmentsTargetStart = std::prev(end)->getStart() - extensionDuration;
|
||||
const centiseconds shortSegmentsTargetStart =
|
||||
std::prev(end)->getStart() - extensionDuration;
|
||||
for (auto shortSegmentIt = begin; shortSegmentIt != end; ++shortSegmentIt) {
|
||||
size_t remainingShortSegmentCount = std::distance(shortSegmentIt, end);
|
||||
const centiseconds segmentDuration = (resultStart - shortSegmentsTargetStart) / remainingShortSegmentCount;
|
||||
const centiseconds segmentDuration = (resultStart - shortSegmentsTargetStart) /
|
||||
remainingShortSegmentCount;
|
||||
const TimeRange segmentTargetRange(resultStart - segmentDuration, resultStart);
|
||||
const auto retimedSegment = retime(animation, shortSegmentIt->getTimeRange(), segmentTargetRange);
|
||||
const auto retimedSegment =
|
||||
retime(animation, shortSegmentIt->getTimeRange(), segmentTargetRange);
|
||||
for (const auto& timedShape : retimedSegment) {
|
||||
result.set(timedShape);
|
||||
}
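The arithmetic above hands each remaining short segment an equal share of the time still available between the write position and the target start. A sketch (not part of the commit) of that even-distribution step with plain integers; the way resultStart advances between iterations is assumed from context, since that part of the loop is not shown in this hunk:

#include <utility>
#include <vector>

std::vector<std::pair<int, int>> distributeEvenly(int resultStart, int targetStart, int segmentCount) {
    std::vector<std::pair<int, int>> ranges; // {start, end} per segment, filled right to left
    for (int remaining = segmentCount; remaining > 0; --remaining) {
        const int duration = (resultStart - targetStart) / remaining;
        ranges.push_back({ resultStart - duration, resultStart });
        resultStart -= duration; // next segment ends where this one starts
    }
    return ranges;
}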
@@ -3,6 +3,7 @@
|
|||
#include "core/Shape.h"
|
||||
#include "time/ContinuousTimeline.h"
|
||||
|
||||
// Changes the timing of an existing animation to reduce jitter and to make sure all shapes register visually.
|
||||
// Changes the timing of an existing animation to reduce jitter and to make sure all shapes register
|
||||
// visually.
|
||||
// In some cases, shapes may be omitted.
|
||||
JoiningContinuousTimeline<Shape> optimizeTiming(const JoiningContinuousTimeline<Shape>& animation);
|
||||
|
|
|
@ -19,21 +19,30 @@ JoiningContinuousTimeline<Shape> insertTweens(const JoiningContinuousTimeline<Sh
|
|||
|
||||
centiseconds tweenStart, tweenDuration;
|
||||
switch (tweenTiming) {
|
||||
case TweenTiming::Early: {
|
||||
tweenDuration = std::min(firstTimeRange.getDuration() / 3, maxTweenDuration);
|
||||
tweenStart = firstTimeRange.getEnd() - tweenDuration;
|
||||
break;
|
||||
}
|
||||
case TweenTiming::Centered: {
|
||||
tweenDuration = std::min({firstTimeRange.getDuration() / 4, secondTimeRange.getDuration() / 4, maxTweenDuration});
|
||||
tweenStart = firstTimeRange.getEnd() - tweenDuration / 2;
|
||||
break;
|
||||
}
|
||||
case TweenTiming::Late: {
|
||||
tweenDuration = std::min(secondTimeRange.getDuration() / 3, maxTweenDuration);
|
||||
tweenStart = secondTimeRange.getStart();
|
||||
break;
|
||||
}
|
||||
case TweenTiming::Early:
|
||||
{
|
||||
tweenDuration = std::min(firstTimeRange.getDuration() / 3, maxTweenDuration);
|
||||
tweenStart = firstTimeRange.getEnd() - tweenDuration;
|
||||
break;
|
||||
}
|
||||
case TweenTiming::Centered:
|
||||
{
|
||||
tweenDuration = std::min({
|
||||
firstTimeRange.getDuration() / 4, secondTimeRange.getDuration() / 4, maxTweenDuration
|
||||
});
|
||||
tweenStart = firstTimeRange.getEnd() - tweenDuration / 2;
|
||||
break;
|
||||
}
|
||||
case TweenTiming::Late:
|
||||
{
|
||||
tweenDuration = std::min(secondTimeRange.getDuration() / 3, maxTweenDuration);
|
||||
tweenStart = secondTimeRange.getStart();
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
throw std::runtime_error("Unexpected tween timing.");
|
||||
}
|
||||
}
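The switch above positions a tween relative to the boundary between two shapes: an early tween ends at the boundary, a late one starts there, and a centered one straddles it. A self-contained sketch (not part of the commit) with plain integer centiseconds, assuming the second time range starts where the first one ends:

#include <algorithm>

enum class TweenTiming { Early, Centered, Late };
struct Tween { int start; int duration; };

// firstEnd is the boundary between the two shapes (== start of the second one).
Tween placeTween(TweenTiming timing, int firstDuration, int secondDuration,
    int firstEnd, int maxTweenDuration)
{
    switch (timing) {
        case TweenTiming::Early: {
            const int d = std::min(firstDuration / 3, maxTweenDuration);
            return { firstEnd - d, d };     // tween ends at the boundary
        }
        case TweenTiming::Late: {
            const int d = std::min(secondDuration / 3, maxTweenDuration);
            return { firstEnd, d };         // tween starts at the boundary
        }
        default: {                          // Centered
            const int d = std::min({ firstDuration / 4, secondDuration / 4, maxTweenDuration });
            return { firstEnd - d / 2, d }; // tween straddles the boundary
        }
    }
}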
if (tweenDuration < minTweenDuration) return;
|
||||
|
|
|
@ -28,7 +28,11 @@ inline AudioClip::value_type SafeSampleReader::operator()(AudioClip::size_type i
|
|||
throw invalid_argument(fmt::format("Cannot read from sample index {}. Index < 0.", index));
|
||||
}
|
||||
if (index >= size) {
|
||||
throw invalid_argument(fmt::format("Cannot read from sample index {}. Clip size is {}.", index, size));
|
||||
throw invalid_argument(fmt::format(
|
||||
"Cannot read from sample index {}. Clip size is {}.",
|
||||
index,
|
||||
size
|
||||
));
|
||||
}
|
||||
if (index == lastIndex) {
|
||||
return lastSample;
|
||||
|
@ -51,7 +55,7 @@ AudioClip::iterator AudioClip::end() const {
|
|||
return SampleIterator(*this, size());
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioClip> operator|(std::unique_ptr<AudioClip> clip, AudioEffect effect) {
|
||||
std::unique_ptr<AudioClip> operator|(std::unique_ptr<AudioClip> clip, const AudioEffect& effect) {
|
||||
return effect(std::move(clip));
|
||||
}
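The pipe operator above makes audio effects compose left to right, each one consuming and returning a unique_ptr. A self-contained sketch (not part of the commit) of the same pattern with a toy Clip type and a hypothetical scaleGain effect, not the project's AudioClip API:

#include <functional>
#include <memory>
#include <utility>

struct Clip { float gain = 1.0f; };
using Effect = std::function<std::unique_ptr<Clip>(std::unique_ptr<Clip>)>;

// An "effect" is just a function from clip to clip; operator| applies it.
std::unique_ptr<Clip> operator|(std::unique_ptr<Clip> clip, const Effect& effect) {
    return effect(std::move(clip));
}

Effect scaleGain(float factor) {
    return [factor](std::unique_ptr<Clip> clip) {
        clip->gain *= factor;
        return clip;
    };
}

int main() {
    // Effects chain left to right, mirroring the processing order.
    const auto processed = std::make_unique<Clip>() | scaleGain(0.5f) | scaleGain(2.0f);
    (void) processed;
}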
@@ -30,7 +30,7 @@ private:
|
|||
|
||||
using AudioEffect = std::function<std::unique_ptr<AudioClip>(std::unique_ptr<AudioClip>)>;
|
||||
|
||||
std::unique_ptr<AudioClip> operator|(std::unique_ptr<AudioClip> clip, AudioEffect effect);
|
||||
std::unique_ptr<AudioClip> operator|(std::unique_ptr<AudioClip> clip, const AudioEffect& effect);
|
||||
|
||||
using SampleReader = AudioClip::SampleReader;
|
||||
|
||||
|
|
|
@ -15,15 +15,19 @@ unique_ptr<AudioClip> DcOffset::clone() const {
|
|||
}
|
||||
|
||||
SampleReader DcOffset::createUnsafeSampleReader() const {
|
||||
return [read = inputClip->createSampleReader(), factor = factor, offset = offset](size_type index) {
|
||||
float sample = read(index);
|
||||
return [
|
||||
read = inputClip->createSampleReader(),
|
||||
factor = factor,
|
||||
offset = offset
|
||||
](size_type index) {
|
||||
const float sample = read(index);
|
||||
return sample * factor + offset;
|
||||
};
|
||||
}
|
||||
|
||||
float getDcOffset(const AudioClip& audioClip) {
|
||||
int flatMeanSampleCount, fadingMeanSampleCount;
|
||||
int sampleRate = audioClip.getSampleRate();
|
||||
const int sampleRate = audioClip.getSampleRate();
|
||||
if (audioClip.size() > 4 * sampleRate) {
|
||||
// Long audio file. Average over the first 3 seconds, then fade out over the 4th.
|
||||
flatMeanSampleCount = 3 * sampleRate;
|
||||
|
@ -34,31 +38,32 @@ float getDcOffset(const AudioClip& audioClip) {
|
|||
fadingMeanSampleCount = 0;
|
||||
}
|
||||
|
||||
auto read = audioClip.createSampleReader();
|
||||
const auto read = audioClip.createSampleReader();
|
||||
double sum = 0;
|
||||
for (int i = 0; i < flatMeanSampleCount; ++i) {
|
||||
sum += read(i);
|
||||
}
|
||||
for (int i = 0; i < fadingMeanSampleCount; ++i) {
|
||||
double weight = static_cast<double>(fadingMeanSampleCount - i) / fadingMeanSampleCount;
|
||||
const double weight =
|
||||
static_cast<double>(fadingMeanSampleCount - i) / fadingMeanSampleCount;
|
||||
sum += read(flatMeanSampleCount + i) * weight;
|
||||
}
|
||||
|
||||
double totalWeight = flatMeanSampleCount + (fadingMeanSampleCount + 1) / 2.0;
|
||||
double offset = sum / totalWeight;
|
||||
const double totalWeight = flatMeanSampleCount + (fadingMeanSampleCount + 1) / 2.0;
|
||||
const double offset = sum / totalWeight;
|
||||
return static_cast<float>(offset);
|
||||
}
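The total weight above is flatMeanSampleCount + (fadingMeanSampleCount + 1) / 2 because the fading weights (N - i) / N for i = 0 .. N-1 sum to (N + 1) / 2. A quick numeric check (not part of the commit):

#include <cassert>

double fadingWeightSum(int n) {
    double sum = 0;
    for (int i = 0; i < n; ++i) {
        sum += static_cast<double>(n - i) / n; // weights fade linearly to zero
    }
    return sum;
}

int main() {
    assert(fadingWeightSum(4) == (4 + 1) / 2.0); // 1.0 + 0.75 + 0.5 + 0.25 == 2.5
}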
AudioEffect addDcOffset(float offset, float epsilon) {
|
||||
return [offset, epsilon](unique_ptr<AudioClip> inputClip) -> unique_ptr<AudioClip> {
|
||||
if (std::abs(offset) < epsilon) return std::move(inputClip);
|
||||
if (std::abs(offset) < epsilon) return inputClip;
|
||||
return make_unique<DcOffset>(std::move(inputClip), offset);
|
||||
};
|
||||
}
|
||||
|
||||
AudioEffect removeDcOffset(float epsilon) {
|
||||
return [epsilon](unique_ptr<AudioClip> inputClip) {
|
||||
float offset = getDcOffset(*inputClip);
|
||||
const float offset = getDcOffset(*inputClip);
|
||||
return std::move(inputClip) | addDcOffset(-offset, epsilon);
|
||||
};
|
||||
}
|
||||
|
|
|
@ -14,30 +14,30 @@ using std::ios_base;
|
|||
|
||||
std::string vorbisErrorToString(int64_t errorCode) {
|
||||
switch (errorCode) {
|
||||
case OV_EREAD:
|
||||
return "Read error while fetching compressed data for decode.";
|
||||
case OV_EFAULT:
|
||||
return "Internal logic fault; indicates a bug or heap/stack corruption.";
|
||||
case OV_EIMPL:
|
||||
return "Feature not implemented";
|
||||
case OV_EINVAL:
|
||||
return "Either an invalid argument, or incompletely initialized argument passed to a call.";
|
||||
case OV_ENOTVORBIS:
|
||||
return "The given file/data was not recognized as Ogg Vorbis data.";
|
||||
case OV_EBADHEADER:
|
||||
return "The file/data is apparently an Ogg Vorbis stream, but contains a corrupted or undecipherable header.";
|
||||
case OV_EVERSION:
|
||||
return "The bitstream format revision of the given Vorbis stream is not supported.";
|
||||
case OV_ENOTAUDIO:
|
||||
return "Packet is not an audio packet.";
|
||||
case OV_EBADPACKET:
|
||||
return "Error in packet.";
|
||||
case OV_EBADLINK:
|
||||
return "The given link exists in the Vorbis data stream, but is not decipherable due to garbacge or corruption.";
|
||||
case OV_ENOSEEK:
|
||||
return "The given stream is not seekable.";
|
||||
default:
|
||||
return "An unexpected Vorbis error occurred.";
|
||||
case OV_EREAD:
|
||||
return "Read error while fetching compressed data for decode.";
|
||||
case OV_EFAULT:
|
||||
return "Internal logic fault; indicates a bug or heap/stack corruption.";
|
||||
case OV_EIMPL:
|
||||
return "Feature not implemented";
|
||||
case OV_EINVAL:
|
||||
return "Either an invalid argument, or incompletely initialized argument passed to a call.";
|
||||
case OV_ENOTVORBIS:
|
||||
return "The given file/data was not recognized as Ogg Vorbis data.";
|
||||
case OV_EBADHEADER:
|
||||
return "The file/data is apparently an Ogg Vorbis stream, but contains a corrupted or undecipherable header.";
|
||||
case OV_EVERSION:
|
||||
return "The bitstream format revision of the given Vorbis stream is not supported.";
|
||||
case OV_ENOTAUDIO:
|
||||
return "Packet is not an audio packet.";
|
||||
case OV_EBADPACKET:
|
||||
return "Error in packet.";
|
||||
case OV_EBADLINK:
|
||||
return "The given link exists in the Vorbis data stream, but is not decipherable due to garbage or corruption.";
|
||||
case OV_ENOSEEK:
|
||||
return "The given stream is not seekable.";
|
||||
default:
|
||||
return "An unexpected Vorbis error occurred.";
|
||||
}
|
||||
}
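For context, libvorbisfile calls return 0 on success and a negative OV_* code on failure, which is what the throwOnError calls further down rely on. A hedged sketch of what such a helper could look like on top of vorbisErrorToString; the actual helper in the codebase may differ, so the name here is a stand-in:

#include <cstdint>
#include <stdexcept>

void throwOnVorbisError(int64_t errorCode) {
    // Negative return values from libvorbisfile indicate an error.
    if (errorCode < 0) {
        throw std::runtime_error(vorbisErrorToString(errorCode));
    }
}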
@@ -64,13 +64,13 @@ size_t readCallback(void* buffer, size_t elementSize, size_t elementCount, void*
|
|||
}
|
||||
|
||||
int seekCallback(void* dataSource, ogg_int64_t offset, int origin) {
|
||||
static const vector<ios_base::seekdir> seekDirections{
|
||||
static const vector<ios_base::seekdir> seekDirections {
|
||||
ios_base::beg, ios_base::cur, ios_base::end
|
||||
};
|
||||
|
||||
ifstream& stream = *static_cast<ifstream*>(dataSource);
|
||||
stream.seekg(offset, seekDirections.at(origin));
|
||||
stream.clear(); // In case we seeked to EOF
|
||||
stream.clear(); // In case we sought to EOF
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -82,26 +82,13 @@ long tellCallback(void* dataSource) {
|
|||
}
|
||||
|
||||
// RAII wrapper around OggVorbis_File
|
||||
class OggVorbisFile {
|
||||
class OggVorbisFile final {
|
||||
public:
|
||||
OggVorbisFile(const path& filePath);
|
||||
|
||||
OggVorbisFile(const OggVorbisFile&) = delete;
|
||||
OggVorbisFile& operator=(const OggVorbisFile&) = delete;
|
||||
|
||||
OggVorbisFile(const path& filePath) :
|
||||
stream(openFile(filePath))
|
||||
{
|
||||
// Throw only on badbit, not on failbit.
|
||||
// Ogg Vorbis expects read operations past the end of the file to
|
||||
// succeed, not to throw.
|
||||
stream.exceptions(ifstream::badbit);
|
||||
|
||||
// Ogg Vorbis normally uses the `FILE` API from the C standard library.
|
||||
// This doesn't handle Unicode paths on Windows.
|
||||
// Use wrapper functions around `ifstream` instead.
|
||||
const ov_callbacks callbacks{readCallback, seekCallback, nullptr, tellCallback};
|
||||
throwOnError(ov_open_callbacks(&stream, &oggVorbisHandle, nullptr, 0, callbacks));
|
||||
}
|
||||
|
||||
OggVorbis_File* get() {
|
||||
return &oggVorbisHandle;
|
||||
}
|
||||
|
@ -115,6 +102,22 @@ private:
|
|||
ifstream stream;
|
||||
};
|
||||
|
||||
OggVorbisFile::OggVorbisFile(const path& filePath) :
|
||||
oggVorbisHandle(),
|
||||
stream(openFile(filePath))
|
||||
{
|
||||
// Throw only on badbit, not on failbit.
|
||||
// Ogg Vorbis expects read operations past the end of the file to
|
||||
// succeed, not to throw.
|
||||
stream.exceptions(ifstream::badbit);
|
||||
|
||||
// Ogg Vorbis normally uses the `FILE` API from the C standard library.
|
||||
// This doesn't handle Unicode paths on Windows.
|
||||
// Use wrapper functions around `ifstream` instead.
|
||||
const ov_callbacks callbacks { readCallback, seekCallback, nullptr, tellCallback };
|
||||
throwOnError(ov_open_callbacks(&stream, &oggVorbisHandle, nullptr, 0, callbacks));
|
||||
}
|
||||
|
||||
OggVorbisFileReader::OggVorbisFileReader(const path& filePath) :
|
||||
filePath(filePath)
|
||||
{
|
||||
|
@ -153,7 +156,7 @@ SampleReader OggVorbisFileReader::createUnsafeSampleReader() const {
|
|||
}
|
||||
|
||||
// Downmix channels
|
||||
size_type bufferIndex = index - bufferStart;
|
||||
const size_type bufferIndex = index - bufferStart;
|
||||
value_type sum = 0.0f;
|
||||
for (int channel = 0; channel < channelCount; ++channel) {
|
||||
sum += buffer[channel][bufferIndex];
|
||||
|
|
|
@ -17,7 +17,10 @@ SampleRateConverter::SampleRateConverter(unique_ptr<AudioClip> inputClip, int ou
|
|||
throw invalid_argument("Sample rate must be positive.");
|
||||
}
|
||||
if (this->inputClip->getSampleRate() < outputSampleRate) {
|
||||
throw invalid_argument(fmt::format("Upsampling not supported. Input sample rate must not be below {}Hz.", outputSampleRate));
|
||||
throw invalid_argument(fmt::format(
|
||||
"Upsampling not supported. Input sample rate must not be below {}Hz.",
|
||||
outputSampleRate
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -30,11 +33,11 @@ float mean(double inputStart, double inputEnd, const SampleReader& read) {
|
|||
double sum = 0;
|
||||
|
||||
// ... first sample (weight <= 1)
|
||||
int64_t startIndex = static_cast<int64_t>(inputStart);
|
||||
const int64_t startIndex = static_cast<int64_t>(inputStart);
|
||||
sum += read(startIndex) * ((startIndex + 1) - inputStart);
|
||||
|
||||
// ... middle samples (weight 1 each)
|
||||
int64_t endIndex = static_cast<int64_t>(inputEnd);
|
||||
const int64_t endIndex = static_cast<int64_t>(inputEnd);
|
||||
for (int64_t index = startIndex + 1; index < endIndex; ++index) {
|
||||
sum += read(index);
|
||||
}
|
||||
|
@ -48,9 +51,14 @@ float mean(double inputStart, double inputEnd, const SampleReader& read) {
|
|||
}
|
||||
|
||||
SampleReader SampleRateConverter::createUnsafeSampleReader() const {
|
||||
return[read = inputClip->createSampleReader(), downscalingFactor = downscalingFactor, size = inputClip->size()](size_type index) {
|
||||
double inputStart = index * downscalingFactor;
|
||||
double inputEnd = std::min((index + 1) * downscalingFactor, static_cast<double>(size));
|
||||
return [
|
||||
read = inputClip->createSampleReader(),
|
||||
downscalingFactor = downscalingFactor,
|
||||
size = inputClip->size()
|
||||
](size_type index) {
|
||||
const double inputStart = index * downscalingFactor;
|
||||
const double inputEnd =
|
||||
std::min((index + 1) * downscalingFactor, static_cast<double>(size));
|
||||
return mean(inputStart, inputEnd, read);
|
||||
};
|
||||
}
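The reader above averages the input samples that each output sample covers, weighting the partial first and last samples by how much of them falls inside the range. A self-contained sketch (not part of the commit) on a plain vector; the handling of the final partial sample is an assumption, since that part of mean() is not shown in this hunk:

#include <algorithm>
#include <cstdint>
#include <vector>

// Assumes a non-empty range: inputEnd > inputStart.
float rangeMean(double inputStart, double inputEnd, const std::vector<float>& samples) {
    double sum = 0;

    // ... first sample, weighted by the fraction of it inside the range
    const int64_t startIndex = static_cast<int64_t>(inputStart);
    sum += samples[startIndex]
        * (std::min(inputEnd, static_cast<double>(startIndex + 1)) - inputStart);

    // ... middle samples, weight 1 each
    const int64_t endIndex = static_cast<int64_t>(inputEnd);
    for (int64_t i = startIndex + 1; i < endIndex; ++i) {
        sum += samples[i];
    }

    // ... last partial sample, if the range ends past a sample boundary
    if (endIndex > startIndex && endIndex < static_cast<int64_t>(samples.size())) {
        sum += samples[endIndex] * (inputEnd - endIndex);
    }

    return static_cast<float>(sum / (inputEnd - inputStart));
}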
@@ -1,6 +1,7 @@
|
|||
#include <format.h>
|
||||
#include "WaveFileReader.h"
|
||||
#include "ioTools.h"
|
||||
#include <iostream>
|
||||
#include "tools/platformTools.h"
|
||||
#include "tools/fileTools.h"
|
||||
|
||||
|
@ -32,9 +33,9 @@ namespace Codec {
|
|||
|
||||
string codecToString(int codec);
|
||||
|
||||
WaveFileReader::WaveFileReader(path filePath) :
|
||||
WaveFileReader::WaveFileReader(const path& filePath) :
|
||||
filePath(filePath),
|
||||
formatInfo{}
|
||||
formatInfo {}
|
||||
{
|
||||
auto file = openFile(filePath);
|
||||
|
||||
|
@ -43,7 +44,7 @@ WaveFileReader::WaveFileReader(path filePath) :
|
|||
file.seekg(0);
|
||||
|
||||
auto remaining = [&](int byteCount) {
|
||||
std::streamoff filePosition = file.tellg();
|
||||
const std::streamoff filePosition = file.tellg();
|
||||
return byteCount <= fileSize - filePosition;
|
||||
};
|
||||
|
||||
|
@ -51,7 +52,7 @@ WaveFileReader::WaveFileReader(path filePath) :
|
|||
if (!remaining(10)) {
|
||||
throw runtime_error("WAVE file is corrupt. Header not found.");
|
||||
}
|
||||
uint32_t rootChunkId = read<uint32_t>(file);
|
||||
auto rootChunkId = read<uint32_t>(file);
|
||||
if (rootChunkId != fourcc('R', 'I', 'F', 'F')) {
|
||||
throw runtime_error("Unknown file format. Only WAVE files are supported.");
|
||||
}
|
||||
|
@ -67,69 +68,75 @@ WaveFileReader::WaveFileReader(path filePath) :
|
|||
uint32_t chunkId = read<uint32_t>(file);
|
||||
int chunkSize = read<uint32_t>(file);
|
||||
switch (chunkId) {
|
||||
case fourcc('f', 'm', 't', ' '): {
|
||||
// Read relevant data
|
||||
uint16_t codec = read<uint16_t>(file);
|
||||
formatInfo.channelCount = read<uint16_t>(file);
|
||||
formatInfo.frameRate = read<uint32_t>(file);
|
||||
read<uint32_t>(file); // Bytes per second
|
||||
int frameSize = read<uint16_t>(file);
|
||||
int bitsPerSample = read<uint16_t>(file);
|
||||
case fourcc('f', 'm', 't', ' '):
|
||||
{
|
||||
// Read relevant data
|
||||
uint16_t codec = read<uint16_t>(file);
|
||||
formatInfo.channelCount = read<uint16_t>(file);
|
||||
formatInfo.frameRate = read<uint32_t>(file);
|
||||
read<uint32_t>(file); // Bytes per second
|
||||
int frameSize = read<uint16_t>(file);
|
||||
int bitsPerSample = read<uint16_t>(file);
|
||||
|
||||
// We've read 16 bytes so far. Skip the remainder.
|
||||
file.seekg(roundToEven(chunkSize) - 16, file.cur);
|
||||
// We've read 16 bytes so far. Skip the remainder.
|
||||
file.seekg(roundToEven(chunkSize) - 16, std::ios_base::cur);
|
||||
|
||||
// Determine sample format
|
||||
int bytesPerSample;
|
||||
switch (codec) {
|
||||
case Codec::Pcm:
|
||||
// Determine sample size.
|
||||
// According to the WAVE standard, sample sizes that are not multiples of 8 bits
|
||||
// (e.g. 12 bits) can be treated like the next-larger byte size.
|
||||
if (bitsPerSample == 8) {
|
||||
formatInfo.sampleFormat = SampleFormat::UInt8;
|
||||
bytesPerSample = 1;
|
||||
} else if (bitsPerSample <= 16) {
|
||||
formatInfo.sampleFormat = SampleFormat::Int16;
|
||||
bytesPerSample = 2;
|
||||
} else if (bitsPerSample <= 24) {
|
||||
formatInfo.sampleFormat = SampleFormat::Int24;
|
||||
bytesPerSample = 3;
|
||||
} else {
|
||||
throw runtime_error(
|
||||
format("Unsupported sample format: {}-bit PCM.", bitsPerSample));
|
||||
}
|
||||
if (bytesPerSample != frameSize / formatInfo.channelCount) {
|
||||
throw runtime_error("Unsupported sample organization.");
|
||||
// Determine sample format
|
||||
int bytesPerSample;
|
||||
switch (codec) {
|
||||
case Codec::Pcm:
|
||||
// Determine sample size.
|
||||
// According to the WAVE standard, sample sizes that are not multiples of 8
|
||||
// bits (e.g. 12 bits) can be treated like the next-larger byte size.
|
||||
if (bitsPerSample == 8) {
|
||||
formatInfo.sampleFormat = SampleFormat::UInt8;
|
||||
bytesPerSample = 1;
|
||||
} else if (bitsPerSample <= 16) {
|
||||
formatInfo.sampleFormat = SampleFormat::Int16;
|
||||
bytesPerSample = 2;
|
||||
} else if (bitsPerSample <= 24) {
|
||||
formatInfo.sampleFormat = SampleFormat::Int24;
|
||||
bytesPerSample = 3;
|
||||
} else {
|
||||
throw runtime_error(
|
||||
format("Unsupported sample format: {}-bit PCM.", bitsPerSample));
|
||||
}
|
||||
if (bytesPerSample != frameSize / formatInfo.channelCount) {
|
||||
throw runtime_error("Unsupported sample organization.");
|
||||
}
|
||||
break;
|
||||
case Codec::Float:
|
||||
if (bitsPerSample == 32) {
|
||||
formatInfo.sampleFormat = SampleFormat::Float32;
|
||||
bytesPerSample = 4;
|
||||
} else {
|
||||
throw runtime_error(
|
||||
format("Unsupported sample format: {}-bit IEEE Float.", bitsPerSample)
|
||||
);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
throw runtime_error(format(
|
||||
"Unsupported audio codec: '{}'. Only uncompressed codecs ('{}' and '{}') are supported.",
|
||||
codecToString(codec), codecToString(Codec::Pcm), codecToString(Codec::Float)
|
||||
));
|
||||
}
|
||||
formatInfo.bytesPerFrame = bytesPerSample * formatInfo.channelCount;
|
||||
break;
|
||||
}
|
||||
case fourcc('d', 'a', 't', 'a'):
|
||||
{
|
||||
reachedDataChunk = true;
|
||||
formatInfo.dataOffset = file.tellg();
|
||||
formatInfo.frameCount = chunkSize / formatInfo.bytesPerFrame;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
// Skip unknown chunk
|
||||
file.seekg(roundToEven(chunkSize), std::ios_base::cur);
|
||||
break;
|
||||
case Codec::Float:
|
||||
if (bitsPerSample == 32) {
|
||||
formatInfo.sampleFormat = SampleFormat::Float32;
|
||||
bytesPerSample = 4;
|
||||
} else {
|
||||
throw runtime_error(format("Unsupported sample format: {}-bit IEEE Float.", bitsPerSample));
|
||||
}
|
||||
break;
|
||||
default:
|
||||
throw runtime_error(format(
|
||||
"Unsupported audio codec: '{}'. Only uncompressed codecs ('{}' and '{}') are supported.",
|
||||
codecToString(codec), codecToString(Codec::Pcm), codecToString(Codec::Float)));
|
||||
}
|
||||
formatInfo.bytesPerFrame = bytesPerSample * formatInfo.channelCount;
|
||||
break;
|
||||
}
|
||||
case fourcc('d', 'a', 't', 'a'): {
|
||||
reachedDataChunk = true;
|
||||
formatInfo.dataOffset = file.tellg();
|
||||
formatInfo.frameCount = chunkSize / formatInfo.bytesPerFrame;
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
// Skip unknown chunk
|
||||
file.seekg(roundToEven(chunkSize), file.cur);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
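The chunk loop above matches chunk IDs against fourcc values and skips unknown chunks padded to an even byte count, as the RIFF format requires. A self-contained sketch (not part of the commit) of both helpers; the names and exact packing are assumptions, chosen so that the value matches what read<uint32_t> yields for an ASCII "RIFF" tag read little-endian:

#include <cstdint>

constexpr uint32_t fourccLE(char a, char b, char c, char d) {
    return static_cast<uint32_t>(static_cast<uint8_t>(a))
        | static_cast<uint32_t>(static_cast<uint8_t>(b)) << 8
        | static_cast<uint32_t>(static_cast<uint8_t>(c)) << 16
        | static_cast<uint32_t>(static_cast<uint8_t>(d)) << 24;
}

constexpr int roundToEvenBytes(int byteCount) {
    return byteCount + (byteCount & 1); // RIFF chunks are word-aligned
}

static_assert(fourccLE('R', 'I', 'F', 'F') == 0x46464952u, "little-endian packing");
static_assert(roundToEvenBytes(7) == 8, "odd chunk sizes are padded by one byte");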
@@ -138,30 +145,38 @@ unique_ptr<AudioClip> WaveFileReader::clone() const {
|
|||
return make_unique<WaveFileReader>(*this);
|
||||
}
|
||||
|
||||
inline AudioClip::value_type readSample(std::ifstream& file, SampleFormat sampleFormat, int channelCount) {
|
||||
inline AudioClip::value_type readSample(
|
||||
std::ifstream& file,
|
||||
SampleFormat sampleFormat,
|
||||
int channelCount
|
||||
) {
|
||||
float sum = 0;
|
||||
for (int channelIndex = 0; channelIndex < channelCount; channelIndex++) {
|
||||
switch (sampleFormat) {
|
||||
case SampleFormat::UInt8: {
|
||||
uint8_t raw = read<uint8_t>(file);
|
||||
sum += toNormalizedFloat(raw, 0, UINT8_MAX);
|
||||
break;
|
||||
}
|
||||
case SampleFormat::Int16: {
|
||||
int16_t raw = read<int16_t>(file);
|
||||
sum += toNormalizedFloat(raw, INT16_MIN, INT16_MAX);
|
||||
break;
|
||||
}
|
||||
case SampleFormat::Int24: {
|
||||
int raw = read<int, 24>(file);
|
||||
if (raw & 0x800000) raw |= 0xFF000000; // Fix two's complement
|
||||
sum += toNormalizedFloat(raw, INT24_MIN, INT24_MAX);
|
||||
break;
|
||||
}
|
||||
case SampleFormat::Float32: {
|
||||
sum += read<float>(file);
|
||||
break;
|
||||
}
|
||||
case SampleFormat::UInt8:
|
||||
{
|
||||
const uint8_t raw = read<uint8_t>(file);
|
||||
sum += toNormalizedFloat(raw, 0, UINT8_MAX);
|
||||
break;
|
||||
}
|
||||
case SampleFormat::Int16:
|
||||
{
|
||||
const int16_t raw = read<int16_t>(file);
|
||||
sum += toNormalizedFloat(raw, INT16_MIN, INT16_MAX);
|
||||
break;
|
||||
}
|
||||
case SampleFormat::Int24:
|
||||
{
|
||||
int raw = read<int, 24>(file);
|
||||
if (raw & 0x800000) raw |= 0xFF000000; // Fix two's complement
|
||||
sum += toNormalizedFloat(raw, INT24_MIN, INT24_MAX);
|
||||
break;
|
||||
}
|
||||
case SampleFormat::Float32:
|
||||
{
|
||||
sum += read<float>(file);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -169,10 +184,17 @@ inline AudioClip::value_type readSample(std::ifstream& file, SampleFormat sample
|
|||
}
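The 24-bit branch above sign-extends the raw value before normalizing it, because read<int, 24> leaves the top byte of the 32-bit result zero. A self-contained sketch (not part of the commit); the normalization formula here merely stands in for toNormalizedFloat, whose exact definition is not shown in this hunk:

#include <cassert>
#include <cstdint>

constexpr int32_t INT24_MIN_VALUE = -8388608; // -2^23
constexpr int32_t INT24_MAX_VALUE = 8388607;  //  2^23 - 1

float normalizeInt24(int32_t raw) {
    if (raw & 0x800000) raw |= 0xFF000000;    // sign-extend two's complement
    return (static_cast<float>(raw) - INT24_MIN_VALUE)
        / (INT24_MAX_VALUE - INT24_MIN_VALUE) * 2.0f - 1.0f;
}

int main() {
    assert(normalizeInt24(0x800000) == -1.0f); // most negative 24-bit value
    assert(normalizeInt24(0x7FFFFF) == 1.0f);  // most positive 24-bit value
}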
SampleReader WaveFileReader::createUnsafeSampleReader() const {
|
||||
return [formatInfo = formatInfo, file = std::make_shared<std::ifstream>(openFile(filePath)), filePos = std::streampos(0)](size_type index) mutable {
|
||||
std::streampos newFilePos = formatInfo.dataOffset + static_cast<std::streamoff>(index * formatInfo.bytesPerFrame);
|
||||
return
|
||||
[
|
||||
formatInfo = formatInfo,
|
||||
file = std::make_shared<std::ifstream>(openFile(filePath)),
|
||||
filePos = std::streampos(0)
|
||||
](size_type index) mutable {
|
||||
const std::streampos newFilePos = formatInfo.dataOffset
|
||||
+ static_cast<std::streamoff>(index * formatInfo.bytesPerFrame);
|
||||
file->seekg(newFilePos);
|
||||
value_type result = readSample(*file, formatInfo.sampleFormat, formatInfo.channelCount);
|
||||
const value_type result =
|
||||
readSample(*file, formatInfo.sampleFormat, formatInfo.channelCount);
|
||||
filePos = newFilePos + static_cast<std::streamoff>(formatInfo.bytesPerFrame);
|
||||
return result;
|
||||
};
|
||||
|
@ -180,248 +202,249 @@ SampleReader WaveFileReader::createUnsafeSampleReader() const {
|
|||
|
||||
string codecToString(int codec) {
|
||||
switch (codec) {
|
||||
case 0x0001: return "PCM";
|
||||
case 0x0002: return "Microsoft ADPCM";
|
||||
case 0x0003: return "IEEE Float";
|
||||
case 0x0004: return "Compaq VSELP";
|
||||
case 0x0005: return "IBM CVSD";
|
||||
case 0x0006: return "Microsoft a-Law";
|
||||
case 0x0007: return "Microsoft u-Law";
|
||||
case 0x0008: return "Microsoft DTS";
|
||||
case 0x0009: return "DRM";
|
||||
case 0x000a: return "WMA 9 Speech";
|
||||
case 0x000b: return "Microsoft Windows Media RT Voice";
|
||||
case 0x0010: return "OKI-ADPCM";
|
||||
case 0x0011: return "Intel IMA/DVI-ADPCM";
|
||||
case 0x0012: return "Videologic Mediaspace ADPCM";
|
||||
case 0x0013: return "Sierra ADPCM";
|
||||
case 0x0014: return "Antex G.723 ADPCM";
|
||||
case 0x0015: return "DSP Solutions DIGISTD";
|
||||
case 0x0016: return "DSP Solutions DIGIFIX";
|
||||
case 0x0017: return "Dialoic OKI ADPCM";
|
||||
case 0x0018: return "Media Vision ADPCM";
|
||||
case 0x0019: return "HP CU";
|
||||
case 0x001a: return "HP Dynamic Voice";
|
||||
case 0x0020: return "Yamaha ADPCM";
|
||||
case 0x0021: return "SONARC Speech Compression";
|
||||
case 0x0022: return "DSP Group True Speech";
|
||||
case 0x0023: return "Echo Speech Corp.";
|
||||
case 0x0024: return "Virtual Music Audiofile AF36";
|
||||
case 0x0025: return "Audio Processing Tech.";
|
||||
case 0x0026: return "Virtual Music Audiofile AF10";
|
||||
case 0x0027: return "Aculab Prosody 1612";
|
||||
case 0x0028: return "Merging Tech. LRC";
|
||||
case 0x0030: return "Dolby AC2";
|
||||
case 0x0031: return "Microsoft GSM610";
|
||||
case 0x0032: return "MSN Audio";
|
||||
case 0x0033: return "Antex ADPCME";
|
||||
case 0x0034: return "Control Resources VQLPC";
|
||||
case 0x0035: return "DSP Solutions DIGIREAL";
|
||||
case 0x0036: return "DSP Solutions DIGIADPCM";
|
||||
case 0x0037: return "Control Resources CR10";
|
||||
case 0x0038: return "Natural MicroSystems VBX ADPCM";
|
||||
case 0x0039: return "Crystal Semiconductor IMA ADPCM";
|
||||
case 0x003a: return "Echo Speech ECHOSC3";
|
||||
case 0x003b: return "Rockwell ADPCM";
|
||||
case 0x003c: return "Rockwell DIGITALK";
|
||||
case 0x003d: return "Xebec Multimedia";
|
||||
case 0x0040: return "Antex G.721 ADPCM";
|
||||
case 0x0041: return "Antex G.728 CELP";
|
||||
case 0x0042: return "Microsoft MSG723";
|
||||
case 0x0043: return "IBM AVC ADPCM";
|
||||
case 0x0045: return "ITU-T G.726";
|
||||
case 0x0050: return "Microsoft MPEG";
|
||||
case 0x0051: return "RT23 or PAC";
|
||||
case 0x0052: return "InSoft RT24";
|
||||
case 0x0053: return "InSoft PAC";
|
||||
case 0x0055: return "MP3";
|
||||
case 0x0059: return "Cirrus";
|
||||
case 0x0060: return "Cirrus Logic";
|
||||
case 0x0061: return "ESS Tech. PCM";
|
||||
case 0x0062: return "Voxware Inc.";
|
||||
case 0x0063: return "Canopus ATRAC";
|
||||
case 0x0064: return "APICOM G.726 ADPCM";
|
||||
case 0x0065: return "APICOM G.722 ADPCM";
|
||||
case 0x0066: return "Microsoft DSAT";
|
||||
case 0x0067: return "Micorsoft DSAT DISPLAY";
|
||||
case 0x0069: return "Voxware Byte Aligned";
|
||||
case 0x0070: return "Voxware AC8";
|
||||
case 0x0071: return "Voxware AC10";
|
||||
case 0x0072: return "Voxware AC16";
|
||||
case 0x0073: return "Voxware AC20";
|
||||
case 0x0074: return "Voxware MetaVoice";
|
||||
case 0x0075: return "Voxware MetaSound";
|
||||
case 0x0076: return "Voxware RT29HW";
|
||||
case 0x0077: return "Voxware VR12";
|
||||
case 0x0078: return "Voxware VR18";
|
||||
case 0x0079: return "Voxware TQ40";
|
||||
case 0x007a: return "Voxware SC3";
|
||||
case 0x007b: return "Voxware SC3";
|
||||
case 0x0080: return "Soundsoft";
|
||||
case 0x0081: return "Voxware TQ60";
|
||||
case 0x0082: return "Microsoft MSRT24";
|
||||
case 0x0083: return "AT&T G.729A";
|
||||
case 0x0084: return "Motion Pixels MVI MV12";
|
||||
case 0x0085: return "DataFusion G.726";
|
||||
case 0x0086: return "DataFusion GSM610";
|
||||
case 0x0088: return "Iterated Systems Audio";
|
||||
case 0x0089: return "Onlive";
|
||||
case 0x008a: return "Multitude, Inc. FT SX20";
|
||||
case 0x008b: return "Infocom ITS A/S G.721 ADPCM";
|
||||
case 0x008c: return "Convedia G729";
|
||||
case 0x008d: return "Not specified congruency, Inc.";
|
||||
case 0x0091: return "Siemens SBC24";
|
||||
case 0x0092: return "Sonic Foundry Dolby AC3 APDIF";
|
||||
case 0x0093: return "MediaSonic G.723";
|
||||
case 0x0094: return "Aculab Prosody 8kbps";
|
||||
case 0x0097: return "ZyXEL ADPCM";
|
||||
case 0x0098: return "Philips LPCBB";
|
||||
case 0x0099: return "Studer Professional Audio Packed";
|
||||
case 0x00a0: return "Malden PhonyTalk";
|
||||
case 0x00a1: return "Racal Recorder GSM";
|
||||
case 0x00a2: return "Racal Recorder G720.a";
|
||||
case 0x00a3: return "Racal G723.1";
|
||||
case 0x00a4: return "Racal Tetra ACELP";
|
||||
case 0x00b0: return "NEC AAC NEC Corporation";
|
||||
case 0x00ff: return "AAC";
|
||||
case 0x0100: return "Rhetorex ADPCM";
|
||||
case 0x0101: return "IBM u-Law";
|
||||
case 0x0102: return "IBM a-Law";
|
||||
case 0x0103: return "IBM ADPCM";
|
||||
case 0x0111: return "Vivo G.723";
|
||||
case 0x0112: return "Vivo Siren";
|
||||
case 0x0120: return "Philips Speech Processing CELP";
|
||||
case 0x0121: return "Philips Speech Processing GRUNDIG";
|
||||
case 0x0123: return "Digital G.723";
|
||||
case 0x0125: return "Sanyo LD ADPCM";
|
||||
case 0x0130: return "Sipro Lab ACEPLNET";
|
||||
case 0x0131: return "Sipro Lab ACELP4800";
|
||||
case 0x0132: return "Sipro Lab ACELP8V3";
|
||||
case 0x0133: return "Sipro Lab G.729";
|
||||
case 0x0134: return "Sipro Lab G.729A";
|
||||
case 0x0135: return "Sipro Lab Kelvin";
|
||||
case 0x0136: return "VoiceAge AMR";
|
||||
case 0x0140: return "Dictaphone G.726 ADPCM";
|
||||
case 0x0150: return "Qualcomm PureVoice";
|
||||
case 0x0151: return "Qualcomm HalfRate";
|
||||
case 0x0155: return "Ring Zero Systems TUBGSM";
|
||||
case 0x0160: return "Microsoft Audio1";
|
||||
case 0x0161: return "Windows Media Audio V2 V7 V8 V9 / DivX audio (WMA) / Alex AC3 Audio";
|
||||
case 0x0162: return "Windows Media Audio Professional V9";
|
||||
case 0x0163: return "Windows Media Audio Lossless V9";
|
||||
case 0x0164: return "WMA Pro over S/PDIF";
|
||||
case 0x0170: return "UNISYS NAP ADPCM";
|
||||
case 0x0171: return "UNISYS NAP ULAW";
|
||||
case 0x0172: return "UNISYS NAP ALAW";
|
||||
case 0x0173: return "UNISYS NAP 16K";
|
||||
case 0x0174: return "MM SYCOM ACM SYC008 SyCom Technologies";
|
||||
case 0x0175: return "MM SYCOM ACM SYC701 G726L SyCom Technologies";
|
||||
case 0x0176: return "MM SYCOM ACM SYC701 CELP54 SyCom Technologies";
|
||||
case 0x0177: return "MM SYCOM ACM SYC701 CELP68 SyCom Technologies";
|
||||
case 0x0178: return "Knowledge Adventure ADPCM";
|
||||
case 0x0180: return "Fraunhofer IIS MPEG2AAC";
|
||||
case 0x0190: return "Digital Theater Systems DTS DS";
|
||||
case 0x0200: return "Creative Labs ADPCM";
|
||||
case 0x0202: return "Creative Labs FASTSPEECH8";
|
||||
case 0x0203: return "Creative Labs FASTSPEECH10";
|
||||
case 0x0210: return "UHER ADPCM";
|
||||
case 0x0215: return "Ulead DV ACM";
|
||||
case 0x0216: return "Ulead DV ACM";
|
||||
case 0x0220: return "Quarterdeck Corp.";
|
||||
case 0x0230: return "I-Link VC";
|
||||
case 0x0240: return "Aureal Semiconductor Raw Sport";
|
||||
case 0x0241: return "ESST AC3";
|
||||
case 0x0250: return "Interactive Products HSX";
|
||||
case 0x0251: return "Interactive Products RPELP";
|
||||
case 0x0260: return "Consistent CS2";
|
||||
case 0x0270: return "Sony SCX";
|
||||
case 0x0271: return "Sony SCY";
|
||||
case 0x0272: return "Sony ATRAC3";
|
||||
case 0x0273: return "Sony SPC";
|
||||
case 0x0280: return "TELUM Telum Inc.";
|
||||
case 0x0281: return "TELUMIA Telum Inc.";
|
||||
case 0x0285: return "Norcom Voice Systems ADPCM";
|
||||
case 0x0300: return "Fujitsu FM TOWNS SND";
|
||||
case 0x0301:
|
||||
case 0x0302:
|
||||
case 0x0303:
|
||||
case 0x0304:
|
||||
case 0x0305:
|
||||
case 0x0306:
|
||||
case 0x0307:
|
||||
case 0x0308: return "Fujitsu (not specified)";
|
||||
case 0x0350: return "Micronas Semiconductors, Inc. Development";
|
||||
case 0x0351: return "Micronas Semiconductors, Inc. CELP833";
|
||||
case 0x0400: return "Brooktree Digital";
|
||||
case 0x0401: return "Intel Music Coder (IMC)";
|
||||
case 0x0402: return "Ligos Indeo Audio";
|
||||
case 0x0450: return "QDesign Music";
|
||||
case 0x0500: return "On2 VP7 On2 Technologies";
|
||||
case 0x0501: return "On2 VP6 On2 Technologies";
|
||||
case 0x0680: return "AT&T VME VMPCM";
|
||||
case 0x0681: return "AT&T TCP";
|
||||
case 0x0700: return "YMPEG Alpha (dummy for MPEG-2 compressor)";
|
||||
case 0x08ae: return "ClearJump LiteWave (lossless)";
|
||||
case 0x1000: return "Olivetti GSM";
|
||||
case 0x1001: return "Olivetti ADPCM";
|
||||
case 0x1002: return "Olivetti CELP";
|
||||
case 0x1003: return "Olivetti SBC";
|
||||
case 0x1004: return "Olivetti OPR";
|
||||
case 0x1100: return "Lernout & Hauspie";
|
||||
case 0x1101: return "Lernout & Hauspie CELP codec";
|
||||
case 0x1102:
|
||||
case 0x1103:
|
||||
case 0x1104: return "Lernout & Hauspie SBC codec";
|
||||
case 0x1400: return "Norris Comm. Inc.";
|
||||
case 0x1401: return "ISIAudio";
|
||||
case 0x1500: return "AT&T Soundspace Music Compression";
|
||||
case 0x181c: return "VoxWare RT24 speech codec";
|
||||
case 0x181e: return "Lucent elemedia AX24000P Music codec";
|
||||
case 0x1971: return "Sonic Foundry LOSSLESS";
|
||||
case 0x1979: return "Innings Telecom Inc. ADPCM";
|
||||
case 0x1c07: return "Lucent SX8300P speech codec";
|
||||
case 0x1c0c: return "Lucent SX5363S G.723 compliant codec";
|
||||
case 0x1f03: return "CUseeMe DigiTalk (ex-Rocwell)";
|
||||
case 0x1fc4: return "NCT Soft ALF2CD ACM";
|
||||
case 0x2000: return "FAST Multimedia DVM";
|
||||
case 0x2001: return "Dolby DTS (Digital Theater System)";
|
||||
case 0x2002: return "RealAudio 1 / 2 14.4";
|
||||
case 0x2003: return "RealAudio 1 / 2 28.8";
|
||||
case 0x2004: return "RealAudio G2 / 8 Cook (low bitrate)";
|
||||
case 0x2005: return "RealAudio 3 / 4 / 5 Music (DNET)";
|
||||
case 0x2006: return "RealAudio 10 AAC (RAAC)";
|
||||
case 0x2007: return "RealAudio 10 AAC+ (RACP)";
|
||||
case 0x2500: return "Reserved range to 0x2600 Microsoft";
|
||||
case 0x3313: return "makeAVIS (ffvfw fake AVI sound from AviSynth scripts)";
|
||||
case 0x4143: return "Divio MPEG-4 AAC audio";
|
||||
case 0x4201: return "Nokia adaptive multirate";
|
||||
case 0x4243: return "Divio G726 Divio, Inc.";
|
||||
case 0x434c: return "LEAD Speech";
|
||||
case 0x564c: return "LEAD Vorbis";
|
||||
case 0x5756: return "WavPack Audio";
|
||||
case 0x674f: return "Ogg Vorbis (mode 1)";
|
||||
case 0x6750: return "Ogg Vorbis (mode 2)";
|
||||
case 0x6751: return "Ogg Vorbis (mode 3)";
|
||||
case 0x676f: return "Ogg Vorbis (mode 1+)";
|
||||
case 0x6770: return "Ogg Vorbis (mode 2+)";
|
||||
case 0x6771: return "Ogg Vorbis (mode 3+)";
|
||||
case 0x7000: return "3COM NBX 3Com Corporation";
|
||||
case 0x706d: return "FAAD AAC";
|
||||
case 0x7a21: return "GSM-AMR (CBR, no SID)";
|
||||
case 0x7a22: return "GSM-AMR (VBR, including SID)";
|
||||
case 0xa100: return "Comverse Infosys Ltd. G723 1";
|
||||
case 0xa101: return "Comverse Infosys Ltd. AVQSBC";
|
||||
case 0xa102: return "Comverse Infosys Ltd. OLDSBC";
|
||||
case 0xa103: return "Symbol Technologies G729A";
|
||||
case 0xa104: return "VoiceAge AMR WB VoiceAge Corporation";
|
||||
case 0xa105: return "Ingenient Technologies Inc. G726";
|
||||
case 0xa106: return "ISO/MPEG-4 advanced audio Coding";
|
||||
case 0xa107: return "Encore Software Ltd G726";
|
||||
case 0xa109: return "Speex ACM Codec xiph.org";
|
||||
case 0xdfac: return "DebugMode SonicFoundry Vegas FrameServer ACM Codec";
|
||||
case 0xf1ac: return "Free Lossless Audio Codec FLAC";
|
||||
case 0xfffe: return "Extensible";
|
||||
case 0xffff: return "Development";
|
||||
case 0x0001: return "PCM";
|
||||
case 0x0002: return "Microsoft ADPCM";
|
||||
case 0x0003: return "IEEE Float";
|
||||
case 0x0004: return "Compaq VSELP";
|
||||
case 0x0005: return "IBM CVSD";
|
||||
case 0x0006: return "Microsoft a-Law";
|
||||
case 0x0007: return "Microsoft u-Law";
|
||||
case 0x0008: return "Microsoft DTS";
|
||||
case 0x0009: return "DRM";
|
||||
case 0x000a: return "WMA 9 Speech";
|
||||
case 0x000b: return "Microsoft Windows Media RT Voice";
|
||||
case 0x0010: return "OKI-ADPCM";
|
||||
case 0x0011: return "Intel IMA/DVI-ADPCM";
|
||||
case 0x0012: return "Videologic Mediaspace ADPCM";
|
||||
case 0x0013: return "Sierra ADPCM";
|
||||
case 0x0014: return "Antex G.723 ADPCM";
|
||||
case 0x0015: return "DSP Solutions DIGISTD";
|
||||
case 0x0016: return "DSP Solutions DIGIFIX";
|
||||
case 0x0017: return "Dialoic OKI ADPCM";
|
||||
case 0x0018: return "Media Vision ADPCM";
|
||||
case 0x0019: return "HP CU";
|
||||
case 0x001a: return "HP Dynamic Voice";
|
||||
case 0x0020: return "Yamaha ADPCM";
|
||||
case 0x0021: return "SONARC Speech Compression";
|
||||
case 0x0022: return "DSP Group True Speech";
|
||||
case 0x0023: return "Echo Speech Corp.";
|
||||
case 0x0024: return "Virtual Music Audiofile AF36";
|
||||
case 0x0025: return "Audio Processing Tech.";
|
||||
case 0x0026: return "Virtual Music Audiofile AF10";
|
||||
case 0x0027: return "Aculab Prosody 1612";
|
||||
case 0x0028: return "Merging Tech. LRC";
|
||||
case 0x0030: return "Dolby AC2";
|
||||
case 0x0031: return "Microsoft GSM610";
|
||||
case 0x0032: return "MSN Audio";
|
||||
case 0x0033: return "Antex ADPCME";
|
||||
case 0x0034: return "Control Resources VQLPC";
|
||||
case 0x0035: return "DSP Solutions DIGIREAL";
|
||||
case 0x0036: return "DSP Solutions DIGIADPCM";
|
||||
case 0x0037: return "Control Resources CR10";
|
||||
case 0x0038: return "Natural MicroSystems VBX ADPCM";
|
||||
case 0x0039: return "Crystal Semiconductor IMA ADPCM";
|
||||
case 0x003a: return "Echo Speech ECHOSC3";
|
||||
case 0x003b: return "Rockwell ADPCM";
|
||||
case 0x003c: return "Rockwell DIGITALK";
|
||||
case 0x003d: return "Xebec Multimedia";
|
||||
case 0x0040: return "Antex G.721 ADPCM";
|
||||
case 0x0041: return "Antex G.728 CELP";
|
||||
case 0x0042: return "Microsoft MSG723";
|
||||
case 0x0043: return "IBM AVC ADPCM";
|
||||
case 0x0045: return "ITU-T G.726";
|
||||
case 0x0050: return "Microsoft MPEG";
|
||||
case 0x0051: return "RT23 or PAC";
|
||||
case 0x0052: return "InSoft RT24";
|
||||
case 0x0053: return "InSoft PAC";
|
||||
case 0x0055: return "MP3";
|
||||
case 0x0059: return "Cirrus";
|
||||
case 0x0060: return "Cirrus Logic";
|
||||
case 0x0061: return "ESS Tech. PCM";
|
||||
case 0x0062: return "Voxware Inc.";
|
||||
case 0x0063: return "Canopus ATRAC";
|
||||
case 0x0064: return "APICOM G.726 ADPCM";
|
||||
case 0x0065: return "APICOM G.722 ADPCM";
|
||||
case 0x0066: return "Microsoft DSAT";
|
||||
case 0x0067: return "Micorsoft DSAT DISPLAY";
|
||||
case 0x0069: return "Voxware Byte Aligned";
|
||||
case 0x0070: return "Voxware AC8";
|
||||
case 0x0071: return "Voxware AC10";
|
||||
case 0x0072: return "Voxware AC16";
|
||||
case 0x0073: return "Voxware AC20";
|
||||
case 0x0074: return "Voxware MetaVoice";
|
||||
case 0x0075: return "Voxware MetaSound";
|
||||
case 0x0076: return "Voxware RT29HW";
|
||||
case 0x0077: return "Voxware VR12";
|
||||
case 0x0078: return "Voxware VR18";
|
||||
case 0x0079: return "Voxware TQ40";
|
||||
case 0x007a: return "Voxware SC3";
|
||||
case 0x007b: return "Voxware SC3";
|
||||
case 0x0080: return "Soundsoft";
|
||||
case 0x0081: return "Voxware TQ60";
|
||||
case 0x0082: return "Microsoft MSRT24";
|
||||
case 0x0083: return "AT&T G.729A";
|
||||
case 0x0084: return "Motion Pixels MVI MV12";
|
||||
case 0x0085: return "DataFusion G.726";
|
||||
case 0x0086: return "DataFusion GSM610";
|
||||
case 0x0088: return "Iterated Systems Audio";
|
||||
case 0x0089: return "Onlive";
|
||||
case 0x008a: return "Multitude, Inc. FT SX20";
|
||||
case 0x008b: return "Infocom ITS A/S G.721 ADPCM";
|
||||
case 0x008c: return "Convedia G729";
|
||||
case 0x008d: return "Not specified congruency, Inc.";
|
||||
case 0x0091: return "Siemens SBC24";
|
||||
case 0x0092: return "Sonic Foundry Dolby AC3 APDIF";
|
||||
case 0x0093: return "MediaSonic G.723";
|
||||
case 0x0094: return "Aculab Prosody 8kbps";
|
||||
case 0x0097: return "ZyXEL ADPCM";
|
||||
case 0x0098: return "Philips LPCBB";
|
||||
case 0x0099: return "Studer Professional Audio Packed";
|
||||
case 0x00a0: return "Malden PhonyTalk";
|
||||
case 0x00a1: return "Racal Recorder GSM";
|
||||
case 0x00a2: return "Racal Recorder G720.a";
|
||||
case 0x00a3: return "Racal G723.1";
|
||||
case 0x00a4: return "Racal Tetra ACELP";
|
||||
case 0x00b0: return "NEC AAC NEC Corporation";
|
||||
case 0x00ff: return "AAC";
|
||||
case 0x0100: return "Rhetorex ADPCM";
|
||||
case 0x0101: return "IBM u-Law";
|
||||
case 0x0102: return "IBM a-Law";
|
||||
case 0x0103: return "IBM ADPCM";
|
||||
case 0x0111: return "Vivo G.723";
|
||||
case 0x0112: return "Vivo Siren";
|
||||
case 0x0120: return "Philips Speech Processing CELP";
|
||||
case 0x0121: return "Philips Speech Processing GRUNDIG";
|
||||
case 0x0123: return "Digital G.723";
|
||||
case 0x0125: return "Sanyo LD ADPCM";
|
||||
case 0x0130: return "Sipro Lab ACEPLNET";
|
||||
case 0x0131: return "Sipro Lab ACELP4800";
|
||||
case 0x0132: return "Sipro Lab ACELP8V3";
|
||||
case 0x0133: return "Sipro Lab G.729";
|
||||
case 0x0134: return "Sipro Lab G.729A";
|
||||
case 0x0135: return "Sipro Lab Kelvin";
|
||||
case 0x0136: return "VoiceAge AMR";
|
||||
case 0x0140: return "Dictaphone G.726 ADPCM";
|
||||
case 0x0150: return "Qualcomm PureVoice";
|
||||
case 0x0151: return "Qualcomm HalfRate";
|
||||
case 0x0155: return "Ring Zero Systems TUBGSM";
|
||||
case 0x0160: return "Microsoft Audio1";
|
||||
case 0x0161: return "Windows Media Audio V2 V7 V8 V9 / DivX audio (WMA) / Alex AC3 Audio";
|
||||
case 0x0162: return "Windows Media Audio Professional V9";
|
||||
case 0x0163: return "Windows Media Audio Lossless V9";
|
||||
case 0x0164: return "WMA Pro over S/PDIF";
|
||||
case 0x0170: return "UNISYS NAP ADPCM";
|
||||
case 0x0171: return "UNISYS NAP ULAW";
|
||||
case 0x0172: return "UNISYS NAP ALAW";
|
||||
case 0x0173: return "UNISYS NAP 16K";
|
||||
case 0x0174: return "MM SYCOM ACM SYC008 SyCom Technologies";
|
||||
case 0x0175: return "MM SYCOM ACM SYC701 G726L SyCom Technologies";
|
||||
case 0x0176: return "MM SYCOM ACM SYC701 CELP54 SyCom Technologies";
|
||||
case 0x0177: return "MM SYCOM ACM SYC701 CELP68 SyCom Technologies";
|
||||
case 0x0178: return "Knowledge Adventure ADPCM";
case 0x0180: return "Fraunhofer IIS MPEG2AAC";
case 0x0190: return "Digital Theater Systems DTS DS";
case 0x0200: return "Creative Labs ADPCM";
case 0x0202: return "Creative Labs FASTSPEECH8";
case 0x0203: return "Creative Labs FASTSPEECH10";
case 0x0210: return "UHER ADPCM";
case 0x0215: return "Ulead DV ACM";
case 0x0216: return "Ulead DV ACM";
case 0x0220: return "Quarterdeck Corp.";
case 0x0230: return "I-Link VC";
case 0x0240: return "Aureal Semiconductor Raw Sport";
case 0x0241: return "ESST AC3";
case 0x0250: return "Interactive Products HSX";
case 0x0251: return "Interactive Products RPELP";
case 0x0260: return "Consistent CS2";
case 0x0270: return "Sony SCX";
case 0x0271: return "Sony SCY";
case 0x0272: return "Sony ATRAC3";
case 0x0273: return "Sony SPC";
case 0x0280: return "TELUM Telum Inc.";
case 0x0281: return "TELUMIA Telum Inc.";
case 0x0285: return "Norcom Voice Systems ADPCM";
case 0x0300: return "Fujitsu FM TOWNS SND";
case 0x0301:
case 0x0302:
case 0x0303:
case 0x0304:
case 0x0305:
case 0x0306:
case 0x0307:
case 0x0308: return "Fujitsu (not specified)";
case 0x0350: return "Micronas Semiconductors, Inc. Development";
case 0x0351: return "Micronas Semiconductors, Inc. CELP833";
case 0x0400: return "Brooktree Digital";
case 0x0401: return "Intel Music Coder (IMC)";
case 0x0402: return "Ligos Indeo Audio";
case 0x0450: return "QDesign Music";
case 0x0500: return "On2 VP7 On2 Technologies";
case 0x0501: return "On2 VP6 On2 Technologies";
case 0x0680: return "AT&T VME VMPCM";
case 0x0681: return "AT&T TCP";
case 0x0700: return "YMPEG Alpha (dummy for MPEG-2 compressor)";
case 0x08ae: return "ClearJump LiteWave (lossless)";
case 0x1000: return "Olivetti GSM";
case 0x1001: return "Olivetti ADPCM";
case 0x1002: return "Olivetti CELP";
case 0x1003: return "Olivetti SBC";
case 0x1004: return "Olivetti OPR";
case 0x1100: return "Lernout & Hauspie";
case 0x1101: return "Lernout & Hauspie CELP codec";
case 0x1102:
case 0x1103:
case 0x1104: return "Lernout & Hauspie SBC codec";
case 0x1400: return "Norris Comm. Inc.";
case 0x1401: return "ISIAudio";
case 0x1500: return "AT&T Soundspace Music Compression";
case 0x181c: return "VoxWare RT24 speech codec";
case 0x181e: return "Lucent elemedia AX24000P Music codec";
case 0x1971: return "Sonic Foundry LOSSLESS";
case 0x1979: return "Innings Telecom Inc. ADPCM";
case 0x1c07: return "Lucent SX8300P speech codec";
case 0x1c0c: return "Lucent SX5363S G.723 compliant codec";
case 0x1f03: return "CUseeMe DigiTalk (ex-Rocwell)";
case 0x1fc4: return "NCT Soft ALF2CD ACM";
case 0x2000: return "FAST Multimedia DVM";
case 0x2001: return "Dolby DTS (Digital Theater System)";
case 0x2002: return "RealAudio 1 / 2 14.4";
case 0x2003: return "RealAudio 1 / 2 28.8";
case 0x2004: return "RealAudio G2 / 8 Cook (low bitrate)";
case 0x2005: return "RealAudio 3 / 4 / 5 Music (DNET)";
case 0x2006: return "RealAudio 10 AAC (RAAC)";
case 0x2007: return "RealAudio 10 AAC+ (RACP)";
case 0x2500: return "Reserved range to 0x2600 Microsoft";
case 0x3313: return "makeAVIS (ffvfw fake AVI sound from AviSynth scripts)";
case 0x4143: return "Divio MPEG-4 AAC audio";
case 0x4201: return "Nokia adaptive multirate";
case 0x4243: return "Divio G726 Divio, Inc.";
case 0x434c: return "LEAD Speech";
case 0x564c: return "LEAD Vorbis";
case 0x5756: return "WavPack Audio";
case 0x674f: return "Ogg Vorbis (mode 1)";
case 0x6750: return "Ogg Vorbis (mode 2)";
case 0x6751: return "Ogg Vorbis (mode 3)";
case 0x676f: return "Ogg Vorbis (mode 1+)";
case 0x6770: return "Ogg Vorbis (mode 2+)";
case 0x6771: return "Ogg Vorbis (mode 3+)";
case 0x7000: return "3COM NBX 3Com Corporation";
case 0x706d: return "FAAD AAC";
case 0x7a21: return "GSM-AMR (CBR, no SID)";
case 0x7a22: return "GSM-AMR (VBR, including SID)";
case 0xa100: return "Comverse Infosys Ltd. G723 1";
case 0xa101: return "Comverse Infosys Ltd. AVQSBC";
case 0xa102: return "Comverse Infosys Ltd. OLDSBC";
case 0xa103: return "Symbol Technologies G729A";
case 0xa104: return "VoiceAge AMR WB VoiceAge Corporation";
case 0xa105: return "Ingenient Technologies Inc. G726";
case 0xa106: return "ISO/MPEG-4 advanced audio Coding";
case 0xa107: return "Encore Software Ltd G726";
case 0xa109: return "Speex ACM Codec xiph.org";
case 0xdfac: return "DebugMode SonicFoundry Vegas FrameServer ACM Codec";
case 0xf1ac: return "Free Lossless Audio Codec FLAC";
case 0xfffe: return "Extensible";
case 0xffff: return "Development";
default:
return format("{0:#x}", codec);
}
return format("{0:#x}", codec);
}
@@ -12,7 +12,7 @@ enum class SampleFormat {
class WaveFileReader : public AudioClip {
public:
WaveFileReader(boost::filesystem::path filePath);
WaveFileReader(const boost::filesystem::path& filePath);
std::unique_ptr<AudioClip> clone() const override;
int getSampleRate() const override;
size_type size() const override;

@@ -20,7 +20,9 @@ std::unique_ptr<AudioClip> createAudioFileClip(path filePath) {
return std::make_unique<OggVorbisFileReader>(filePath);
}
throw runtime_error(format(
"Unsupported file extension '{}'. Supported extensions are '.wav' and '.ogg'.", extension));
"Unsupported file extension '{}'. Supported extensions are '.wav' and '.ogg'.",
extension
));
} catch (...) {
std::throw_with_nested(runtime_error(format("Could not open sound file {}.", filePath)));
}
@@ -4,33 +4,38 @@
namespace little_endian {

template <typename Type, int bitsToRead = 8 * sizeof(Type)>
Type read(std::istream &stream) {
template<typename Type, int bitsToRead = 8 * sizeof(Type)>
Type read(std::istream& stream) {
static_assert(bitsToRead % 8 == 0, "Cannot read fractional bytes.");
static_assert(bitsToRead <= sizeof(Type) * 8, "Bits to read exceed target type size.");

Type result = 0;
char *p = reinterpret_cast<char*>(&result);
int bytesToRead = bitsToRead / 8;
char* p = reinterpret_cast<char*>(&result);
const int bytesToRead = bitsToRead / 8;
for (int byteIndex = 0; byteIndex < bytesToRead; byteIndex++) {
*(p + byteIndex) = static_cast<char>(stream.get());
}
return result;
}

template <typename Type, int bitsToWrite = 8 * sizeof(Type)>
void write(Type value, std::ostream &stream) {
template<typename Type, int bitsToWrite = 8 * sizeof(Type)>
void write(Type value, std::ostream& stream) {
static_assert(bitsToWrite % 8 == 0, "Cannot write fractional bytes.");
static_assert(bitsToWrite <= sizeof(Type) * 8, "Bits to write exceed target type size.");

char *p = reinterpret_cast<char*>(&value);
int bytesToWrite = bitsToWrite / 8;
char* p = reinterpret_cast<char*>(&value);
const int bytesToWrite = bitsToWrite / 8;
for (int byteIndex = 0; byteIndex < bytesToWrite; byteIndex++) {
stream.put(*(p + byteIndex));
}
}

constexpr uint32_t fourcc(unsigned char c0, unsigned char c1, unsigned char c2, unsigned char c3) {
constexpr uint32_t fourcc(
unsigned char c0,
unsigned char c1,
unsigned char c2,
unsigned char c3
) {
return c0 | (c1 << 8) | (c2 << 16) | (c3 << 24);
}
@@ -3,7 +3,6 @@
using std::function;
using std::vector;
using std::unique_ptr;

// Converts a float in the range -1..1 to a signed 16-bit int
inline int16_t floatSampleToInt16(float sample) {

@@ -12,13 +11,18 @@ inline int16_t floatSampleToInt16(float sample) {
return static_cast<int16_t>(((sample + 1) / 2) * (INT16_MAX - INT16_MIN) + INT16_MIN);
}

void process16bitAudioClip(const AudioClip& audioClip, function<void(const vector<int16_t>&)> processBuffer, size_t bufferCapacity, ProgressSink& progressSink) {
void process16bitAudioClip(
const AudioClip& audioClip,
const function<void(const vector<int16_t>&)>& processBuffer,
size_t bufferCapacity,
ProgressSink& progressSink
) {
// Process entire sound stream
vector<int16_t> buffer;
buffer.reserve(bufferCapacity);
int sampleCount = 0;
auto it = audioClip.begin();
auto end = audioClip.end();
const auto end = audioClip.end();
do {
// Read to buffer
buffer.clear();

@@ -32,10 +36,14 @@ void process16bitAudioClip(const AudioClip& audioClip, function<void(const vecto
sampleCount += buffer.size();
progressSink.reportProgress(static_cast<double>(sampleCount) / audioClip.size());
} while (buffer.size());
} while (!buffer.empty());
}

void process16bitAudioClip(const AudioClip& audioClip, function<void(const vector<int16_t>&)> processBuffer, ProgressSink& progressSink) {
void process16bitAudioClip(
const AudioClip& audioClip,
const function<void(const vector<int16_t>&)>& processBuffer,
ProgressSink& progressSink
) {
const size_t capacity = 1600; // 0.1 second capacity
process16bitAudioClip(audioClip, processBuffer, capacity, progressSink);
}

@@ -46,5 +54,5 @@ vector<int16_t> copyTo16bitBuffer(const AudioClip& audioClip) {
for (float sample : audioClip) {
result[index++] = floatSampleToInt16(sample);
}
return std::move(result);
return result;
}
@@ -5,6 +5,17 @@
#include "AudioClip.h"
#include "tools/progress.h"

void process16bitAudioClip(const AudioClip& audioClip, std::function<void(const std::vector<int16_t>&)> processBuffer, size_t bufferCapacity, ProgressSink& progressSink);
void process16bitAudioClip(const AudioClip& audioClip, std::function<void(const std::vector<int16_t>&)> processBuffer, ProgressSink& progressSink);
void process16bitAudioClip(
const AudioClip& audioClip,
const std::function<void(const std::vector<int16_t>&)>& processBuffer,
size_t bufferCapacity,
ProgressSink& progressSink
);

void process16bitAudioClip(
const AudioClip& audioClip,
const std::function<void(const std::vector<int16_t>&)>& processBuffer,
ProgressSink& progressSink
);

std::vector<int16_t> copyTo16bitBuffer(const AudioClip& audioClip);
@@ -9,7 +9,6 @@
#include <gsl_util.h>
#include "tools/parallel.h"
#include "AudioSegment.h"
#include "tools/stringTools.h"

using std::vector;
using boost::adaptors::transformed;

@@ -17,7 +16,10 @@ using fmt::format;
using std::runtime_error;
using std::unique_ptr;

JoiningBoundedTimeline<void> webRtcDetectVoiceActivity(const AudioClip& audioClip, ProgressSink& progressSink) {
JoiningBoundedTimeline<void> webRtcDetectVoiceActivity(
const AudioClip& audioClip,
ProgressSink& progressSink
) {
VadInst* vadHandle = WebRtcVad_Create();
if (!vadHandle) throw runtime_error("Error creating WebRTC VAD handle.");

@@ -38,14 +40,19 @@ JoiningBoundedTimeline<void> webRtcDetectVoiceActivity(const AudioClip& audioCli
JoiningBoundedTimeline<void> activity(audioClip.getTruncatedRange());
centiseconds time = 0_cs;
const size_t bufferCapacity = audioClip.getSampleRate() / 100;
auto processBuffer = [&](const vector<int16_t>& buffer) {
const auto processBuffer = [&](const vector<int16_t>& buffer) {
// WebRTC is picky regarding buffer size
if (buffer.size() < bufferCapacity) return;

int result = WebRtcVad_Process(vadHandle, audioClip.getSampleRate(), buffer.data(), buffer.size()) == 1;
const int result = WebRtcVad_Process(
vadHandle,
audioClip.getSampleRate(),
buffer.data(),
buffer.size()
) == 1;
if (result == -1) throw runtime_error("Error processing audio buffer using WebRTC VAD.");

bool isActive = result != 0;
const bool isActive = result != 0;
if (isActive) {
activity.set(time, time + 1_cs);
}

@@ -54,12 +61,14 @@ JoiningBoundedTimeline<void> webRtcDetectVoiceActivity(const AudioClip& audioCli
process16bitAudioClip(audioClip, processBuffer, bufferCapacity, pass1ProgressSink);

// WebRTC adapts to the audio. This means results may not be correct at the very beginning.
// It sometimes returns false activity at the very beginning, mistaking the background noise for speech.
// It sometimes returns false activity at the very beginning, mistaking the background noise for
// speech.
// So we delete the first recognized utterance and re-process the corresponding audio segment.
if (!activity.empty()) {
TimeRange firstActivity = activity.begin()->getTimeRange();
activity.clear(firstActivity);
unique_ptr<AudioClip> streamStart = audioClip.clone() | segment(TimeRange(0_cs, firstActivity.getEnd()));
const unique_ptr<AudioClip> streamStart = audioClip.clone()
| segment(TimeRange(0_cs, firstActivity.getEnd()));
time = 0_cs;
process16bitAudioClip(*streamStart, processBuffer, bufferCapacity, pass2ProgressSink);
}

@@ -67,24 +76,34 @@ JoiningBoundedTimeline<void> detectVoiceActivity(const AudioClip& inputAudioClip
return activity;
}

JoiningBoundedTimeline<void> detectVoiceActivity(const AudioClip& inputAudioClip, int maxThreadCount, ProgressSink& progressSink) {
JoiningBoundedTimeline<void> detectVoiceActivity(
const AudioClip& inputAudioClip,
int maxThreadCount,
ProgressSink& progressSink
) {
// Prepare audio for VAD
const unique_ptr<AudioClip> audioClip = inputAudioClip.clone() | resample(16000) | removeDcOffset();
const unique_ptr<AudioClip> audioClip = inputAudioClip.clone()
| resample(16000)
| removeDcOffset();

JoiningBoundedTimeline<void> activity(audioClip->getTruncatedRange());
std::mutex activityMutex;

// Split audio into segments and perform parallel VAD
const int segmentCount = maxThreadCount;
centiseconds audioDuration = audioClip->getTruncatedRange().getDuration();
const centiseconds audioDuration = audioClip->getTruncatedRange().getDuration();
vector<TimeRange> audioSegments;
for (int i = 0; i < segmentCount; ++i) {
TimeRange segmentRange = TimeRange(i * audioDuration / segmentCount, (i + 1) * audioDuration / segmentCount);
TimeRange segmentRange = TimeRange(
i * audioDuration / segmentCount,
(i + 1) * audioDuration / segmentCount
);
audioSegments.push_back(segmentRange);
}
runParallel([&](const TimeRange& segmentRange, ProgressSink& segmentProgressSink) {
unique_ptr<AudioClip> audioSegment = audioClip->clone() | segment(segmentRange);
JoiningBoundedTimeline<void> activitySegment = webRtcDetectVoiceActivity(*audioSegment, segmentProgressSink);
const unique_ptr<AudioClip> audioSegment = audioClip->clone() | segment(segmentRange);
JoiningBoundedTimeline<void> activitySegment =
webRtcDetectVoiceActivity(*audioSegment, segmentProgressSink);

std::lock_guard<std::mutex> lock(activityMutex);
for (auto activityRange : activitySegment) {

@@ -109,8 +128,13 @@ JoiningBoundedTimeline<void> detectVoiceActivity(const AudioClip& inputAudioClip
}
}

logging::debugFormat("Found {} sections of voice activity: {}", activity.size(),
join(activity | transformed([](const Timed<void>& t) { return format("{0}-{1}", t.getStart(), t.getEnd()); }), ", "));
logging::debugFormat(
"Found {} sections of voice activity: {}",
activity.size(),
join(activity | transformed([](const Timed<void>& t) {
return format("{0}-{1}", t.getStart(), t.getEnd());
}), ", ")
);

return activity;
}
@@ -3,4 +3,8 @@
#include "time/BoundedTimeline.h"
#include "tools/progress.h"

JoiningBoundedTimeline<void> detectVoiceActivity(const AudioClip& audioClip, int maxThreadCount, ProgressSink& progressSink);
JoiningBoundedTimeline<void> detectVoiceActivity(
const AudioClip& audioClip,
int maxThreadCount,
ProgressSink& progressSink
);
@@ -12,26 +12,26 @@ void createWaveFile(const AudioClip& audioClip, std::string fileName) {
// Write RIFF chunk
write<uint32_t>(fourcc('R', 'I', 'F', 'F'), file);
uint32_t formatChunkSize = 16;
uint16_t channelCount = 1;
uint16_t frameSize = static_cast<uint16_t>(channelCount * sizeof(float));
uint32_t dataChunkSize = static_cast<uint32_t>(audioClip.size() * frameSize);
uint32_t riffChunkSize = 4 + (8 + formatChunkSize) + (8 + dataChunkSize);
const uint32_t formatChunkSize = 16;
const uint16_t channelCount = 1;
const uint16_t frameSize = static_cast<uint16_t>(channelCount * sizeof(float));
const uint32_t dataChunkSize = static_cast<uint32_t>(audioClip.size() * frameSize);
const uint32_t riffChunkSize = 4 + (8 + formatChunkSize) + (8 + dataChunkSize);
write<uint32_t>(riffChunkSize, file);
write<uint32_t>(fourcc('W', 'A', 'V', 'E'), file);

// Write format chunk
write<uint32_t>(fourcc('f', 'm', 't', ' '), file);
write<uint32_t>(formatChunkSize, file);
uint16_t codec = 0x03; // 32-bit float
const uint16_t codec = 0x03; // 32-bit float
write<uint16_t>(codec, file);
write<uint16_t>(channelCount, file);
uint32_t frameRate = static_cast<uint16_t>(audioClip.getSampleRate());
const uint32_t frameRate = static_cast<uint16_t>(audioClip.getSampleRate());
write<uint32_t>(frameRate, file);
uint32_t bytesPerSecond = frameRate * frameSize;
const uint32_t bytesPerSecond = frameRate * frameSize;
write<uint32_t>(bytesPerSecond, file);
write<uint16_t>(frameSize, file);
uint16_t bitsPerSample = 8 * sizeof(float);
const uint16_t bitsPerSample = 8 * sizeof(float);
write<uint16_t>(bitsPerSample, file);

// Write data chunk
@@ -13,7 +13,7 @@ string PhoneConverter::getTypeName() {
}

EnumConverter<Phone>::member_data PhoneConverter::getMemberData() {
return member_data{
return member_data {
{ Phone::AO, "AO" },
{ Phone::AA, "AA" },
{ Phone::IY, "IY" },

@@ -35,7 +35,7 @@ string ShapeConverter::getTypeName() {
}

EnumConverter<Shape>::member_data ShapeConverter::getMemberData() {
return member_data{
return member_data {
{ Shape::A, "A" },
{ Shape::B, "B" },
{ Shape::C, "C" },

@@ -29,8 +29,8 @@ enum class Shape {
class ShapeConverter : public EnumConverter<Shape> {
public:
static ShapeConverter& get();
std::set<Shape> getBasicShapes();
std::set<Shape> getExtendedShapes();
static std::set<Shape> getBasicShapes();
static std::set<Shape> getExtendedShapes();
protected:
std::string getTypeName() override;
member_data getMemberData() override;
@@ -6,7 +6,8 @@ using std::string;
void JsonExporter::exportAnimation(const ExporterInput& input, std::ostream& outputStream) {
// Export as JSON.
// I'm not using a library because the code is short enough without one and it lets me control the formatting.
// I'm not using a library because the code is short enough without one and it lets me control
// the formatting.
outputStream << "{\n";
outputStream << " \"metadata\": {\n";
outputStream << " \"soundFile\": \"" << escapeJsonString(input.inputFilePath.string()) << "\",\n";

@@ -4,7 +4,11 @@
void TsvExporter::exportAnimation(const ExporterInput& input, std::ostream& outputStream) {
// Output shapes with start times
for (auto& timedShape : input.animation) {
outputStream << formatDuration(timedShape.getStart()) << "\t" << timedShape.getValue() << "\n";
outputStream
<< formatDuration(timedShape.getStart())
<< "\t"
<< timedShape.getValue()
<< "\n";
}

// Output closed mouth with end time

@@ -12,11 +12,17 @@ void XmlExporter::exportAnimation(const ExporterInput& input, std::ostream& outp
// Add metadata
tree.put("rhubarbResult.metadata.soundFile", input.inputFilePath.string());
tree.put("rhubarbResult.metadata.duration", formatDuration(input.animation.getRange().getDuration()));
tree.put(
"rhubarbResult.metadata.duration",
formatDuration(input.animation.getRange().getDuration())
);

// Add mouth cues
for (auto& timedShape : dummyShapeIfEmpty(input.animation, input.targetShapeSet)) {
ptree& mouthCueElement = tree.add("rhubarbResult.mouthCues.mouthCue", timedShape.getValue());
ptree& mouthCueElement = tree.add(
"rhubarbResult.mouthCues.mouthCue",
timedShape.getValue()
);
mouthCueElement.put("<xmlattr>.start", formatDuration(timedShape.getStart()));
mouthCueElement.put("<xmlattr>.end", formatDuration(timedShape.getEnd()));
}
@@ -2,12 +2,15 @@
#include "animation/targetShapeSet.h"

// Makes sure there is at least one mouth shape
std::vector<Timed<Shape>> dummyShapeIfEmpty(const JoiningTimeline<Shape>& animation, const ShapeSet& targetShapeSet) {
std::vector<Timed<Shape>> dummyShapeIfEmpty(
const JoiningTimeline<Shape>& animation,
const ShapeSet& targetShapeSet
) {
std::vector<Timed<Shape>> result;
std::copy(animation.begin(), animation.end(), std::back_inserter(result));
if (result.empty()) {
// Add zero-length empty mouth
result.push_back(Timed<Shape>(0_cs, 0_cs, convertToTargetShapeSet(Shape::X, targetShapeSet)));
result.emplace_back(0_cs, 0_cs, convertToTargetShapeSet(Shape::X, targetShapeSet));
}
return result;
}

@@ -4,4 +4,7 @@
#include "time/Timeline.h"

// Makes sure there is at least one mouth shape
std::vector<Timed<Shape>> dummyShapeIfEmpty(const JoiningTimeline<Shape>& animation, const ShapeSet& targetShapeSet);
std::vector<Timed<Shape>> dummyShapeIfEmpty(
const JoiningTimeline<Shape>& animation,
const ShapeSet& targetShapeSet
);
@@ -22,12 +22,13 @@ namespace logging {
static int lastThreadId = 0;
thread_id threadId = std::this_thread::get_id();
if (threadCounters.find(threadId) == threadCounters.end()) {
threadCounters.insert({threadId, ++lastThreadId});
threadCounters.insert({ threadId, ++lastThreadId });
}
return threadCounters.find(threadId)->second;
}

Entry::Entry(Level level, const string& message) :
timestamp(),
level(level),
message(message)
{

@@ -14,13 +14,13 @@ namespace logging {
}

EnumConverter<Level>::member_data LevelConverter::getMemberData() {
return member_data{
{Level::Trace, "Trace"},
{Level::Debug, "Debug"},
{Level::Info, "Info"},
{Level::Warn, "Warn"},
{Level::Error, "Error"},
{Level::Fatal, "Fatal"}
return member_data {
{ Level::Trace, "Trace" },
{ Level::Debug, "Debug" },
{ Level::Info, "Info" },
{ Level::Warn, "Warn" },
{ Level::Error, "Error" },
{ Level::Fatal, "Fatal" }
};
}

@@ -28,7 +28,7 @@ namespace logging {
return LevelConverter::get().write(stream, value);
}

std::istream& operator >> (std::istream& stream, Level& value) {
std::istream& operator >>(std::istream& stream, Level& value) {
return LevelConverter::get().read(stream, value);
}

@@ -24,6 +24,6 @@ namespace logging {
std::ostream& operator<<(std::ostream& stream, Level value);

std::istream& operator >> (std::istream& stream, Level& value);
std::istream& operator >>(std::istream& stream, Level& value);

}

@@ -12,7 +12,12 @@ namespace logging {
}

string SimpleFileFormatter::format(const Entry& entry) {
return fmt::format("[{0}] {1} {2}", formatTime(entry.timestamp, "%F %H:%M:%S"), entry.threadCounter, consoleFormatter.format(entry));
return fmt::format(
"[{0}] {1} {2}",
formatTime(entry.timestamp, "%F %H:%M:%S"),
entry.threadCounter,
consoleFormatter.format(entry)
);
}

}

@@ -14,7 +14,7 @@ namespace logging {
void log(Level level, const std::string& message);

template <typename... Args>
template<typename... Args>
void logFormat(Level level, fmt::CStringRef format, const Args&... args) {
log(level, fmt::format(format, args...));
}

@@ -3,7 +3,6 @@
#include "Entry.h"

using std::string;
using std::lock_guard;
using std::shared_ptr;

namespace logging {

@@ -25,7 +24,7 @@ namespace logging {
{}

void StreamSink::receive(const Entry& entry) {
string line = formatter->format(entry);
const string line = formatter->format(entry);
*stream << line << std::endl;
}

@@ -3,7 +3,6 @@
#include "Sink.h"
#include <memory>
#include "Formatter.h"
#include <mutex>

namespace logging {
enum class Level;
@@ -26,7 +26,8 @@ static lambda_unique_ptr<ps_decoder_t> createDecoder(optional<std::string> dialo
// High values (>= 1.0) can lead to imprecise or freezing animation.
"-lw", "0.8",

// The following settings are recommended at http://cmusphinx.sourceforge.net/wiki/phonemerecognition
// The following settings are recommended at
// http://cmusphinx.sourceforge.net/wiki/phonemerecognition

// Set beam width applied to every frame in Viterbi search
"-beam", "1e-20",

@@ -56,7 +57,9 @@ static Timeline<Phone> utteranceToPhones(
paddedTimeRange.grow(padding);
paddedTimeRange.trim(audioClip.getTruncatedRange());

const unique_ptr<AudioClip> clipSegment = audioClip.clone() | segment(paddedTimeRange) | resample(sphinxSampleRate);
const unique_ptr<AudioClip> clipSegment = audioClip.clone()
| segment(paddedTimeRange)
| resample(sphinxSampleRate);
const auto audioBuffer = copyTo16bitBuffer(*clipSegment);

// Detect phones (returned as words)
@@ -67,9 +67,15 @@ lambda_unique_ptr<ngram_model_t> createDefaultLanguageModel(ps_decoder_t& decode
return result;
}

lambda_unique_ptr<ngram_model_t> createDialogLanguageModel(ps_decoder_t& decoder, const string& dialog) {
lambda_unique_ptr<ngram_model_t> createDialogLanguageModel(
ps_decoder_t& decoder,
const string& dialog
) {
// Split dialog into normalized words
vector<string> words = tokenizeText(dialog, [&](const string& word) { return dictionaryContains(*decoder.dict, word); });
vector<string> words = tokenizeText(
dialog,
[&](const string& word) { return dictionaryContains(*decoder.dict, word); }
);

// Add dialog-specific words to the dictionary
addMissingDictionaryWords(words, decoder);

@@ -80,15 +86,27 @@ lambda_unique_ptr<ngram_model_t> createDialogLanguageModel(ps_decoder_t& decoder
return createLanguageModel(words, decoder);
}

lambda_unique_ptr<ngram_model_t> createBiasedLanguageModel(ps_decoder_t& decoder, const string& dialog) {
lambda_unique_ptr<ngram_model_t> createBiasedLanguageModel(
ps_decoder_t& decoder,
const string& dialog
) {
auto defaultLanguageModel = createDefaultLanguageModel(decoder);
auto dialogLanguageModel = createDialogLanguageModel(decoder, dialog);
constexpr int modelCount = 2;
array<ngram_model_t*, modelCount> languageModels{ defaultLanguageModel.get(), dialogLanguageModel.get() };
array<const char*, modelCount> modelNames{ "defaultLM", "dialogLM" };
array<float, modelCount> modelWeights{ 0.1f, 0.9f };
array<ngram_model_t*, modelCount> languageModels {
defaultLanguageModel.get(),
dialogLanguageModel.get()
};
array<const char*, modelCount> modelNames { "defaultLM", "dialogLM" };
array<float, modelCount> modelWeights { 0.1f, 0.9f };
lambda_unique_ptr<ngram_model_t> result(
ngram_model_set_init(nullptr, languageModels.data(), const_cast<char**>(modelNames.data()), modelWeights.data(), modelCount),
ngram_model_set_init(
nullptr,
languageModels.data(),
const_cast<char**>(modelNames.data()),
modelWeights.data(),
modelCount
),
[](ngram_model_t* lm) { ngram_model_free(lm); });
if (!result) {
throw runtime_error("Error creating biased language model.");

@@ -105,7 +123,8 @@ static lambda_unique_ptr<ps_decoder_t> createDecoder(optional<std::string> dialo
"-hmm", (getSphinxModelDirectory() / "acoustic-model").string().c_str(),
// Set pronunciation dictionary
"-dict", (getSphinxModelDirectory() / "cmudict-en-us.dict").string().c_str(),
// Add noise against zero silence (see http://cmusphinx.sourceforge.net/wiki/faq#qwhy_my_accuracy_is_poor)
// Add noise against zero silence
// (see http://cmusphinx.sourceforge.net/wiki/faq#qwhy_my_accuracy_is_poor)
"-dither", "yes",
// Disable VAD -- we're doing that ourselves
"-remove_silence", "no",

@@ -184,7 +203,11 @@ optional<Timeline<Phone>> getPhoneAlignment(
// Extract phones with timestamps
char** phoneNames = decoder.dict->mdef->ciname;
Timeline<Phone> result;
for (ps_alignment_iter_t* it = ps_alignment_phones(alignment.get()); it; it = ps_alignment_iter_next(it)) {
for (
ps_alignment_iter_t* it = ps_alignment_phones(alignment.get());
it;
it = ps_alignment_iter_next(it)
) {
// Get phone
ps_alignment_entry_t* phoneEntry = ps_alignment_iter_get(it);
const s3cipid_t phoneId = phoneEntry->id.pid.cipid;

@@ -209,7 +232,7 @@ optional<Timeline<Phone>> getPhoneAlignment(
// Some words have multiple pronunciations, one of which results in better animation than the others.
// This function returns the optimal pronunciation for a select set of these words.
string fixPronunciation(const string& word) {
const static map<string, string> replacements{
const static map<string, string> replacements {
{ "into(2)", "into" },
{ "to(2)", "to" },
{ "to(3)", "to" },

@@ -238,7 +261,9 @@ static Timeline<Phone> utteranceToPhones(
paddedTimeRange.grow(padding);
paddedTimeRange.trim(audioClip.getTruncatedRange());

const unique_ptr<AudioClip> clipSegment = audioClip.clone() | segment(paddedTimeRange) | resample(sphinxSampleRate);
const unique_ptr<AudioClip> clipSegment = audioClip.clone()
| segment(paddedTimeRange)
| resample(sphinxSampleRate);
const auto audioBuffer = copyTo16bitBuffer(*clipSegment);

// Get words

@@ -273,7 +298,7 @@ static Timeline<Phone> utteranceToPhones(
const string fixedWord = fixPronunciation(timedWord.getValue());
wordIds.push_back(getWordId(fixedWord, *decoder.dict));
}
if (wordIds.empty()) return{};
if (wordIds.empty()) return {};

// Align the words' phones with speech
#if BOOST_VERSION < 105600 // Support legacy syntax

@@ -309,5 +334,6 @@ BoundedTimeline<Phone> PocketSphinxRecognizer::recognizePhones(
int maxThreadCount,
ProgressSink& progressSink
) const {
return ::recognizePhones(inputAudioClip, dialog, &createDecoder, &utteranceToPhones, maxThreadCount, progressSink);
return ::recognizePhones(
inputAudioClip, dialog, &createDecoder, &utteranceToPhones, maxThreadCount, progressSink);
}
@@ -9,7 +9,7 @@ class Recognizer {
public:
virtual ~Recognizer() = default;

virtual BoundedTimeline<Phone>recognizePhones(
virtual BoundedTimeline<Phone> recognizePhones(
const AudioClip& audioClip,
boost::optional<std::string> dialog,
int maxThreadCount,

@@ -64,8 +64,9 @@ Phone charToPhone(wchar_t c) {
case L'r': return Phone::R;
case L'l': return Phone::L;
case L'h': return Phone::HH;
default:
return Phone::Noise;
}
return Phone::Noise;
}

vector<Phone> wordToPhones(const std::string& word) {

@@ -94,8 +95,11 @@ vector<Phone> wordToPhones(const std::string& word) {
for (wchar_t c : wideWord) {
Phone phone = charToPhone(c);
if (phone == Phone::Noise) {
logging::errorFormat("G2P error determining pronunciation for '{}': Character '{}' is not a recognized phone shorthand.",
word, static_cast<char>(c));
logging::errorFormat(
"G2P error determining pronunciation for '{}': Character '{}' is not a recognized phone shorthand.",
word,
static_cast<char>(c)
);
}

if (phone != lastPhone) {
@@ -15,83 +15,94 @@ using std::vector;
using std::regex;
using std::map;
using std::tuple;
using std::make_tuple;
using std::get;
using std::endl;
using boost::filesystem::path;

using unigram_t = string;
using bigram_t = tuple<string, string>;
using trigram_t = tuple<string, string, string>;
using Unigram = string;
using Bigram = tuple<string, string>;
using Trigram = tuple<string, string, string>;

map<unigram_t, int> getUnigramCounts(const vector<string>& words) {
map<unigram_t, int> unigramCounts;
for (const unigram_t& unigram : words) {
map<Unigram, int> getUnigramCounts(const vector<string>& words) {
map<Unigram, int> unigramCounts;
for (const Unigram& unigram : words) {
++unigramCounts[unigram];
}
return unigramCounts;
}

map<bigram_t, int> getBigramCounts(const vector<string>& words) {
map<bigram_t, int> bigramCounts;
map<Bigram, int> getBigramCounts(const vector<string>& words) {
map<Bigram, int> bigramCounts;
for (auto it = words.begin(); it < words.end() - 1; ++it) {
++bigramCounts[bigram_t(*it, *(it + 1))];
++bigramCounts[Bigram(*it, *(it + 1))];
}
return bigramCounts;
}

map<trigram_t, int> getTrigramCounts(const vector<string>& words) {
map<trigram_t, int> trigramCounts;
map<Trigram, int> getTrigramCounts(const vector<string>& words) {
map<Trigram, int> trigramCounts;
if (words.size() >= 3) {
for (auto it = words.begin(); it < words.end() - 2; ++it) {
++trigramCounts[trigram_t(*it, *(it + 1), *(it + 2))];
++trigramCounts[Trigram(*it, *(it + 1), *(it + 2))];
}
}
return trigramCounts;
}

map<unigram_t, double> getUnigramProbabilities(const vector<string>& words, const map<unigram_t, int>& unigramCounts, const double deflator) {
map<unigram_t, double> unigramProbabilities;
map<Unigram, double> getUnigramProbabilities(
const vector<string>& words,
const map<Unigram, int>& unigramCounts,
const double deflator
) {
map<Unigram, double> unigramProbabilities;
for (const auto& pair : unigramCounts) {
unigram_t unigram = get<0>(pair);
int unigramCount = get<1>(pair);
const Unigram& unigram = get<0>(pair);
const int unigramCount = get<1>(pair);
unigramProbabilities[unigram] = double(unigramCount) / words.size() * deflator;
}
return unigramProbabilities;
}

map<bigram_t, double> getBigramProbabilities(const map<unigram_t, int>& unigramCounts, const map<bigram_t, int>& bigramCounts, const double deflator) {
map<bigram_t, double> bigramProbabilities;
map<Bigram, double> getBigramProbabilities(
const map<Unigram, int>& unigramCounts,
const map<Bigram, int>& bigramCounts,
const double deflator
) {
map<Bigram, double> bigramProbabilities;
for (const auto& pair : bigramCounts) {
bigram_t bigram = get<0>(pair);
int bigramCount = get<1>(pair);
int unigramPrefixCount = unigramCounts.at(get<0>(bigram));
Bigram bigram = get<0>(pair);
const int bigramCount = get<1>(pair);
const int unigramPrefixCount = unigramCounts.at(get<0>(bigram));
bigramProbabilities[bigram] = double(bigramCount) / unigramPrefixCount * deflator;
}
return bigramProbabilities;
}

map<trigram_t, double> getTrigramProbabilities(const map<bigram_t, int>& bigramCounts, const map<trigram_t, int>& trigramCounts, const double deflator) {
map<trigram_t, double> trigramProbabilities;
map<Trigram, double> getTrigramProbabilities(
const map<Bigram, int>& bigramCounts,
const map<Trigram, int>& trigramCounts,
const double deflator
) {
map<Trigram, double> trigramProbabilities;
for (const auto& pair : trigramCounts) {
trigram_t trigram = get<0>(pair);
int trigramCount = get<1>(pair);
int bigramPrefixCount = bigramCounts.at(bigram_t(get<0>(trigram), get<1>(trigram)));
Trigram trigram = get<0>(pair);
const int trigramCount = get<1>(pair);
const int bigramPrefixCount = bigramCounts.at(Bigram(get<0>(trigram), get<1>(trigram)));
trigramProbabilities[trigram] = double(trigramCount) / bigramPrefixCount * deflator;
}
return trigramProbabilities;
}

map<unigram_t, double> getUnigramBackoffWeights(
const map<unigram_t, int>& unigramCounts,
const map<unigram_t, double>& unigramProbabilities,
const map<bigram_t, int>& bigramCounts,
map<Unigram, double> getUnigramBackoffWeights(
const map<Unigram, int>& unigramCounts,
const map<Unigram, double>& unigramProbabilities,
const map<Bigram, int>& bigramCounts,
const double discountMass)
{
map<unigram_t, double> unigramBackoffWeights;
for (const unigram_t& unigram : unigramCounts | boost::adaptors::map_keys) {
map<Unigram, double> unigramBackoffWeights;
for (const Unigram& unigram : unigramCounts | boost::adaptors::map_keys) {
double denominator = 1;
for (const bigram_t& bigram : bigramCounts | boost::adaptors::map_keys) {
for (const Bigram& bigram : bigramCounts | boost::adaptors::map_keys) {
if (get<0>(bigram) == unigram) {
denominator -= unigramProbabilities.at(get<1>(bigram));
}
@@ -101,18 +112,18 @@ map<unigram_t, double> getUnigramBackoffWeights(
return unigramBackoffWeights;
}

map<bigram_t, double> getBigramBackoffWeights(
const map<bigram_t, int>& bigramCounts,
const map<bigram_t, double>& bigramProbabilities,
const map<trigram_t, int>& trigramCounts,
map<Bigram, double> getBigramBackoffWeights(
const map<Bigram, int>& bigramCounts,
const map<Bigram, double>& bigramProbabilities,
const map<Trigram, int>& trigramCounts,
const double discountMass)
{
map<bigram_t, double> bigramBackoffWeights;
for (const bigram_t& bigram : bigramCounts | boost::adaptors::map_keys) {
map<Bigram, double> bigramBackoffWeights;
for (const Bigram& bigram : bigramCounts | boost::adaptors::map_keys) {
double denominator = 1;
for (const trigram_t& trigram : trigramCounts | boost::adaptors::map_keys) {
if (bigram_t(get<0>(trigram), get<1>(trigram)) == bigram) {
denominator -= bigramProbabilities.at(bigram_t(get<1>(trigram), get<2>(trigram)));
for (const Trigram& trigram : trigramCounts | boost::adaptors::map_keys) {
if (Bigram(get<0>(trigram), get<1>(trigram)) == bigram) {
denominator -= bigramProbabilities.at(Bigram(get<1>(trigram), get<2>(trigram)));
}
}
bigramBackoffWeights[bigram] = discountMass / denominator;

@@ -120,20 +131,25 @@ map<bigram_t, double> getBigramBackoffWeights(
return bigramBackoffWeights;
}

void createLanguageModelFile(const vector<string>& words, path filePath) {
void createLanguageModelFile(const vector<string>& words, const path& filePath) {
const double discountMass = 0.5;
const double deflator = 1.0 - discountMass;

map<unigram_t, int> unigramCounts = getUnigramCounts(words);
map<bigram_t, int> bigramCounts = getBigramCounts(words);
map<trigram_t, int> trigramCounts = getTrigramCounts(words);
map<Unigram, int> unigramCounts = getUnigramCounts(words);
map<Bigram, int> bigramCounts = getBigramCounts(words);
map<Trigram, int> trigramCounts = getTrigramCounts(words);

map<unigram_t, double> unigramProbabilities = getUnigramProbabilities(words, unigramCounts, deflator);
map<bigram_t, double> bigramProbabilities = getBigramProbabilities(unigramCounts, bigramCounts, deflator);
map<trigram_t, double> trigramProbabilities = getTrigramProbabilities(bigramCounts, trigramCounts, deflator);
map<Unigram, double> unigramProbabilities =
getUnigramProbabilities(words, unigramCounts, deflator);
map<Bigram, double> bigramProbabilities =
getBigramProbabilities(unigramCounts, bigramCounts, deflator);
map<Trigram, double> trigramProbabilities =
getTrigramProbabilities(bigramCounts, trigramCounts, deflator);

map<unigram_t, double> unigramBackoffWeights = getUnigramBackoffWeights(unigramCounts, unigramProbabilities, bigramCounts, discountMass);
map<bigram_t, double> bigramBackoffWeights = getBigramBackoffWeights(bigramCounts, bigramProbabilities, trigramCounts, discountMass);
map<Unigram, double> unigramBackoffWeights =
getUnigramBackoffWeights(unigramCounts, unigramProbabilities, bigramCounts, discountMass);
map<Bigram, double> bigramBackoffWeights =
getBigramBackoffWeights(bigramCounts, bigramProbabilities, trigramCounts, discountMass);

boost::filesystem::ofstream file(filePath);
file << "Generated by " << appName << " " << appVersion << endl << endl;

@@ -146,7 +162,7 @@ void createLanguageModelFile(const vector<string>& words, path filePath) {
file.setf(std::ios::fixed, std::ios::floatfield);
file.precision(4);
file << "\\1-grams:" << endl;
for (const unigram_t& unigram : unigramCounts | boost::adaptors::map_keys) {
for (const Unigram& unigram : unigramCounts | boost::adaptors::map_keys) {
file << log10(unigramProbabilities.at(unigram))
<< " " << unigram
<< " " << log10(unigramBackoffWeights.at(unigram)) << endl;

@@ -154,7 +170,7 @@ void createLanguageModelFile(const vector<string>& words, path filePath) {
file << endl;

file << "\\2-grams:" << endl;
for (const bigram_t& bigram : bigramCounts | boost::adaptors::map_keys) {
for (const Bigram& bigram : bigramCounts | boost::adaptors::map_keys) {
file << log10(bigramProbabilities.at(bigram))
<< " " << get<0>(bigram) << " " << get<1>(bigram)
<< " " << log10(bigramBackoffWeights.at(bigram)) << endl;

@@ -162,7 +178,7 @@ void createLanguageModelFile(const vector<string>& words, path filePath) {
file << endl;

file << "\\3-grams:" << endl;
for (const trigram_t& trigram : trigramCounts | boost::adaptors::map_keys) {
for (const Trigram& trigram : trigramCounts | boost::adaptors::map_keys) {
file << log10(trigramProbabilities.at(trigram))
<< " " << get<0>(trigram) << " " << get<1>(trigram) << " " << get<2>(trigram) << endl;
}

@@ -171,7 +187,10 @@ void createLanguageModelFile(const vector<string>& words, path filePath) {
file << "\\end\\" << endl;
}

lambda_unique_ptr<ngram_model_t> createLanguageModel(const vector<string>& words, ps_decoder_t& decoder) {
lambda_unique_ptr<ngram_model_t> createLanguageModel(
const vector<string>& words,
ps_decoder_t& decoder
) {
path tempFilePath = getTempFilePath();
createLanguageModelFile(words, tempFilePath);
auto deleteTempFile = gsl::finally([&]() { boost::filesystem::remove(tempFilePath); });
@@ -8,4 +8,7 @@ extern "C" {
#include <ngram_search.h>
}

lambda_unique_ptr<ngram_model_t> createLanguageModel(const std::vector<std::string>& words, ps_decoder_t& decoder);
lambda_unique_ptr<ngram_model_t> createLanguageModel(
const std::vector<std::string>& words,
ps_decoder_t& decoder
);
@@ -26,18 +26,18 @@ using std::chrono::duration_cast;
logging::Level convertSphinxErrorLevel(err_lvl_t errorLevel) {
switch (errorLevel) {
case ERR_DEBUG:
case ERR_INFO:
case ERR_INFOCONT:
return logging::Level::Trace;
case ERR_WARN:
return logging::Level::Warn;
case ERR_ERROR:
return logging::Level::Error;
case ERR_FATAL:
return logging::Level::Fatal;
default:
throw invalid_argument("Unknown log level.");
case ERR_DEBUG:
case ERR_INFO:
case ERR_INFOCONT:
return logging::Level::Trace;
case ERR_WARN:
return logging::Level::Warn;
case ERR_ERROR:
return logging::Level::Error;
case ERR_FATAL:
return logging::Level::Fatal;
default:
throw invalid_argument("Unknown log level.");
}
}

@@ -61,7 +61,8 @@ void sphinxLogCallback(void* user_data, err_lvl_t errorLevel, const char* format
if (!success) chars.resize(chars.size() * 2);
}
const regex waste("^(DEBUG|INFO|INFOCONT|WARN|ERROR|FATAL): ");
string message = std::regex_replace(chars.data(), waste, "", std::regex_constants::format_first_only);
string message =
std::regex_replace(chars.data(), waste, "", std::regex_constants::format_first_only);
boost::algorithm::trim(message);

const logging::Level logLevel = convertSphinxErrorLevel(errorLevel);

@@ -115,8 +116,12 @@ BoundedTimeline<Phone> recognizePhones(
const auto processUtterance = [&](Timed<void> timedUtterance, ProgressSink& utteranceProgressSink) {
// Detect phones for utterance
const auto decoder = decoderPool.acquire();
Timeline<Phone> utterancePhones =
utteranceToPhones(*audioClip, timedUtterance.getTimeRange(), *decoder, utteranceProgressSink);
Timeline<Phone> utterancePhones = utteranceToPhones(
*audioClip,
timedUtterance.getTimeRange(),
*decoder,
utteranceProgressSink
);

// Copy phones to result timeline
std::lock_guard<std::mutex> lock(resultMutex);

@@ -137,13 +142,21 @@ BoundedTimeline<Phone> recognizePhones(
// Don't use more threads than there are utterances to be processed
static_cast<int>(utterances.size()),
// Don't waste time creating additional threads (and decoders!) if the recording is short
static_cast<int>(duration_cast<std::chrono::seconds>(audioClip->getTruncatedRange().getDuration()).count() / 5)
static_cast<int>(
duration_cast<std::chrono::seconds>(audioClip->getTruncatedRange().getDuration()).count() / 5
)
});
if (threadCount < 1) {
threadCount = 1;
}
logging::debugFormat("Speech recognition using {} threads -- start", threadCount);
runParallel(processUtterance, utterances, threadCount, dialogProgressSink, getUtteranceProgressWeight);
runParallel(
processUtterance,
utterances,
threadCount,
dialogProgressSink,
getUtteranceProgressWeight
);
logging::debug("Speech recognition -- end");
} catch (...) {
std::throw_with_nested(runtime_error("Error performing speech recognition via PocketSphinx."));

@@ -200,7 +213,9 @@ BoundedTimeline<string> recognizeWords(const vector<int16_t>& audioBuffer, ps_de
error = ps_end_utt(&decoder);
if (error) throw runtime_error("Error ending utterance processing for word recognition.");

BoundedTimeline<string> result(TimeRange(0_cs, centiseconds(100 * audioBuffer.size() / sphinxSampleRate)));
BoundedTimeline<string> result(
TimeRange(0_cs, centiseconds(100 * audioBuffer.size() / sphinxSampleRate))
);
const bool noWordsRecognized = reinterpret_cast<ngram_search_t*>(decoder.search)->bpidx == 0;
if (noWordsRecognized) {
return result;
@@ -36,4 +36,7 @@ const boost::filesystem::path& getSphinxModelDirectory();
JoiningTimeline<void> getNoiseSounds(TimeRange utteranceTimeRange, const Timeline<Phone>& phones);

BoundedTimeline<std::string> recognizeWords(const std::vector<int16_t>& audioBuffer, ps_decoder_t& decoder);
BoundedTimeline<std::string> recognizeWords(
const std::vector<int16_t>& audioBuffer,
ps_decoder_t& decoder
);
@@ -2,6 +2,7 @@
#include "tools/tools.h"
#include "tools/stringTools.h"
#include <regex>
#include <boost/optional/optional.hpp>

extern "C" {
#include <cst_utt_utils.h>

@@ -21,7 +22,7 @@ lambda_unique_ptr<cst_voice> createDummyVoice() {
lambda_unique_ptr<cst_voice> voice(new_voice(), [](cst_voice* voice) { delete_voice(voice); });
voice->name = "dummy_voice";
usenglish_init(voice.get());
cst_lexicon *lexicon = cmu_lex_init();
cst_lexicon* lexicon = cmu_lex_init();
feat_set(voice->features, "lexicon", lexicon_val(lexicon));
return voice;
}

@@ -37,7 +38,10 @@ vector<string> tokenizeViaFlite(const string& text) {
const string asciiText = utf8ToAscii(text);

// Create utterance object with text
lambda_unique_ptr<cst_utterance> utterance(new_utterance(), [](cst_utterance* utterance) { delete_utterance(utterance); });
lambda_unique_ptr<cst_utterance> utterance(
new_utterance(),
[](cst_utterance* utterance) { delete_utterance(utterance); }
);
utt_set_input_text(utterance.get(), asciiText.c_str());
lambda_unique_ptr<cst_voice> voice = createDummyVoice();
utt_init(utterance.get(), voice.get());

@@ -48,14 +52,21 @@ vector<string> tokenizeViaFlite(const string& text) {
}

vector<string> result;
for (cst_item* item = relation_head(utt_relation(utterance.get(), "Word")); item; item = item_next(item)) {
for (
cst_item* item = relation_head(utt_relation(utterance.get(), "Word"));
item;
item = item_next(item)
) {
const char* word = item_feat_string(item, "name");
result.push_back(word);
result.emplace_back(word);
}
return result;
}

optional<string> findSimilarDictionaryWord(const string& word, function<bool(const string&)> dictionaryContains) {
optional<string> findSimilarDictionaryWord(
const string& word,
const function<bool(const string&)>& dictionaryContains
) {
for (bool addPeriod : { false, true }) {
for (int apostropheIndex = -1; apostropheIndex <= static_cast<int>(word.size()); ++apostropheIndex) {
string modified = word;

@@ -75,12 +86,15 @@ optional<string> findSimilarDictionaryWord(const string& word, function<bool(con
return boost::none;
}

vector<string> tokenizeText(const string& text, function<bool(const string&)> dictionaryContains) {
vector<string> tokenizeText(
const string& text,
const function<bool(const string&)>& dictionaryContains
) {
vector<string> words = tokenizeViaFlite(text);

// Join words separated by apostophes
// Join words separated by apostrophes
for (int i = words.size() - 1; i > 0; --i) {
if (words[i].size() > 0 && words[i][0] == '\'') {
if (!words[i].empty() && words[i][0] == '\'') {
words[i - 1].append(words[i]);
words.erase(words.begin() + i);
}

@@ -95,21 +109,24 @@ vector<string> tokenizeText(const string& text, function<bool(const string&)> di
{ regex("@"), "at" },
{ regex("[^a-z']"), "" }
};
for (size_t i = 0; i < words.size(); ++i) {
for (auto& word : words) {
for (const auto& replacement : replacements) {
words[i] = regex_replace(words[i], replacement.first, replacement.second);
word = regex_replace(word, replacement.first, replacement.second);
}
}

// Remove empty words
words.erase(std::remove_if(words.begin(), words.end(), [](const string& s) { return s.empty(); }), words.end());
words.erase(
std::remove_if(words.begin(), words.end(), [](const string& s) { return s.empty(); }),
words.end()
);

// Try to replace words that are not in the dictionary with similar ones that are
for (size_t i = 0; i < words.size(); ++i) {
if (!dictionaryContains(words[i])) {
optional<string> modifiedWord = findSimilarDictionaryWord(words[i], dictionaryContains);
for (auto& word : words) {
if (!dictionaryContains(word)) {
optional<string> modifiedWord = findSimilarDictionaryWord(word, dictionaryContains);
if (modifiedWord) {
words[i] = *modifiedWord;
word = *modifiedWord;
}
}
}

@@ -4,4 +4,7 @@
#include <functional>
#include <string>

std::vector<std::string> tokenizeText(const std::string& text, std::function<bool(const std::string&)> dictionaryContains);
std::vector<std::string> tokenizeText(
const std::string& text,
const std::function<bool(const std::string&)>& dictionaryContains
);
@@ -12,7 +12,7 @@ string ExportFormatConverter::getTypeName() {
}

EnumConverter<ExportFormat>::member_data ExportFormatConverter::getMemberData() {
return member_data{
return member_data {
{ ExportFormat::Tsv, "tsv" },
{ ExportFormat::Xml, "xml" },
{ ExportFormat::Json, "json" }

@@ -12,7 +12,7 @@ string RecognizerTypeConverter::getTypeName() {
}

EnumConverter<RecognizerType>::member_data RecognizerTypeConverter::getMemberData() {
return member_data{
return member_data {
{ RecognizerType::PocketSphinx, "pocketSphinx" },
{ RecognizerType::Phonetic, "phonetic" }
};
@ -3,7 +3,6 @@
|
|||
#include <tclap/CmdLine.h>
|
||||
#include "core/appInfo.h"
|
||||
#include "tools/NiceCmdLineOutput.h"
|
||||
#include "tools/ProgressBar.h"
|
||||
#include "logging/logging.h"
|
||||
#include "logging/sinks.h"
|
||||
#include "logging/formatters.h"
|
||||
|
@ -52,45 +51,48 @@ namespace TCLAP {
|
|||
struct ArgTraits<logging::Level> {
|
||||
typedef ValueLike ValueCategory;
|
||||
};
|
||||
|
||||
template<>
|
||||
struct ArgTraits<ExportFormat> {
|
||||
typedef ValueLike ValueCategory;
|
||||
};
|
||||
|
||||
template<>
|
||||
struct ArgTraits<RecognizerType> {
|
||||
typedef ValueLike ValueCategory;
|
||||
};
|
||||
}
|
||||
|
||||
shared_ptr<logging::Sink> createFileSink(path path, logging::Level minLevel) {
|
||||
shared_ptr<logging::Sink> createFileSink(const path& path, logging::Level minLevel) {
|
||||
auto file = make_shared<boost::filesystem::ofstream>();
|
||||
file->exceptions(std::ifstream::failbit | std::ifstream::badbit);
|
||||
file->open(path);
|
||||
auto FileSink = make_shared<logging::StreamSink>(file, make_shared<logging::SimpleFileFormatter>());
|
||||
auto FileSink =
|
||||
make_shared<logging::StreamSink>(file, make_shared<logging::SimpleFileFormatter>());
|
||||
return make_shared<logging::LevelFilter>(FileSink, minLevel);
|
||||
}
|
||||
|
||||
unique_ptr<Recognizer> createRecognizer(RecognizerType recognizerType) {
|
||||
switch (recognizerType) {
|
||||
case RecognizerType::PocketSphinx:
|
||||
return make_unique<PocketSphinxRecognizer>();
|
||||
case RecognizerType::Phonetic:
|
||||
return make_unique<PhoneticRecognizer>();
|
||||
default:
|
||||
throw std::runtime_error("Unknown recognizer.");
|
||||
case RecognizerType::PocketSphinx:
|
||||
return make_unique<PocketSphinxRecognizer>();
|
||||
case RecognizerType::Phonetic:
|
||||
return make_unique<PhoneticRecognizer>();
|
||||
default:
|
||||
throw std::runtime_error("Unknown recognizer.");
|
||||
}
|
||||
}
|
||||
|
||||
unique_ptr<Exporter> createExporter(ExportFormat exportFormat) {
|
||||
switch (exportFormat) {
|
||||
case ExportFormat::Tsv:
|
||||
return make_unique<TsvExporter>();
|
||||
case ExportFormat::Xml:
|
||||
return make_unique<XmlExporter>();
|
||||
case ExportFormat::Json:
|
||||
return make_unique<JsonExporter>();
|
||||
default:
|
||||
throw std::runtime_error("Unknown export format.");
|
||||
case ExportFormat::Tsv:
|
||||
return make_unique<TsvExporter>();
|
||||
case ExportFormat::Xml:
|
||||
return make_unique<XmlExporter>();
|
||||
case ExportFormat::Json:
|
||||
return make_unique<JsonExporter>();
|
||||
default:
|
||||
throw std::runtime_error("Unknown export format.");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -106,7 +108,7 @@ ShapeSet getTargetShapeSet(const string& extendedShapesString) {
|
|||
return result;
|
||||
}
|
||||
|
||||
int main(int platformArgc, char *platformArgv[]) {
|
||||
int main(int platformArgc, char* platformArgv[]) {
|
||||
// Set up default logging so early errors are printed to stdout
|
||||
const logging::Level defaultMinStderrLevel = logging::Level::Error;
|
||||
shared_ptr<logging::Sink> defaultSink = make_shared<NiceStderrSink>(defaultMinStderrLevel);
|
||||
|
@ -124,24 +126,71 @@ int main(int platformArgc, char *platformArgv[]) {
|
|||
tclap::CmdLine cmd(appName, argumentValueSeparator, appVersion);
|
||||
cmd.setExceptionHandling(false);
|
||||
cmd.setOutput(new NiceCmdLineOutput());
|
||||
tclap::ValueArg<string> outputFileName("o", "output", "The output file path.", false, string(), "string", cmd);
|
||||
|
||||
tclap::ValueArg<string> outputFileName(
|
||||
"o", "output", "The output file path.",
|
||||
false, string(), "string", cmd
|
||||
);
|
||||
|
||||
auto logLevels = vector<logging::Level>(logging::LevelConverter::get().getValues());
|
||||
tclap::ValuesConstraint<logging::Level> logLevelConstraint(logLevels);
tclap::ValueArg<logging::Level> logLevel("", "logLevel", "The minimum log level that will be written to the log file", false, logging::Level::Debug, &logLevelConstraint, cmd);
tclap::ValueArg<string> logFileName("", "logFile", "The log file path.", false, string(), "string", cmd);
tclap::ValueArg<logging::Level> consoleLevel("", "consoleLevel", "The minimum log level that will be printed on the console (stderr)", false, defaultMinStderrLevel, &logLevelConstraint, cmd);
tclap::SwitchArg machineReadableMode("", "machineReadable", "Formats all output to stderr in a structured JSON format.", cmd, false);
tclap::SwitchArg quietMode("q", "quiet", "Suppresses all output to stderr except for warnings and error messages.", cmd, false);
tclap::ValueArg<int> maxThreadCount("", "threads", "The maximum number of worker threads to use.", false, getProcessorCoreCount(), "number", cmd);
tclap::ValueArg<string> extendedShapes("", "extendedShapes", "All extended, optional shapes to use.", false, "GHX", "string", cmd);
tclap::ValueArg<string> dialogFile("d", "dialogFile", "A file containing the text of the dialog.", false, string(), "string", cmd);
tclap::ValueArg<logging::Level> logLevel(
"", "logLevel", "The minimum log level that will be written to the log file",
false, logging::Level::Debug, &logLevelConstraint, cmd
);
tclap::ValueArg<string> logFileName(
"", "logFile", "The log file path.",
false, string(), "string", cmd
);
tclap::ValueArg<logging::Level> consoleLevel(
"", "consoleLevel", "The minimum log level that will be printed on the console (stderr)",
false, defaultMinStderrLevel, &logLevelConstraint, cmd
);
tclap::SwitchArg machineReadableMode(
"", "machineReadable", "Formats all output to stderr in a structured JSON format.",
cmd, false
);
tclap::SwitchArg quietMode(
"q", "quiet", "Suppresses all output to stderr except for warnings and error messages.",
cmd, false
);
tclap::ValueArg<int> maxThreadCount(
"", "threads", "The maximum number of worker threads to use.",
false, getProcessorCoreCount(), "number", cmd
);
tclap::ValueArg<string> extendedShapes(
"", "extendedShapes", "All extended, optional shapes to use.",
false, "GHX", "string", cmd
);
tclap::ValueArg<string> dialogFile(
"d", "dialogFile", "A file containing the text of the dialog.",
false, string(), "string", cmd
);
auto exportFormats = vector<ExportFormat>(ExportFormatConverter::get().getValues());
tclap::ValuesConstraint<ExportFormat> exportFormatConstraint(exportFormats);
tclap::ValueArg<ExportFormat> exportFormat("f", "exportFormat", "The export format.", false, ExportFormat::Tsv, &exportFormatConstraint, cmd);
tclap::ValueArg<ExportFormat> exportFormat(
"f", "exportFormat", "The export format.",
false, ExportFormat::Tsv, &exportFormatConstraint, cmd
);
auto recognizerTypes = vector<RecognizerType>(RecognizerTypeConverter::get().getValues());
tclap::ValuesConstraint<RecognizerType> recognizerConstraint(recognizerTypes);
tclap::ValueArg<RecognizerType> recognizerType("r", "recognizer", "The dialog recognizer.", false, RecognizerType::PocketSphinx, &recognizerConstraint, cmd);
tclap::UnlabeledValueArg<string> inputFileName("inputFile", "The input file. Must be a sound file in WAVE format.", true, "", "string", cmd);
tclap::ValueArg<RecognizerType> recognizerType(
"r", "recognizer", "The dialog recognizer.",
false, RecognizerType::PocketSphinx, &recognizerConstraint, cmd
);
tclap::UnlabeledValueArg<string> inputFileName(
"inputFile", "The input file. Must be a sound file in WAVE format.",
true, "", "string", cmd
);
try {
// Parse command line
@@ -180,13 +229,17 @@ int main(int platformArgc, char *platformArgv[]) {
try {
// On progress change: Create log message
ProgressForwarder progressSink([](double progress) { logging::log(ProgressEntry(progress)); });
ProgressForwarder progressSink([](double progress) {
logging::log(ProgressEntry(progress));
});
// Animate the recording
logging::info("Starting animation.");
JoiningContinuousTimeline<Shape> animation = animateWaveFile(
inputFilePath,
dialogFile.isSet() ? readUtf8File(path(dialogFile.getValue())) : boost::optional<string>(),
dialogFile.isSet()
? readUtf8File(path(dialogFile.getValue()))
: boost::optional<string>(),
*createRecognizer(recognizerType.getValue()),
targetShapeSet,
maxThreadCount.getValue(),
@@ -207,7 +260,9 @@ int main(int platformArgc, char *platformArgv[]) {
logging::log(SuccessEntry());
} catch (...) {
std::throw_with_nested(std::runtime_error(fmt::format("Error processing file {}.", inputFilePath)));
std::throw_with_nested(
std::runtime_error(fmt::format("Error processing file {}.", inputFilePath))
);
}
return 0;
@ -9,7 +9,6 @@
|
|||
using std::string;
|
||||
using std::make_shared;
|
||||
using logging::Level;
|
||||
using logging::LevelFilter;
|
||||
using logging::StdErrSink;
|
||||
using logging::SimpleConsoleFormatter;
|
||||
using boost::optional;
|
||||
|
@ -21,11 +20,14 @@ NiceStderrSink::NiceStderrSink(Level minLevel) :
|
|||
{}
|
||||
|
||||
void NiceStderrSink::receive(const logging::Entry& entry) {
|
||||
// For selected semantic entries, print a user-friendly message instead of the technical log message.
|
||||
if (const StartEntry* startEntry = dynamic_cast<const StartEntry*>(&entry)) {
|
||||
std::cerr << fmt::format("Generating lip sync data for {}.", startEntry->getInputFilePath()) << std::endl;
|
||||
// For selected semantic entries, print a user-friendly message instead of
|
||||
// the technical log message.
|
||||
if (const auto* startEntry = dynamic_cast<const StartEntry*>(&entry)) {
|
||||
std::cerr
|
||||
<< fmt::format("Generating lip sync data for {}.", startEntry->getInputFilePath())
|
||||
<< std::endl;
|
||||
startProgressIndication();
|
||||
} else if (const ProgressEntry* progressEntry = dynamic_cast<const ProgressEntry*>(&entry)) {
|
||||
} else if (const auto* progressEntry = dynamic_cast<const ProgressEntry*>(&entry)) {
|
||||
assert(progressBar);
|
||||
progress = progressEntry->getProgress();
|
||||
progressBar->reportProgress(progress);
|
||||
|
@ -65,7 +67,7 @@ QuietStderrSink::QuietStderrSink(Level minLevel) :
|
|||
|
||||
void QuietStderrSink::receive(const logging::Entry& entry) {
|
||||
// Set inputFilePath as soon as we get it
|
||||
if (const StartEntry* startEntry = dynamic_cast<const StartEntry*>(&entry)) {
|
||||
if (const auto* startEntry = dynamic_cast<const StartEntry*>(&entry)) {
|
||||
inputFilePath = startEntry->getInputFilePath();
|
||||
}
|
||||
|
||||
|
@ -87,26 +89,42 @@ MachineReadableStderrSink::MachineReadableStderrSink(Level minLevel) :
|
|||
{}
|
||||
|
||||
string formatLogProperty(const logging::Entry& entry) {
|
||||
return fmt::format(R"("log": {{ "level": "{}", "message": "{}" }})", entry.level, escapeJsonString(entry.message));
|
||||
return fmt::format(
|
||||
R"("log": {{ "level": "{}", "message": "{}" }})",
|
||||
entry.level,
|
||||
escapeJsonString(entry.message)
|
||||
);
|
||||
}
|
||||
|
||||
void MachineReadableStderrSink::receive(const logging::Entry& entry) {
|
||||
optional<string> line;
|
||||
if (dynamic_cast<const SemanticEntry*>(&entry)) {
|
||||
if (const StartEntry* startEntry = dynamic_cast<const StartEntry*>(&entry)) {
|
||||
if (const auto* startEntry = dynamic_cast<const StartEntry*>(&entry)) {
|
||||
const string file = escapeJsonString(startEntry->getInputFilePath().string());
|
||||
line = fmt::format(R"({{ "type": "start", "file": "{}", {} }})", file, formatLogProperty(entry));
|
||||
} else if (const ProgressEntry* progressEntry = dynamic_cast<const ProgressEntry*>(&entry)) {
|
||||
line = fmt::format(
|
||||
R"({{ "type": "start", "file": "{}", {} }})",
|
||||
file,
|
||||
formatLogProperty(entry)
|
||||
);
|
||||
} else if (const auto* progressEntry = dynamic_cast<const ProgressEntry*>(&entry)) {
|
||||
const int progressPercent = static_cast<int>(progressEntry->getProgress() * 100);
|
||||
if (progressPercent > lastProgressPercent) {
|
||||
line = fmt::format(R"({{ "type": "progress", "value": {:.2f}, {} }})", progressEntry->getProgress(), formatLogProperty(entry));
|
||||
line = fmt::format(
|
||||
R"({{ "type": "progress", "value": {:.2f}, {} }})",
|
||||
progressEntry->getProgress(),
|
||||
formatLogProperty(entry)
|
||||
);
|
||||
lastProgressPercent = progressPercent;
|
||||
}
|
||||
} else if (dynamic_cast<const SuccessEntry*>(&entry)) {
|
||||
line = fmt::format(R"({{ "type": "success", {} }})", formatLogProperty(entry));
|
||||
} else if (const FailureEntry* failureEntry = dynamic_cast<const FailureEntry*>(&entry)) {
|
||||
} else if (const auto* failureEntry = dynamic_cast<const FailureEntry*>(&entry)) {
|
||||
const string reason = escapeJsonString(failureEntry->getReason());
|
||||
line = fmt::format(R"({{ "type": "failure", "reason": "{}", {} }})", reason, formatLogProperty(entry));
|
||||
line = fmt::format(
|
||||
R"({{ "type": "failure", "reason": "{}", {} }})",
|
||||
reason,
|
||||
formatLogProperty(entry)
|
||||
);
|
||||
} else {
|
||||
throw std::runtime_error("Unsupported type of semantic entry.");
|
||||
}
|
||||
|
|
|
@ -52,7 +52,10 @@ public:
|
|||
|
||||
// Clip the value's range to bounds
|
||||
TimeRange& valueRange = timedValue.getTimeRange();
|
||||
valueRange.resize(max(range.getStart(), valueRange.getStart()), min(range.getEnd(), valueRange.getEnd()));
|
||||
valueRange.resize(
|
||||
max(range.getStart(), valueRange.getStart()),
|
||||
min(range.getEnd(), valueRange.getEnd())
|
||||
);
|
||||
|
||||
return Timeline<T, AutoJoin>::set(timedValue);
|
||||
}
|
||||
|
|
|
@ -29,7 +29,11 @@ public:
|
|||
ContinuousTimeline(range, defaultValue, collection.begin(), collection.end())
|
||||
{}
|
||||
|
||||
ContinuousTimeline(TimeRange range, T defaultValue, std::initializer_list<Timed<T>> initializerList) :
|
||||
ContinuousTimeline(
|
||||
TimeRange range,
|
||||
T defaultValue,
|
||||
std::initializer_list<Timed<T>> initializerList
|
||||
) :
|
||||
ContinuousTimeline(range, defaultValue, initializerList.begin(), initializerList.end())
|
||||
{}
|
||||
|
||||
|
|
|
@ -20,7 +20,11 @@ TimeRange::TimeRange(time_type start, time_type end) :
|
|||
end(end)
|
||||
{
|
||||
if (start > end) {
|
||||
throw std::invalid_argument(fmt::format("Time range start must not be less than end. Start: {0}, end: {1}", start, end));
|
||||
throw std::invalid_argument(fmt::format(
|
||||
"Time range start must not be less than end. Start: {0}, end: {1}",
|
||||
start,
|
||||
end
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -88,16 +92,16 @@ void TimeRange::shrink(time_type value) {
|
|||
}
|
||||
|
||||
void TimeRange::trim(const TimeRange& limits) {
|
||||
TimeRange newRange(std::max(start, limits.start), std::min(end, limits.end));
|
||||
const TimeRange newRange(std::max(start, limits.start), std::min(end, limits.end));
|
||||
resize(newRange);
|
||||
}
|
||||
|
||||
void TimeRange::trimLeft(time_type value) {
|
||||
trim({value, end});
|
||||
trim({ value, end });
|
||||
}
|
||||
|
||||
void TimeRange::trimRight(time_type value) {
|
||||
trim({start, value});
|
||||
trim({ start, value });
|
||||
}
|
||||
|
||||
bool TimeRange::operator==(const TimeRange& rhs) const {
|
||||
|
|
|
@ -72,7 +72,12 @@ private:
|
|||
|
||||
template<typename T>
|
||||
std::ostream& operator<<(std::ostream& stream, const Timed<T>& timedValue) {
|
||||
return stream << "Timed(" << timedValue.getStart() << ", " << timedValue.getEnd() << ", " << timedValue.getValue() << ")";
|
||||
return stream
|
||||
<< "Timed("
|
||||
<< timedValue.getStart() << ", "
|
||||
<< timedValue.getEnd() << ", "
|
||||
<< timedValue.getValue()
|
||||
<< ")";
|
||||
}
|
||||
|
||||
template<>
|
||||
|
@ -130,5 +135,9 @@ private:
|
|||
|
||||
template<>
|
||||
inline std::ostream& operator<<(std::ostream& stream, const Timed<void>& timedValue) {
|
||||
return stream << "Timed<void>(" << timedValue.getTimeRange().getStart() << ", " << timedValue.getTimeRange().getEnd() << ")";
|
||||
return stream
|
||||
<< "Timed<void>("
|
||||
<< timedValue.getTimeRange().getStart() << ", "
|
||||
<< timedValue.getTimeRange().getEnd()
|
||||
<< ")";
|
||||
}
|
||||
|
|
|
@ -36,12 +36,15 @@ private:
|
|||
bool operator()(const Timed<T>& lhs, const Timed<T>& rhs) const {
|
||||
return lhs.getStart() < rhs.getStart();
|
||||
}
|
||||
|
||||
bool operator()(const time_type& lhs, const Timed<T>& rhs) const {
|
||||
return lhs < rhs.getStart();
|
||||
}
|
||||
|
||||
bool operator()(const Timed<T>& lhs, const time_type& rhs) const {
|
||||
return lhs.getStart() < rhs;
|
||||
}
|
||||
|
||||
using is_transparent = int;
|
||||
};
|
||||
|
||||
|
@ -88,7 +91,7 @@ public:
|
|||
time_type time;
|
||||
};
|
||||
|
||||
Timeline() {}
|
||||
Timeline() = default;
|
||||
|
||||
template<typename InputIterator>
|
||||
Timeline(InputIterator first, InputIterator last) {
|
||||
|
@ -107,7 +110,7 @@ public:
|
|||
Timeline(initializerList.begin(), initializerList.end())
|
||||
{}
|
||||
|
||||
virtual ~Timeline() {}
|
||||
virtual ~Timeline() = default;
|
||||
|
||||
bool empty() const {
|
||||
return elements.empty();
|
||||
|
@ -141,35 +144,39 @@ public:
|
|||
|
||||
iterator find(time_type time, FindMode findMode = FindMode::SampleRight) const {
|
||||
switch (findMode) {
|
||||
case FindMode::SampleLeft: {
|
||||
iterator left = find(time, FindMode::SearchLeft);
|
||||
return left != end() && left->getEnd() >= time ? left : end();
|
||||
}
|
||||
case FindMode::SampleRight: {
|
||||
iterator right = find(time, FindMode::SearchRight);
|
||||
return right != end() && right->getStart() <= time ? right : end();
|
||||
}
|
||||
case FindMode::SearchLeft: {
|
||||
// Get first element starting >= time
|
||||
iterator it = elements.lower_bound(time);
|
||||
|
||||
// Go one element back
|
||||
return it != begin() ? --it : end();
|
||||
}
|
||||
case FindMode::SearchRight: {
|
||||
// Get first element starting > time
|
||||
iterator it = elements.upper_bound(time);
|
||||
|
||||
// Go one element back
|
||||
if (it != begin()) {
|
||||
iterator left = it;
|
||||
--left;
|
||||
if (left->getEnd() > time) return left;
|
||||
case FindMode::SampleLeft:
|
||||
{
|
||||
iterator left = find(time, FindMode::SearchLeft);
|
||||
return left != end() && left->getEnd() >= time ? left : end();
|
||||
}
|
||||
return it;
|
||||
}
|
||||
default:
|
||||
throw std::invalid_argument("Unexpected find mode.");
|
||||
case FindMode::SampleRight:
|
||||
{
|
||||
iterator right = find(time, FindMode::SearchRight);
|
||||
return right != end() && right->getStart() <= time ? right : end();
|
||||
}
|
||||
case FindMode::SearchLeft:
|
||||
{
|
||||
// Get first element starting >= time
|
||||
iterator it = elements.lower_bound(time);
|
||||
|
||||
// Go one element back
|
||||
return it != begin() ? --it : end();
|
||||
}
|
||||
case FindMode::SearchRight:
|
||||
{
|
||||
// Get first element starting > time
|
||||
iterator it = elements.upper_bound(time);
|
||||
|
||||
// Go one element back
|
||||
if (it != begin()) {
|
||||
iterator left = it;
|
||||
--left;
|
||||
if (left->getEnd() > time) return left;
|
||||
}
|
||||
return it;
|
||||
}
|
||||
default:
|
||||
throw std::invalid_argument("Unexpected find mode.");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -187,7 +194,10 @@ public:
|
|||
splitAt(range.getEnd());
|
||||
|
||||
// Erase overlapping elements
|
||||
elements.erase(find(range.getStart(), FindMode::SearchRight), find(range.getEnd(), FindMode::SearchRight));
|
||||
elements.erase(
|
||||
find(range.getStart(), FindMode::SearchRight),
|
||||
find(range.getEnd(), FindMode::SearchRight)
|
||||
);
|
||||
}
|
||||
|
||||
void clear(time_type start, time_type end) {
|
||||
|
@ -220,12 +230,19 @@ public:
|
|||
}
|
||||
|
||||
template<typename TElement = T>
|
||||
iterator set(const TimeRange& timeRange, const std::enable_if_t<!std::is_void<TElement>::value, T>& value) {
|
||||
iterator set(
|
||||
const TimeRange& timeRange,
|
||||
const std::enable_if_t<!std::is_void<TElement>::value, T>& value
|
||||
) {
|
||||
return set(Timed<T>(timeRange, value));
|
||||
}
|
||||
|
||||
template<typename TElement = T>
|
||||
iterator set(time_type start, time_type end, const std::enable_if_t<!std::is_void<TElement>::value, T>& value) {
|
||||
iterator set(
|
||||
time_type start,
|
||||
time_type end,
|
||||
const std::enable_if_t<!std::is_void<TElement>::value, T>& value
|
||||
) {
|
||||
return set(Timed<T>(start, end, value));
|
||||
}
|
||||
|
||||
|
@ -251,13 +268,16 @@ public:
|
|||
for (auto it = copy.begin(); it != copy.end(); ++it) {
|
||||
const auto rangeBegin = it;
|
||||
auto rangeEnd = std::next(rangeBegin);
|
||||
while (rangeEnd != copy.end() && rangeEnd->getStart() == rangeBegin->getEnd() && ::internal::valueEquals(*rangeEnd, *rangeBegin)) {
|
||||
while (rangeEnd != copy.end()
|
||||
&& rangeEnd->getStart() == rangeBegin->getEnd()
|
||||
&& ::internal::valueEquals(*rangeEnd, *rangeBegin)
|
||||
) {
|
||||
++rangeEnd;
|
||||
}
|
||||
|
||||
if (rangeEnd != std::next(rangeBegin)) {
|
||||
Timed<T> combined = *rangeBegin;
|
||||
combined.setTimeRange({rangeBegin->getStart(), rangeEnd->getEnd()});
|
||||
combined.setTimeRange({ rangeBegin->getStart(), rangeEnd->getEnd() });
|
||||
set(combined);
|
||||
it = rangeEnd;
|
||||
}
|
||||
|
|
|
@ -3,9 +3,9 @@
|
|||
#include <chrono>
|
||||
#include <ostream>
|
||||
|
||||
typedef std::chrono::duration<int, std::centi> centiseconds;
|
||||
using centiseconds = std::chrono::duration<int, std::centi>;
|
||||
|
||||
std::ostream& operator <<(std::ostream& stream, const centiseconds cs);
|
||||
std::ostream& operator <<(std::ostream& stream, centiseconds cs);
|
||||
|
||||
#pragma warning(push)
|
||||
#pragma warning(disable: 4455)
|
||||
|
|
|
@ -7,8 +7,13 @@
|
|||
|
||||
template<typename TValue>
|
||||
void logTimedEvent(const std::string& eventName, const Timed<TValue> timedValue) {
|
||||
logging::debugFormat("##{0}[{1}-{2}]: {3}",
|
||||
eventName, formatDuration(timedValue.getStart()), formatDuration(timedValue.getEnd()), timedValue.getValue());
|
||||
logging::debugFormat(
|
||||
"##{0}[{1}-{2}]: {3}",
|
||||
eventName,
|
||||
formatDuration(timedValue.getStart()),
|
||||
formatDuration(timedValue.getEnd()),
|
||||
timedValue.getValue()
|
||||
);
|
||||
}
|
||||
|
||||
template<typename TValue>
|
||||
|
@ -17,6 +22,11 @@ void logTimedEvent(const std::string& eventName, const TimeRange& timeRange, con
|
|||
}
|
||||
|
||||
template<typename TValue>
|
||||
void logTimedEvent(const std::string& eventName, centiseconds start, centiseconds end, const TValue& value) {
|
||||
void logTimedEvent(
|
||||
const std::string& eventName,
|
||||
centiseconds start,
|
||||
centiseconds end,
|
||||
const TValue& value
|
||||
) {
|
||||
logTimedEvent(eventName, Timed<TValue>(start, end, value));
|
||||
}
|
|
@ -1,6 +1,5 @@
|
|||
#pragma once
|
||||
|
||||
#include <initializer_list>
|
||||
#include <utility>
|
||||
#include <map>
|
||||
#include <vector>
|
||||
|
@ -30,7 +29,9 @@ public:
|
|||
auto result = tryToString(value);
|
||||
if (!result) {
|
||||
auto numericValue = static_cast<typename std::underlying_type<TEnum>::type>(value);
|
||||
throw std::invalid_argument(fmt::format("{} is not a valid {} value.", numericValue, typeName));
|
||||
throw std::invalid_argument(
|
||||
fmt::format("{} is not a valid {} value.", numericValue, typeName)
|
||||
);
|
||||
}
|
||||
|
||||
return *result;
|
||||
|
|
|
@ -55,7 +55,10 @@ public:
|
|||
|
||||
private:
|
||||
void init() const {
|
||||
std::call_once(state->initialized, [&] { state->value = std::make_unique<T>(state->createValue()); });
|
||||
std::call_once(
|
||||
state->initialized,
|
||||
[&] { state->value = std::make_unique<T>(state->createValue()); }
|
||||
);
|
||||
}
|
||||
|
||||
std::shared_ptr<State> state = std::make_shared<State>();
|
||||
|
|
|
@ -36,7 +36,10 @@ void NiceCmdLineOutput::failure(CmdLineInterface& cli, TCLAP::ArgException& e) {
|
|||
std::cerr << "Short usage:" << endl;
|
||||
printShortUsage(cli, std::cerr);
|
||||
|
||||
std::cerr << endl << "For complete usage and help, type `" << getBinaryName() << " --help`" << endl << endl;
|
||||
std::cerr
|
||||
<< endl
|
||||
<< "For complete usage and help, type `" << getBinaryName() << " --help`" << endl
|
||||
<< endl;
|
||||
} else {
|
||||
usage(cli);
|
||||
}
|
||||
|
@ -76,8 +79,9 @@ void NiceCmdLineOutput::printLongUsage(CmdLineInterface& cli, std::ostream& outS
|
|||
const vector<vector<TCLAP::Arg*>> xorArgGroups = xorHandler.getXorList();
|
||||
for (const vector<TCLAP::Arg*>& xorArgGroup : xorArgGroups) {
|
||||
for (auto arg : xorArgGroup) {
|
||||
if (arg != xorArgGroup[0])
|
||||
if (arg != xorArgGroup[0]) {
|
||||
outStream << "-- or --" << endl;
|
||||
}
|
||||
|
||||
tablePrinter.printRow({ arg->longID(), arg->getDescription() });
|
||||
}
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
#include <stack>
|
||||
#include <mutex>
|
||||
|
||||
template <typename value_type, typename pointer_type = std::unique_ptr<value_type>>
|
||||
template<typename value_type, typename pointer_type = std::unique_ptr<value_type>>
|
||||
class ObjectPool {
|
||||
public:
|
||||
using wrapper_type = lambda_unique_ptr<value_type>;
|
||||
|
|
|
@ -54,13 +54,13 @@ void ProgressBar::update(bool showSpinner) {
|
|||
const int blockCount = 20;
|
||||
const string animation = "|/-\\";
|
||||
|
||||
int progressBlockCount = static_cast<int>(currentProgress * blockCount);
|
||||
const int progressBlockCount = static_cast<int>(currentProgress * blockCount);
|
||||
const double epsilon = 0.0001;
|
||||
int percent = static_cast<int>(currentProgress * 100 + epsilon);
|
||||
const int percent = static_cast<int>(currentProgress * 100 + epsilon);
|
||||
const string spinner = showSpinner
|
||||
? string(1, animation[animationIndex++ % animation.size()])
|
||||
: "";
|
||||
string text = fmt::format("[{0}{1}] {2:3}% {3}",
|
||||
const string text = fmt::format("[{0}{1}] {2:3}% {3}",
|
||||
string(progressBlockCount, '#'), string(blockCount - progressBlockCount, '-'),
|
||||
percent,
|
||||
spinner
|
||||
|
@ -71,7 +71,7 @@ void ProgressBar::update(bool showSpinner) {
|
|||
void ProgressBar::updateText(const string& text) {
|
||||
// Get length of common portion
|
||||
int commonPrefixLength = 0;
|
||||
int commonLength = std::min(currentText.size(), text.size());
|
||||
const int commonLength = std::min(currentText.size(), text.size());
|
||||
while (commonPrefixLength < commonLength && text[commonPrefixLength] == currentText[commonPrefixLength]) {
|
||||
commonPrefixLength++;
|
||||
}
|
||||
|
@ -86,7 +86,7 @@ void ProgressBar::updateText(const string& text) {
|
|||
output.append(text, commonPrefixLength, text.size() - commonPrefixLength);
|
||||
|
||||
// ... if the new text is shorter than the old one: delete overlapping characters
|
||||
int overlapCount = currentText.size() - text.size();
|
||||
const int overlapCount = currentText.size() - text.size();
|
||||
if (overlapCount > 0) {
|
||||
output.append(overlapCount, ' ');
|
||||
output.append(overlapCount, '\b');
|
||||
|
|
|
@ -10,21 +10,23 @@ using std::invalid_argument;
|
|||
using std::vector;
|
||||
using std::string;
|
||||
|
||||
TablePrinter::TablePrinter(ostream *stream, initializer_list<int> columnWidths, int columnSpacing) :
|
||||
TablePrinter::TablePrinter(ostream* stream, initializer_list<int> columnWidths, int columnSpacing) :
|
||||
stream(stream),
|
||||
columnWidths(columnWidths.begin(), columnWidths.end()),
|
||||
columnSpacing(columnSpacing)
|
||||
{
|
||||
if (stream == nullptr) throw invalid_argument("stream is null.");
|
||||
if (columnWidths.size() < 1) throw invalid_argument("No columns defined.");
|
||||
if (std::any_of(columnWidths.begin(), columnWidths.end(), [](int width){ return width <= 1; })) {
|
||||
if (std::any_of(columnWidths.begin(), columnWidths.end(), [](int width) { return width <= 1; })) {
|
||||
throw invalid_argument("All columns must have a width of at least 1.");
|
||||
}
|
||||
if (columnSpacing < 0) throw invalid_argument("columnSpacing must not be negative.");
|
||||
}
|
||||
|
||||
void TablePrinter::printRow(initializer_list<string> columns) const {
|
||||
if (columns.size() != columnWidths.size()) throw invalid_argument("Number of specified strings does not match number of defined columns.");
|
||||
if (columns.size() != columnWidths.size()) {
|
||||
throw invalid_argument("Number of specified strings does not match number of defined columns.");
|
||||
}
|
||||
|
||||
// Some cells may span multiple lines.
|
||||
// Create matrix of text lines in columns.
|
||||
|
@ -50,7 +52,7 @@ void TablePrinter::printRow(initializer_list<string> columns) const {
|
|||
|
||||
// Print lines
|
||||
*stream << std::left;
|
||||
string spacer(columnSpacing, ' ');
|
||||
const string spacer(columnSpacing, ' ');
|
||||
for (size_t rowIndex = 0; rowIndex < lineCount; rowIndex++) {
|
||||
for (size_t columnIndex = 0; columnIndex < columns.size(); columnIndex++) {
|
||||
if (columnIndex != 0) {
|
||||
|
|
|
@ -6,7 +6,11 @@
|
|||
|
||||
class TablePrinter {
|
||||
public:
|
||||
TablePrinter(std::ostream* stream, std::initializer_list<int> columnWidths, int columnSpacing = 2);
|
||||
TablePrinter(
|
||||
std::ostream* stream,
|
||||
std::initializer_list<int> columnWidths,
|
||||
int columnSpacing = 2
|
||||
);
|
||||
void printRow(std::initializer_list<std::string> columns) const;
|
||||
private:
|
||||
std::ostream* const stream;
|
||||
|
|
|
@ -9,13 +9,16 @@ namespace details {
|
|||
struct negation : std::integral_constant<bool, !B::value> {};
|
||||
|
||||
template<class> struct is_ref_wrapper : std::false_type {};
|
||||
|
||||
template<class T> struct is_ref_wrapper<std::reference_wrapper<T>> : std::true_type {};
|
||||
|
||||
template<class T>
|
||||
using not_ref_wrapper = negation<is_ref_wrapper<std::decay_t<T>>>;
|
||||
|
||||
template<class...> struct conjunction : std::true_type { };
|
||||
|
||||
template<class B1> struct conjunction<B1> : B1 { };
|
||||
|
||||
template<class B1, class... Bn>
|
||||
struct conjunction<B1, Bn...>
|
||||
: std::conditional_t<bool(B1::value), conjunction<Bn...>, B1> {};
|
||||
|
@ -23,19 +26,20 @@ namespace details {
|
|||
template<class... B>
|
||||
constexpr bool conjunction_v = conjunction<B...>::value;
|
||||
|
||||
template <class D, class...> struct return_type_helper { using type = D; };
|
||||
template <class... Types>
|
||||
template<class D, class...> struct return_type_helper { using type = D; };
|
||||
|
||||
template<class... Types>
|
||||
struct return_type_helper<void, Types...> : std::common_type<Types...> {
|
||||
static_assert(conjunction_v<not_ref_wrapper<Types>...>,
|
||||
"Types cannot contain reference_wrappers when D is void");
|
||||
};
|
||||
|
||||
template <class D, class... Types>
|
||||
template<class D, class... Types>
|
||||
using return_type = std::array<typename return_type_helper<D, Types...>::type,
|
||||
sizeof...(Types)>;
|
||||
}
|
||||
|
||||
template < class D = void, class... Types>
|
||||
template<class D = void, class... Types>
|
||||
constexpr details::return_type<D, Types...> make_array(Types&&... t) {
|
||||
return {std::forward<Types>(t)...};
|
||||
return { std::forward<Types>(t)... };
|
||||
}
|
|
@ -10,7 +10,8 @@ std::ifstream openFile(path filePath) {
|
|||
file.exceptions(std::ifstream::failbit | std::ifstream::badbit);
|
||||
file.open(filePath.c_str(), std::ios::binary);
|
||||
|
||||
// Read some dummy data so that we can throw a decent exception in case the file is missing, locked, etc.
|
||||
// Read some dummy data so that we can throw a decent exception in case the file is missing,
|
||||
// locked, etc.
|
||||
file.seekg(0, std::ios_base::end);
|
||||
if (file.tellg()) {
|
||||
file.seekg(0);
|
||||
|
@ -18,7 +19,7 @@ std::ifstream openFile(path filePath) {
|
|||
file.seekg(0);
|
||||
}
|
||||
|
||||
return std::move(file);
|
||||
return file;
|
||||
} catch (const std::ifstream::failure&) {
|
||||
// Error messages on stream exceptions are mostly useless.
|
||||
throw std::runtime_error(errorNumberToString(errno));
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
// After each iteration, the first k elements of the container will be
|
||||
// a combination. When there are no more combinations, the container
|
||||
// will return to the original sorted order.
|
||||
template <typename Iterator>
|
||||
template<typename Iterator>
|
||||
inline bool next_combination(const Iterator first, Iterator k, const Iterator last) {
|
||||
// Handle degenerate cases
|
||||
if (first == last || std::next(first) == last || first == k || k == last) {
|
||||
|
|
|
@ -2,7 +2,9 @@
|
|||
#include <vector>
|
||||
|
||||
template<typename TCollection>
|
||||
std::vector<std::pair<typename TCollection::value_type, typename TCollection::value_type>> getPairs(const TCollection& collection) {
|
||||
std::vector<std::pair<typename TCollection::value_type, typename TCollection::value_type>> getPairs(
|
||||
const TCollection& collection
|
||||
) {
|
||||
using TElement = typename TCollection::value_type;
|
||||
using TPair = std::pair<TElement, TElement>;
|
||||
using TIterator = typename TCollection::const_iterator;
|
||||
|
|
|
@ -32,12 +32,12 @@ void runParallel(
|
|||
|
||||
// Before exiting, wait for all running tasks to finish, but don't re-throw exceptions.
|
||||
// This only applies if one task already failed with an exception.
|
||||
auto finishRunning = gsl::finally([&]{
|
||||
auto finishRunning = gsl::finally([&] {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
elementFinished.wait(lock, [&] { return currentThreadCount == 0; });
|
||||
});
|
||||
|
||||
// Asyncronously run all elements
|
||||
// Asynchronously run all elements
|
||||
for (auto it = collection.begin(); it != collection.end(); ++it) {
|
||||
// This variable will later hold the future, but can be value-captured right now
|
||||
auto future = std::make_shared<future_type>();
|
||||
|
@ -66,7 +66,7 @@ void runParallel(
|
|||
// Wait for threads to finish, if necessary
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
int targetThreadCount = it == collection.end() ? 0 : maxThreadCount - 1;
|
||||
const int targetThreadCount = it == collection.end() ? 0 : maxThreadCount - 1;
|
||||
while (currentThreadCount > targetThreadCount) {
|
||||
elementFinished.wait(lock);
|
||||
if (finishedElement.valid()) {
|
||||
|
@ -86,7 +86,8 @@ void runParallel(
|
|||
TCollection& collection,
|
||||
int maxThreadCount,
|
||||
ProgressSink& progressSink,
|
||||
std::function<double(const typename TCollection::reference)> getElementProgressWeight = [](typename TCollection::reference) { return 1.0; })
|
||||
std::function<double(typename TCollection::reference)> getElementProgressWeight =
|
||||
[](typename TCollection::reference) { return 1.0; })
|
||||
{
|
||||
// Create a collection of wrapper functions that take care of progress handling
|
||||
ProgressMerger progressMerger(progressSink);
|
||||
|
@ -101,7 +102,7 @@ void runParallel(
|
|||
}
|
||||
|
||||
inline int getProcessorCoreCount() {
|
||||
int coreCount = std::thread::hardware_concurrency();
|
||||
const int coreCount = std::thread::hardware_concurrency();
|
||||
|
||||
// If the number of cores cannot be determined, use a reasonable default
|
||||
return coreCount != 0 ? coreCount : 4;
|
||||
|
|
|
@ -14,8 +14,6 @@
|
|||
|
||||
#ifdef _WIN32
|
||||
#include <Windows.h>
|
||||
#include <io.h>
|
||||
#include <fcntl.h>
|
||||
#endif
|
||||
|
||||
using boost::filesystem::path;
|
||||
|
@ -26,13 +24,14 @@ path getBinPath() {
|
|||
static const path binPath = [] {
|
||||
try {
|
||||
// Determine path length
|
||||
int pathLength = wai_getExecutablePath(nullptr, 0, nullptr);
|
||||
const int pathLength = wai_getExecutablePath(nullptr, 0, nullptr);
|
||||
if (pathLength == -1) {
|
||||
throw std::runtime_error("Error determining path length.");
|
||||
}
|
||||
|
||||
// Get path
|
||||
// Note: According to documentation, pathLength does *not* include the trailing zero. Actually, it does.
|
||||
// Note: According to documentation, pathLength does *not* include the trailing zero.
|
||||
// Actually, it does.
|
||||
// In case there are situations where it doesn't, we allocate one character more.
|
||||
std::vector<char> buffer(pathLength + 1);
|
||||
if (wai_getExecutablePath(buffer.data(), buffer.size(), nullptr) == -1) {
|
||||
|
@ -41,7 +40,7 @@ path getBinPath() {
|
|||
buffer[pathLength] = 0;
|
||||
|
||||
// Convert to boost::filesystem::path
|
||||
string pathString(buffer.data());
|
||||
const string pathString(buffer.data());
|
||||
path result(boost::filesystem::canonical(pathString).make_preferred());
|
||||
return result;
|
||||
} catch (...) {
|
||||
|
@ -56,14 +55,14 @@ path getBinDirectory() {
|
|||
}
|
||||
|
||||
path getTempFilePath() {
|
||||
path tempDirectory = boost::filesystem::temp_directory_path();
|
||||
const path tempDirectory = boost::filesystem::temp_directory_path();
|
||||
static boost::uuids::random_generator generateUuid;
|
||||
string fileName = to_string(generateUuid());
|
||||
const string fileName = to_string(generateUuid());
|
||||
return tempDirectory / fileName;
|
||||
}
|
||||
|
||||
std::tm getLocalTime(const time_t& time) {
|
||||
tm timeInfo;
|
||||
tm timeInfo {};
|
||||
#if (__unix || __linux || __APPLE__)
|
||||
localtime_r(&time, &timeInfo);
|
||||
#else
|
||||
|
@ -92,7 +91,8 @@ vector<string> argsToUtf8(int argc, char* argv[]) {
|
|||
// Get command-line arguments as UTF16 strings
|
||||
int argumentCount;
|
||||
static_assert(sizeof(wchar_t) == sizeof(char16_t), "Expected wchar_t to be a 16-bit type.");
|
||||
char16_t** args = reinterpret_cast<char16_t**>(CommandLineToArgvW(GetCommandLineW(), &argumentCount));
|
||||
char16_t** args =
|
||||
reinterpret_cast<char16_t**>(CommandLineToArgvW(GetCommandLineW(), &argumentCount));
|
||||
if (!args) {
|
||||
throw std::runtime_error("Error splitting the UTF-16 command line arguments.");
|
||||
}
|
||||
|
@ -134,7 +134,7 @@ private:
|
|||
};
|
||||
|
||||
void useUtf8ForConsole() {
|
||||
// Unix systems already expect UTF-8-encoded data
|
||||
// Unix systems already expect UTF-8-encoded data
|
||||
#ifdef _WIN32
|
||||
// Set console code page to UTF-8 so the console knows how to interpret string data
|
||||
SetConsoleOutputCP(CP_UTF8);
|
||||
|
@ -147,7 +147,7 @@ void useUtf8ForConsole() {
|
|||
}
|
||||
|
||||
void useUtf8ForBoostFilesystem() {
|
||||
std::locale globalLocale = std::locale();
|
||||
std::locale utf8Locale(globalLocale, new boost::filesystem::detail::utf8_codecvt_facet);
|
||||
const std::locale globalLocale = std::locale();
|
||||
const std::locale utf8Locale(globalLocale, new boost::filesystem::detail::utf8_codecvt_facet);
|
||||
path::imbue(utf8Locale);
|
||||
}
|
||||
|
|
|
@ -11,7 +11,7 @@ boost::filesystem::path getTempFilePath();
|
|||
std::tm getLocalTime(const time_t& time);
|
||||
std::string errorNumberToString(int errorNumber);
|
||||
|
||||
std::vector<std::string> argsToUtf8(int argc, char *argv[]);
|
||||
std::vector<std::string> argsToUtf8(int argc, char* argv[]);
|
||||
|
||||
void useUtf8ForConsole();
|
||||
void useUtf8ForBoostFilesystem();
|
|
@ -22,10 +22,10 @@ ProgressSink& ProgressMerger::addSink(double weight) {
|
|||
totalWeight += weight;
|
||||
int sinkIndex = weightedValues.size();
|
||||
weightedValues.push_back(0);
|
||||
forwarders.push_back(ProgressForwarder([weight, sinkIndex, this](double progress) {
|
||||
forwarders.emplace_back([weight, sinkIndex, this](double progress) {
|
||||
weightedValues[sinkIndex] = progress * weight;
|
||||
report();
|
||||
}));
|
||||
});
|
||||
return forwarders.back();
|
||||
}
|
||||
|
||||
|
@ -37,7 +37,7 @@ void ProgressMerger::report() {
|
|||
for (double weightedValue : weightedValues) {
|
||||
weightedSum += weightedValue;
|
||||
}
|
||||
double progress = weightedSum / totalWeight;
|
||||
const double progress = weightedSum / totalWeight;
|
||||
sink.reportProgress(progress);
|
||||
} else {
|
||||
sink.reportProgress(0);
|
||||
|
|
|
@ -9,7 +9,6 @@ using std::string;
|
|||
using std::wstring;
|
||||
using std::u32string;
|
||||
using std::vector;
|
||||
using boost::optional;
|
||||
using std::regex;
|
||||
using std::regex_replace;
|
||||
|
||||
|
@ -17,7 +16,7 @@ vector<string> splitIntoLines(const string& s) {
|
|||
vector<string> lines;
|
||||
auto p = &s[0];
|
||||
auto lineBegin = p;
|
||||
auto end = p + s.size();
|
||||
const auto end = p + s.size();
|
||||
// Iterate over input string
|
||||
while (p <= end) {
|
||||
// Add a new result line when we hit a \n character or the end of the string
|
||||
|
@ -45,7 +44,7 @@ vector<string> wrapSingleLineString(const string& s, int lineLength, int hanging
|
|||
auto p = &s[0];
|
||||
auto lineBegin = p;
|
||||
auto lineEnd = p;
|
||||
auto end = p + s.size();
|
||||
const auto end = p + s.size();
|
||||
// Iterate over input string
|
||||
while (p <= end) {
|
||||
// If we're at a word boundary: update lineEnd
|
||||
|
@ -54,7 +53,7 @@ vector<string> wrapSingleLineString(const string& s, int lineLength, int hanging
|
|||
}
|
||||
|
||||
// If we've hit lineLength or the end of the string: add a new result line
|
||||
int currentIndent = lines.empty() ? 0 : hangingIndent;
|
||||
const int currentIndent = lines.empty() ? 0 : hangingIndent;
|
||||
if (p == end || p - lineBegin == lineLength - currentIndent) {
|
||||
if (lineEnd == lineBegin) {
|
||||
// The line contains a single word, which is too long. Split mid-word.
|
||||
|
@ -80,7 +79,7 @@ vector<string> wrapSingleLineString(const string& s, int lineLength, int hanging
|
|||
|
||||
vector<string> wrapString(const string& s, int lineLength, int hangingIndent) {
|
||||
vector<string> lines;
|
||||
for (string paragraph : splitIntoLines(s)) {
|
||||
for (const string& paragraph : splitIntoLines(s)) {
|
||||
auto paragraphLines = wrapSingleLineString(paragraph, lineLength, hangingIndent);
|
||||
copy(paragraphLines.cbegin(), paragraphLines.cend(), back_inserter(lines));
|
||||
}
|
||||
|
@ -100,7 +99,7 @@ wstring latin1ToWide(const string& s) {
|
|||
return result;
|
||||
}
|
||||
|
||||
string utf8ToAscii(const string s) {
|
||||
string utf8ToAscii(const string& s) {
|
||||
// Normalize string, simplifying it as much as possible
|
||||
const NormalizationOptions options = NormalizationOptions::CompatibilityMode
|
||||
| NormalizationOptions::Decompose
|
||||
|
@ -111,15 +110,15 @@ string utf8ToAscii(const string s) {
|
|||
string simplified = normalizeUnicode(s, options);
|
||||
|
||||
// Replace common Unicode characters with ASCII equivalents
|
||||
static const vector<std::pair<regex, string>> replacements{
|
||||
{regex("«|»|“|”|„|‟"), "\""},
|
||||
{regex("‘|’|‚|‛|‹|›"), "'"},
|
||||
{regex("‐|‑|‒|⁃|⁻|₋|−|➖|–|—|―|﹘|﹣|-"), "-"},
|
||||
{regex("…|⋯"), "..."},
|
||||
{regex("•"), "*"},
|
||||
{regex("†|+"), "+"},
|
||||
{regex("⁄|∕|⧸|/|/"), "/"},
|
||||
{regex("×"), "x"},
|
||||
static const vector<std::pair<regex, string>> replacements {
|
||||
{ regex("«|»|“|”|„|‟"), "\"" },
|
||||
{ regex("‘|’|‚|‛|‹|›"), "'" },
|
||||
{ regex("‐|‑|‒|⁃|⁻|₋|−|➖|–|—|―|﹘|﹣|-"), "-" },
|
||||
{ regex("…|⋯"), "..." },
|
||||
{ regex("•"), "*" },
|
||||
{ regex("†|+"), "+" },
|
||||
{ regex("⁄|∕|⧸|/|/"), "/" },
|
||||
{ regex("×"), "x" },
|
||||
};
|
||||
for (const auto& replacement : replacements) {
|
||||
simplified = regex_replace(simplified, replacement.first, replacement.second);
|
||||
|
@ -137,7 +136,7 @@ string utf8ToAscii(const string s) {
|
|||
return result;
|
||||
}
|
||||
|
||||
string normalizeUnicode(const string s, NormalizationOptions options) {
|
||||
string normalizeUnicode(const string& s, NormalizationOptions options) {
|
||||
char* result;
|
||||
const utf8proc_ssize_t charCount = utf8proc_map(
|
||||
reinterpret_cast<const uint8_t*>(s.data()),
|
||||
|
@ -168,23 +167,23 @@ string escapeJsonString(const string& s) {
|
|||
string result;
|
||||
for (char16_t c : utf16String) {
|
||||
switch (c) {
|
||||
case '"': result += "\\\""; break;
|
||||
case '\\': result += "\\\\"; break;
|
||||
case '\b': result += "\\b"; break;
|
||||
case '\f': result += "\\f"; break;
|
||||
case '\n': result += "\\n"; break;
|
||||
case '\r': result += "\\r"; break;
|
||||
case '\t': result += "\\t"; break;
|
||||
default:
|
||||
{
|
||||
bool needsEscaping = c < '\x20' || c >= 0x80;
|
||||
if (needsEscaping) {
|
||||
result += fmt::format("\\u{0:04x}", c);
|
||||
} else {
|
||||
result += static_cast<char>(c);
|
||||
case '"': result += "\\\""; break;
|
||||
case '\\': result += "\\\\"; break;
|
||||
case '\b': result += "\\b"; break;
|
||||
case '\f': result += "\\f"; break;
|
||||
case '\n': result += "\\n"; break;
|
||||
case '\r': result += "\\r"; break;
|
||||
case '\t': result += "\\t"; break;
|
||||
default:
|
||||
{
|
||||
const bool needsEscaping = c < '\x20' || c >= 0x80;
|
||||
if (needsEscaping) {
|
||||
result += fmt::format("\\u{0:04x}", c);
|
||||
} else {
|
||||
result += static_cast<char>(c);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
|
|
@ -1,13 +1,16 @@
|
|||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
#include <boost/optional.hpp>
|
||||
#include <boost/lexical_cast.hpp>
|
||||
#include <utf8proc.h>
|
||||
|
||||
std::vector<std::string> splitIntoLines(const std::string& s);
|
||||
|
||||
std::vector<std::string> wrapSingleLineString(const std::string& s, int lineLength, int hangingIndent = 0);
|
||||
std::vector<std::string> wrapSingleLineString(
|
||||
const std::string& s,
|
||||
int lineLength,
|
||||
int hangingIndent = 0
|
||||
);
|
||||
|
||||
std::vector<std::string> wrapString(const std::string& s, int lineLength, int hangingIndent = 0);
|
||||
|
||||
|
@ -15,9 +18,7 @@ bool isValidUtf8(const std::string& s);
|
|||
|
||||
std::wstring latin1ToWide(const std::string& s);
|
||||
|
||||
boost::optional<char> toAscii(char32_t ch);
|
||||
|
||||
std::string utf8ToAscii(const std::string s);
|
||||
std::string utf8ToAscii(const std::string& s);
|
||||
|
||||
enum class NormalizationOptions : int {
|
||||
CompatibilityMode = UTF8PROC_COMPAT,
|
||||
|
@ -35,7 +36,7 @@ operator|(NormalizationOptions a, NormalizationOptions b) {
|
|||
return static_cast<NormalizationOptions>(static_cast<int>(a) | static_cast<int>(b));
|
||||
}
|
||||
|
||||
std::string normalizeUnicode(const std::string s, NormalizationOptions options);
|
||||
std::string normalizeUnicode(const std::string& s, NormalizationOptions options);
|
||||
|
||||
template<typename T>
|
||||
std::string join(T range, const std::string separator) {
|
||||
|
|
|
@ -18,8 +18,8 @@ template<unsigned int n, typename iterator_type>
|
|||
void for_each_adjacent(
|
||||
iterator_type begin,
|
||||
iterator_type end,
|
||||
std::function<void(const std::deque<std::reference_wrapper<const typename iterator_type::value_type>>&)> f)
|
||||
{
|
||||
std::function<void(const std::deque<std::reference_wrapper<const typename iterator_type::value_type>>&)> f
|
||||
) {
|
||||
// Get the first n values
|
||||
iterator_type it = begin;
|
||||
using element_type = std::reference_wrapper<const typename iterator_type::value_type>;
|
||||
|
@ -42,20 +42,28 @@ template<typename iterator_type>
|
|||
void for_each_adjacent(
|
||||
iterator_type begin,
|
||||
iterator_type end,
|
||||
std::function<void(const typename iterator_type::reference a, const typename iterator_type::reference b)> f)
|
||||
{
|
||||
for_each_adjacent<2>(begin, end, [&](const std::deque<std::reference_wrapper<const typename iterator_type::value_type>>& args) {
|
||||
f(args[0], args[1]);
|
||||
});
|
||||
std::function<void(const typename iterator_type::reference a, const typename iterator_type::reference b)> f
|
||||
) {
|
||||
for_each_adjacent<2>(
|
||||
begin,
|
||||
end,
|
||||
[&](const std::deque<std::reference_wrapper<const typename iterator_type::value_type>>& args) {
|
||||
f(args[0], args[1]);
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
template<typename iterator_type>
|
||||
void for_each_adjacent(
|
||||
iterator_type begin,
|
||||
iterator_type end,
|
||||
std::function<void(const typename iterator_type::reference a, const typename iterator_type::reference b, const typename iterator_type::reference c)> f)
|
||||
{
|
||||
for_each_adjacent<3>(begin, end, [&](const std::deque<std::reference_wrapper<const typename iterator_type::value_type>>& args) {
|
||||
f(args[0], args[1], args[2]);
|
||||
});
|
||||
std::function<void(const typename iterator_type::reference a, const typename iterator_type::reference b, const typename iterator_type::reference c)> f
|
||||
) {
|
||||
for_each_adjacent<3>(
|
||||
begin,
|
||||
end,
|
||||
[&](const std::deque<std::reference_wrapper<const typename iterator_type::value_type>>& args) {
|
||||
f(args[0], args[1], args[2]);
|
||||
}
|
||||
);
|
||||
}
|
||||
|
|
|
@ -6,13 +6,13 @@ namespace std {
|
|||
|
||||
namespace {
|
||||
|
||||
template <typename T>
|
||||
template<typename T>
|
||||
void hash_combine(size_t& seed, const T& value) {
|
||||
seed ^= std::hash<T>()(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
|
||||
}
|
||||
|
||||
// Recursive template code derived from Matthieu M.
|
||||
template <typename Tuple, size_t Index = tuple_size<Tuple>::value - 1>
|
||||
template<typename Tuple, size_t Index = tuple_size<Tuple>::value - 1>
|
||||
struct HashValueImpl {
|
||||
static void apply(size_t& seed, const Tuple& tuple) {
|
||||
HashValueImpl<Tuple, Index - 1>::apply(seed, tuple);
|
||||
|
@ -20,7 +20,7 @@ namespace std {
|
|||
}
|
||||
};
|
||||
|
||||
template <typename Tuple>
|
||||
template<typename Tuple>
|
||||
struct HashValueImpl<Tuple, 0> {
|
||||
static void apply(size_t& seed, const Tuple& tuple) {
|
||||
hash_combine(seed, std::get<0>(tuple));
|
||||
|
@ -28,11 +28,11 @@ namespace std {
|
|||
};
|
||||
}
|
||||
|
||||
template <typename ... TT>
|
||||
template<typename ... TT>
|
||||
struct hash<tuple<TT...>> {
|
||||
size_t operator()(const tuple<TT...>& tt) const {
|
||||
size_t seed = 0;
|
||||
HashValueImpl<tuple<TT...> >::apply(seed, tt);
|
||||
HashValueImpl<tuple<TT...>>::apply(seed, tt);
|
||||
return seed;
|
||||
}
|
||||
};
|
||||
|
|
|
@ -7,7 +7,7 @@ using boost::optional;
|
|||
using std::initializer_list;
|
||||
|
||||
TEST(BoundedTimeline, constructors_initializeState) {
|
||||
TimeRange range(-5_cs, 55_cs);
|
||||
const TimeRange range(-5_cs, 55_cs);
|
||||
auto args = {
|
||||
Timed<int>(-10_cs, 30_cs, 1),
|
||||
Timed<int>(10_cs, 40_cs, 2),
|
||||
|
@ -52,7 +52,7 @@ TEST(BoundedTimeline, getRange) {
|
|||
}
|
||||
|
||||
TEST(BoundedTimeline, setAndClear) {
|
||||
TimeRange range(0_cs, 10_cs);
|
||||
const TimeRange range(0_cs, 10_cs);
|
||||
BoundedTimeline<int> timeline(range);
|
||||
|
||||
// Out of range
|
||||
|
@ -83,8 +83,14 @@ TEST(BoundedTimeline, setAndClear) {
|
|||
}
|
||||
|
||||
TEST(BoundedTimeline, shift) {
|
||||
BoundedTimeline<int> timeline(TimeRange(0_cs, 10_cs), { { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 7_cs, 9_cs, 3 } });
|
||||
BoundedTimeline<int> expected(TimeRange(2_cs, 12_cs), { { 3_cs, 4_cs, 1 }, { 4_cs, 7_cs, 2 }, { 9_cs, 11_cs, 3 } });
|
||||
BoundedTimeline<int> timeline(
|
||||
TimeRange(0_cs, 10_cs),
|
||||
{ { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 7_cs, 9_cs, 3 } }
|
||||
);
|
||||
BoundedTimeline<int> expected(
|
||||
TimeRange(2_cs, 12_cs),
|
||||
{ { 3_cs, 4_cs, 1 }, { 4_cs, 7_cs, 2 }, { 9_cs, 11_cs, 3 } }
|
||||
);
|
||||
timeline.shift(2_cs);
|
||||
EXPECT_EQ(expected, timeline);
|
||||
}
|
||||
|
@ -99,9 +105,11 @@ TEST(BoundedTimeline, equality) {
|
|||
for (size_t i = 0; i < timelines.size(); ++i) {
|
||||
for (size_t j = 0; j < timelines.size(); ++j) {
|
||||
if (i == j) {
|
||||
EXPECT_EQ(timelines[i], BoundedTimeline<int>(timelines[j])) << "i: " << i << ", j: " << j;
|
||||
EXPECT_EQ(timelines[i], BoundedTimeline<int>(timelines[j]))
|
||||
<< "i: " << i << ", j: " << j;
|
||||
} else {
|
||||
EXPECT_NE(timelines[i], timelines[j]) << "i: " << i << ", j: " << j;
|
||||
EXPECT_NE(timelines[i], timelines[j])
|
||||
<< "i: " << i << ", j: " << j;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7,8 +7,8 @@ using boost::optional;
|
|||
using std::initializer_list;
|
||||
|
||||
TEST(ContinuousTimeline, constructors_initializeState) {
|
||||
TimeRange range(-5_cs, 55_cs);
|
||||
int defaultValue = -1;
|
||||
const TimeRange range(-5_cs, 55_cs);
|
||||
const int defaultValue = -1;
|
||||
auto args = {
|
||||
Timed<int>(-10_cs, 30_cs, 1),
|
||||
Timed<int>(10_cs, 40_cs, 2),
|
||||
|
@ -49,8 +49,8 @@ TEST(ContinuousTimeline, empty) {
|
|||
}
|
||||
|
||||
TEST(ContinuousTimeline, setAndClear) {
|
||||
TimeRange range(0_cs, 10_cs);
|
||||
int defaultValue = -1;
|
||||
const TimeRange range(0_cs, 10_cs);
|
||||
const int defaultValue = -1;
|
||||
ContinuousTimeline<int> timeline(range, defaultValue);
|
||||
|
||||
// Out of range
|
||||
|
@ -82,8 +82,16 @@ TEST(ContinuousTimeline, setAndClear) {
|
|||
}
|
||||
|
||||
TEST(ContinuousTimeline, shift) {
|
||||
ContinuousTimeline<int> timeline(TimeRange(0_cs, 10_cs), -1, { { 1_cs, 2_cs, 1 },{ 2_cs, 5_cs, 2 },{ 7_cs, 9_cs, 3 } });
|
||||
ContinuousTimeline<int> expected(TimeRange(2_cs, 12_cs), -1, { { 3_cs, 4_cs, 1 },{ 4_cs, 7_cs, 2 },{ 9_cs, 11_cs, 3 } });
|
||||
ContinuousTimeline<int> timeline(
|
||||
TimeRange(0_cs, 10_cs),
|
||||
-1,
|
||||
{ { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 7_cs, 9_cs, 3 } }
|
||||
);
|
||||
ContinuousTimeline<int> expected(
|
||||
TimeRange(2_cs, 12_cs),
|
||||
-1,
|
||||
{ { 3_cs, 4_cs, 1 }, { 4_cs, 7_cs, 2 }, { 9_cs, 11_cs, 3 } }
|
||||
);
|
||||
timeline.shift(2_cs);
|
||||
EXPECT_EQ(expected, timeline);
|
||||
}
|
||||
|
@ -99,7 +107,8 @@ TEST(ContinuousTimeline, equality) {
|
|||
for (size_t i = 0; i < timelines.size(); ++i) {
|
||||
for (size_t j = 0; j < timelines.size(); ++j) {
|
||||
if (i == j) {
|
||||
EXPECT_EQ(timelines[i], ContinuousTimeline<int>(timelines[j])) << "i: " << i << ", j: " << j;
|
||||
EXPECT_EQ(timelines[i], ContinuousTimeline<int>(timelines[j]))
|
||||
<< "i: " << i << ", j: " << j;
|
||||
} else {
|
||||
EXPECT_NE(timelines[i], timelines[j]) << "i: " << i << ", j: " << j;
|
||||
}
|
||||
|
|
|
@ -2,16 +2,15 @@
|
|||
#include "tools/Lazy.h"
|
||||
|
||||
using namespace testing;
|
||||
using std::make_unique;
|
||||
|
||||
// Not copyable, no default constrctor, movable
|
||||
// Not copyable, no default constructor, movable
|
||||
struct Foo {
|
||||
const int value;
|
||||
Foo(int value) : value(value) {}
|
||||
|
||||
Foo() = delete;
|
||||
Foo(const Foo&) = delete;
|
||||
Foo& operator=(const Foo &) = delete;
|
||||
Foo& operator=(const Foo&) = delete;
|
||||
|
||||
Foo(Foo&&) = default;
|
||||
Foo& operator=(Foo&&) = default;
|
||||
|
@ -44,7 +43,7 @@ TEST(Lazy, constUsage) {
|
|||
TEST(Lazy, copying) {
|
||||
Lazy<Foo> a;
|
||||
int counter = 0;
|
||||
auto createValue = [&] { return counter++; };
|
||||
const auto createValue = [&] { return counter++; };
|
||||
Lazy<Foo> b(createValue);
|
||||
a = b;
|
||||
EXPECT_EQ(0, counter);
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
#include <gmock/gmock.h>
|
||||
#include "time/Timeline.h"
|
||||
#include <limits>
|
||||
#include <functional>
|
||||
|
||||
using namespace testing;
|
||||
|
@ -39,15 +38,15 @@ TEST(Timeline, empty) {
|
|||
EXPECT_TRUE(empty0.empty());
|
||||
EXPECT_THAT(empty0, IsEmpty());
|
||||
|
||||
Timeline<int> empty1{};
|
||||
Timeline<int> empty1 {};
|
||||
EXPECT_TRUE(empty1.empty());
|
||||
EXPECT_THAT(empty1, IsEmpty());
|
||||
|
||||
Timeline<int> empty2{ Timed<int>(1_cs, 1_cs, 1) };
|
||||
Timeline<int> empty2 { Timed<int>(1_cs, 1_cs, 1) };
|
||||
EXPECT_TRUE(empty2.empty());
|
||||
EXPECT_THAT(empty2, IsEmpty());
|
||||
|
||||
Timeline<int> nonEmpty{ Timed<int>(1_cs, 2_cs, 1) };
|
||||
Timeline<int> nonEmpty { Timed<int>(1_cs, 2_cs, 1) };
|
||||
EXPECT_FALSE(nonEmpty.empty());
|
||||
EXPECT_THAT(nonEmpty, Not(IsEmpty()));
|
||||
}
|
||||
|
@ -57,19 +56,19 @@ TEST(Timeline, size) {
|
|||
EXPECT_EQ(0, empty0.size());
|
||||
EXPECT_THAT(empty0, SizeIs(0));
|
||||
|
||||
Timeline<int> empty1{};
|
||||
Timeline<int> empty1 {};
|
||||
EXPECT_EQ(0, empty1.size());
|
||||
EXPECT_THAT(empty1, SizeIs(0));
|
||||
|
||||
Timeline<int> empty2{ Timed<int>(1_cs, 1_cs, 1) };
|
||||
Timeline<int> empty2 { Timed<int>(1_cs, 1_cs, 1) };
|
||||
EXPECT_EQ(0, empty2.size());
|
||||
EXPECT_THAT(empty2, SizeIs(0));
|
||||
|
||||
Timeline<int> size1{ Timed<int>(1_cs, 10_cs, 1) };
|
||||
Timeline<int> size1 { Timed<int>(1_cs, 10_cs, 1) };
|
||||
EXPECT_EQ(1, size1.size());
|
||||
EXPECT_THAT(size1, SizeIs(1));
|
||||
|
||||
Timeline<int> size2{ Timed<int>(-10_cs, 10_cs, 1), Timed<int>(10_cs, 11_cs, 5) };
|
||||
Timeline<int> size2 { Timed<int>(-10_cs, 10_cs, 1), Timed<int>(10_cs, 11_cs, 5) };
|
||||
EXPECT_EQ(2, size2.size());
|
||||
EXPECT_THAT(size2, SizeIs(2));
|
||||
}
|
||||
|
@ -78,21 +77,21 @@ TEST(Timeline, getRange) {
|
|||
Timeline<int> empty0;
|
||||
EXPECT_EQ(TimeRange(0_cs, 0_cs), empty0.getRange());
|
||||
|
||||
Timeline<int> empty1{};
|
||||
Timeline<int> empty1 {};
|
||||
EXPECT_EQ(TimeRange(0_cs, 0_cs), empty1.getRange());
|
||||
|
||||
Timeline<int> empty2{ Timed<int>(1_cs, 1_cs, 1) };
|
||||
Timeline<int> empty2 { Timed<int>(1_cs, 1_cs, 1) };
|
||||
EXPECT_EQ(TimeRange(0_cs, 0_cs), empty2.getRange());
|
||||
|
||||
Timeline<int> nonEmpty1{ Timed<int>(1_cs, 10_cs, 1) };
|
||||
Timeline<int> nonEmpty1 { Timed<int>(1_cs, 10_cs, 1) };
|
||||
EXPECT_EQ(TimeRange(1_cs, 10_cs), nonEmpty1.getRange());
|
||||
|
||||
Timeline<int> nonEmpty2{ Timed<int>(-10_cs, 5_cs, 1), Timed<int>(10_cs, 11_cs, 5) };
|
||||
Timeline<int> nonEmpty2 { Timed<int>(-10_cs, 5_cs, 1), Timed<int>(10_cs, 11_cs, 5) };
|
||||
EXPECT_EQ(TimeRange(-10_cs, 11_cs), nonEmpty2.getRange());
|
||||
}
|
||||
|
||||
TEST(Timeline, iterators) {
|
||||
Timeline<int> timeline{ Timed<int>(-5_cs, 0_cs, 10), Timed<int>(5_cs, 15_cs, 9) };
|
||||
Timeline<int> timeline { Timed<int>(-5_cs, 0_cs, 10), Timed<int>(5_cs, 15_cs, 9) };
|
||||
auto expected = { Timed<int>(-5_cs, 0_cs, 10), Timed<int>(5_cs, 15_cs, 9) };
|
||||
EXPECT_THAT(timeline, ElementsAreArray(expected));
|
||||
|
||||
|
@ -103,17 +102,24 @@ TEST(Timeline, iterators) {
|
|||
EXPECT_THAT(reversedActual, ElementsAreArray(reversedExpected));
|
||||
}
|
||||
|
||||
void testFind(const Timeline<int>& timeline, FindMode findMode, const initializer_list<Timed<int>*> expectedResults) {
|
||||
void testFind(
|
||||
const Timeline<int>& timeline,
|
||||
FindMode findMode,
|
||||
const initializer_list<Timed<int>*> expectedResults
|
||||
) {
|
||||
int i = -1;
|
||||
for (Timed<int>* expectedResult : expectedResults) {
|
||||
auto it = timeline.find(centiseconds(++i), findMode);
|
||||
if (expectedResult != nullptr) {
|
||||
EXPECT_NE(it, timeline.end()) << "Timeline: " << timeline << "; findMode: " << static_cast<int>(findMode) << "; i: " << i;
|
||||
EXPECT_NE(it, timeline.end())
|
||||
<< "Timeline: " << timeline << "; findMode: " << static_cast<int>(findMode) << "; i: " << i;
|
||||
if (it != timeline.end()) {
|
||||
EXPECT_EQ(*expectedResult, *it) << "Timeline: " << timeline << "; findMode: " << static_cast<int>(findMode) << "; i: " << i;
|
||||
EXPECT_EQ(*expectedResult, *it)
|
||||
<< "Timeline: " << timeline << "; findMode: " << static_cast<int>(findMode) << "; i: " << i;
|
||||
}
|
||||
} else {
|
||||
EXPECT_EQ(timeline.end(), it) << "Timeline: " << timeline << "; findMode: " << static_cast<int>(findMode) << "; i: " << i;
|
||||
EXPECT_EQ(timeline.end(), it)
|
||||
<< "Timeline: " << timeline << "; findMode: " << static_cast<int>(findMode) << "; i: " << i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -122,7 +128,7 @@ TEST(Timeline, find) {
|
|||
Timed<int> a = Timed<int>(1_cs, 2_cs, 1);
|
||||
Timed<int> b = Timed<int>(2_cs, 5_cs, 2);
|
||||
Timed<int> c = Timed<int>(7_cs, 9_cs, 3);
|
||||
Timeline<int> timeline{ a, b, c };
|
||||
const Timeline<int> timeline { a, b, c };
|
||||
|
||||
testFind(timeline, FindMode::SampleLeft, { nullptr, nullptr, &a, &b, &b, &b, nullptr, nullptr, &c, &c, nullptr });
|
||||
testFind(timeline, FindMode::SampleRight, { nullptr, &a, &b, &b, &b, nullptr, nullptr, &c, &c, nullptr, nullptr });
|
||||
|
@@ -134,9 +140,10 @@ TEST(Timeline, get) {
     Timed<int> a = Timed<int>(1_cs, 2_cs, 1);
     Timed<int> b = Timed<int>(2_cs, 5_cs, 2);
     Timed<int> c = Timed<int>(7_cs, 9_cs, 3);
-    Timeline<int> timeline{ a, b, c };
+    Timeline<int> timeline { a, b, c };

-    initializer_list<Timed<int>*> expectedResults = { nullptr, &a, &b, &b, &b, nullptr, nullptr, &c, &c, nullptr, nullptr };
+    initializer_list<Timed<int>*> expectedResults =
+        { nullptr, &a, &b, &b, &b, nullptr, nullptr, &c, &c, nullptr, nullptr };
     int i = -1;
     for (Timed<int>* expectedResult : expectedResults) {
         optional<const Timed<int>&> value = timeline.get(centiseconds(++i));

@@ -152,7 +159,7 @@ TEST(Timeline, get) {
 }

 TEST(Timeline, clear) {
-    Timeline<int> original{ { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 7_cs, 9_cs, 3 } };
+    const Timeline<int> original { { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 7_cs, 9_cs, 3 } };

     {
         auto timeline = original;

@@ -163,33 +170,33 @@ TEST(Timeline, clear) {
     {
         auto timeline = original;
         timeline.clear(1_cs, 2_cs);
-        Timeline<int> expected{ { 2_cs, 5_cs, 2 }, { 7_cs, 9_cs, 3 } };
+        Timeline<int> expected { { 2_cs, 5_cs, 2 }, { 7_cs, 9_cs, 3 } };
         EXPECT_EQ(expected, timeline);
     }

     {
         auto timeline = original;
         timeline.clear(3_cs, 4_cs);
-        Timeline<int> expected{ { 1_cs, 2_cs, 1 }, { 2_cs, 3_cs, 2 }, { 4_cs, 5_cs, 2}, { 7_cs, 9_cs, 3} };
+        Timeline<int> expected { { 1_cs, 2_cs, 1 }, { 2_cs, 3_cs, 2 }, { 4_cs, 5_cs, 2 }, { 7_cs, 9_cs, 3 } };
         EXPECT_EQ(expected, timeline);
     }

     {
         auto timeline = original;
         timeline.clear(6_cs, 8_cs);
-        Timeline<int> expected{ { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 8_cs, 9_cs, 3 } };
+        Timeline<int> expected { { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 8_cs, 9_cs, 3 } };
         EXPECT_EQ(expected, timeline);
     }

     {
         auto timeline = original;
         timeline.clear(8_cs, 10_cs);
-        Timeline<int> expected{ { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 7_cs, 8_cs, 3 } };
+        Timeline<int> expected { { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 7_cs, 8_cs, 3 } };
         EXPECT_EQ(expected, timeline);
     }
 }

-void testSetter(std::function<void(const Timed<int>&, Timeline<int>&)> set) {
+void testSetter(const std::function<void(const Timed<int>&, Timeline<int>&)>& set) {
     Timeline<int> timeline;
     vector<optional<int>> expectedValues(20, none);
     auto newElements = {

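The new testSetter signature takes the callback by const reference; the general pattern, sketched independently of the project's types (applyTwice and the lambda are made up for illustration):

    #include <functional>
    #include <iostream>

    // Taking std::function by const reference avoids copying the callable
    // (and any captured state) on each call to the helper.
    void applyTwice(const std::function<void(int)>& action) {
        action(1);
        action(2);
    }

    int main() {
        int sum = 0;
        applyTwice([&sum](int value) { sum += value; });
        std::cout << "sum: " << sum << '\n'; // prints "sum: 3"
    }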
@@ -218,7 +225,7 @@ void testSetter(std::function<void(const Timed<int>&, Timeline<int>&)> set) {
         set(newElement, timeline);

         // Update expected value for every index
-        centiseconds elementStart = max(newElement.getStart(), 0_cs);
+        const centiseconds elementStart = max(newElement.getStart(), 0_cs);
         centiseconds elementEnd = newElement.getEnd();
         for (centiseconds t = elementStart; t < elementEnd; ++t) {
             expectedValues[t.count()] = newElement.getValue();

@@ -232,13 +239,14 @@ void testSetter(std::function<void(const Timed<int>&, Timeline<int>&)> set) {

         // Check timeline via iterators
         for (const auto& element : timeline) {
-            // No element shound have zero-length
+            // No element should have zero-length
             EXPECT_LT(0_cs, element.getDuration());

             // Element should match expected values
             for (centiseconds t = std::max(centiseconds::zero(), element.getStart()); t < element.getEnd(); ++t) {
                 optional<int> expectedValue = expectedValues[t.count()];
-                EXPECT_TRUE(expectedValue) << "Index " << t.count() << " should not have a value, but is within element " << element << ". "
+                EXPECT_TRUE(expectedValue)
+                    << "Index " << t.count() << " should not have a value, but is within element " << element << ". "
                     << "newElementIndex: " << newElementIndex;
                 if (expectedValue) {
                     EXPECT_EQ(*expectedValue, element.getValue());

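The expectedValue checks above assert on the optional before dereferencing it; the same guard pattern shown with std::optional (these tests appear to use Boost's optional, so this is only an analogous sketch):

    #include <gtest/gtest.h>
    #include <optional>

    TEST(OptionalSketch, checkBeforeDereference) {
        const std::optional<int> expectedValue = 42;
        // Report a missing value as a test failure, then guard the dereference
        // so it never happens on an empty optional.
        EXPECT_TRUE(expectedValue.has_value());
        if (expectedValue) {
            EXPECT_EQ(42, *expectedValue);
        }
    }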
@@ -261,8 +269,8 @@ TEST(Timeline, set) {
 }

 TEST(Timeline, indexer_get) {
-    Timeline<int> timeline{ { 1_cs, 2_cs, 1 }, { 2_cs, 4_cs, 2 }, { 6_cs, 9_cs, 3 } };
-    vector<optional<int>> expectedValues{ none, 1, 2, 2, none, none, 3, 3, 3 };
+    Timeline<int> timeline { { 1_cs, 2_cs, 1 }, { 2_cs, 4_cs, 2 }, { 6_cs, 9_cs, 3 } };
+    vector<optional<int>> expectedValues { none, 1, 2, 2, none, none, 3, 3, 3 };
     for (centiseconds t = 0_cs; t < 9_cs; ++t) {
         {
             optional<int> actual = timeline[t];

@@ -294,63 +302,63 @@ TEST(Timeline, indexer_set) {
 }

 TEST(Timeline, joinAdjacent) {
-    Timeline<int> timeline{
-        {1_cs, 2_cs, 1},
-        {2_cs, 4_cs, 2},
-        {3_cs, 6_cs, 2},
-        {6_cs, 7_cs, 2},
+    Timeline<int> timeline {
+        { 1_cs, 2_cs, 1 },
+        { 2_cs, 4_cs, 2 },
+        { 3_cs, 6_cs, 2 },
+        { 6_cs, 7_cs, 2 },
         // Gap
-        {8_cs, 10_cs, 2},
-        {11_cs, 12_cs, 3}
+        { 8_cs, 10_cs, 2 },
+        { 11_cs, 12_cs, 3 }
     };
     EXPECT_EQ(6, timeline.size());
     timeline.joinAdjacent();
     EXPECT_EQ(4, timeline.size());

     Timed<int> expectedJoined[] = {
-        {1_cs, 2_cs, 1},
-        {2_cs, 7_cs, 2},
+        { 1_cs, 2_cs, 1 },
+        { 2_cs, 7_cs, 2 },
         // Gap
-        {8_cs, 10_cs, 2},
-        {11_cs, 12_cs, 3}
+        { 8_cs, 10_cs, 2 },
+        { 11_cs, 12_cs, 3 }
     };
     EXPECT_THAT(timeline, ElementsAreArray(expectedJoined));
 }

 TEST(Timeline, autoJoin) {
-    JoiningTimeline<int> timeline{
-        {1_cs, 2_cs, 1},
-        {2_cs, 4_cs, 2},
-        {3_cs, 6_cs, 2},
-        {6_cs, 7_cs, 2},
+    JoiningTimeline<int> timeline {
+        { 1_cs, 2_cs, 1 },
+        { 2_cs, 4_cs, 2 },
+        { 3_cs, 6_cs, 2 },
+        { 6_cs, 7_cs, 2 },
         // Gap
-        {8_cs, 10_cs, 2},
-        {11_cs, 12_cs, 3}
+        { 8_cs, 10_cs, 2 },
+        { 11_cs, 12_cs, 3 }
     };
     Timed<int> expectedJoined[] = {
-        {1_cs, 2_cs, 1},
-        {2_cs, 7_cs, 2},
+        { 1_cs, 2_cs, 1 },
+        { 2_cs, 7_cs, 2 },
         // Gap
-        {8_cs, 10_cs, 2},
-        {11_cs, 12_cs, 3}
+        { 8_cs, 10_cs, 2 },
+        { 11_cs, 12_cs, 3 }
     };
     EXPECT_EQ(4, timeline.size());
     EXPECT_THAT(timeline, ElementsAreArray(expectedJoined));
 }

 TEST(Timeline, shift) {
-    Timeline<int> timeline{ { 1_cs, 2_cs, 1 },{ 2_cs, 5_cs, 2 },{ 7_cs, 9_cs, 3 } };
-    Timeline<int> expected{ { 3_cs, 4_cs, 1 },{ 4_cs, 7_cs, 2 },{ 9_cs, 11_cs, 3 } };
+    Timeline<int> timeline { { 1_cs, 2_cs, 1 }, { 2_cs, 5_cs, 2 }, { 7_cs, 9_cs, 3 } };
+    Timeline<int> expected { { 3_cs, 4_cs, 1 }, { 4_cs, 7_cs, 2 }, { 9_cs, 11_cs, 3 } };
     timeline.shift(2_cs);
     EXPECT_EQ(expected, timeline);
 }

 TEST(Timeline, equality) {
     vector<Timeline<int>> timelines = {
-        Timeline<int>{},
-        Timeline<int>{ { 1_cs, 2_cs, 0 } },
-        Timeline<int>{ { 1_cs, 2_cs, 1 } },
-        Timeline<int>{ { -10_cs, 0_cs, 0 } }
+        Timeline<int> {},
+        Timeline<int> { { 1_cs, 2_cs, 0 } },
+        Timeline<int> { { 1_cs, 2_cs, 1 } },
+        Timeline<int> { { -10_cs, 0_cs, 0 } }
     };

     for (size_t i = 0; i < timelines.size(); ++i) {

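As with expectedJoined above, ElementsAreArray accepts a plain C array and deduces its length; a stand-alone sketch using int in place of the project's Timed<int>:

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>
    #include <vector>

    using ::testing::ElementsAreArray;

    TEST(ElementsAreArraySketch, acceptsNativeArray) {
        const int expected[] = { 1, 2, 3 };       // length is deduced by the matcher
        const std::vector<int> actual { 1, 2, 3 };
        EXPECT_THAT(actual, ElementsAreArray(expected));
    }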
@@ -13,19 +13,20 @@ TEST(wordToPhones, basic) {

     // The following phones are based on actual output, *not* ideal output.
     vector<pair<string, vector<Phone>>> words {
-        { "once", { Phone::AA, Phone::N, Phone::S }},
-        { "upon", { Phone::UW, Phone::P, Phone::AH, Phone::N }},
-        { "a", { Phone::AH }},
-        { "midnight", { Phone::M, Phone::IH, Phone::D, Phone::N, Phone::AY, Phone::T }},
-        { "dreary", { Phone::D, Phone::R, Phone::IY, Phone::R, Phone::IY }},
-        { "while", { Phone::W, Phone::AY, Phone::L }},
-        { "i", { Phone::IY }},
-        { "pondered", { Phone::P, Phone::AA, Phone::N, Phone::D, Phone::IY, Phone::R, Phone::EH, Phone::D }},
-        { "weak", { Phone::W, Phone::IY, Phone::K }},
-        { "and", { Phone::AE, Phone::N, Phone::D }},
-        { "weary", { Phone::W, Phone::IY, Phone::R, Phone::IY }}
+        { "once", { Phone::AA, Phone::N, Phone::S } },
+        { "upon", { Phone::UW, Phone::P, Phone::AH, Phone::N } },
+        { "a", { Phone::AH } },
+        { "midnight", { Phone::M, Phone::IH, Phone::D, Phone::N, Phone::AY, Phone::T } },
+        { "dreary", { Phone::D, Phone::R, Phone::IY, Phone::R, Phone::IY } },
+        { "while", { Phone::W, Phone::AY, Phone::L } },
+        { "i", { Phone::IY } },
+        { "pondered", { Phone::P, Phone::AA, Phone::N, Phone::D, Phone::IY, Phone::R, Phone::EH, Phone::D } },
+        { "weak", { Phone::W, Phone::IY, Phone::K } },
+        { "and", { Phone::AE, Phone::N, Phone::D } },
+        { "weary", { Phone::W, Phone::IY, Phone::R, Phone::IY } }
     };
     for (const auto& word : words) {
-        EXPECT_THAT(wordToPhones(word.first), ElementsAreArray(word.second)) << "Original word: '" << word.first << "'";
+        EXPECT_THAT(wordToPhones(word.first), ElementsAreArray(word.second))
+            << "Original word: '" << word.first << "'";
     }
 }

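The loop above accesses word.first and word.second; if the project were to target C++17, structured bindings would let such table-driven loops name both halves of the pair directly. A sketch under that assumption (the data is made up; the real table maps words to Phone sequences):

    #include <gtest/gtest.h>
    #include <string>
    #include <utility>
    #include <vector>

    TEST(TableDrivenSketch, structuredBindings) {
        const std::vector<std::pair<std::string, std::size_t>> words {
            { "once", 4 },
            { "upon", 4 },
            { "a", 1 }
        };
        for (const auto& [word, expectedLength] : words) {
            EXPECT_EQ(expectedLength, word.size()) << "Original word: '" << word << "'";
        }
    }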
@@ -3,7 +3,6 @@

 using namespace testing;
 using std::vector;
-using std::initializer_list;
 using std::pair;

 TEST(getPairs, emptyCollection) {

@@ -16,18 +15,18 @@ TEST(getPairs, oneElementCollection) {

 TEST(getPairs, validCollection) {
     {
-        auto actual = getPairs(vector<int>{ 1, 2 });
-        vector<pair<int, int>> expected{ {1, 2} };
+        const auto actual = getPairs(vector<int> { 1, 2 });
+        const vector<pair<int, int>> expected { { 1, 2 } };
         EXPECT_THAT(actual, ElementsAreArray(expected));
     }
     {
-        auto actual = getPairs(vector<int>{ 1, 2, 3 });
-        vector<pair<int, int>> expected{ {1, 2}, {2, 3} };
+        const auto actual = getPairs(vector<int> { 1, 2, 3 });
+        const vector<pair<int, int>> expected { { 1, 2 }, { 2, 3 } };
         EXPECT_THAT(actual, ElementsAreArray(expected));
     }
     {
-        auto actual = getPairs(vector<int>{ 1, 2, 3, 4 });
-        vector<pair<int, int>> expected{ {1, 2}, {2, 3}, {3, 4} };
+        const auto actual = getPairs(vector<int> { 1, 2, 3, 4 });
+        const vector<pair<int, int>> expected { { 1, 2 }, { 2, 3 }, { 3, 4 } };
         EXPECT_THAT(actual, ElementsAreArray(expected));
     }
 }

@@ -20,7 +20,8 @@ TEST(splitIntoLines, handlesEmptyElements) {
 // wrapSingleLineString

 TEST(wrapSingleLineString, basic) {
-    const char* lipsum = "Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua.";
+    const char* lipsum =
+        "Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua.";
     EXPECT_THAT(wrapSingleLineString(lipsum, 30), ElementsAre("Lorem ipsum dolor sit amet,", "consectetur adipisici elit,", "sed eiusmod tempor incidunt ut", "labore et dolore magna aliqua."));
 }

@@ -76,8 +77,10 @@ TEST(wrapString, basic) {
 // latin1ToWide

 TEST(latin1ToWide, basic) {
-    string pangramLatin1 = "D\350s No\353l o\371 un z\351phyr ha\357 me v\352t de gla\347ons w\374rmiens, je d\356ne d'exquis r\364tis de boeuf au kir \340 l'a\377 d'\342ge m\373r & c\346tera!";
-    wstring pangramWide = L"Dès Noël où un zéphyr haï me vêt de glaçons würmiens, je dîne d'exquis rôtis de boeuf au kir à l'aÿ d'âge mûr & cætera!";
+    const string pangramLatin1 =
+        "D\350s No\353l o\371 un z\351phyr ha\357 me v\352t de gla\347ons w\374rmiens, je d\356ne d'exquis r\364tis de boeuf au kir \340 l'a\377 d'\342ge m\373r & c\346tera!";
+    wstring pangramWide =
+        L"Dès Noël où un zéphyr haï me vêt de glaçons würmiens, je dîne d'exquis rôtis de boeuf au kir à l'aÿ d'âge mûr & cætera!";
     EXPECT_EQ(pangramWide, latin1ToWide(pangramLatin1));
 }

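For context on the escape sequences above: ISO 8859-1 (Latin-1) code points coincide with the first 256 Unicode code points, so each Latin-1 byte (e.g. \350 for è) corresponds to the wide character with the same value. A naive illustration of that relationship, not the project's latin1ToWide implementation:

    #include <iostream>
    #include <string>

    // Widen each Latin-1 byte by value; this works only because ISO 8859-1
    // maps directly onto Unicode's first 256 code points.
    std::wstring naiveLatin1ToWide(const std::string& latin1) {
        std::wstring result;
        result.reserve(latin1.size());
        for (const char c : latin1) {
            result += static_cast<wchar_t>(static_cast<unsigned char>(c));
        }
        return result;
    }

    int main() {
        const std::wstring wide = naiveLatin1ToWide("D\350s No\353l");
        std::wcout << wide.size() << L" wide characters\n"; // 8, including the space
    }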
@@ -40,15 +40,22 @@ TEST(tokenizeText, numbers) {

 TEST(tokenizeText, abbreviations) {
     EXPECT_THAT(
-        tokenizeText("Prof. Foo lives on Dr. Dolittle Dr.", [](const string& word) { return word == "prof."; }),
+        tokenizeText(
+            "Prof. Foo lives on Dr. Dolittle Dr.",
+            [](const string& word) { return word == "prof."; }
+        ),
         ElementsAre("prof.", "foo", "lives", "on", "doctor", "dolittle", "drive")
     );
 }

 TEST(tokenizeText, apostrophes) {
     EXPECT_THAT(
-        tokenizeText("'Tis said he'd wish'd for a 'bus 'cause he wouldn't walk.", [](const string& word) { return word == "wouldn't"; }),
-        ElementsAreArray(vector<string>{ "tis", "said", "he'd", "wish'd", "for", "a", "bus", "cause", "he", "wouldn't", "walk" })
+        tokenizeText(
+            "'Tis said he'd wish'd for a 'bus 'cause he wouldn't walk.",
+            [](const string& word) { return word == "wouldn't"; }
+        ),
+        ElementsAreArray(
+            vector<string>{ "tis", "said", "he'd", "wish'd", "for", "a", "bus", "cause", "he", "wouldn't", "walk" })
     );
 }

@@ -75,7 +82,7 @@ TEST(tokenizeText, wordsUseLimitedCharacters) {
         utf8::append(c, back_inserter(input));
     }

-    regex legal("^[a-z']+$");
+    const regex legal("^[a-z']+$");
     auto words = tokenizeText(input, returnTrue);
     for (const string& word : words) {
         EXPECT_TRUE(std::regex_match(word, legal)) << word;
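The regex above is declared const and constructed once, outside the loop over words; std::regex construction is comparatively expensive, so compiling the pattern a single time and reusing it is the usual approach. A small stand-alone sketch:

    #include <iostream>
    #include <regex>
    #include <string>
    #include <vector>

    int main() {
        // Compile the pattern once; reuse it for every match.
        const std::regex legal("^[a-z']+$");
        const std::vector<std::string> words { "abc", "it's", "X1" };
        for (const std::string& word : words) {
            std::cout << word << ": " << std::regex_match(word, legal) << '\n';
        }
    }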