From de9cd63f1353ea7c7cd70c1577d8e7d907c4475f Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 18 Feb 2026 02:53:17 +0000 Subject: [PATCH 1/3] Implement Talk Easy: on-device voice dictation app for iOS 26 Three-layer architecture: - Layer 1: AVAudioEngine for microphone capture - Layer 2: SpeechAnalyzer (iOS 26) for real-time on-device transcription - Layer 3: Foundation Models (~3B LLM) for text cleanup and formatting Core components: - SwiftData models: DictationMode, DictationRecord, VocabularyEntry - Services: AudioEngine, SpeechRecognition, TextProcessing, Clipboard - Multi-pass LLM pipeline with @Generable guided generation - RecordingViewModel orchestrating the full recording flow SwiftUI views: - RecordingView with animated record button and live transcript - ModeSelector with 7 built-in modes (General, Text, Email, Notes, etc.) - ModesView/ModeEditorView for custom mode management - HistoryView with search across past dictations - SettingsView with permissions, privacy info, data management Xcode project: com.idlefusion.talkeasy, iOS 26, Swift 6, iPhone only, objectVersion 77 with file system synchronized groups. 
https://claude.ai/code/session_01UoaxzhgT4YzixnKEmr2uKE --- README.md | 90 ++++- TalkEasy.xcodeproj/project.pbxproj | 345 ++++++++++++++++++ TalkEasy/ContentView.swift | 34 ++ TalkEasy/Models/DictationMode.swift | 46 +++ TalkEasy/Models/DictationRecord.swift | 27 ++ TalkEasy/Models/FormattingRules.swift | 23 ++ TalkEasy/Models/VocabularyEntry.swift | 21 ++ .../AccentColor.colorset/Contents.json | 38 ++ .../AppIcon.appiconset/Contents.json | 13 + .../Resources/Assets.xcassets/Contents.json | 6 + TalkEasy/Services/AudioEngineService.swift | 69 ++++ TalkEasy/Services/ClipboardService.swift | 22 ++ .../Services/SpeechRecognitionService.swift | 93 +++++ TalkEasy/Services/TextProcessingService.swift | 199 ++++++++++ TalkEasy/TalkEasyApp.swift | 49 +++ TalkEasy/Utilities/DefaultModes.swift | 100 +++++ TalkEasy/Utilities/HapticManager.swift | 44 +++ TalkEasy/Utilities/PermissionsManager.swift | 63 ++++ TalkEasy/Utilities/TextChunker.swift | 76 ++++ TalkEasy/ViewModels/RecordingViewModel.swift | 314 ++++++++++++++++ TalkEasy/Views/ActionBar.swift | 110 ++++++ TalkEasy/Views/HistoryView.swift | 220 +++++++++++ TalkEasy/Views/ModeEditorView.swift | 177 +++++++++ TalkEasy/Views/ModeSelector.swift | 72 ++++ TalkEasy/Views/ModesView.swift | 117 ++++++ TalkEasy/Views/RecordButton.swift | 78 ++++ TalkEasy/Views/RecordingView.swift | 95 +++++ TalkEasy/Views/SettingsView.swift | 217 +++++++++++ TalkEasy/Views/TranscriptView.swift | 108 ++++++ 29 files changed, 2865 insertions(+), 1 deletion(-) create mode 100644 TalkEasy.xcodeproj/project.pbxproj create mode 100644 TalkEasy/ContentView.swift create mode 100644 TalkEasy/Models/DictationMode.swift create mode 100644 TalkEasy/Models/DictationRecord.swift create mode 100644 TalkEasy/Models/FormattingRules.swift create mode 100644 TalkEasy/Models/VocabularyEntry.swift create mode 100644 TalkEasy/Resources/Assets.xcassets/AccentColor.colorset/Contents.json create mode 100644 
TalkEasy/Resources/Assets.xcassets/AppIcon.appiconset/Contents.json create mode 100644 TalkEasy/Resources/Assets.xcassets/Contents.json create mode 100644 TalkEasy/Services/AudioEngineService.swift create mode 100644 TalkEasy/Services/ClipboardService.swift create mode 100644 TalkEasy/Services/SpeechRecognitionService.swift create mode 100644 TalkEasy/Services/TextProcessingService.swift create mode 100644 TalkEasy/TalkEasyApp.swift create mode 100644 TalkEasy/Utilities/DefaultModes.swift create mode 100644 TalkEasy/Utilities/HapticManager.swift create mode 100644 TalkEasy/Utilities/PermissionsManager.swift create mode 100644 TalkEasy/Utilities/TextChunker.swift create mode 100644 TalkEasy/ViewModels/RecordingViewModel.swift create mode 100644 TalkEasy/Views/ActionBar.swift create mode 100644 TalkEasy/Views/HistoryView.swift create mode 100644 TalkEasy/Views/ModeEditorView.swift create mode 100644 TalkEasy/Views/ModeSelector.swift create mode 100644 TalkEasy/Views/ModesView.swift create mode 100644 TalkEasy/Views/RecordButton.swift create mode 100644 TalkEasy/Views/RecordingView.swift create mode 100644 TalkEasy/Views/SettingsView.swift create mode 100644 TalkEasy/Views/TranscriptView.swift diff --git a/README.md b/README.md index 372b11f..c5cd8e3 100644 --- a/README.md +++ b/README.md @@ -1 +1,89 @@ -# talk-easy \ No newline at end of file +# Talk Easy + +100% on-device voice dictation for iPhone. Speak naturally, get clean formatted text. + +## What It Does + +Talk Easy captures your voice, transcribes it in real-time using Apple's SpeechAnalyzer, then cleans and formats the text using the on-device Foundation Models LLM. No servers, no API keys, no subscriptions. Everything stays on your device. 
+ +## Requirements + +- iPhone 15 Pro or later (Apple Intelligence required) +- iOS 26.0+ +- Xcode 26+ + +## Architecture + +``` +Layer 3: Apple Foundation Models (~3B on-device LLM) + Text cleanup, tone adaptation, context-aware formatting + +Layer 2: SpeechAnalyzer (iOS 26) + Real-time streaming transcription, voice activity detection + +Layer 1: AVAudioEngine + UIPasteboard + Microphone input, copy-to-clipboard, share sheet +``` + +## Project Structure + +``` +TalkEasy/ +├── TalkEasyApp.swift App entry point, SwiftData setup +├── ContentView.swift Tab-based navigation +├── Models/ +│ ├── DictationMode.swift SwiftData model for dictation modes +│ ├── DictationRecord.swift SwiftData model for history +│ ├── FormattingRules.swift Codable formatting configuration +│ └── VocabularyEntry.swift SwiftData model for learned vocabulary +├── Services/ +│ ├── AudioEngineService.swift AVAudioEngine microphone capture +│ ├── ClipboardService.swift UIPasteboard operations +│ ├── SpeechRecognitionService.swift SpeechAnalyzer wrapper +│ └── TextProcessingService.swift Foundation Models LLM pipeline +├── ViewModels/ +│ └── RecordingViewModel.swift Recording flow orchestrator +├── Views/ +│ ├── ActionBar.swift Copy/Share/New action buttons +│ ├── HistoryView.swift Past dictations with search +│ ├── ModeEditorView.swift Create/edit dictation modes +│ ├── ModeSelector.swift Horizontal mode chip selector +│ ├── ModesView.swift Modes list management +│ ├── RecordButton.swift Animated record/stop button +│ ├── RecordingView.swift Main recording screen +│ ├── SettingsView.swift Permissions, privacy, data management +│ └── TranscriptView.swift Live transcript display +├── Utilities/ +│ ├── DefaultModes.swift Built-in mode definitions +│ ├── HapticManager.swift Haptic feedback +│ ├── PermissionsManager.swift Mic + speech permissions +│ └── TextChunker.swift Text splitting for LLM token limits +└── Resources/ + └── Assets.xcassets/ App icon, accent color +``` + +## Built-in Modes + +- 
**General** — Clean prose, neutral tone +- **Text Message** — Casual, short, conversational +- **Email** — Professional with greeting/sign-off +- **Notes** — Concise bullet points +- **Social Media** — Engaging, hashtag-friendly +- **Technical** — Preserves jargon and code terms +- **Creative** — Minimal cleanup, preserves voice + +## Tech Stack + +| Component | Framework | +|-----------|-----------| +| Speech-to-Text | SpeechAnalyzer (iOS 26) | +| Text Processing | Foundation Models (~3B LLM) | +| Structured Output | @Generable guided generation | +| UI | SwiftUI | +| Data | SwiftData | +| Audio | AVAudioEngine | +| Language | Swift 6 | + +## Bundle ID + +`com.idlefusion.talkeasy` diff --git a/TalkEasy.xcodeproj/project.pbxproj b/TalkEasy.xcodeproj/project.pbxproj new file mode 100644 index 0000000..b0ff14a --- /dev/null +++ b/TalkEasy.xcodeproj/project.pbxproj @@ -0,0 +1,345 @@ +// !$*UTF8*$! +{ + archiveVersion = 1; + classes = { + }; + objectVersion = 77; + objects = { + +/* Begin PBXFileReference section */ + A10000000000000000000001 /* TalkEasy.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = TalkEasy.app; sourceTree = BUILT_PRODUCTS_DIR; }; +/* End PBXFileReference section */ + +/* Begin PBXFileSystemSynchronizedRootGroup section */ + A20000000000000000000001 /* TalkEasy */ = { + isa = PBXFileSystemSynchronizedRootGroup; + path = TalkEasy; + sourceTree = ""; + }; +/* End PBXFileSystemSynchronizedRootGroup section */ + +/* Begin PBXFrameworksBuildPhase section */ + D10000000000000000000002 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + C10000000000000000000001 = { + isa = PBXGroup; + children = ( + A20000000000000000000001 /* TalkEasy */, + C10000000000000000000002 /* Products */, + ); + sourceTree = ""; + }; + 
C10000000000000000000002 /* Products */ = { + isa = PBXGroup; + children = ( + A10000000000000000000001 /* TalkEasy.app */, + ); + name = Products; + sourceTree = "<group>"; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + E10000000000000000000001 /* TalkEasy */ = { + isa = PBXNativeTarget; + buildConfigurationList = F10000000000000000000003 /* Build configuration list for PBXNativeTarget "TalkEasy" */; + buildPhases = ( + D10000000000000000000001 /* Sources */, + D10000000000000000000002 /* Frameworks */, + D10000000000000000000003 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + fileSystemSynchronizedGroups = ( + A20000000000000000000001 /* TalkEasy */, + ); + name = TalkEasy; + packageProductDependencies = ( + ); + productName = TalkEasy; + productReference = A10000000000000000000001 /* TalkEasy.app */; + productType = "com.apple.product-type.application"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 089C1665B0F415FC11111111 /* Project object */ = { + isa = PBXProject; + attributes = { + BuildIndependentTargetsInParallel = 1; + LastSwiftUpdateCheck = 2600; + LastUpgradeCheck = 2600; + TargetAttributes = { + E10000000000000000000001 = { + CreatedOnToolsVersion = 26.0; + }; + }; + }; + buildConfigurationList = F10000000000000000000006 /* Build configuration list for PBXProject "TalkEasy" */; + developmentRegion = en; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = C10000000000000000000001; + minimizedProjectReferenceProxies = 1; + preferredProjectObjectVersion = 77; + productRefGroup = C10000000000000000000002 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + E10000000000000000000001 /* TalkEasy */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + D10000000000000000000003 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + 
runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + D10000000000000000000001 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin XCBuildConfiguration section */ + F10000000000000000000001 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_TEAM = ""; + ENABLE_PREVIEWS = YES; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_CFBundleDisplayName = "Talk Easy"; + INFOPLIST_KEY_LSApplicationCategoryType = "public.app-category.productivity"; + INFOPLIST_KEY_NSMicrophoneUsageDescription = "Talk Easy needs access to your microphone to transcribe your voice dictation."; + INFOPLIST_KEY_NSSpeechRecognitionUsageDescription = "Talk Easy uses on-device speech recognition to convert your voice to text."; + INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; + INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; + INFOPLIST_KEY_UILaunchScreen_Generation = YES; + INFOPLIST_KEY_UISupportedInterfaceOrientations = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + IPHONEOS_DEPLOYMENT_TARGET = 26.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0.0; + PRODUCT_BUNDLE_IDENTIFIER = com.idlefusion.talkeasy; + PRODUCT_NAME = "$(TARGET_NAME)"; + SUPPORTED_PLATFORMS = "iphoneos iphonesimulator"; + SUPPORTS_MACCATALYST = NO; + 
SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_VERSION = 6.0; + TARGETED_DEVICE_FAMILY = 1; + }; + name = Debug; + }; + F10000000000000000000002 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_TEAM = ""; + ENABLE_PREVIEWS = YES; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_CFBundleDisplayName = "Talk Easy"; + INFOPLIST_KEY_LSApplicationCategoryType = "public.app-category.productivity"; + INFOPLIST_KEY_NSMicrophoneUsageDescription = "Talk Easy needs access to your microphone to transcribe your voice dictation."; + INFOPLIST_KEY_NSSpeechRecognitionUsageDescription = "Talk Easy uses on-device speech recognition to convert your voice to text."; + INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; + INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; + INFOPLIST_KEY_UILaunchScreen_Generation = YES; + INFOPLIST_KEY_UISupportedInterfaceOrientations = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + IPHONEOS_DEPLOYMENT_TARGET = 26.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0.0; + PRODUCT_BUNDLE_IDENTIFIER = com.idlefusion.talkeasy; + PRODUCT_NAME = "$(TARGET_NAME)"; + SUPPORTED_PLATFORMS = "iphoneos iphonesimulator"; + SUPPORTS_MACCATALYST = NO; + SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_VERSION = 6.0; + TARGETED_DEVICE_FAMILY = 1; + }; + name = Release; + }; + F10000000000000000000004 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + 
ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + ENABLE_USER_SCRIPT_SANDBOXING = YES; + GCC_C_LANGUAGE_STANDARD = gnu17; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 26.0; + LOCALIZATION_PREFERS_STRING_CATALOGS = YES; + MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; + MTL_FAST_MATH = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = 
iphoneos; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = "DEBUG $(inherited)"; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + SWIFT_STRICT_CONCURRENCY = complete; + SWIFT_VERSION = 6.0; + }; + name = Debug; + }; + F10000000000000000000005 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_USER_SCRIPT_SANDBOXING = YES; + GCC_C_LANGUAGE_STANDARD = gnu17; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + 
GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 26.0; + LOCALIZATION_PREFERS_STRING_CATALOGS = YES; + MTL_ENABLE_DEBUG_INFO = NO; + MTL_FAST_MATH = YES; + SDKROOT = iphoneos; + SWIFT_COMPILATION_MODE = wholemodule; + SWIFT_STRICT_CONCURRENCY = complete; + SWIFT_VERSION = 6.0; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + F10000000000000000000003 /* Build configuration list for PBXNativeTarget "TalkEasy" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + F10000000000000000000001 /* Debug */, + F10000000000000000000002 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + F10000000000000000000006 /* Build configuration list for PBXProject "TalkEasy" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + F10000000000000000000004 /* Debug */, + F10000000000000000000005 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 089C1665B0F415FC11111111 /* Project object */; +} diff --git a/TalkEasy/ContentView.swift b/TalkEasy/ContentView.swift new file mode 100644 index 0000000..83c02ee --- /dev/null +++ b/TalkEasy/ContentView.swift @@ -0,0 +1,34 @@ +import SwiftUI + +/// Root view with tab-based navigation. 
+struct ContentView: View { + @State private var selectedTab = 0 + + var body: some View { + TabView(selection: $selectedTab) { + Tab("Record", systemImage: "mic.fill", value: 0) { + RecordingView() + } + + Tab("Modes", systemImage: "slider.horizontal.3", value: 1) { + ModesView() + } + + Tab("History", systemImage: "clock.arrow.circlepath", value: 2) { + HistoryView() + } + + Tab("Settings", systemImage: "gear", value: 3) { + SettingsView() + } + } + } +} + +#Preview { + ContentView() + .modelContainer( + for: [DictationMode.self, DictationRecord.self, VocabularyEntry.self], + inMemory: true + ) +} diff --git a/TalkEasy/Models/DictationMode.swift b/TalkEasy/Models/DictationMode.swift new file mode 100644 index 0000000..df58152 --- /dev/null +++ b/TalkEasy/Models/DictationMode.swift @@ -0,0 +1,46 @@ +import Foundation +import SwiftData + +@Model +final class DictationMode { + var name: String + var icon: String + var systemPrompt: String + var tone: String + @Attribute(.externalStorage) var formattingRulesData: Data + var customVocabulary: [String: String] + var isDefault: Bool + var sortOrder: Int + + @Transient + var formattingRules: FormattingRules { + get { + guard let rules = try? JSONDecoder().decode(FormattingRules.self, from: formattingRulesData) else { + return FormattingRules() + } + return rules + } + set { + formattingRulesData = (try? JSONEncoder().encode(newValue)) ?? Data() + } + } + + init( + name: String, + icon: String, + systemPrompt: String, + tone: String, + formattingRules: FormattingRules = FormattingRules(), + isDefault: Bool = false, + sortOrder: Int = 0 + ) { + self.name = name + self.icon = icon + self.systemPrompt = systemPrompt + self.tone = tone + self.formattingRulesData = (try? JSONEncoder().encode(formattingRules)) ?? 
Data() + self.customVocabulary = [:] + self.isDefault = isDefault + self.sortOrder = sortOrder + } +} diff --git a/TalkEasy/Models/DictationRecord.swift b/TalkEasy/Models/DictationRecord.swift new file mode 100644 index 0000000..9b0979b --- /dev/null +++ b/TalkEasy/Models/DictationRecord.swift @@ -0,0 +1,27 @@ +import Foundation +import SwiftData + +@Model +final class DictationRecord { + var rawTranscript: String + var cleanedText: String + var modeName: String + var language: String + var duration: TimeInterval + var timestamp: Date + + init( + rawTranscript: String, + cleanedText: String, + modeName: String, + language: String = "en", + duration: TimeInterval = 0 + ) { + self.rawTranscript = rawTranscript + self.cleanedText = cleanedText + self.modeName = modeName + self.language = language + self.duration = duration + self.timestamp = Date() + } +} diff --git a/TalkEasy/Models/FormattingRules.swift b/TalkEasy/Models/FormattingRules.swift new file mode 100644 index 0000000..509d9e1 --- /dev/null +++ b/TalkEasy/Models/FormattingRules.swift @@ -0,0 +1,23 @@ +import Foundation + +struct FormattingRules: Codable, Equatable, Sendable { + var removeFillerWords: Bool = true + var addPunctuation: Bool = true + var outputFormat: OutputFormat = .prose + + enum OutputFormat: String, Codable, CaseIterable, Sendable { + case prose + case bullets + case numbered + case code + + var displayName: String { + switch self { + case .prose: "Prose" + case .bullets: "Bullet Points" + case .numbered: "Numbered List" + case .code: "Code" + } + } + } +} diff --git a/TalkEasy/Models/VocabularyEntry.swift b/TalkEasy/Models/VocabularyEntry.swift new file mode 100644 index 0000000..4572c67 --- /dev/null +++ b/TalkEasy/Models/VocabularyEntry.swift @@ -0,0 +1,21 @@ +import Foundation +import SwiftData + +@Model +final class VocabularyEntry { + var spokenForm: String + var correctedForm: String + var frequency: Int + var dateAdded: Date + + init(spokenForm: String, correctedForm: String) { + 
self.spokenForm = spokenForm + self.correctedForm = correctedForm + self.frequency = 1 + self.dateAdded = Date() + } + + func incrementFrequency() { + frequency += 1 + } +} diff --git a/TalkEasy/Resources/Assets.xcassets/AccentColor.colorset/Contents.json b/TalkEasy/Resources/Assets.xcassets/AccentColor.colorset/Contents.json new file mode 100644 index 0000000..f1868f6 --- /dev/null +++ b/TalkEasy/Resources/Assets.xcassets/AccentColor.colorset/Contents.json @@ -0,0 +1,38 @@ +{ + "colors" : [ + { + "color" : { + "color-space" : "srgb", + "components" : { + "alpha" : "1.000", + "blue" : "0.996", + "green" : "0.439", + "red" : "0.263" + } + }, + "idiom" : "universal" + }, + { + "appearances" : [ + { + "appearance" : "luminosity", + "value" : "dark" + } + ], + "color" : { + "color-space" : "srgb", + "components" : { + "alpha" : "1.000", + "blue" : "1.000", + "green" : "0.541", + "red" : "0.384" + } + }, + "idiom" : "universal" + } + ], + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/TalkEasy/Resources/Assets.xcassets/AppIcon.appiconset/Contents.json b/TalkEasy/Resources/Assets.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 0000000..13613e3 --- /dev/null +++ b/TalkEasy/Resources/Assets.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,13 @@ +{ + "images" : [ + { + "idiom" : "universal", + "platform" : "ios", + "size" : "1024x1024" + } + ], + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/TalkEasy/Resources/Assets.xcassets/Contents.json b/TalkEasy/Resources/Assets.xcassets/Contents.json new file mode 100644 index 0000000..73c0059 --- /dev/null +++ b/TalkEasy/Resources/Assets.xcassets/Contents.json @@ -0,0 +1,6 @@ +{ + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/TalkEasy/Services/AudioEngineService.swift b/TalkEasy/Services/AudioEngineService.swift new file mode 100644 index 0000000..248c14b --- /dev/null +++ b/TalkEasy/Services/AudioEngineService.swift @@ -0,0 +1,69 @@ 
+@preconcurrency import AVFoundation + +/// Manages AVAudioEngine for microphone capture, providing audio buffers +/// as an AsyncStream for consumption by the speech recognition service. +@MainActor +final class AudioEngineService { + private(set) var engine: AVAudioEngine? + private var bufferContinuation: AsyncStream<AVAudioPCMBuffer>.Continuation? + private(set) var isRunning = false + + /// Configures the audio session for recording. + func configureAudioSession() throws { + let session = AVAudioSession.sharedInstance() + try session.setCategory(.record, mode: .measurement, options: .duckOthers) + try session.setActive(true, options: .notifyOthersOnDeactivation) + } + + /// Starts the audio engine and returns a stream of audio buffers. + /// The engine is also accessible via the `engine` property for + /// the speech recognizer to connect to. + func start() throws -> AsyncStream<AVAudioPCMBuffer> { + try configureAudioSession() + + let audioEngine = AVAudioEngine() + let inputNode = audioEngine.inputNode + let recordingFormat = inputNode.outputFormat(forBus: 0) + + let stream = AsyncStream<AVAudioPCMBuffer> { continuation in + self.bufferContinuation = continuation + + continuation.onTermination = { @Sendable _ in + Task { @MainActor in + self.stop() + } + } + } + + inputNode.installTap( + onBus: 0, + bufferSize: 1024, + format: recordingFormat + ) { [weak self] buffer, _ in + self?.bufferContinuation?.yield(buffer) + } + + audioEngine.prepare() + try audioEngine.start() + + self.engine = audioEngine + isRunning = true + + return stream + } + + /// Stops the audio engine and tears down the audio tap. + func stop() { + guard let audioEngine = engine else { return } + + audioEngine.inputNode.removeTap(onBus: 0) + audioEngine.stop() + + bufferContinuation?.finish() + bufferContinuation = nil + engine = nil + isRunning = false + + try? 
AVAudioSession.sharedInstance().setActive(false, options: .notifyOthersOnDeactivation) + } +} diff --git a/TalkEasy/Services/ClipboardService.swift b/TalkEasy/Services/ClipboardService.swift new file mode 100644 index 0000000..c1856bb --- /dev/null +++ b/TalkEasy/Services/ClipboardService.swift @@ -0,0 +1,22 @@ +import UIKit + +/// Provides clipboard operations for copying processed text. +enum ClipboardService { + /// Copies the given text to the system clipboard. + @MainActor + static func copy(_ text: String) { + UIPasteboard.general.string = text + } + + /// Returns the current clipboard text, if any. + @MainActor + static func paste() -> String? { + UIPasteboard.general.string + } + + /// Returns true if the clipboard currently contains text. + @MainActor + static var hasText: Bool { + UIPasteboard.general.hasStrings + } +} diff --git a/TalkEasy/Services/SpeechRecognitionService.swift b/TalkEasy/Services/SpeechRecognitionService.swift new file mode 100644 index 0000000..fc93320 --- /dev/null +++ b/TalkEasy/Services/SpeechRecognitionService.swift @@ -0,0 +1,93 @@ +@preconcurrency import Speech +@preconcurrency import AVFoundation + +/// Wraps Apple's SpeechAnalyzer (iOS 26) for real-time on-device transcription. +/// Provides transcription results and voice activity events as async streams. +@MainActor +final class SpeechRecognitionService { + private var analyzer: SpeechAnalyzer? + private var transcriber: SpeechTranscriber? + private var detector: SpeechDetector? + + private var transcriptContinuation: AsyncStream<TranscriptionUpdate>.Continuation? + private var silenceContinuation: AsyncStream<Void>.Continuation? + private var processingTask: Task<Void, Never>? + private var detectorTask: Task<Void, Never>? + + struct TranscriptionUpdate: Sendable { + let text: String + let isFinal: Bool + } + + /// Starts the speech analyzer with the given audio engine. + /// The analyzer installs its own processing on the engine's input node. 
+ func start(audioEngine: AVAudioEngine) throws { + let speechAnalyzer = SpeechAnalyzer() + let speechTranscriber = SpeechTranscriber() + let speechDetector = SpeechDetector() + + speechAnalyzer.addModule(speechTranscriber) + speechAnalyzer.addModule(speechDetector) + + self.analyzer = speechAnalyzer + self.transcriber = speechTranscriber + self.detector = speechDetector + + // Start consuming transcription results + processingTask = Task { [weak self] in + guard let transcriber = self?.transcriber else { return } + for await result in transcriber.results { + self?.transcriptContinuation?.yield( + TranscriptionUpdate( + text: result.bestTranscription.formattedString, + isFinal: result.isFinal + ) + ) + } + self?.transcriptContinuation?.finish() + } + + // Start monitoring voice activity + detectorTask = Task { [weak self] in + guard let detector = self?.detector else { return } + for await event in detector.events { + if case .speechEnded = event { + self?.silenceContinuation?.yield() + } + } + self?.silenceContinuation?.finish() + } + } + + /// Stops the speech analyzer and cancels all processing tasks. + func stop() { + processingTask?.cancel() + detectorTask?.cancel() + processingTask = nil + detectorTask = nil + + transcriptContinuation?.finish() + silenceContinuation?.finish() + transcriptContinuation = nil + silenceContinuation = nil + + analyzer = nil + transcriber = nil + detector = nil + } + + /// Returns an async stream of transcription updates. + func transcriptionResults() -> AsyncStream<TranscriptionUpdate> { + AsyncStream { continuation in + self.transcriptContinuation = continuation + } + } + + /// Returns an async stream that yields when silence is detected + /// (i.e., the user has stopped speaking). 
+ func silenceEvents() -> AsyncStream<Void> { + AsyncStream { continuation in + self.silenceContinuation = continuation + } + } +} diff --git a/TalkEasy/Services/TextProcessingService.swift b/TalkEasy/Services/TextProcessingService.swift new file mode 100644 index 0000000..ad15c39 --- /dev/null +++ b/TalkEasy/Services/TextProcessingService.swift @@ -0,0 +1,199 @@ +import Foundation +import FoundationModels + +/// Context extracted from a DictationMode for passing across concurrency boundaries. +struct ModeContext: Sendable { + let name: String + let systemPrompt: String + let tone: String + let formattingRules: FormattingRules + let customVocabulary: [String: String] + + static let general = ModeContext( + name: "General", + systemPrompt: "Clean up this dictation into clear, well-written prose.", + tone: "neutral", + formattingRules: FormattingRules(), + customVocabulary: [:] + ) +} + +// MARK: - Generable Output Types + +@Generable +struct CleanedDictation { + @Guide(description: "Cleaned text with filler words removed, grammar fixed, and proper punctuation added. Preserve the speaker's intent exactly. Do not add or change meaning.") + var text: String + + @Guide(description: "Detected language code, e.g. en, es, fr") + var language: String + + @Guide(.anyOf(["casual", "professional", "technical", "creative"])) + var detectedTone: String +} + +@Generable +struct FormattedOutput { + @Guide(description: "Text formatted for the target context, ready to use") + var text: String + + @Guide(description: "Brief note on what formatting was applied") + var formattingNote: String +} + +// MARK: - Text Processing Service + +/// Processes raw transcripts through Apple's on-device Foundation Models (~3B LLM). +/// Uses a multi-pass pipeline: first cleans up the raw transcript, then applies +/// mode-specific formatting and tone. +@MainActor +final class TextProcessingService { + private var session: LanguageModelSession? + + /// Prewarms the language model for faster first response. 
+ /// Call this on app launch. + func prewarm() async { + do { + try await LanguageModelSession.prewarm() + } catch { + // Prewarm is best-effort; failure is non-fatal + } + } + + // MARK: - Multi-Pass Processing + + /// Processes a raw transcript through the full pipeline: + /// Pass 1: Remove fillers, fix grammar, add punctuation + /// Pass 2: Apply mode-specific formatting and tone + func processTranscript( + _ rawTranscript: String, + mode: ModeContext, + vocabularyHints: [String: String] = [:] + ) async throws -> String { + let chunks = TextChunker.chunk(rawTranscript, maxTokenEstimate: 2048) + var processedChunks: [String] = [] + + for chunk in chunks { + let cleaned = try await pass1CleanUp(chunk, vocabularyHints: vocabularyHints) + let formatted = try await pass2Format(cleaned, mode: mode) + processedChunks.append(formatted) + } + + return processedChunks.joined(separator: "\n\n") + } + + /// Streams the processing result for the UI to display incrementally. + func streamProcessTranscript( + _ rawTranscript: String, + mode: ModeContext, + vocabularyHints: [String: String] = [:] + ) -> AsyncThrowingStream { + AsyncThrowingStream { continuation in + Task { @MainActor in + do { + let chunks = TextChunker.chunk(rawTranscript, maxTokenEstimate: 2048) + + for chunk in chunks { + let cleaned = try await self.pass1CleanUp(chunk, vocabularyHints: vocabularyHints) + continuation.yield(cleaned) + + let formatted = try await self.pass2Format(cleaned, mode: mode) + continuation.yield(formatted) + } + continuation.finish() + } catch { + continuation.finish(throwing: error) + } + } + } + } + + // MARK: - Pass 1: Cleanup + + private func pass1CleanUp( + _ rawText: String, + vocabularyHints: [String: String] + ) async throws -> String { + let session = getOrCreateSession() + + var vocabularySection = "" + if !vocabularyHints.isEmpty { + let corrections = vocabularyHints.map { "\"\($0.key)\" → \"\($0.value)\"" } + .joined(separator: "\n") + vocabularySection = """ + + Known 
vocabulary corrections (apply these when you see the spoken form): + \(corrections) + """ + } + + let prompt = """ + Clean up this voice dictation transcript. Your tasks: + 1. Remove filler words (um, uh, like, you know, so, basically, actually, literally) + 2. Fix grammar and sentence structure + 3. Add proper punctuation and capitalization + 4. Merge broken sentences and remove false starts + 5. Preserve the speaker's intent and meaning exactly — do not add or change meaning + + \(vocabularySection) + + Transcript: \(rawText) + """ + + let response = try await session.respond(to: prompt, generating: CleanedDictation.self) + return response.text + } + + // MARK: - Pass 2: Mode Formatting + + private func pass2Format( + _ cleanedText: String, + mode: ModeContext + ) async throws -> String { + let session = getOrCreateSession() + + let formatInstruction: String + switch mode.formattingRules.outputFormat { + case .prose: + formatInstruction = "Output as clean prose paragraphs." + case .bullets: + formatInstruction = "Output as bullet points. Each key idea gets its own bullet." + case .numbered: + formatInstruction = "Output as a numbered list." + case .code: + formatInstruction = "If the text describes code, output the code. Otherwise output as prose." + } + + let prompt = """ + You are Talk Easy, a voice dictation assistant. Format this text for the \ + following context: + + Mode: \(mode.name) + Tone: \(mode.tone) + Instructions: \(mode.systemPrompt) + Format: \(formatInstruction) + + Text to format: + \(cleanedText) + """ + + let response = try await session.respond(to: prompt, generating: FormattedOutput.self) + return response.text + } + + // MARK: - Session Management + + private func getOrCreateSession() -> LanguageModelSession { + if let existing = session { + return existing + } + let newSession = LanguageModelSession() + session = newSession + return newSession + } + + /// Resets the session, clearing any conversation history. 
+    func resetSession() {
+        session = nil
+    }
+}
diff --git a/TalkEasy/TalkEasyApp.swift b/TalkEasy/TalkEasyApp.swift
new file mode 100644
index 0000000..fb93f2a
--- /dev/null
+++ b/TalkEasy/TalkEasyApp.swift
@@ -0,0 +1,49 @@
+import SwiftUI
+import SwiftData
+
+@main
+struct TalkEasyApp: App {
+    let modelContainer: ModelContainer
+
+    init() {
+        do {
+            let schema = Schema([
+                DictationMode.self,
+                VocabularyEntry.self,
+                DictationRecord.self,
+            ])
+            let configuration = ModelConfiguration(
+                schema: schema,
+                isStoredInMemoryOnly: false
+            )
+            modelContainer = try ModelContainer(
+                for: schema,
+                configurations: [configuration]
+            )
+        } catch {
+            // Persistence is required for the app to function at all.
+            fatalError("Failed to create ModelContainer: \(error)")
+        }
+
+        // Seed default modes on first launch
+        seedDefaultModesIfNeeded()
+    }
+
+    var body: some Scene {
+        WindowGroup {
+            ContentView()
+        }
+        .modelContainer(modelContainer)
+    }
+
+    /// Inserts the built-in modes once: only when no default-flagged
+    /// DictationMode rows exist yet.
+    private func seedDefaultModesIfNeeded() {
+        let context = modelContainer.mainContext
+        let descriptor = FetchDescriptor<DictationMode>(
+            predicate: #Predicate { $0.isDefault }
+        )
+        let existingCount = (try? context.fetchCount(descriptor)) ?? 0
+
+        if existingCount == 0 {
+            DefaultModes.seedIfNeeded(in: context)
+        }
+    }
+}
diff --git a/TalkEasy/Utilities/DefaultModes.swift b/TalkEasy/Utilities/DefaultModes.swift
new file mode 100644
index 0000000..04758ce
--- /dev/null
+++ b/TalkEasy/Utilities/DefaultModes.swift
@@ -0,0 +1,100 @@
+import Foundation
+import SwiftData
+
+/// Provides the built-in dictation modes that ship with the app.
+enum DefaultModes {
+    static let all: [(name: String, icon: String, systemPrompt: String, tone: String, rules: FormattingRules)] = [
+        (
+            name: "General",
+            icon: "text.alignleft",
+            systemPrompt: "Clean up this dictation into clear, well-written prose. Fix grammar, remove filler words, and add proper punctuation. Keep the speaker's natural voice and intent.",
+            tone: "neutral",
+            rules: FormattingRules()
+        ),
+        (
+            name: "Text Message",
+            icon: "message.fill",
+            systemPrompt: "Format this as a casual text message. Keep it short and conversational. Lowercase is fine. Light punctuation. Emojis are acceptable if they fit naturally.",
+            tone: "casual",
+            rules: FormattingRules(
+                removeFillerWords: true,
+                addPunctuation: true,
+                outputFormat: .prose
+            )
+        ),
+        (
+            name: "Email",
+            icon: "envelope.fill",
+            systemPrompt: "Format this as a professional email. Include an appropriate greeting and sign-off if not already present. Use proper business writing conventions. Be concise but thorough.",
+            tone: "professional",
+            rules: FormattingRules(
+                removeFillerWords: true,
+                addPunctuation: true,
+                outputFormat: .prose
+            )
+        ),
+        (
+            name: "Notes",
+            icon: "note.text",
+            systemPrompt: "Extract the key points from this dictation and format as concise bullet points. Focus on facts, action items, and important details. Remove all filler and redundancy.",
+            tone: "neutral",
+            rules: FormattingRules(
+                removeFillerWords: true,
+                addPunctuation: true,
+                outputFormat: .bullets
+            )
+        ),
+        (
+            name: "Social Media",
+            icon: "globe",
+            systemPrompt: "Format this as an engaging social media post. Keep it concise and punchy. Add relevant hashtags if appropriate. Make it attention-grabbing while preserving the core message.",
+            tone: "casual",
+            rules: FormattingRules(
+                removeFillerWords: true,
+                addPunctuation: true,
+                outputFormat: .prose
+            )
+        ),
+        (
+            name: "Technical",
+            icon: "chevron.left.forwardslash.chevron.right",
+            systemPrompt: "Format this as technical writing. Preserve all jargon, code terms, acronyms, and technical vocabulary exactly. Use precise language. Format code references appropriately.",
+            tone: "technical",
+            rules: FormattingRules(
+                removeFillerWords: true,
+                addPunctuation: true,
+                outputFormat: .prose
+            )
+        ),
+        (
+            name: "Creative",
+            icon: "paintbrush.fill",
+            systemPrompt: "Preserve the speaker's creative voice with minimal cleanup. Keep unique phrasing, rhythm, and style. Only fix obvious errors. Allow poetic license and unconventional structure.",
+            tone: "creative",
+            rules: FormattingRules(
+                removeFillerWords: false,
+                addPunctuation: true,
+                outputFormat: .prose
+            )
+        ),
+    ]
+
+    /// Seeds the database with default modes if none exist.
+    /// The caller (TalkEasyApp) performs the "if none exist" check; this
+    /// method inserts unconditionally.
+    @MainActor
+    static func seedIfNeeded(in context: ModelContext) {
+        for (index, mode) in all.enumerated() {
+            let dictationMode = DictationMode(
+                name: mode.name,
+                icon: mode.icon,
+                systemPrompt: mode.systemPrompt,
+                tone: mode.tone,
+                formattingRules: mode.rules,
+                isDefault: true,
+                sortOrder: index
+            )
+            context.insert(dictationMode)
+        }
+
+        // Best-effort save; seeding re-runs harmlessly on next launch if it fails.
+        try? context.save()
+    }
+}
diff --git a/TalkEasy/Utilities/HapticManager.swift b/TalkEasy/Utilities/HapticManager.swift
new file mode 100644
index 0000000..24c6615
--- /dev/null
+++ b/TalkEasy/Utilities/HapticManager.swift
@@ -0,0 +1,44 @@
+import UIKit
+
+/// Provides haptic feedback for key user interactions.
+@MainActor
+final class HapticManager {
+    private let impactGenerator = UIImpactFeedbackGenerator(style: .medium)
+    private let notificationGenerator = UINotificationFeedbackGenerator()
+    private let lightImpact = UIImpactFeedbackGenerator(style: .light)
+
+    init() {
+        impactGenerator.prepare()
+        notificationGenerator.prepare()
+    }
+
+    /// Fires when the user starts recording.
+    func recordingStarted() {
+        impactGenerator.impactOccurred(intensity: 0.8)
+    }
+
+    /// Fires when the user stops recording.
+    func recordingStopped() {
+        impactGenerator.impactOccurred(intensity: 0.5)
+    }
+
+    /// Fires when LLM processing completes successfully.
+    func processingCompleted() {
+        notificationGenerator.notificationOccurred(.success)
+    }
+
+    /// Fires when text is copied to the clipboard.
+    func copiedToClipboard() {
+        notificationGenerator.notificationOccurred(.success)
+    }
+
+    /// Fires when text is shared via the share sheet.
+    func shared() {
+        lightImpact.impactOccurred()
+    }
+
+    /// Fires when an error occurs.
+    func errorOccurred() {
+        notificationGenerator.notificationOccurred(.error)
+    }
+}
diff --git a/TalkEasy/Utilities/PermissionsManager.swift b/TalkEasy/Utilities/PermissionsManager.swift
new file mode 100644
index 0000000..78ffdf3
--- /dev/null
+++ b/TalkEasy/Utilities/PermissionsManager.swift
@@ -0,0 +1,63 @@
+import AVFoundation
+import Speech
+
+/// Manages microphone and speech recognition permissions.
+@MainActor
+final class PermissionsManager {
+    enum PermissionStatus: Sendable {
+        case granted
+        case denied
+        case undetermined
+    }
+
+    // MARK: - Microphone
+
+    var microphoneStatus: PermissionStatus {
+        switch AVAudioApplication.shared.recordPermission {
+        case .granted: .granted
+        case .denied: .denied
+        case .undetermined: .undetermined
+        @unknown default: .undetermined
+        }
+    }
+
+    /// Requests microphone access, short-circuiting if already granted.
+    func requestMicrophoneAccess() async -> Bool {
+        if microphoneStatus == .granted { return true }
+        return await AVAudioApplication.requestRecordPermission()
+    }
+
+    // MARK: - Speech Recognition
+
+    var speechRecognitionStatus: PermissionStatus {
+        switch SFSpeechRecognizer.authorizationStatus() {
+        case .authorized: .granted
+        case .denied, .restricted: .denied
+        case .notDetermined: .undetermined
+        @unknown default: .undetermined
+        }
+    }
+
+    /// Requests speech recognition authorization via the callback API,
+    /// bridged to async with a continuation (resumed exactly once).
+    func requestSpeechRecognitionAccess() async -> Bool {
+        if speechRecognitionStatus == .granted { return true }
+
+        return await withCheckedContinuation { continuation in
+            SFSpeechRecognizer.requestAuthorization { status in
+                continuation.resume(returning: status == .authorized)
+            }
+        }
+    }
+
+    // MARK: - Combined
+
+    /// Returns true only if both microphone and speech recognition are authorized.
+    var allPermissionsGranted: Bool {
+        microphoneStatus == .granted && speechRecognitionStatus == .granted
+    }
+
+    /// Requests all required permissions. Returns true if all are granted.
+    func requestAllPermissions() async -> Bool {
+        let mic = await requestMicrophoneAccess()
+        let speech = await requestSpeechRecognitionAccess()
+        return mic && speech
+    }
+}
diff --git a/TalkEasy/Utilities/TextChunker.swift b/TalkEasy/Utilities/TextChunker.swift
new file mode 100644
index 0000000..a7dd339
--- /dev/null
+++ b/TalkEasy/Utilities/TextChunker.swift
@@ -0,0 +1,76 @@
+import Foundation
+
+/// Splits long text into chunks that fit within the Foundation Models
+/// token limit (~4096 combined tokens). Uses sentence boundaries for
+/// clean splits and overlapping context for coherence.
+enum TextChunker {
+    /// Approximate characters per token for English text.
+    private static let charsPerToken: Int = 4
+
+    /// Number of sentences to overlap between chunks for context continuity.
+    private static let overlapSentences: Int = 1
+
+    /// Splits text into chunks that each fit within the estimated token budget.
+    /// - Parameters:
+    ///   - text: The full text to chunk.
+    ///   - maxTokenEstimate: Maximum tokens per chunk (leave room for prompt overhead).
+    /// - Returns: An array of text chunks.
+    static func chunk(_ text: String, maxTokenEstimate: Int = 2048) -> [String] {
+        let maxChars = maxTokenEstimate * charsPerToken
+
+        // If the text fits in one chunk, return it directly
+        if text.count <= maxChars {
+            return [text]
+        }
+
+        let sentences = splitIntoSentences(text)
+        var chunks: [String] = []
+        var currentChunk: [String] = []
+        var currentLength = 0
+
+        for sentence in sentences {
+            let sentenceLength = sentence.count + 1 // +1 for space
+
+            if currentLength + sentenceLength > maxChars && !currentChunk.isEmpty {
+                // Save current chunk
+                chunks.append(currentChunk.joined(separator: " "))
+
+                // Start new chunk with overlap
+                let overlapStart = max(0, currentChunk.count - overlapSentences)
+                currentChunk = Array(currentChunk[overlapStart...])
+                currentLength = currentChunk.reduce(0) { $0 + $1.count + 1 }
+            }
+
+            currentChunk.append(sentence)
+            currentLength += sentenceLength
+        }
+
+        // Don't forget the last chunk
+        if !currentChunk.isEmpty {
+            chunks.append(currentChunk.joined(separator: " "))
+        }
+
+        return chunks
+    }
+
+    /// Splits text into sentences using Unicode-aware sentence boundary detection.
+    private static func splitIntoSentences(_ text: String) -> [String] {
+        var sentences: [String] = []
+        text.enumerateSubstrings(
+            in: text.startIndex...,
+            options: .bySentences
+        ) { substring, _, _, _ in
+            if let sentence = substring?.trimmingCharacters(in: .whitespacesAndNewlines),
+               !sentence.isEmpty {
+                sentences.append(sentence)
+            }
+        }
+
+        // Fallback: if sentence detection fails, split by newlines then by character limit
+        if sentences.isEmpty && !text.isEmpty {
+            sentences = [text]
+        }
+
+        return sentences
+    }
+}
diff --git a/TalkEasy/ViewModels/RecordingViewModel.swift b/TalkEasy/ViewModels/RecordingViewModel.swift
new file mode 100644
index 0000000..7b443ec
--- /dev/null
+++ b/TalkEasy/ViewModels/RecordingViewModel.swift
@@ -0,0 +1,314 @@
+import Foundation
+import Observation
+import SwiftData
+
+/// Orchestrates the full recording flow: audio capture, speech recognition,
+/// LLM text processing, and output actions.
+@Observable
+@MainActor
+final class RecordingViewModel {
+    // MARK: - Published State
+
+    var phase: RecordingPhase = .idle
+    var liveTranscript: String = ""
+    var cleanedText: String = ""
+    var selectedMode: DictationMode?
+    var errorMessage: String?
+    var recordingDuration: TimeInterval = 0
+
+    enum RecordingPhase: Equatable {
+        case idle
+        case recording
+        case processing
+        case completed
+        case error
+    }
+
+    // MARK: - Services
+
+    private let audioEngine = AudioEngineService()
+    private let speechService = SpeechRecognitionService()
+    private let textProcessor = TextProcessingService()
+    private let haptics = HapticManager()
+    let permissions = PermissionsManager()
+
+    // MARK: - Internal State
+
+    // Long-lived child tasks; all are cancelled in stopRecording()/reset().
+    private var recordingTask: Task<Void, Never>?
+    private var silenceTask: Task<Void, Never>?
+    private var recordingStartTime: Date?
+    private var silenceTimer: Task<Void, Never>?
+    private var durationTimer: Task<Void, Never>?
+
+    /// Duration of silence (in seconds) before auto-stopping.
+    private let silenceTimeout: TimeInterval = 2.0
+
+    // MARK: - Permissions
+
+    var allPermissionsGranted: Bool {
+        permissions.allPermissionsGranted
+    }
+
+    func requestPermissions() async -> Bool {
+        await permissions.requestAllPermissions()
+    }
+
+    // MARK: - Prewarm
+
+    func prewarmLLM() async {
+        await textProcessor.prewarm()
+    }
+
+    // MARK: - Recording Control
+
+    /// Starts audio capture and live transcription. No-op while already
+    /// recording or processing.
+    func startRecording() async {
+        guard phase == .idle || phase == .completed || phase == .error else { return }
+
+        // Request permissions if needed
+        guard await permissions.requestAllPermissions() else {
+            errorMessage = "Talk Easy needs microphone and speech recognition access. Please enable them in Settings."
+            phase = .error
+            haptics.errorOccurred()
+            return
+        }
+
+        // Reset state
+        liveTranscript = ""
+        cleanedText = ""
+        errorMessage = nil
+        recordingDuration = 0
+        recordingStartTime = Date()
+        phase = .recording
+        haptics.recordingStarted()
+
+        do {
+            // Set up transcript and silence streams before starting
+            let transcriptStream = speechService.transcriptionResults()
+            let silenceStream = speechService.silenceEvents()
+
+            // Start audio capture
+            let audioStream = try audioEngine.start()
+
+            // Start speech recognition using the audio engine
+            guard let engine = audioEngine.engine else {
+                throw RecordingError.audioEngineUnavailable
+            }
+            try speechService.start(audioEngine: engine)
+
+            // Process transcription results
+            recordingTask = Task { [weak self] in
+                for await update in transcriptStream {
+                    guard let self, !Task.isCancelled else { break }
+                    self.liveTranscript = update.text
+                    self.resetSilenceTimer()
+                }
+            }
+
+            // Monitor silence for auto-stop
+            silenceTask = Task { [weak self] in
+                for await _ in silenceStream {
+                    guard let self, !Task.isCancelled else { break }
+                    self.startSilenceTimer()
+                }
+            }
+
+            // Duration update timer
+            durationTimer = Task { [weak self] in
+                while !Task.isCancelled {
+                    try? await Task.sleep(for: .milliseconds(100))
+                    guard let self, !Task.isCancelled else { break }
+                    self.updateDuration()
+                }
+            }
+
+            // Consume audio stream to keep the engine running.
+            // NOTE(review): this task is unstructured and never stored, so it
+            // cannot be cancelled directly; it only ends when the stream
+            // finishes — confirm AudioEngineService finishes its stream in
+            // stop().
+            Task {
+                for await _ in audioStream {
+                    if Task.isCancelled { break }
+                }
+            }
+        } catch {
+            errorMessage = "Failed to start recording: \(error.localizedDescription)"
+            phase = .error
+            haptics.errorOccurred()
+        }
+    }
+
+    /// Stops capture, tears down all child tasks, and kicks off LLM processing.
+    func stopRecording() async {
+        guard phase == .recording else { return }
+
+        haptics.recordingStopped()
+        updateDuration()
+
+        // Cancel all tasks
+        recordingTask?.cancel()
+        silenceTask?.cancel()
+        silenceTimer?.cancel()
+        durationTimer?.cancel()
+        recordingTask = nil
+        silenceTask = nil
+        silenceTimer = nil
+        durationTimer = nil
+
+        // Stop services
+        speechService.stop()
+        audioEngine.stop()
+
+        // Process the transcript through the LLM
+        await processTranscript()
+    }
+
+    func toggleRecording() async {
+        if phase == .recording {
+            await stopRecording()
+        } else {
+            await startRecording()
+        }
+    }
+
+    // MARK: - Text Processing
+
+    /// Runs the captured transcript through TextProcessingService using the
+    /// selected mode (or the General fallback). Falls back to the raw
+    /// transcript if the LLM is unavailable.
+    private func processTranscript() async {
+        let trimmed = liveTranscript.trimmingCharacters(in: .whitespacesAndNewlines)
+        guard !trimmed.isEmpty else {
+            phase = .idle
+            return
+        }
+
+        phase = .processing
+
+        do {
+            let modeContext: ModeContext
+            if let mode = selectedMode {
+                modeContext = ModeContext(
+                    name: mode.name,
+                    systemPrompt: mode.systemPrompt,
+                    tone: mode.tone,
+                    formattingRules: mode.formattingRules,
+                    customVocabulary: mode.customVocabulary
+                )
+            } else {
+                modeContext = .general
+            }
+
+            let result = try await textProcessor.processTranscript(
+                liveTranscript,
+                mode: modeContext,
+                vocabularyHints: modeContext.customVocabulary
+            )
+
+            cleanedText = result
+            phase = .completed
+            haptics.processingCompleted()
+        } catch {
+            // On LLM failure, fall back to raw transcript
+            cleanedText = liveTranscript
+            errorMessage = "Text processing unavailable. Showing raw transcript."
+            phase = .completed
+            haptics.errorOccurred()
+        }
+    }
+
+    /// Reprocesses the current transcript with a different mode.
+    func reprocessWithMode(_ mode: DictationMode) async {
+        selectedMode = mode
+        guard !liveTranscript.isEmpty else { return }
+        await processTranscript()
+    }
+
+    // MARK: - Output Actions
+
+    func copyToClipboard() {
+        let textToCopy = cleanedText.isEmpty ? liveTranscript : cleanedText
+        guard !textToCopy.isEmpty else { return }
+        ClipboardService.copy(textToCopy)
+        haptics.copiedToClipboard()
+    }
+
+    /// Returns the view model to a pristine idle state, cancelling any work.
+    func reset() {
+        recordingTask?.cancel()
+        silenceTask?.cancel()
+        silenceTimer?.cancel()
+        durationTimer?.cancel()
+        recordingTask = nil
+        silenceTask = nil
+        silenceTimer = nil
+        durationTimer = nil
+
+        speechService.stop()
+        audioEngine.stop()
+
+        phase = .idle
+        liveTranscript = ""
+        cleanedText = ""
+        errorMessage = nil
+        recordingDuration = 0
+        recordingStartTime = nil
+    }
+
+    // MARK: - History
+
+    func saveToHistory(context: ModelContext) {
+        guard !cleanedText.isEmpty else { return }
+
+        let record = DictationRecord(
+            rawTranscript: liveTranscript,
+            cleanedText: cleanedText,
+            modeName: selectedMode?.name ?? "General",
+            language: "en",
+            duration: recordingDuration
+        )
+
+        context.insert(record)
+        try? context.save()
+    }
+
+    // MARK: - Formatted Duration
+
+    /// "m:ss.t" when over a minute, otherwise "s.t".
+    var formattedDuration: String {
+        let minutes = Int(recordingDuration) / 60
+        let seconds = Int(recordingDuration) % 60
+        let tenths = Int((recordingDuration.truncatingRemainder(dividingBy: 1)) * 10)
+        if minutes > 0 {
+            return String(format: "%d:%02d.%d", minutes, seconds, tenths)
+        }
+        return String(format: "%d.%d", seconds, tenths)
+    }
+
+    // MARK: - Private Helpers
+
+    private func updateDuration() {
+        guard let start = recordingStartTime else { return }
+        recordingDuration = Date().timeIntervalSince(start)
+    }
+
+    /// (Re)starts the auto-stop countdown; stops recording if it elapses.
+    private func startSilenceTimer() {
+        silenceTimer?.cancel()
+        silenceTimer = Task { [weak self] in
+            try? await Task.sleep(for: .seconds(self?.silenceTimeout ?? 2.0))
+            guard !Task.isCancelled else { return }
+            await self?.stopRecording()
+        }
+    }
+
+    private func resetSilenceTimer() {
+        silenceTimer?.cancel()
+        silenceTimer = nil
+    }
+}
+
+// MARK: - Errors
+
+enum RecordingError: LocalizedError {
+    case audioEngineUnavailable
+    case permissionDenied
+
+    var errorDescription: String? {
+        switch self {
+        case .audioEngineUnavailable:
+            "Audio engine is not available."
+        case .permissionDenied:
+            "Required permissions have not been granted."
+        }
+    }
+}
diff --git a/TalkEasy/Views/ActionBar.swift b/TalkEasy/Views/ActionBar.swift
new file mode 100644
index 0000000..0f04a96
--- /dev/null
+++ b/TalkEasy/Views/ActionBar.swift
@@ -0,0 +1,110 @@
+import SwiftUI
+
+/// Action buttons shown after dictation is processed: Copy, Share, Re-record.
+struct ActionBar: View {
+    let text: String
+    let onCopy: () -> Void
+    let onReRecord: () -> Void
+
+    @State private var showCopiedConfirmation = false
+    @State private var showShareSheet = false
+
+    var body: some View {
+        HStack(spacing: 16) {
+            // Copy button
+            ActionButton(
+                icon: showCopiedConfirmation ? "checkmark" : "doc.on.doc",
+                label: showCopiedConfirmation ? "Copied" : "Copy",
+                style: .primary
+            ) {
+                onCopy()
+                withAnimation {
+                    showCopiedConfirmation = true
+                }
+                // Revert the "Copied" confirmation after two seconds.
+                Task {
+                    try? await Task.sleep(for: .seconds(2))
+                    withAnimation {
+                        showCopiedConfirmation = false
+                    }
+                }
+            }
+
+            // Share button
+            ActionButton(
+                icon: "square.and.arrow.up",
+                label: "Share",
+                style: .secondary
+            ) {
+                showShareSheet = true
+            }
+
+            // Re-record button
+            ActionButton(
+                icon: "arrow.counterclockwise",
+                label: "New",
+                style: .secondary
+            ) {
+                onReRecord()
+            }
+        }
+        .padding(.horizontal)
+        .sheet(isPresented: $showShareSheet) {
+            ShareSheet(items: [text])
+                .presentationDetents([.medium, .large])
+        }
+    }
+}
+
+/// Individual action button with icon and label.
+struct ActionButton: View {
+    let icon: String
+    let label: String
+    let style: ActionButtonStyle
+    let action: () -> Void
+
+    enum ActionButtonStyle {
+        case primary, secondary
+    }
+
+    var body: some View {
+        Button(action: action) {
+            VStack(spacing: 6) {
+                Image(systemName: icon)
+                    .font(.system(size: 20, weight: .medium))
+                    .contentTransition(.symbolEffect(.replace))
+
+                Text(label)
+                    .font(.caption2)
+                    .fontWeight(.medium)
+            }
+            .frame(maxWidth: .infinity)
+            .padding(.vertical, 12)
+            .background(
+                RoundedRectangle(cornerRadius: 12)
+                    .fill(style == .primary ? Color.accentColor : Color(.systemGray6))
+            )
+            .foregroundStyle(style == .primary ? .white : .primary)
+        }
+        .buttonStyle(.plain)
+        .accessibilityLabel(label)
+    }
+}
+
+/// UIKit share sheet wrapper for SwiftUI.
+struct ShareSheet: UIViewControllerRepresentable {
+    let items: [Any]
+
+    func makeUIViewController(context: Context) -> UIActivityViewController {
+        UIActivityViewController(activityItems: items, applicationActivities: nil)
+    }
+
+    func updateUIViewController(_ uiViewController: UIActivityViewController, context: Context) {}
+}
+
+#Preview {
+    ActionBar(
+        text: "Sample processed text",
+        onCopy: {},
+        onReRecord: {}
+    )
+}
diff --git a/TalkEasy/Views/HistoryView.swift b/TalkEasy/Views/HistoryView.swift
new file mode 100644
index 0000000..56e8cd2
--- /dev/null
+++ b/TalkEasy/Views/HistoryView.swift
@@ -0,0 +1,220 @@
+import SwiftUI
+import SwiftData
+
+/// Displays past dictation records with search and detail view.
+struct HistoryView: View {
+    @Environment(\.modelContext) private var modelContext
+    @Query(sort: \DictationRecord.timestamp, order: .reverse) private var records: [DictationRecord]
+    @State private var searchText = ""
+    @State private var selectedRecord: DictationRecord?
+
+    /// Case-insensitive filter across cleaned text, raw transcript, and mode name.
+    private var filteredRecords: [DictationRecord] {
+        if searchText.isEmpty {
+            return records
+        }
+        let query = searchText.lowercased()
+        return records.filter {
+            $0.cleanedText.lowercased().contains(query)
+                || $0.rawTranscript.lowercased().contains(query)
+                || $0.modeName.lowercased().contains(query)
+        }
+    }
+
+    var body: some View {
+        NavigationStack {
+            Group {
+                if records.isEmpty {
+                    emptyState
+                } else {
+                    List {
+                        ForEach(filteredRecords) { record in
+                            HistoryRow(record: record)
+                                .onTapGesture {
+                                    selectedRecord = record
+                                }
+                        }
+                        .onDelete(perform: deleteRecords)
+                    }
+                    .searchable(text: $searchText, prompt: "Search dictations")
+                }
+            }
+            .navigationTitle("History")
+            .sheet(item: $selectedRecord) { record in
+                HistoryDetailView(record: record)
+            }
+        }
+    }
+
+    private var emptyState: some View {
+        ContentUnavailableView(
+            "No Dictations Yet",
+            systemImage: "clock.arrow.circlepath",
+            description: Text("Your processed dictations will appear here.")
+        )
+    }
+
+    // Offsets index into filteredRecords because onDelete is attached to
+    // the ForEach over filteredRecords.
+    private func deleteRecords(at offsets: IndexSet) {
+        let recordsToDelete = offsets.map { filteredRecords[$0] }
+        for record in recordsToDelete {
+            modelContext.delete(record)
+        }
+        try? modelContext.save()
+    }
+}
+
+/// Row for a single history entry.
+struct HistoryRow: View {
+    let record: DictationRecord
+
+    var body: some View {
+        VStack(alignment: .leading, spacing: 6) {
+            HStack {
+                Text(record.modeName)
+                    .font(.caption)
+                    .fontWeight(.medium)
+                    .foregroundStyle(Color.accentColor)
+
+                Spacer()
+
+                Text(record.timestamp, style: .relative)
+                    .font(.caption2)
+                    .foregroundStyle(.tertiary)
+            }
+
+            Text(record.cleanedText)
+                .font(.subheadline)
+                .lineLimit(3)
+                .foregroundStyle(.primary)
+
+            HStack(spacing: 12) {
+                Label(formattedDuration(record.duration), systemImage: "clock")
+                Label(record.language.uppercased(), systemImage: "globe")
+            }
+            .font(.caption2)
+            .foregroundStyle(.secondary)
+        }
+        .padding(.vertical, 4)
+        .contentShape(Rectangle())
+        .accessibilityElement(children: .combine)
+    }
+
+    private func formattedDuration(_ duration: TimeInterval) -> String {
+        let minutes = Int(duration) / 60
+        let seconds = Int(duration) % 60
+        if minutes > 0 {
+            return "\(minutes)m \(seconds)s"
+        }
+        return "\(seconds)s"
+    }
+}
+
+/// Detail view for a single dictation record.
+struct HistoryDetailView: View {
+    @Environment(\.dismiss) private var dismiss
+    let record: DictationRecord
+
+    @State private var showCopied = false
+
+    var body: some View {
+        NavigationStack {
+            ScrollView {
+                VStack(alignment: .leading, spacing: 20) {
+                    // Metadata
+                    HStack(spacing: 16) {
+                        MetadataChip(icon: "slider.horizontal.3", text: record.modeName)
+                        MetadataChip(icon: "globe", text: record.language.uppercased())
+                        MetadataChip(icon: "clock", text: formattedDuration(record.duration))
+                    }
+
+                    // Cleaned text
+                    VStack(alignment: .leading, spacing: 8) {
+                        Text("Cleaned Text")
+                            .font(.caption)
+                            .fontWeight(.medium)
+                            .foregroundStyle(.secondary)
+                            .textCase(.uppercase)
+
+                        Text(record.cleanedText)
+                            .font(.body)
+                            .lineSpacing(4)
+                            .textSelection(.enabled)
+                    }
+
+                    Divider()
+
+                    // Raw transcript
+                    VStack(alignment: .leading, spacing: 8) {
+                        Text("Original Transcript")
+                            .font(.caption)
+                            .fontWeight(.medium)
+                            .foregroundStyle(.secondary)
+                            .textCase(.uppercase)
+
+                        Text(record.rawTranscript)
+                            .font(.body)
+                            .foregroundStyle(.secondary)
+                            .lineSpacing(4)
+                            .textSelection(.enabled)
+                    }
+                }
+                .padding()
+            }
+            .navigationTitle(record.timestamp.formatted(date: .abbreviated, time: .shortened))
+            .navigationBarTitleDisplayMode(.inline)
+            .toolbar {
+                ToolbarItem(placement: .cancellationAction) {
+                    Button("Done") { dismiss() }
+                }
+
+                ToolbarItem(placement: .primaryAction) {
+                    Button {
+                        ClipboardService.copy(record.cleanedText)
+                        showCopied = true
+                        // Revert the checkmark after two seconds.
+                        Task {
+                            try? await Task.sleep(for: .seconds(2))
+                            showCopied = false
+                        }
+                    } label: {
+                        Image(systemName: showCopied ? "checkmark" : "doc.on.doc")
+                            .contentTransition(.symbolEffect(.replace))
+                    }
+                }
+            }
+        }
+    }
+
+    private func formattedDuration(_ duration: TimeInterval) -> String {
+        let minutes = Int(duration) / 60
+        let seconds = Int(duration) % 60
+        if minutes > 0 {
+            return "\(minutes)m \(seconds)s"
+        }
+        return "\(seconds)s"
+    }
+}
+
+/// Small metadata chip for the detail view.
+struct MetadataChip: View { + let icon: String + let text: String + + var body: some View { + HStack(spacing: 4) { + Image(systemName: icon) + .font(.caption2) + Text(text) + .font(.caption) + .fontWeight(.medium) + } + .foregroundStyle(.secondary) + .padding(.horizontal, 10) + .padding(.vertical, 6) + .background(Color(.systemGray6)) + .clipShape(Capsule()) + } +} + +#Preview { + HistoryView() + .modelContainer(for: DictationRecord.self, inMemory: true) +} diff --git a/TalkEasy/Views/ModeEditorView.swift b/TalkEasy/Views/ModeEditorView.swift new file mode 100644 index 0000000..0b52b35 --- /dev/null +++ b/TalkEasy/Views/ModeEditorView.swift @@ -0,0 +1,177 @@ +import SwiftUI +import SwiftData + +/// Form for creating or editing a dictation mode. +struct ModeEditorView: View { + @Environment(\.modelContext) private var modelContext + @Environment(\.dismiss) private var dismiss + + let mode: DictationMode? + + @State private var name: String = "" + @State private var icon: String = "text.alignleft" + @State private var systemPrompt: String = "" + @State private var tone: String = "neutral" + @State private var removeFillerWords: Bool = true + @State private var addPunctuation: Bool = true + @State private var outputFormat: FormattingRules.OutputFormat = .prose + + private var isEditing: Bool { mode != nil } + private var isDefault: Bool { mode?.isDefault ?? 
false } + + private let availableIcons = [ + "text.alignleft", "message.fill", "envelope.fill", "note.text", + "globe", "chevron.left.forwardslash.chevron.right", "paintbrush.fill", + "briefcase.fill", "book.fill", "graduationcap.fill", + "heart.fill", "star.fill", "bolt.fill", "sparkles", + ] + + private let toneOptions = ["neutral", "casual", "professional", "technical", "creative"] + + var body: some View { + NavigationStack { + Form { + // Basic info + Section("Mode Details") { + TextField("Name", text: $name) + .disabled(isDefault) + + Picker("Tone", selection: $tone) { + ForEach(toneOptions, id: \.self) { option in + Text(option.capitalized).tag(option) + } + } + } + + // Icon picker + Section("Icon") { + LazyVGrid(columns: Array(repeating: GridItem(.flexible()), count: 7), spacing: 12) { + ForEach(availableIcons, id: \.self) { iconName in + Button { + icon = iconName + } label: { + Image(systemName: iconName) + .font(.system(size: 18)) + .frame(width: 36, height: 36) + .background( + icon == iconName ? Color.accentColor.opacity(0.2) : Color.clear + ) + .clipShape(RoundedRectangle(cornerRadius: 8)) + .overlay( + RoundedRectangle(cornerRadius: 8) + .stroke(icon == iconName ? Color.accentColor : Color.clear, lineWidth: 2) + ) + } + .buttonStyle(.plain) + .accessibilityLabel(iconName) + .accessibilityAddTraits(icon == iconName ? .isSelected : []) + } + } + .padding(.vertical, 4) + } + + // Prompt + Section { + TextEditor(text: $systemPrompt) + .frame(minHeight: 100) + } header: { + Text("Custom Instructions") + } footer: { + Text("Tell Talk Easy how to format text in this mode. 
Be specific about tone, structure, and any special handling.") + } + + // Formatting rules + Section("Formatting") { + Toggle("Remove Filler Words", isOn: $removeFillerWords) + Toggle("Add Punctuation", isOn: $addPunctuation) + + Picker("Output Format", selection: $outputFormat) { + ForEach(FormattingRules.OutputFormat.allCases, id: \.self) { format in + Text(format.displayName).tag(format) + } + } + } + + // Delete button for custom modes + if isEditing && !isDefault { + Section { + Button("Delete Mode", role: .destructive) { + if let mode { + modelContext.delete(mode) + try? modelContext.save() + dismiss() + } + } + } + } + } + .navigationTitle(isEditing ? "Edit Mode" : "New Mode") + .navigationBarTitleDisplayMode(.inline) + .toolbar { + ToolbarItem(placement: .cancellationAction) { + Button("Cancel") { + dismiss() + } + } + + ToolbarItem(placement: .confirmationAction) { + Button("Save") { + saveMode() + } + .disabled(name.trimmingCharacters(in: .whitespaces).isEmpty) + } + } + .onAppear { + loadMode() + } + } + } + + private func loadMode() { + guard let mode else { return } + name = mode.name + icon = mode.icon + systemPrompt = mode.systemPrompt + tone = mode.tone + removeFillerWords = mode.formattingRules.removeFillerWords + addPunctuation = mode.formattingRules.addPunctuation + outputFormat = mode.formattingRules.outputFormat + } + + private func saveMode() { + let rules = FormattingRules( + removeFillerWords: removeFillerWords, + addPunctuation: addPunctuation, + outputFormat: outputFormat + ) + + if let mode { + // Update existing + mode.name = name + mode.icon = icon + mode.systemPrompt = systemPrompt + mode.tone = tone + mode.formattingRules = rules + } else { + // Create new + let newMode = DictationMode( + name: name.trimmingCharacters(in: .whitespaces), + icon: icon, + systemPrompt: systemPrompt, + tone: tone, + formattingRules: rules, + isDefault: false, + sortOrder: 100 + ) + modelContext.insert(newMode) + } + + try? 
modelContext.save() + dismiss() + } +} + +#Preview { + ModeEditorView(mode: nil) + .modelContainer(for: DictationMode.self, inMemory: true) +} diff --git a/TalkEasy/Views/ModeSelector.swift b/TalkEasy/Views/ModeSelector.swift new file mode 100644 index 0000000..f8fdfc1 --- /dev/null +++ b/TalkEasy/Views/ModeSelector.swift @@ -0,0 +1,72 @@ +import SwiftUI +import SwiftData + +/// Horizontal scrolling chip selector for dictation modes. +struct ModeSelector: View { + @Binding var selectedMode: DictationMode? + @Query(sort: \DictationMode.sortOrder) private var modes: [DictationMode] + + var body: some View { + ScrollView(.horizontal, showsIndicators: false) { + HStack(spacing: 10) { + ForEach(modes) { mode in + ModeChip( + name: mode.name, + icon: mode.icon, + isSelected: selectedMode?.persistentModelID == mode.persistentModelID + ) { + withAnimation(.easeInOut(duration: 0.2)) { + if selectedMode?.persistentModelID == mode.persistentModelID { + selectedMode = nil + } else { + selectedMode = mode + } + } + } + } + } + .padding(.horizontal) + } + .onAppear { + // Default to General mode if nothing is selected + if selectedMode == nil { + selectedMode = modes.first { $0.name == "General" } ?? modes.first + } + } + } +} + +/// Individual mode chip button. +struct ModeChip: View { + let name: String + let icon: String + let isSelected: Bool + let action: () -> Void + + var body: some View { + Button(action: action) { + HStack(spacing: 6) { + Image(systemName: icon) + .font(.system(size: 12, weight: .medium)) + + Text(name) + .font(.subheadline) + .fontWeight(.medium) + } + .padding(.horizontal, 14) + .padding(.vertical, 8) + .background( + Capsule() + .fill(isSelected ? Color.accentColor : Color(.systemGray6)) + ) + .foregroundStyle(isSelected ? .white : .primary) + } + .buttonStyle(.plain) + .accessibilityLabel("\(name) mode") + .accessibilityAddTraits(isSelected ? 
.isSelected : []) + } +} + +#Preview { + ModeSelector(selectedMode: .constant(nil)) +} diff --git a/TalkEasy/Views/ModesView.swift b/TalkEasy/Views/ModesView.swift new file mode 100644 index 0000000..f906a73 --- /dev/null +++ b/TalkEasy/Views/ModesView.swift @@ -0,0 +1,117 @@ +import SwiftUI +import SwiftData + +/// Lists all dictation modes with options to create, edit, and delete custom modes. +struct ModesView: View { + @Environment(\.modelContext) private var modelContext + @Query(sort: \DictationMode.sortOrder) private var modes: [DictationMode] + @State private var showingEditor = false + @State private var editingMode: DictationMode? + + var body: some View { + NavigationStack { + List { + // Default modes + Section("Built-in") { + ForEach(modes.filter(\.isDefault)) { mode in + ModeRow(mode: mode) + .onTapGesture { + editingMode = mode + } + } + } + + // Custom modes + let customModes = modes.filter { !$0.isDefault } + if !customModes.isEmpty { + Section("Custom") { + ForEach(customModes) { mode in + ModeRow(mode: mode) + .onTapGesture { + editingMode = mode + } + } + .onDelete { offsets in + deleteCustomModes(customModes: customModes, at: offsets) + } + } + } + } + .navigationTitle("Modes") + .toolbar { + ToolbarItem(placement: .primaryAction) { + Button { + showingEditor = true + } label: { + Image(systemName: "plus") + } + .accessibilityLabel("Create new mode") + } + } + .sheet(isPresented: $showingEditor) { + ModeEditorView(mode: nil) + } + .sheet(item: $editingMode) { mode in + ModeEditorView(mode: mode) + } + } + } + + private func deleteCustomModes(customModes: [DictationMode], at offsets: IndexSet) { + for index in offsets { + let mode = customModes[index] + modelContext.delete(mode) + } + try? modelContext.save() + } +} + +/// Row displaying a single mode with icon, name, and tone. 
+struct ModeRow: View { + let mode: DictationMode + + var body: some View { + HStack(spacing: 14) { + Image(systemName: mode.icon) + .font(.system(size: 18)) + .foregroundStyle(.accentColor) + .frame(width: 32, height: 32) + .background(Color.accentColor.opacity(0.1)) + .clipShape(RoundedRectangle(cornerRadius: 8)) + + VStack(alignment: .leading, spacing: 2) { + Text(mode.name) + .font(.body) + .fontWeight(.medium) + + Text(mode.tone.capitalized) + .font(.caption) + .foregroundStyle(.secondary) + } + + Spacer() + + if mode.isDefault { + Text("Built-in") + .font(.caption2) + .foregroundStyle(.secondary) + .padding(.horizontal, 8) + .padding(.vertical, 4) + .background(Color(.systemGray6)) + .clipShape(Capsule()) + } + + Image(systemName: "chevron.right") + .font(.caption) + .foregroundStyle(.tertiary) + } + .contentShape(Rectangle()) + .accessibilityElement(children: .combine) + .accessibilityLabel("\(mode.name) mode, \(mode.tone) tone") + } +} + +#Preview { + ModesView() + .modelContainer(for: DictationMode.self, inMemory: true) +} diff --git a/TalkEasy/Views/RecordButton.swift b/TalkEasy/Views/RecordButton.swift new file mode 100644 index 0000000..05a1a47 --- /dev/null +++ b/TalkEasy/Views/RecordButton.swift @@ -0,0 +1,78 @@ +import SwiftUI + +/// Large, animated microphone button for starting/stopping recording. +struct RecordButton: View { + let isRecording: Bool + let action: () -> Void + + @State private var pulseScale: CGFloat = 1.0 + @State private var ringOpacity: Double = 0.0 + + private let buttonSize: CGFloat = 80 + private let ringSize: CGFloat = 100 + + var body: some View { + ZStack { + // Pulsing ring behind the button during recording + if isRecording { + Circle() + .stroke(Color.red.opacity(ringOpacity), lineWidth: 3) + .frame(width: ringSize, height: ringSize) + .scaleEffect(pulseScale) + } + + // Main button + Button(action: action) { + ZStack { + Circle() + .fill(isRecording ? 
Color.red : Color.accentColor) + .frame(width: buttonSize, height: buttonSize) + .shadow( + color: (isRecording ? Color.red : Color.accentColor).opacity(0.4), + radius: isRecording ? 12 : 6, + y: 4 + ) + + Image(systemName: isRecording ? "stop.fill" : "mic.fill") + .font(.system(size: 32, weight: .semibold)) + .foregroundStyle(.white) + .contentTransition(.symbolEffect(.replace)) + } + } + .buttonStyle(.plain) + .accessibilityLabel(isRecording ? "Stop recording" : "Start recording") + .accessibilityHint(isRecording ? "Double-tap to stop recording and process your dictation" : "Double-tap to start voice dictation") + } + .onChange(of: isRecording) { _, recording in + if recording { + startPulseAnimation() + } else { + stopPulseAnimation() + } + } + } + + private func startPulseAnimation() { + withAnimation( + .easeInOut(duration: 1.0) + .repeatForever(autoreverses: true) + ) { + pulseScale = 1.3 + ringOpacity = 0.6 + } + } + + private func stopPulseAnimation() { + withAnimation(.easeOut(duration: 0.3)) { + pulseScale = 1.0 + ringOpacity = 0.0 + } + } +} + +#Preview { + VStack(spacing: 40) { + RecordButton(isRecording: false) {} + RecordButton(isRecording: true) {} + } +} diff --git a/TalkEasy/Views/RecordingView.swift b/TalkEasy/Views/RecordingView.swift new file mode 100644 index 0000000..f764402 --- /dev/null +++ b/TalkEasy/Views/RecordingView.swift @@ -0,0 +1,95 @@ +import SwiftUI +import SwiftData + +/// Main recording screen with transcript display, mode selector, and record button. 
+struct RecordingView: View { + @Environment(\.modelContext) private var modelContext + @State private var viewModel = RecordingViewModel() + + var body: some View { + NavigationStack { + VStack(spacing: 0) { + // Transcript area (takes up available space) + TranscriptView( + rawTranscript: viewModel.liveTranscript, + cleanedText: viewModel.cleanedText, + phase: viewModel.phase + ) + .frame(maxHeight: .infinity) + + Divider() + + // Bottom controls + VStack(spacing: 16) { + // Error message + if let error = viewModel.errorMessage { + Text(error) + .font(.caption) + .foregroundStyle(.red) + .multilineTextAlignment(.center) + .padding(.horizontal) + .transition(.opacity) + } + + // Mode selector + ModeSelector(selectedMode: $viewModel.selectedMode) + + // Duration display during recording + if viewModel.phase == .recording { + Text(viewModel.formattedDuration) + .font(.system(.body, design: .monospaced)) + .foregroundStyle(.secondary) + .transition(.opacity) + } + + // Processing indicator + if viewModel.phase == .processing { + HStack(spacing: 8) { + ProgressView() + .controlSize(.small) + Text("Processing with Apple Intelligence...") + .font(.subheadline) + .foregroundStyle(.secondary) + } + .transition(.opacity) + } + + // Record button + RecordButton(isRecording: viewModel.phase == .recording) { + Task { + await viewModel.toggleRecording() + } + } + .padding(.vertical, 8) + + // Action bar (shown after processing completes) + if viewModel.phase == .completed && !viewModel.cleanedText.isEmpty { + ActionBar( + text: viewModel.cleanedText, + onCopy: { + viewModel.copyToClipboard() + viewModel.saveToHistory(context: modelContext) + }, + onReRecord: { + viewModel.reset() + } + ) + .transition(.move(edge: .bottom).combined(with: .opacity)) + } + } + .padding(.bottom) + .animation(.easeInOut(duration: 0.3), value: viewModel.phase) + } + .navigationTitle("Talk Easy") + .navigationBarTitleDisplayMode(.inline) + .task { + await viewModel.prewarmLLM() + } + } + } +} + 
+#Preview { + RecordingView() + .modelContainer(for: [DictationMode.self, DictationRecord.self], inMemory: true) +} diff --git a/TalkEasy/Views/SettingsView.swift b/TalkEasy/Views/SettingsView.swift new file mode 100644 index 0000000..d055152 --- /dev/null +++ b/TalkEasy/Views/SettingsView.swift @@ -0,0 +1,217 @@ +import SwiftUI +import SwiftData + +/// App settings including permissions status, language preferences, and data management. +struct SettingsView: View { + @Environment(\.modelContext) private var modelContext + @State private var permissions = PermissionsManager() + @State private var showingDeleteConfirmation = false + @State private var showingResetModesConfirmation = false + + @Query private var records: [DictationRecord] + @Query private var vocabularyEntries: [VocabularyEntry] + + var body: some View { + NavigationStack { + List { + // Permissions + Section { + PermissionRow( + title: "Microphone", + icon: "mic.fill", + status: permissions.microphoneStatus + ) + + PermissionRow( + title: "Speech Recognition", + icon: "waveform", + status: permissions.speechRecognitionStatus + ) + } header: { + Text("Permissions") + } footer: { + Text("Talk Easy requires microphone and speech recognition access. All processing happens on your device.") + } + + // Privacy + Section { + HStack(spacing: 12) { + Image(systemName: "lock.shield.fill") + .foregroundStyle(.green) + .font(.title3) + + VStack(alignment: .leading, spacing: 2) { + Text("100% On-Device") + .font(.subheadline) + .fontWeight(.medium) + + Text("Your voice data never leaves this device. 
No servers, no API keys, no cloud processing.") + .font(.caption) + .foregroundStyle(.secondary) + } + } + .padding(.vertical, 4) + } header: { + Text("Privacy") + } + + // Data + Section { + HStack { + Text("Saved Dictations") + Spacer() + Text("\(records.count)") + .foregroundStyle(.secondary) + } + + HStack { + Text("Learned Words") + Spacer() + Text("\(vocabularyEntries.count)") + .foregroundStyle(.secondary) + } + + Button("Delete All History", role: .destructive) { + showingDeleteConfirmation = true + } + } header: { + Text("Data") + } + + // Modes + Section { + Button("Reset Built-in Modes") { + showingResetModesConfirmation = true + } + } header: { + Text("Modes") + } footer: { + Text("Restores all built-in modes to their default settings. Custom modes are not affected.") + } + + // About + Section { + HStack { + Text("Version") + Spacer() + Text("1.0.0") + .foregroundStyle(.secondary) + } + + HStack { + Text("Requires") + Spacer() + Text("iPhone 15 Pro or later") + .foregroundStyle(.secondary) + } + + HStack { + Text("iOS") + Spacer() + Text("26.0+") + .foregroundStyle(.secondary) + } + } header: { + Text("About") + } + } + .navigationTitle("Settings") + .confirmationDialog( + "Delete All History", + isPresented: $showingDeleteConfirmation, + titleVisibility: .visible + ) { + Button("Delete All", role: .destructive) { + deleteAllHistory() + } + } message: { + Text("This will permanently delete all saved dictations. This cannot be undone.") + } + .confirmationDialog( + "Reset Built-in Modes", + isPresented: $showingResetModesConfirmation, + titleVisibility: .visible + ) { + Button("Reset", role: .destructive) { + resetDefaultModes() + } + } message: { + Text("This will restore all built-in modes to their original settings.") + } + } + } + + private func deleteAllHistory() { + for record in records { + modelContext.delete(record) + } + try? 
modelContext.save() + } + + private func resetDefaultModes() { + // Delete existing default modes + let descriptor = FetchDescriptor( + predicate: #Predicate { $0.isDefault } + ) + if let defaultModes = try? modelContext.fetch(descriptor) { + for mode in defaultModes { + modelContext.delete(mode) + } + } + + // Re-seed defaults + DefaultModes.seedIfNeeded(in: modelContext) + } +} + +/// Row showing a permission's status. +struct PermissionRow: View { + let title: String + let icon: String + let status: PermissionsManager.PermissionStatus + + var body: some View { + HStack(spacing: 12) { + Image(systemName: icon) + .foregroundStyle(.accentColor) + .frame(width: 24) + + Text(title) + + Spacer() + + HStack(spacing: 4) { + Circle() + .fill(statusColor) + .frame(width: 8, height: 8) + + Text(statusText) + .font(.caption) + .foregroundStyle(.secondary) + } + } + .accessibilityElement(children: .combine) + .accessibilityValue(statusText) + } + + private var statusColor: Color { + switch status { + case .granted: .green + case .denied: .red + case .undetermined: .orange + } + } + + private var statusText: String { + switch status { + case .granted: "Granted" + case .denied: "Denied" + case .undetermined: "Not Set" + } + } +} + +#Preview { + SettingsView() + .modelContainer(for: [DictationRecord.self, VocabularyEntry.self, DictationMode.self], inMemory: true) +} diff --git a/TalkEasy/Views/TranscriptView.swift b/TalkEasy/Views/TranscriptView.swift new file mode 100644 index 0000000..c1d791b --- /dev/null +++ b/TalkEasy/Views/TranscriptView.swift @@ -0,0 +1,108 @@ +import SwiftUI + +/// Displays the live transcript and cleaned text with a smooth transition. 
+struct TranscriptView: View { + let rawTranscript: String + let cleanedText: String + let phase: RecordingViewModel.RecordingPhase + + @State private var showingCleaned = false + + var body: some View { + ScrollView { + VStack(alignment: .leading, spacing: 16) { + if !displayText.isEmpty { + // Header label + Text(headerText) + .font(.caption) + .fontWeight(.medium) + .foregroundStyle(.secondary) + .textCase(.uppercase) + .padding(.horizontal, 4) + + // Main text content + Text(displayText) + .font(.body) + .lineSpacing(4) + .textSelection(.enabled) + .frame(maxWidth: .infinity, alignment: .leading) + .contentTransition(.numericText()) + .animation(.easeInOut(duration: 0.2), value: displayText) + } else if phase == .idle { + emptyStateView + } else if phase == .recording { + listeningView + } + } + .padding() + } + .scrollIndicators(.hidden) + .onChange(of: cleanedText) { _, newValue in + if !newValue.isEmpty { + withAnimation(.easeInOut(duration: 0.3)) { + showingCleaned = true + } + } + } + } + + private var displayText: String { + if showingCleaned && !cleanedText.isEmpty { + return cleanedText + } + return rawTranscript + } + + private var headerText: String { + if phase == .processing { + return "Processing..." 
+ } + if showingCleaned && !cleanedText.isEmpty { + return "Cleaned Text" + } + if !rawTranscript.isEmpty { + return "Live Transcript" + } + return "" + } + + private var emptyStateView: some View { + VStack(spacing: 12) { + Image(systemName: "mic.fill") + .font(.system(size: 44)) + .foregroundStyle(.quaternary) + + Text("Tap the microphone to start dictating") + .font(.subheadline) + .foregroundStyle(.secondary) + .multilineTextAlignment(.center) + } + .frame(maxWidth: .infinity) + .padding(.top, 60) + .accessibilityElement(children: .combine) + } + + private var listeningView: some View { + VStack(spacing: 12) { + Image(systemName: "waveform") + .font(.system(size: 36)) + .foregroundStyle(.secondary) + .symbolEffect(.variableColor.iterative, options: .repeating) + + Text("Listening...") + .font(.subheadline) + .foregroundStyle(.secondary) + } + .frame(maxWidth: .infinity) + .padding(.top, 60) + .accessibilityLabel("Listening for speech") + } +} + +#Preview { + TranscriptView( + rawTranscript: "This is a sample transcript with some um filler words you know", + cleanedText: "", + phase: .recording + ) +} From 3dc972a7f0a8ca5efde459b2bfc239b16a6d9914 Mon Sep 17 00:00:00 2001 From: Matthew Strickland Date: Tue, 17 Feb 2026 22:08:37 -0500 Subject: [PATCH 2/3] Fix build errors: correct FoundationModels and Speech framework API usage - TextProcessingService: prewarm() is instance method not static; access Response.content.text not Response.text - SpeechRecognitionService: SpeechAnalyzer requires modules: init param; SpeechTranscriber needs locale/preset; use .text (AttributedString) and .results instead of removed APIs - Views: Use Color.accentColor instead of .accentColor for ShapeStyle conformance --- .../Services/SpeechRecognitionService.swift | 31 +++++++++++-------- TalkEasy/Services/TextProcessingService.swift | 7 +++-- TalkEasy/Views/HistoryView.swift | 2 +- TalkEasy/Views/ModesView.swift | 2 +- TalkEasy/Views/SettingsView.swift | 2 +- 5 files changed, 25 
insertions(+), 19 deletions(-) diff --git a/TalkEasy/Services/SpeechRecognitionService.swift b/TalkEasy/Services/SpeechRecognitionService.swift index fc93320..1b58780 100644 --- a/TalkEasy/Services/SpeechRecognitionService.swift +++ b/TalkEasy/Services/SpeechRecognitionService.swift @@ -22,12 +22,10 @@ final class SpeechRecognitionService { /// Starts the speech analyzer with the given audio engine. /// The analyzer installs its own processing on the engine's input node. func start(audioEngine: AVAudioEngine) throws { - let speechAnalyzer = SpeechAnalyzer() - let speechTranscriber = SpeechTranscriber() + let speechTranscriber = SpeechTranscriber(locale: .current, preset: .progressiveTranscription) let speechDetector = SpeechDetector() - speechAnalyzer.addModule(speechTranscriber) - speechAnalyzer.addModule(speechDetector) + let speechAnalyzer = SpeechAnalyzer(modules: [speechTranscriber, speechDetector]) self.analyzer = speechAnalyzer self.transcriber = speechTranscriber @@ -36,13 +34,16 @@ final class SpeechRecognitionService { // Start consuming transcription results processingTask = Task { [weak self] in guard let transcriber = self?.transcriber else { return } - for await result in transcriber.results { - self?.transcriptContinuation?.yield( - TranscriptionUpdate( - text: result.bestTranscription.formattedString, - isFinal: result.isFinal + do { + for try await result in transcriber.results { + let text = String(result.text.characters) + let isFinal = result.resultsFinalizationTime != .invalid + self?.transcriptContinuation?.yield( + TranscriptionUpdate(text: text, isFinal: isFinal) ) - ) + } + } catch { + // Stream ended or was cancelled } self?.transcriptContinuation?.finish() } @@ -50,10 +51,14 @@ final class SpeechRecognitionService { // Start monitoring voice activity detectorTask = Task { [weak self] in guard let detector = self?.detector else { return } - for await event in detector.events { - if case .speechEnded = event { - 
self?.silenceContinuation?.yield() + do { + for try await result in detector.results { + if !result.speechDetected { + self?.silenceContinuation?.yield() + } } + } catch { + // Stream ended or was cancelled } self?.silenceContinuation?.finish() } diff --git a/TalkEasy/Services/TextProcessingService.swift b/TalkEasy/Services/TextProcessingService.swift index ad15c39..60b360a 100644 --- a/TalkEasy/Services/TextProcessingService.swift +++ b/TalkEasy/Services/TextProcessingService.swift @@ -54,7 +54,8 @@ final class TextProcessingService { /// Call this on app launch. func prewarm() async { do { - try await LanguageModelSession.prewarm() + let session = getOrCreateSession() + try await session.prewarm() } catch { // Prewarm is best-effort; failure is non-fatal } @@ -141,7 +142,7 @@ final class TextProcessingService { """ let response = try await session.respond(to: prompt, generating: CleanedDictation.self) - return response.text + return response.content.text } // MARK: - Pass 2: Mode Formatting @@ -178,7 +179,7 @@ final class TextProcessingService { """ let response = try await session.respond(to: prompt, generating: FormattedOutput.self) - return response.text + return response.content.text } // MARK: - Session Management diff --git a/TalkEasy/Views/HistoryView.swift b/TalkEasy/Views/HistoryView.swift index 56e8cd2..d7b0d2f 100644 --- a/TalkEasy/Views/HistoryView.swift +++ b/TalkEasy/Views/HistoryView.swift @@ -72,7 +72,7 @@ struct HistoryRow: View { Text(record.modeName) .font(.caption) .fontWeight(.medium) - .foregroundStyle(.accentColor) + .foregroundStyle(Color.accentColor) Spacer() diff --git a/TalkEasy/Views/ModesView.swift b/TalkEasy/Views/ModesView.swift index f906a73..62406bd 100644 --- a/TalkEasy/Views/ModesView.swift +++ b/TalkEasy/Views/ModesView.swift @@ -74,7 +74,7 @@ struct ModeRow: View { HStack(spacing: 14) { Image(systemName: mode.icon) .font(.system(size: 18)) - .foregroundStyle(.accentColor) + .foregroundStyle(Color.accentColor) .frame(width: 32, 
height: 32) .background(Color.accentColor.opacity(0.1)) .clipShape(RoundedRectangle(cornerRadius: 8)) diff --git a/TalkEasy/Views/SettingsView.swift b/TalkEasy/Views/SettingsView.swift index d055152..7c0276e 100644 --- a/TalkEasy/Views/SettingsView.swift +++ b/TalkEasy/Views/SettingsView.swift @@ -173,7 +173,7 @@ struct PermissionRow: View { var body: some View { HStack(spacing: 12) { Image(systemName: icon) - .foregroundStyle(.accentColor) + .foregroundStyle(Color.accentColor) .frame(width: 24) Text(title) From 47db3d5ad24a9e8d5f30be6a3bcc5bd3f3e832a5 Mon Sep 17 00:00:00 2001 From: Matthew Strickland Date: Tue, 17 Feb 2026 22:09:30 -0500 Subject: [PATCH 3/3] Set development team to Matthew Strickland (FDH7J2W4TL) --- TalkEasy.xcodeproj/project.pbxproj | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/TalkEasy.xcodeproj/project.pbxproj b/TalkEasy.xcodeproj/project.pbxproj index b0ff14a..b5af5d7 100644 --- a/TalkEasy.xcodeproj/project.pbxproj +++ b/TalkEasy.xcodeproj/project.pbxproj @@ -132,7 +132,7 @@ ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; CODE_SIGN_STYLE = Automatic; CURRENT_PROJECT_VERSION = 1; - DEVELOPMENT_TEAM = ""; + DEVELOPMENT_TEAM = FDH7J2W4TL; ENABLE_PREVIEWS = YES; GENERATE_INFOPLIST_FILE = YES; INFOPLIST_KEY_CFBundleDisplayName = "Talk Easy"; @@ -167,7 +167,7 @@ ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; CODE_SIGN_STYLE = Automatic; CURRENT_PROJECT_VERSION = 1; - DEVELOPMENT_TEAM = ""; + DEVELOPMENT_TEAM = FDH7J2W4TL; ENABLE_PREVIEWS = YES; GENERATE_INFOPLIST_FILE = YES; INFOPLIST_KEY_CFBundleDisplayName = "Talk Easy";