From 65619a256a83dcdb287bfe6bf486578e44152e9d Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 25 Jul 2016 23:44:12 +0800 Subject: [PATCH 01/39] modify bug https://github.com/LaiFengiOS/LFLiveKit/issues/22 --- LFLiveKit/capture/LFAudioCapture.m | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/LFLiveKit/capture/LFAudioCapture.m b/LFLiveKit/capture/LFAudioCapture.m index 50f91bab..e8485329 100755 --- a/LFLiveKit/capture/LFAudioCapture.m +++ b/LFLiveKit/capture/LFAudioCapture.m @@ -46,8 +46,8 @@ - (instancetype)initWithAudioConfiguration:(LFLiveAudioConfiguration *)configura NSError *error = nil; [session setCategory:AVAudioSessionCategoryPlayAndRecord error:nil]; - - [session setMode:AVAudioSessionModeVideoRecording error:&error]; + + [session overrideOutputAudioPort:AVAudioSessionPortOverrideSpeaker error:nil]; if (![session setActive:YES error:&error]) { [self handleAudioComponentCreationFailure]; @@ -55,7 +55,7 @@ - (instancetype)initWithAudioConfiguration:(LFLiveAudioConfiguration *)configura AudioComponentDescription acd; acd.componentType = kAudioUnitType_Output; - acd.componentSubType = kAudioUnitSubType_RemoteIO; + acd.componentSubType = kAudioUnitSubType_VoiceProcessingIO; acd.componentManufacturer = kAudioUnitManufacturer_Apple; acd.componentFlags = 0; acd.componentFlagsMask = 0; From e97513c78fd574fa22cec29467c666621034c070 Mon Sep 17 00:00:00 2001 From: chenliming Date: Tue, 26 Jul 2016 11:20:34 +0800 Subject: [PATCH 02/39] modify bug https://github.com/LaiFengiOS/LFLiveKit/issues/11#issuecomment-235147191 --- LFLiveKit/LFLiveSession.m | 1 + 1 file changed, 1 insertion(+) diff --git a/LFLiveKit/LFLiveSession.m b/LFLiveKit/LFLiveSession.m index a06dc306..c0be5fd5 100755 --- a/LFLiveKit/LFLiveSession.m +++ b/LFLiveKit/LFLiveSession.m @@ -95,6 +95,7 @@ - (void)startLive:(LFLiveStreamInfo *)streamInfo { - (void)stopLive { self.uploading = NO; [self.socket stop]; + self.socket = nil; } #pragma mark -- CaptureDelegate From 
1d53c484e29276958041d26a69f85aabe7d1741c Mon Sep 17 00:00:00 2001 From: chenliming Date: Tue, 26 Jul 2016 18:27:16 +0800 Subject: [PATCH 03/39] modify black screen bug --- LFLiveKit.podspec | 1 - LFLiveKit/capture/LFVideoCapture.m | 1 - 2 files changed, 2 deletions(-) diff --git a/LFLiveKit.podspec b/LFLiveKit.podspec index 08a38377..f8fa0105 100644 --- a/LFLiveKit.podspec +++ b/LFLiveKit.podspec @@ -19,6 +19,5 @@ Pod::Spec.new do |s| s.requires_arc = true s.dependency 'LMGPUImage', '~> 0.1.9' - s.dependency "YYDispatchQueuePool" s.dependency "pili-librtmp", '1.0.3' end diff --git a/LFLiveKit/capture/LFVideoCapture.m b/LFLiveKit/capture/LFVideoCapture.m index 68970a9c..9bd28934 100755 --- a/LFLiveKit/capture/LFVideoCapture.m +++ b/LFLiveKit/capture/LFVideoCapture.m @@ -203,7 +203,6 @@ - (CGFloat)zoomScale { } - (void)setBeautyFace:(BOOL)beautyFace { - if (_beautyFace == beautyFace) return; _beautyFace = beautyFace; [_filter removeAllTargets]; From 40bc03438dae2551254a9d49fe69d1a19837b89b Mon Sep 17 00:00:00 2001 From: chenliming Date: Fri, 29 Jul 2016 17:08:45 +0800 Subject: [PATCH 04/39] modify cropSize --- LFLiveKit/capture/LFVideoCapture.m | 61 ++++++++++++++----- .../configuration/LFLiveVideoConfiguration.h | 3 - .../configuration/LFLiveVideoConfiguration.m | 7 --- .../LFLiveKitDemo.xcodeproj/project.pbxproj | 34 +++++++---- LFLiveKitDemo/Podfile | 3 +- Podfile | 2 +- 6 files changed, 71 insertions(+), 39 deletions(-) diff --git a/LFLiveKit/capture/LFVideoCapture.m b/LFLiveKit/capture/LFVideoCapture.m index 9bd28934..95ca4e52 100755 --- a/LFLiveKit/capture/LFVideoCapture.m +++ b/LFLiveKit/capture/LFVideoCapture.m @@ -33,22 +33,23 @@ @implementation LFVideoCapture - (instancetype)initWithVideoConfiguration:(LFLiveVideoConfiguration *)configuration { if (self = [super init]) { _configuration = configuration; + if([self pixelBufferImageSize].width < configuration.videoSize.width || [self pixelBufferImageSize].height < configuration.videoSize.height){ + @throw 
[NSException exceptionWithName:@"当前videoSize大小出错" reason:@"LFLiveVideoConfiguration videoSize error" userInfo:nil]; + return nil; + } + _videoCamera = [[GPUImageVideoCamera alloc] initWithSessionPreset:_configuration.avSessionPreset cameraPosition:AVCaptureDevicePositionFront]; UIInterfaceOrientation statusBar = [[UIApplication sharedApplication] statusBarOrientation]; if (configuration.landscape) { if (statusBar != UIInterfaceOrientationLandscapeLeft && statusBar != UIInterfaceOrientationLandscapeRight) { - NSLog(@"当前设置方向出错"); - NSLog(@"当前设置方向出错"); - NSLog(@"当前设置方向出错"); + @throw [NSException exceptionWithName:@"当前设置方向出错" reason:@"LFLiveVideoConfiguration landscape error" userInfo:nil]; _videoCamera.outputImageOrientation = UIInterfaceOrientationLandscapeLeft; } else { _videoCamera.outputImageOrientation = statusBar; } } else { if (statusBar != UIInterfaceOrientationPortrait && statusBar != UIInterfaceOrientationPortraitUpsideDown) { - NSLog(@"当前设置方向出错"); - NSLog(@"当前设置方向出错"); - NSLog(@"当前设置方向出错"); + @throw [NSException exceptionWithName:@"当前设置方向出错" reason:@"LFLiveVideoConfiguration landscape error" userInfo:nil]; _videoCamera.outputImageOrientation = UIInterfaceOrientationPortrait; } else { _videoCamera.outputImageOrientation = statusBar; @@ -226,18 +227,18 @@ - (void)setBeautyFace:(BOOL)beautyFace { }]; } - if (_configuration.isClipVideo) { - if (_configuration.landscape) { - _cropfilter = [[GPUImageCropFilter alloc] initWithCropRegion:CGRectMake(0.125, 0, 0.75, 1)]; - } else { - _cropfilter = [[GPUImageCropFilter alloc] initWithCropRegion:CGRectMake(0, 0.125, 1, 0.75)]; - } + CGSize imageSize = [self pixelBufferImageSize]; + CGFloat cropLeft = (imageSize.width - self.configuration.videoSize.width)/2.0/imageSize.width; + CGFloat cropTop = (imageSize.height - self.configuration.videoSize.height)/2.0/imageSize.height; + + if(cropLeft == 0 && cropTop == 0){ + [_videoCamera addTarget:_filter]; + }else{ + _cropfilter = [[GPUImageCropFilter alloc] 
initWithCropRegion:CGRectMake(cropLeft, cropTop, 1 - cropLeft*2, 1 - cropTop*2)]; [_videoCamera addTarget:_cropfilter]; [_cropfilter addTarget:_filter]; - } else { - [_videoCamera addTarget:_filter]; } - + if (_beautyFace) { [_filter addTarget:_output]; [_output addTarget:_gpuImageView]; @@ -299,4 +300,34 @@ - (void)statusBarChanged:(NSNotification *)notification { } } +#pragma mark -- +- (CGSize)pixelBufferImageSize{ + CGSize videoSize = CGSizeZero; + switch (self.configuration.sessionPreset) { + case LFCaptureSessionPreset360x640: + { + videoSize = CGSizeMake(480, 640); + } + break; + case LFCaptureSessionPreset540x960: + { + videoSize = CGSizeMake(540, 960); + } + break; + case LFCaptureSessionPreset720x1280: + { + videoSize = CGSizeMake(720, 1280); + } + break; + + default: + break; + } + + if(self.configuration.landscape){ + return CGSizeMake(videoSize.height, videoSize.width); + } + return videoSize; +} + @end diff --git a/LFLiveKit/configuration/LFLiveVideoConfiguration.h b/LFLiveKit/configuration/LFLiveVideoConfiguration.h index b7d0e260..991e4ac6 100755 --- a/LFLiveKit/configuration/LFLiveVideoConfiguration.h +++ b/LFLiveKit/configuration/LFLiveVideoConfiguration.h @@ -90,7 +90,4 @@ typedef NS_ENUM (NSUInteger, LFLiveVideoQuality){ ///< ≈sde3分辨率 @property (nonatomic, assign, readonly) NSString *avSessionPreset; -///< 是否裁剪 -@property (nonatomic, assign, readonly) BOOL isClipVideo; - @end diff --git a/LFLiveKit/configuration/LFLiveVideoConfiguration.m b/LFLiveKit/configuration/LFLiveVideoConfiguration.m index c5406963..8e1db306 100755 --- a/LFLiveKit/configuration/LFLiveVideoConfiguration.m +++ b/LFLiveKit/configuration/LFLiveVideoConfiguration.m @@ -213,10 +213,6 @@ - (LFLiveVideoSessionPreset)supportSessionPreset:(LFLiveVideoSessionPreset)sessi return sessionPreset; } -- (BOOL)isClipVideo { - return self.sessionPreset == LFCaptureSessionPreset360x640 ? 
YES : NO; -} - #pragma mark -- encoder - (void)encodeWithCoder:(NSCoder *)aCoder { [aCoder encodeObject:[NSValue valueWithCGSize:self.videoSize] forKey:@"videoSize"]; @@ -248,7 +244,6 @@ - (NSUInteger)hash { @(self.videoBitRate), @(self.videoMaxBitRate), @(self.videoMinBitRate), - @(self.isClipVideo), self.avSessionPreset, @(self.sessionPreset), @(self.landscape), ]; @@ -274,7 +269,6 @@ - (BOOL)isEqual:(id)other { object.videoBitRate == self.videoBitRate && object.videoMaxBitRate == self.videoMaxBitRate && object.videoMinBitRate == self.videoMinBitRate && - object.isClipVideo == self.isClipVideo && [object.avSessionPreset isEqualToString:self.avSessionPreset] && object.sessionPreset == self.sessionPreset && object.landscape == self.landscape; @@ -297,7 +291,6 @@ - (NSString *)description { [desc appendFormat:@" videoBitRate:%zi", self.videoBitRate]; [desc appendFormat:@" videoMaxBitRate:%zi", self.videoMaxBitRate]; [desc appendFormat:@" videoMinBitRate:%zi", self.videoMinBitRate]; - [desc appendFormat:@" isClipVideo:%zi", self.isClipVideo]; [desc appendFormat:@" avSessionPreset:%@", self.avSessionPreset]; [desc appendFormat:@" sessionPreset:%zi", self.sessionPreset]; [desc appendFormat:@" landscape:%zi", self.landscape]; diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcodeproj/project.pbxproj b/LFLiveKitDemo/LFLiveKitDemo.xcodeproj/project.pbxproj index fff24717..f89b37ec 100644 --- a/LFLiveKitDemo/LFLiveKitDemo.xcodeproj/project.pbxproj +++ b/LFLiveKitDemo/LFLiveKitDemo.xcodeproj/project.pbxproj @@ -7,7 +7,6 @@ objects = { /* Begin PBXBuildFile section */ - 7ACB1193D70CF46C9676CB29 /* libPods.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 5C04F65629D3881D5F25ADE1 /* libPods.a */; }; 81E848D8BD2C446C2DD4876A /* libPods-LFLiveKitDemo.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 6FD9F92833FE7856CDDD3CED /* libPods-LFLiveKitDemo.a */; }; B2D23E7F1D348F3D00B34CA8 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = B2D23E7E1D348F3D00B34CA8 /* main.m */; }; 
B2D23E821D348F3D00B34CA8 /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = B2D23E811D348F3D00B34CA8 /* AppDelegate.m */; }; @@ -31,7 +30,6 @@ /* End PBXBuildFile section */ /* Begin PBXFileReference section */ - 5C04F65629D3881D5F25ADE1 /* libPods.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libPods.a; sourceTree = BUILT_PRODUCTS_DIR; }; 6FD9F92833FE7856CDDD3CED /* libPods-LFLiveKitDemo.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-LFLiveKitDemo.a"; sourceTree = BUILT_PRODUCTS_DIR; }; 8FAAEBE1A4F099C69588B394 /* Pods-LFLiveKitDemo.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-LFLiveKitDemo.release.xcconfig"; path = "Pods/Target Support Files/Pods-LFLiveKitDemo/Pods-LFLiveKitDemo.release.xcconfig"; sourceTree = ""; }; AFD491825C5DB2AD871189B5 /* Pods-LFLiveKitDemo.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-LFLiveKitDemo.debug.xcconfig"; path = "Pods/Target Support Files/Pods-LFLiveKitDemo/Pods-LFLiveKitDemo.debug.xcconfig"; sourceTree = ""; }; @@ -69,7 +67,6 @@ buildActionMask = 2147483647; files = ( 81E848D8BD2C446C2DD4876A /* libPods-LFLiveKitDemo.a in Frameworks */, - 7ACB1193D70CF46C9676CB29 /* libPods.a in Frameworks */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -171,7 +168,6 @@ isa = PBXGroup; children = ( 6FD9F92833FE7856CDDD3CED /* libPods-LFLiveKitDemo.a */, - 5C04F65629D3881D5F25ADE1 /* libPods.a */, ); name = Frameworks; sourceTree = ""; @@ -183,11 +179,12 @@ isa = PBXNativeTarget; buildConfigurationList = B2D23E911D348F3D00B34CA8 /* Build configuration list for PBXNativeTarget "LFLiveKitDemo" */; buildPhases = ( - 6A9D2ED37E623D4A31A8D2C9 /* Check Pods Manifest.lock */, + 6A9D2ED37E623D4A31A8D2C9 /* 📦 Check Pods Manifest.lock */, B2D23E761D348F3D00B34CA8 /* Sources */, B2D23E771D348F3D00B34CA8 /* 
Frameworks */, B2D23E781D348F3D00B34CA8 /* Resources */, - 34EEB2C8F5E0D371D13B66CA /* Copy Pods Resources */, + 34EEB2C8F5E0D371D13B66CA /* 📦 Copy Pods Resources */, + 7336E9C92EDCA6C7449F2624 /* 📦 Embed Pods Frameworks */, ); buildRules = ( ); @@ -254,29 +251,29 @@ /* End PBXResourcesBuildPhase section */ /* Begin PBXShellScriptBuildPhase section */ - 34EEB2C8F5E0D371D13B66CA /* Copy Pods Resources */ = { + 34EEB2C8F5E0D371D13B66CA /* 📦 Copy Pods Resources */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; files = ( ); inputPaths = ( ); - name = "Copy Pods Resources"; + name = "📦 Copy Pods Resources"; outputPaths = ( ); runOnlyForDeploymentPostprocessing = 0; shellPath = /bin/sh; - shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-resources.sh\"\n"; + shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-LFLiveKitDemo/Pods-LFLiveKitDemo-resources.sh\"\n"; showEnvVarsInLog = 0; }; - 6A9D2ED37E623D4A31A8D2C9 /* Check Pods Manifest.lock */ = { + 6A9D2ED37E623D4A31A8D2C9 /* 📦 Check Pods Manifest.lock */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; files = ( ); inputPaths = ( ); - name = "Check Pods Manifest.lock"; + name = "📦 Check Pods Manifest.lock"; outputPaths = ( ); runOnlyForDeploymentPostprocessing = 0; @@ -284,6 +281,21 @@ shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. 
Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n"; showEnvVarsInLog = 0; }; + 7336E9C92EDCA6C7449F2624 /* 📦 Embed Pods Frameworks */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "📦 Embed Pods Frameworks"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-LFLiveKitDemo/Pods-LFLiveKitDemo-frameworks.sh\"\n"; + showEnvVarsInLog = 0; + }; /* End PBXShellScriptBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ diff --git a/LFLiveKitDemo/Podfile b/LFLiveKitDemo/Podfile index d1c9b6da..4ee1693d 100755 --- a/LFLiveKitDemo/Podfile +++ b/LFLiveKitDemo/Podfile @@ -2,7 +2,6 @@ source 'https://github.com/CocoaPods/Specs.git' platform :ios,'7.0' target 'LFLiveKitDemo' do - +pod 'LFLiveKit', path: '../' end -pod 'LFLiveKit', path: '../' \ No newline at end of file diff --git a/Podfile b/Podfile index e315c172..74ae2606 100755 --- a/Podfile +++ b/Podfile @@ -5,6 +5,6 @@ target 'LFLiveKit' do end -pod 'pili-librtmp', '~> 1.0.3' +pod 'pili-librtmp', '~> 1.0.3.1' pod 'LMGPUImage', '~> 0.1.9' From 8c492b130ecc81e7d8f3e4b765ed6656ade501eb Mon Sep 17 00:00:00 2001 From: chenliming Date: Fri, 29 Jul 2016 17:38:16 +0800 Subject: [PATCH 05/39] compile question --- LFLiveKit.xcodeproj/project.pbxproj | 44 +-------- .../UserInterfaceState.xcuserstate | Bin 11636 -> 10998 bytes .../xcschemes/LFLiveKitDemo.xcscheme | 91 ++++++++++++++++++ .../xcschemes/xcschememanagement.plist | 22 +++++ .../UserInterfaceState.xcuserstate | Bin 0 -> 10986 bytes .../UserInterfaceState.xcuserstate | Bin 95250 -> 95062 bytes Podfile | 6 +- 7 files changed, 120 insertions(+), 43 deletions(-) create mode 100644 LFLiveKitDemo/LFLiveKitDemo.xcodeproj/xcuserdata/a1.xcuserdatad/xcschemes/LFLiveKitDemo.xcscheme create mode 100644 
LFLiveKitDemo/LFLiveKitDemo.xcodeproj/xcuserdata/a1.xcuserdatad/xcschemes/xcschememanagement.plist create mode 100644 LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate diff --git a/LFLiveKit.xcodeproj/project.pbxproj b/LFLiveKit.xcodeproj/project.pbxproj index d4e0c4b7..afab372f 100644 --- a/LFLiveKit.xcodeproj/project.pbxproj +++ b/LFLiveKit.xcodeproj/project.pbxproj @@ -65,7 +65,6 @@ B2CD14751D45F18B008082E8 /* LFVideoEncoder.m in Sources */ = {isa = PBXBuildFile; fileRef = B2CD146A1D45F18B008082E8 /* LFVideoEncoder.m */; }; B2CD14761D45F18B008082E8 /* LFH264VideoEncoder.h in Headers */ = {isa = PBXBuildFile; fileRef = B2CD146B1D45F18B008082E8 /* LFH264VideoEncoder.h */; }; B2CD14771D45F18B008082E8 /* LFH264VideoEncoder.mm in Sources */ = {isa = PBXBuildFile; fileRef = B2CD146C1D45F18B008082E8 /* LFH264VideoEncoder.mm */; }; - BE55DA79155500CDEF87FB5C /* libPods.a in Frameworks */ = {isa = PBXBuildFile; fileRef = B5758EB2A15DAA132D8BF380 /* libPods.a */; }; /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ @@ -140,7 +139,6 @@ B2CD146A1D45F18B008082E8 /* LFVideoEncoder.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = LFVideoEncoder.m; sourceTree = ""; }; B2CD146B1D45F18B008082E8 /* LFH264VideoEncoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LFH264VideoEncoder.h; sourceTree = ""; }; B2CD146C1D45F18B008082E8 /* LFH264VideoEncoder.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = LFH264VideoEncoder.mm; sourceTree = ""; }; - B5758EB2A15DAA132D8BF380 /* libPods.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libPods.a; sourceTree = BUILT_PRODUCTS_DIR; }; B75B965E6B94DE4CBCC82EA7 /* Pods-LFLiveKit.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = 
"Pods-LFLiveKit.release.xcconfig"; path = "Pods/Target Support Files/Pods-LFLiveKit/Pods-LFLiveKit.release.xcconfig"; sourceTree = ""; }; B8CB02D2A92EA1F5A262F154 /* libPods-LFLiveKit.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-LFLiveKit.a"; sourceTree = BUILT_PRODUCTS_DIR; }; /* End PBXFileReference section */ @@ -157,7 +155,6 @@ 84001FF91D00175D0026C63F /* Foundation.framework in Frameworks */, 84001FF71D0017590026C63F /* AVFoundation.framework in Frameworks */, AD7F89B4621A7EFEBEA72D49 /* libPods-LFLiveKit.a in Frameworks */, - BE55DA79155500CDEF87FB5C /* libPods.a in Frameworks */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -182,7 +179,6 @@ 84001FF81D00175D0026C63F /* Foundation.framework */, 84001FF61D0017590026C63F /* AVFoundation.framework */, B8CB02D2A92EA1F5A262F154 /* libPods-LFLiveKit.a */, - B5758EB2A15DAA132D8BF380 /* libPods.a */, ); name = Frameworks; sourceTree = ""; @@ -382,14 +378,12 @@ isa = PBXNativeTarget; buildConfigurationList = 84001F9E1D0015D10026C63F /* Build configuration list for PBXNativeTarget "LFLiveKit" */; buildPhases = ( - 8EE9401DCA9508E918B7FB68 /* 📦 Check Pods Manifest.lock */, - 98F2C3F394BD79A6D6B8424F /* Check Pods Manifest.lock */, + 5ED199EAC89EE599F1E56B19 /* 📦 Check Pods Manifest.lock */, 84001F851D0015D10026C63F /* Sources */, 84001F861D0015D10026C63F /* Frameworks */, 84001F871D0015D10026C63F /* Headers */, 84001F881D0015D10026C63F /* Resources */, - 817C22141AD3F2EB34365AA3 /* 📦 Copy Pods Resources */, - 8A5D8B623E50AAC1575D1741 /* Copy Pods Resources */, + 36D0848EAED7999C442A99BD /* 📦 Copy Pods Resources */, ); buildRules = ( ); @@ -471,7 +465,7 @@ /* End PBXResourcesBuildPhase section */ /* Begin PBXShellScriptBuildPhase section */ - 817C22141AD3F2EB34365AA3 /* 📦 Copy Pods Resources */ = { + 36D0848EAED7999C442A99BD /* 📦 Copy Pods Resources */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; files = ( @@ -486,22 +480,7 @@ shellScript = 
"\"${SRCROOT}/Pods/Target Support Files/Pods-LFLiveKit/Pods-LFLiveKit-resources.sh\"\n"; showEnvVarsInLog = 0; }; - 8A5D8B623E50AAC1575D1741 /* Copy Pods Resources */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputPaths = ( - ); - name = "Copy Pods Resources"; - outputPaths = ( - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = /bin/sh; - shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-resources.sh\"\n"; - showEnvVarsInLog = 0; - }; - 8EE9401DCA9508E918B7FB68 /* 📦 Check Pods Manifest.lock */ = { + 5ED199EAC89EE599F1E56B19 /* 📦 Check Pods Manifest.lock */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; files = ( @@ -516,21 +495,6 @@ shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n"; showEnvVarsInLog = 0; }; - 98F2C3F394BD79A6D6B8424F /* Check Pods Manifest.lock */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputPaths = ( - ); - name = "Check Pods Manifest.lock"; - outputPaths = ( - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = /bin/sh; - shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. 
Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n"; - showEnvVarsInLog = 0; - }; /* End PBXShellScriptBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ diff --git a/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index 90ff3d38a89d4a08bcb25f998fe19e608e3df890..d66153a351e54cd1bb6ba0a25615c42c75b13342 100644 GIT binary patch literal 10998 zcmbVy2Yi#&`u`b8vv;$bv^1s6u5_iy=!OcEkx~kXHEly1Xq%d(Kt=QvmFu|bbs&Ps zr7c@=UKJ4sih^841q2Zs2q;5jdht5{&zrYt8^HVX^M}u;@0;_U^PF>@@qNZQH7$+4 zU?@F(AHs+riUdfAV(8O!Q&rMjZy@ONH&-S3Yi4_EL&0fDonb- zcopuzui(x2RlEhihPUF^@f-L}{1$!(@4&n8hxlW>7azo5;qUMXd=j6>7w|>=3;vZT ziHfL+hG>b7#1cKR5j%+|{mBqAl%$anWF#3y@<{vWz@QR+3J#maHRBk!Q&(WHWh{yhFB=cgcHX z2iZwJARm)Y$$s)V`I;Of$H{5(Jvm2yBEL~gW2l@esFEg92Th{=X)+x^2hu@wFdagN z(iECX(`X(YMz5p8X)!IQ6X--biB6_&T0w85)$}Inp>t>>ZKBO|K5e77(A(%D`Y?Tj zK1!F+$LMnUBwb1WNxSI!DM`((jg5Pe1WAz$$&m_ar> z=2WX=%Zf{;`U7)Y;rJ@YDBBFs0ddbE~(k4z}r=BiY%XTeXxF;;vbmZ(n zX5f`_56VK>Cx2%js&e;)Fw#V1Gz=ATGP({8MC{p4bL)W1liocYS}&Tbh3<@uz`{~U&tRQ2zWi! 
zYBl_JH~8n3d*=G;JG&CK} zKsPWGGcyabvN&dAb{5YPHlQ0(HOC3~UyJIH7xcCt8xZD(ljX9>%+2ZU^u{p#ihviS zZ0KRqcz>O@+6~L7^91TR-f_$1R|M8n$-4vox4X5b33^ZGe}xCCbugmXJImAB7%KMG zwAS<7N$75x$PEvMKsP6QTLRu7ym&(3USRv0aQAv|pf8`fxg+fyrF|%Xf+z%-(Te7x zd1!vMS{v~DLp-avt(0@iR~zfS&A!$kzc9`=yz@i+7gI~X*W?MbUG<4M7-ESmc|W=Z zEr1DbMGMhwFi9(FM~mQjXBD`;#=3yFxw6Fre?v2BIl9fEinbP*F`=#-TMEMN(F3*< zy~P>+fG@ILn90HVt1$yWi=J;W%+wBk4(4M?(5*iMH^*V*eVi@Lg9Ucm(u3$9)#?F% zs<+V$=|m49*I)0>Tk9j}(Q38hFU-)1mT=VnO4o={FGb5xatC^x4eUV6*&qg>)}NOM zzwqimQ;5ytszcXdS-0vLR-*QhR`gCMM?n&q`91V+l)M`4Ks(XKnZ+ERfY-mv*!^8anz?xufz}Et3&LL@3moRnCNX0xCzeCPdy;3=azULI_G%M~v zXIKfhI8AY7MbFmDLf)nzKkz)d)QK*ji>#E5W#y4w{|EgSX*1FsJMtGW#Qp20i3t|=rHNy(h>c_8S#~eeVN6SS zOjzbvjukid=u+65c40Z^Pa`B%SOfdSYBs3@TqL+f*e3vE(BJ4S^)-gP0Zyio&IX9* z!>Sr-u;4guw3StKU>lqAKe&YZ0he$hcCbn|t%plE84rru(O^7;O=VTEBhWM6yd3$y zo5DMaOqz}}qsC<6Y&MaYIYOLp6JNG%M?X-0-k(D*9z!b!?GRu&8wn%R^b~^au=S4r{fu{ zj(ORvE<6+8h^tvWo6p)f7A92H0@G7rFJpb7)SiGM)g217)`nUG;8ry5Ztd^+TCKMI z$*{6u5X>jSJuhxV$!qZ}T#p;D56{MPSOfF1*=!DLWKC;v6K=+S)F0mr(Lpl{vJlwk zTrMu)W9E|iEf4~D>%#iaIaV&z?G6ej1OD+ch^&qeBDmlezJZsE1lqi60jicXbJh-+ z$;C-Mdx@H)&^yZ?=vGD;!IO-G?(U-T94NO2ucQ{)>H@qFX*=+(%-?};V=WN{;o+np z$We-PU4-xC6!{Kza|gbQ1z5(kai!xRG6n%mi~?0>cHYngS5D@_g^{86;rn}Ld4RRD z?8t|Q@FUSQBOjLHWj!O7vw6KEp2RD8`}zwy5FyrypN9AkcY!?D;I()iUXM56jrb{c z3rKYVyOk|ux3Sw9W9=J2s?XqO@pJHe0lx@;Ujn%ycPk*EpRB!)sw19|eWIMwG^K7=Hyht}C>0Ne8bj zS5L3)JN@W@zqJJh^l^E-bTYHLGYz~AzrBjx$wT3G{4V&;Znx_8hPv=}{2p>mDR;vU z9tL)HcjqAC-{{gRR?fHY?$iEB_&vh?eRzKlo*ZEJM)2fwbg2s;!iVt}>^}AYd#D#v zzQ#wRfZ`ZF&i>Bs=X8@xKzdAoi)tPDE%{N{aSES_8vFx3%N}I^0G|Rwd9>um-N&NF zUBZ{6#{C47KHNKLPqM)cj_m9=B82=3|BnA4h+skpB?9&+Tf!b=OWEUW8C$*%B@q!3 z6A6(L8Igkre}Wxh$Jm8-9NUidVZkF%lD7!Jh0S7Gh;9*#T~2xjfc} z|7K1M__-RGyS0UzDm9v5YE&Nxwxdw`ioWCg^^t`{7Lh;_LBmKt_RkLDV4W;uFG)r+ zG62mbgUDd6I#L7ModM5)w-MMg*9+-E2U}IGcFqF(9F~@rHYEt@Rhp-+$=96L18co+ zsq;B^(*m1F3tI|FA*o;$z==+h(i16=bdrOT*N_b2B$*_OWV0@|nyq1L*}63(m$*nC z8Ah&S>)8uzC;OD`=eRU5YVZ7BQ-l82K&^Kiqz;XWCNDg_e?h^yvxjuhuu)-6l_jae&MO;~_TAh3i>fLMU+dCqm 
zB2p5;08+{}bYlQ1gP4L`PsXy1?AadsC*@=km?D`#CbFm4)1728akEYAnZFPgLJ_H# zby0C>Z#3!1ue+(Q<&)`TX4G^yl4|xGdp;b(EROD4NBk&xBk_`1q@FYoADK<&kVeu( zn%Rr&U+g9JGJAz>X0Ng>>@~J^BWWQwlK=^l5NRcI$viTjw6WLO8|+Q?7Td<&X4~1j z>^-(4>`+`#0?H_LWoHz_GrKq=J>8j8l#^KsM(E1OEpj?DvN8&aGfSN%g_&8IIoSmT zncOFSlD2ers%DH+}w~k$wpMOFsIxGVRQJm*i8)?LdL`X+Kv2r~frJXfi+D6=#(FRQRPH?JV4 zIH%B+<;*ILsMTWfx2Ry7+)M6@Zg_w^7~SwNfrx`kB_lK*BTM;)upcAK@!K8b3HA=V z)2}vsHsbKNC;@6@T#daA-X_9Z9SXwRWaNdXA8H%{s1(6_gx)_1q}P2X=pw86hDtu| zjC5R2HuRnNX~t?EhIes31UAdF5cJfBnf)BkVaqK8u#`A@T+1eGaqFI>?vo^JwpL zH0)^(f@J2W%;$VaN~6CX9F8+LtFW-3v@oT(D6c3bE32q5rJy9IFeN<;^gKJa#N{k4 zj?DNiIl)cv9Xr%PPO`%sTA2S7$$37W;rRFi`+_My1vPq}oF^}k3*;iy#*UMt5Nv9EX!`8)XoJZ{g3PKuDLZ$r2jC6w|N)IM%KqBmVsK!uzjxeDtwzF%8{ z-CUz0DuE1wirF_ERLYL>0U(E7kRn2k)T?=s0V--j+VxaTHB?J=G?wbAfg0Iy_AUF4 zonR-~DR!EDzn+FqGiV&OQ9F%CN$dbgHNsXmIbH!AQtn|D( zSB}T!%>|{*bve`XoOv1PvobSs^Rnt{Go4dBfu@|SO5a3|+BE)=Gt?R3XFAOS9HAN1 zNi*3+c8UGiMYCxR&1L^-$GUcmIYmwf(ME@%-61Q0ox8P(D>w71ekY+nK?zr@@)SDls$EQ{Gl2Dfv||)zy(6EefhHwP2_ttzY&CS& zRb%UE1NF6I+KvV7SjaLQ`D%Xj-HoM7&z!!=YHK2b`Bied+0YbMVgwH4r`d&;0t+SXW_ND zxx?GSbfl(C3gn!bkwtbo2gC2|oTy@XXExX9$^-Q2!-fP z-*5P2#K;QnY>8pG*`R^rVF%o1aH3pP0(TiE!_iv}L{2wD{M3qWM~mUs!ZNtCz@1BU zp*3*uumNr@Y)0Fl=Kcv3#E-!Jgm2LaxQ`HnHQ0jt;Uu_&kcEfiGPrwCjwj+NP#wGp zdvO5n9o&K+#1BJ>=T*2@u#M+3!Tb{rK%@TQciOR(cSE;RHnXSjQ4H$WMQ^9=$OU}n zeno8K4tg&X1_2%JqKoMmdN;j?{nU=-?O4HnZpX@YtXfO2N4x3$^Z_^>*bSI5p&hH+ zu@=qvGUQ8&LxyQgs$}qUO zlB+(&L+>hdsRJp%zW8DhT@863T|*AHV*~pPz_(Aj{!%z#+I0<`qR-H0r-Tb3VYM=& zM7SoyO*9I!ta)&meq9Sb-9Sy$0v9ue zz@>~d@J*TEl5*i{#u&JKv56k0zY8paY{4YKbiqtPwZJ2oEoc?oCRi-EPw=RqL-3s7 z1;M`rFAFvcwg|Qg-Vl5!_(<@HV6Wg)!2!WR!6CsHg42TEg{i^Lw zDCV)44KZ6{K8iUh5{P0%Vv$rN7xfpVin2uoqB>EFC?E=n=8EQvZV}xoS|VC1S|)lz z^rUF5XoKh}(I(N$qOV2A#VWB$JW^aF_KD|;+r$gRw~3i}k$9u{74a7FR`HwSZQ^&t zJH?-h4~xGPe=Yt-d{TT`d`5g$d|qOcWJ|_NDkak-GbA%5vn6vRjgn@`Jjwl%MnL@JQRNX1f(bdWSlI#N1TS|P2J zR!OHzZ;*PW^-`a7jHL`WG4YC(yTV?Oc_Q?*( zPRLHn&dAQmF32v)#d5veAUDd*@?`lS`4D-kJYDXTm&zx|E98~(D*1GIy}VK0EN_vw z%IC@3oS>u>33e5&2R1_ww`di}D}k 
zmlXnqL?Kfs6k0{B!k{oI%!))snj%Y)qi`ukC`Ks?6h(@0ifM|Q6g7%E#VkdW!mqel z5mdA)9#=fA_&{+`@vY*7;*{ch#Se;eiVKQAlvqiXLZwJ4QOcAGWs)*eS)%kQA6LGj z+^*cAd|&y2a<}qhetlU)$gizs6SSJp+2TQrM{@X zr2dck4-L`~jZ7ogC^bfnNn_Cr&## z4Vu}SM$PS-yERKR%QY)BD>Yr3HJWvr4VqUpZ)rZ%?9uGg?AIL9e4+VDb3}7Vi?wR4 zRvW7|XiZv+Hco5TrfAc&8QM&3wl-HgOgln5N?WN7YMFMac9-_B_Oec(ljvkRg-)xB z)fsdq-DKUZx_fkg*FB(nNcV_tiLO((QTMWLvu=xStM0t+=U5agjMc{4V&h}`#X4g9 z#}0^f#%9Il#JXaK#SV`h8S9H(8v9=Ck9xa)h(1-HuFuqG>vQ$_`V#$UeVKl&zDhq+ zU#<7(XXzXCv-OSoTlDwqpVx2E@6dms->v^x|Ed0f{-FMl{;>YI{(?bdkQihJg+XP| z7<2}`!DuiWtcG-h(~xDzF}MuF48sj04f%#bL$RUM;4$24SZa8|@S))sqrsSAoNjD2 zE;8O>ywkYY_=xdQ;}YXi<7(p?<67f-<4eYujjtGAHNI!uVccolW!!7rZ~V;oxk+L& zn2aWq$zmE{8fY418e(#qMw#+W1*Rg?IMW2vB$L}zXKFUhH{EGkY`WWYuj%imZKhqO z1Ew!cUz@%$9W$LaoiUv?oi|-H{cOhOB=bn~81p1^g}KsPW%in9nd{9ybHMztd71fD z^GD{B=Kq*4n}0I@Y!O+c7P&=b(O7hr1WTeN$&zfDVQH|;u{2p)ECEZ%@(;^0%L>a% z%PPxDmYtS8mVK7}mIIc9mP3{kmS3$ZtH!Fc##skiQ>=N`LhB4`jdg+bA?r$OhqcSP z*1F!h(Yo3Cw)G?H*Vb>W$E@F4PgqY`zqkHiJr{@LXq+%k6eo$3#VO)cahf?TGD!?Ue0%+Yh#LcC&r7J!rqxeuw>j`*M4Sz01DF zzRv!P{W<%K_LuFO?c40{*x$GBw|`;(%6`Ot)PBl-#(vIz(f*_T_jnvHi#Nr`$M=hO z#P^RM8lM*LjL(kGjn9uC9X~O?s^9DV-s<;$zXScg?03B1g?@h|;zU8BC{dCqPgEtw zCE61c5*>-ji31Y{Ck{=VlsGqWRpPG1y@`hsk0pMWcrNjB;;#;yBi=F4F~pJLNOQOx lBOLjTLPxPZBZ4`Zdp6`PPlFV}E%$ak3XPK#P zs`vN<>FLK1Mg&nLK~YFbf2F^Tcg)~@evh|tyu(|4BVQBnS2|q2nrR+rE^`FDO&Wx6 z-SD^?_d(G}hO{Ui=}>pn1NB6`P;b-+rJ=s4AId@nXcQWa#-OptiONtpszBq=cvOid zpeA$^@*zJ8pk_1!%|x?M3kss8Xf?VQtw#@{&1egH1U-(PKu@9_=nk|K?LyC^m(eR| zFWQgZK}XSh=mYdI`UIUrU!Y6qGWs6#ZCBTJQvTy^D)B<@NM{Z zyd1B__u@79KD-uh#GCMg=yv=N-i9B?+wpUF7v6*S;WzM`_%J?#kK*_7Y5W=f9G}DA z;*0oK{2TrqUm@{CNA$!%jKo9|h?!W3l_ZiRl0tfr-lQ+dAep3u3?`-IdNPCzB{z^^ zWH=d1oMa-IL?#oS)RIOri?op0OX0NF$yCXbLO$qw>7d6~RP_LH~B zQF4sDOHPr~E&`qV{?7|Q6xt(C>AM@3TeiTboQOjxA-T(XS-HbIBM!$et#Q^LrO*%Z9^PVGYNZ_ zRaSA1lHAPVl9HV4w9>3W#cA2OgG$o!OES~b^79IEi!yT42j>mWtK#B^mJA-}^-cFT zxoY?lZ%uOp-xvrltVf9`bqg{eBQl``WJVTbWl>DZqM3}zSqzKaf^5i+l7Isfa-bBZ zU~xtDSSr#Q3*=jimpKe(6#6~G!PY{B2# 
z90$Lg)4VfBx@LIlTmi3dGBgZ!d*EAt<+P`k9Gy7RdM8+X02l_V1!=jvsctMnt&}1|PRiUZKg{n~vax)9FvP5QM zc9z7tv1I1hifU0Eh;Rh_osQ~J1E_uqyADK{1%Ewk0b3}j|AhL`x@A5d6h+@5xDj4A zU*!b$+%BJ6kf*R4kyBu?Li9c2Z)bCL1B{*^{t8Xxv@oNDuXQ!o2TJ(r<~mV`_V%XH z!gPNC)O#%7gIgUi?QmoCIGE74tO6}lUZ08I?T-EePBNJLXQQ9;xr7}4PJGW5147x_oPA3tIka?xILuMXyzH+LDU0@Ii)^pbfn) zNaAW;;>>;MjVi9kpO3avvXMI3hTas&{p<0Ow(%BN$88Y)%|myvYe1z3FeIwVA;ERb z2V-h?9mmkSRa|#qszbLTa5U0*+t6`=r++(K-1Z6deifJeFIH$nCk5*NW@s3AKSZZd zYAZU$2DYM)SRt#e;!;HPXb0}U1GO-LGw4%+z*$z*iauk-thQ_f*loD?OLPHgHlnZ4 zdGs|K#7fxUjp!Rt#fz*IRIyHomU>1k2iP3oo#62r>q1{kJP;mu8(jb|0HeCdv3xyb z2)z5x?bq|(20q|x5zp&K;k26ew;D#TX5q2JLJ^an;5W5R~8q3i}Wj16Za z*vKtdf}^k$N24|@$1!Xa8_g!LYOwek=4Q2%1bDh4dn7h!n#<>c zgYb=YHP-Qyhj`q4QGNYTzrR^Hvy?wga<-pZQ5*yf_fM=yWnlOsSC=sH{rn{Y@PZK*easvgft%yZnY7Vwu(#b z@Kb=&eMKD#i@dKhba;cnksZaOQ7j&V=3ys#QAk9pU4DKdeERr$@SHPvfF_>hiIS)RE2+%>lt}cZzKzt`r0(N@fxm37*WRF(vr3IddRbP8Z_l z{Cpv7&P<;>H>^l5T#HgS;c8ri-I!-LvgxdzHEhClcpCQL8(AX@FvfzccKmR@wmt6D z0d#f5Jsok=1QB3_z^Zbrr*2wkaCn)U@C;!YANJz_Zf0KA#BO4~O(+G8z%9rDx|+rO z5c2_CnJWMsgsnkhz|Rz7D*ss5%nDb1Nc$2Zd)sx-S>p|bH4&Ym>0Td1cK#ZlrwPJr z&|ZDSkPvmj=tZdoQRapYSiuYNt$>7h5o>P6i`fi;H#K;KbPn}z%*OmZuJMA+xU z?3PwYV<2sTeL@uK_tx`(ivR<{k%k99fuBUF8#b`oaKqDhr!ad5Thxl5VYmL1l%5x) z^a9?^7PH$srSuYhHDX7v;n&%J*ln;QP&3hvk_$akLpusDx*xwCG3OwDhb>`u2ymx@ zSzUwh}2jE{7J+@XYg6Jg5Aj~M<*99 zGDgt-0-yiW&@Xb*tj8L~3}sTfGtgh=0OAvwPWA_OKvA zYXq8fBzb+qAgm6fZbugc^?x&`BQy?6?++q}fT^~zjqE}A+|0JX{|^cL3aR^GpU>+PH3q!>;6}lQ z-9kdy?pDTza8eBogP=a)YVz}L!S8}m3T=aDH9;1~yTu&}(?Z5s+#+TyAg z!BMpXuaggKgcb+N2Z)9rAuS@Q?ZctgM0(<78`ylYy-%0+exdLG{=7&Qxd!4Jl1*|* zF3BVLq=58ikFZDCV{99HoISyw+(HJx*<1(z3P}+uW>3MnY=?iZuzlNpPp<;1jGs5pso4Li2KP8G~AtefI2K{bSP76K&51p!YQ#f(X; zE_Eny#Dc|qt=HEMKcGw^rN10gZdQjQBO}Ntq-iB1+0(6LG}{q|0?}_2g?7-c|D=qJ zL#b_~oK&!#?3p$)o>a1D+4HQnGtDX+?RERZs#hzoC@P*XCmtCxOba+G^ zxltVPD0K7_MpVI<=Q>c5c!?iOo-{#^kG%jryEh7h$c)gi7s0)b_Icfq!TKXso?Lw1s9flkO5ljqo5?VwFwz{@t0 z-Q-2GhaF(=up=G7O+XLAn!y>K12?) 
zL+mg(ZkQ^%Byrvn7BTNQc|T&_Nm%q~=b{}|HeqrVckTaE?j1g?|B`b8IDAY#A!o>0 z@+tX@e9n%u_t**cK0C=iU>|Nl4)P`Wikv53lMCbH9K^~USUJ2`x68THLK8Q&Dlf*i&vt}AERHNg?pJSUFA>0ah8bei} zDlUDv%O4ozZD^|JfnaCuDNZYf*w-DJ2*piE_CZC#frr6GEE5Af8%5BGjHv}4!ZP@o z{3a;=FXUHtntjwpekWJhf7vBrWB)QNhZH`-dq%F+ceuAM%vJlEl+tLhXeyymRLVYP zXV|A3sf@~L4EvmY#l8U(=#2EKDiqa!^8cyq(v@<2i1JxT%WA>4|GuCDcqR)%a} zObsJ4!!Xg_H2tbXGiW9d&1UD>*Ff}wNVGZMbo9!2A=$~u&7M1cPKJ3V1hfXRZ`nnl^<6uIH1EQll_IUo%()ZhWTdC(2$4YtxuJ9zKoY%yU2dhr*$DxC$l6wXHaooT4rfZKHRz#l%y3G<>aIl4a!T;&&kcq z8&r}L-g_A>j{reBo=yO`q?PQaRyvXWEW(lCRz-i>>Ak`frcw{mJVafzn$}P^0>^Js1yTbkmVid$Uh{;3rMkv9j&<5H_y|f8Af|vqMj0<8(5Jv^EG>C<3sLINK z%U8z-oF&61c>IIwJ$2&6jb3an;b(Yi_@VxB9!O?+zYth;?Fg3zAqW$jL#34fM1@ec z?PwHPkS!L+XVj8q&C~?L+;LuGIJdVJ3Q-rgUO`@dW_m$pK}LFQRz_Yyw!0=Pv)tut$jz?s zj25U>iXXXsGQ)BW(nW&p-$EDAg+VM2;+P}T}#)|`-4~=#F`-1hH&#Qi$!n> zH6XYl2*3&T{^x79|C*M8qEkBL>pDdCE1S28u^7@Wh6Kyf}q;AR3 z?)1#&VaeLkdRLtvehb)?T$n2ETph;=m)&5yeT$kP9&on6)mlUOP=Q#nx88juwR?}A zSG%-rU$Cu5ug)9WK4~$1`=vwXHTL>q*i?uUr*Q9Hnqg13rz5zniUER|Sz$KYGJC=A zoZQGtV~)_6UjX3KMKEqrWNrO>U9}w|l#$M1d<#S_?GITFUwd7daIeQOJYiA8?S3*m zTggOua7jE29;{4+t4}XHQt`tBm7C$VZV9>vt%0kl`{6354Q+x4Dv!a#lo#Q$OB2g z6bj!)&!c=Vr-IikO{p^-XL$2 zcgSIKoSYyh$%hb&Cctx>TzF>FpAMkc(L!2GOW=`BIXtteq!Zz(O%((IHPi?A|DHrD z=^-hRR7vJY7D^UNZj&sLtdgvkJS2HSvP1HsAo(_m zMny$MN6DjNqvE1eQR*mdlrG8;l^m5DRT345S{$`A>dmMVQ5U5$sa&d;_LTOPrb+ur zGo)G49BH1kKzfaIq;#}&th7v8AssKBAe|(gBAqI&mNrOlm9COLBHbxHApItqM(d-y zM-Pauh`uS>AKe^1GrA>uW%SzUw&-oqhoetLe-!<3^qJ^Sqd$-SBKnF}lB^+3T`>vNvUK$v%;tm3=K&$kp<6`L*(a z@*?>l`7rqi`2_hS`4st7`Hk|K@)r3V`CR#8`6~Ga`NQ%@<=f;>$bXDMG5Q#DOh!!q z7(V94n7d=P#XJ%7RLs*cJ7f05yb|+T%-)!HVt$Uf65As-BX(Tuq}W?xm&dM*T@|}J zc1`Ts*jHl@#U71)H}*vA$=DBLKaRZ+`+e+>u|LQDsvru9LaLA{VidWGk&0S{SK(6x z6f+bH6e|_06!$3ZRotgor+7fILGifaNyT=>4#hKy=M>K?b}RNM4k%72F2zO1CC3%T zjfnHa&4`;H7mQmFwFpnOdExbjKmcI8XTSCp?Q_bQJn z-&MY+d|!D+`K|Ihs0xH&pvo2UG`Dhg9#YK2V)feWdzWbw+iblW;1|#HDimxC}0f%i;34LavxA;Yzt7 z+zs3`ZV~qo_cFJidy9LUJIS5mKH@&%&T^k|zo@BNrjAi7)JnBkZC7_wJJdbZz13;z 
ze(G!0qt%t_YPDA#P|r}$QqNV-R|nM#)JxU(sMo8vsduPfQNOOY7c}2$ zzSCUN{HB#_m0C`#(HgWStyyc;rfRda{j~$M*J%s2*K3DrhiOM>M`^v<1=`Kp?b^NC zH?;e;2eb#ZhqOnu$F#?_C$wK_zt(=Ey{NsU{a*W{_Gj&{+TXQ*#3#h}jK4O1O#IaN zS@CzoKNP<^{#g8}_|x(KjsGJ4hxp%hRHx9z=~OzS&ZINz9J&-;cU`(JLzkti(ADUC zx`1wmZkBGg?q=OQ9n&q>-Ko1vceie}ZjElO?ta~R-8S9Zy0dzjK0%+OckAcq|D#`` zU#efBU#VZE->l!Re^$Rs|APKy{j2)d_51XP^~d#R^yld)&h>M!ZP*Z-*h$q;3T zF(?d5gVvxk7z`#usv*tL&yZorGW0hLFkELSG!z?#8Acl>8>$U%L#<)Dp~2ua++>(% zSYlXVSZP>gSYudgxZkke@R(tjVYgwA;T6MchP{TPhU114h7Sy<3?CWJ8@@4IG+Z|P zVED-xWt16XjB!SlQEf~#CK;2Bsm310UdDdL0mgyGB4dfM)HuXgZmcklGgcaFjUMB4 zW23Rj=rhhS-eO#6yw&(0OLt38OK(e>CD&478Dp7Zsk3-2(=81auVt2Hw&iBaJPWhjVp(EYWx3z-uw}bt zhvgZ|bC%~VuUYn5-mvVq9Iza;yl45?a^CW@j>)v>qP4$ z>lEvBYooQv>bEvqXId9oms{6aAG1DgebTz!y2JX6b(eLw^(E_J>oMzb>j~=z*3;IH zt!J&DSDNy#}iK_ex7(a@z=yFHf)pFVr@#B+NQPX zY(s1{wwrB>Y%6Sc+3vBev8}VMx3$`~*>>2TwLNcp(e}FS4cl9`gSJDqpY7f4PWvQ# zjlID>+a9zpv@f>bZokvM%D&oupM9PELHic_WAI ze$M`#{f{JRk~~R~q)O5!nUX9?wxpz_UP&2A{gaB5Hz#jRek%E;@?{>2gPh;b+!Du>1q@6bDpj%yv&jyoJX9eW)69ETi79UnT*IL@WWQ({x% rQw%Al6myCrB`u|2N=8anN=`~%%FvXNDP`@^91%b19}&SEN~!oi$;Vgn diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcodeproj/xcuserdata/a1.xcuserdatad/xcschemes/LFLiveKitDemo.xcscheme b/LFLiveKitDemo/LFLiveKitDemo.xcodeproj/xcuserdata/a1.xcuserdatad/xcschemes/LFLiveKitDemo.xcscheme new file mode 100644 index 00000000..86a692b7 --- /dev/null +++ b/LFLiveKitDemo/LFLiveKitDemo.xcodeproj/xcuserdata/a1.xcuserdatad/xcschemes/LFLiveKitDemo.xcscheme @@ -0,0 +1,91 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcodeproj/xcuserdata/a1.xcuserdatad/xcschemes/xcschememanagement.plist b/LFLiveKitDemo/LFLiveKitDemo.xcodeproj/xcuserdata/a1.xcuserdatad/xcschemes/xcschememanagement.plist new file mode 100644 index 00000000..68810366 --- /dev/null +++ b/LFLiveKitDemo/LFLiveKitDemo.xcodeproj/xcuserdata/a1.xcuserdatad/xcschemes/xcschememanagement.plist @@ -0,0 +1,22 @@ + + + + + SchemeUserState + + 
LFLiveKitDemo.xcscheme + + orderHint + 0 + + + SuppressBuildableAutocreation + + B2D23E791D348F3D00B34CA8 + + primary + + + + + diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate new file mode 100644 index 0000000000000000000000000000000000000000..87651a317d6b2e7a3df32fa928f76a215910e956 GIT binary patch literal 10986 zcmb7q2Yi#&`u`ben>1A|K#&Y-uKMsJkN8!&p4;NuG-=D zBqg0j7!gE~1cjhb`WgLvbmytO%k6O1j_&L%pTt*q++#Z1T@@1@P+Zj6 zREMS@7jh#Hsz+1Nqi7LYiT;7sqIGCKT8}oPt>`7R4ZVUEqF2$IXeW9Hy^D6EJ?J1h zgpQ(<=o9n>I*YzV|3p{O_vj}275#>ONB_YD>#+eFu?ch7j3coHN8#?c2kwb`;odj_ zC*nRh1!v$vI2Y&P5%@t|jK|=y_#s?|c{~wM#!g&^AHg&6qj(l(crjjrm*OYzGW;Z7 zg;(R}@kYD}Z^m2j3-}fMDt-;WiQmQV;XU{uK7^0qPw=PsGkgx8$KT=$_$t1Ef5!iZ ze<7FBy-3@vWP4r&yZE5m8>H#l2^%YvWM&?`^XV;lpG^x$QR@s zxlC@5@5xQ_3;8$sl~O99MrxuQHPc9Hp;0uN#?v0OFYQOuX@7bj9YAfgh!)clI*N{_ zW9V2~O2^Uh^dVYCAEuM&WLiyYXg!@p8THcHbPj!t&ZSS#Wpp`RLD$gd=vw+b-AuR8 z7w9(n3f)8Z(tUJ4JwQLAN9i$ooSvX(iaXcVS681xG8B&FNP$#HJ1XCnIGJy7kA?p> zy({TlQEhj-TTld2GQwyJQX>tMu+Ld(nYwdwYEF7eMp8yXYHr`Y32CXhISHAW$w>(r zIXNlmDSb0@bCPn))cU-f+)+-~WOtptg3obQ)YtH}9{uODpPlY=7TCcuru#IVwu_;prXj>tS;|BUzz&8j*|sZQkIrEI3pu9Jv|{O zH#;dIEjyzxuyt^HLQ+z4W=?8)zwEw4lBKZt#>V8d)ZQ@}nKOIGz++NU@0iTK88c^| z^vN|Dr8WaYbeQ_NB5xtXdoJdve96a!?aAt^vuAF%)~fm-h_sLfbK_m=m9hgo<^`p5YZEC8GD)? 
zho=dV$saXIu4W>kSH1-`&{B66Tf zs1xhV2KhLX!m8O^HV>5BF}B(_evyj@jWq@sHo{rSm)U@fO1rC4;G;mMND*wY6473W z@3#8#8fZOMeDifw>!3#tKfzvK?aATG>#IcW#IzL^2;JQtQ0qdz&c(ao!R`V31IyR= znpg3zj(is8_LmDIokr8pbkqPqG6OZDN6^eNwbtcydPG(UOUW0=SDUK%T1UNGBuv;= z{B)04!_~PQHFj6S9aXG9L#zuM_%WJ=7);4`Ff|jBu z&@!|ftw19{#ryy^+*#pc(L?QMP+<2=^tGt5J8IjD1qi6EYVJ&V?` z9xQ<+u~gQVr8Ctj^gL>URWzd()H=3(-eMm`tS9TmdIQPBhYYH(bT|jw>x6+5YecZv zfHsz?qkx`v-ofOK+A3e?0=vW2f;OSd|Ix-z!WQ%bYGsM659>XuLYzoV{gvBcPqeaR zz=+;sh7TDIa5~HZTTCSWHD6`g-79wqD`WlI7LvnH<*S`_f`y4Ga8|ngL%io}l_9h$ z63h1jSN5U(=m2Ug_3KD+;qWmYM-30Vy{3+3v3nRQQ}>_XsOGc!^yyRVhF#Lfp4=xu zV27u+6+y|}IQbDeT&C9Lloa_jz^^?m=!n3;yH$xYIEIdwsk{GKC)x!PtjsOwgfQIS zZXdjeAJ-iJpU~QG}F~jD*-8*u?A~dA+xb@01!Rd z1XjfyjzijY*or&h7#z!rSTQSMqt@Yg+!=Mk zUD;?h2F&ePK^|OzXng!L6!|d#eB3`(wYipqC)!F(l-a>D;Z&R^S|-M`@uDH|sQ}Idvw*YkJ@{VSAK!-uu!mR~dzjf-Iji`SSpX3O zEc%hM15EpiI)Jmkr~@SXi#h;tTM;htp>E~fQP+>JT?1whAU{#mEWaQUZ9b6>2D^9) zKaAwqjv8?V+Al;Z<#sn;3jZ#?8VvDN9sp}1%M$rE0aq30+dxrT@C1}8mO1bw0NQ-p zhO*b)4+F@=t*iN41E zX}oxzFwJZ{2S0}AG8c0*5364XG0_OT5OoF(PGM8Qg#-|@lmKK1ON01_pC-6y?n3*t z5_`2zCnJMv+ceTv;q-Y)L6yGlP8T?e?h2Qq4%~9V4GwDJ8(n|`k!#CQYEwJdti(?N zKH`6{>CO0Q)*#TP0oxZ?x!~2f#g5P7wJmrJevZvxv)F9^qML9l(zf7c+`<~!BQ1Cx zUe9K-M_K837^WQ&1YfJ9NT7Cvy{gvXsjuV%%#I(}$dK^ykNG0r7BuE|yn`|3Wu@&r zhdy<_KBB_Bj^B7Fpf0}fs1?611W5iNcjC8%b?#!1HAAoi;SH=4oLaZDng_H5a1cc5 zZ~Q*~0CjC@Vh(@7Uc6uEy^qar#s}Df|Hh?{1TG!MN7zEPxSdPK@u{E{oyKR_BKA0} z2-Hm6NAUw45BpZ+pY(J5RZyR=@maQnEfqje1-&}@^zj47z3=OL7yr;d+(mpjAn7Y? 
zIdh0o{Z63w8otg}uz$4E`#ruDH207AHhYq-WTgf11LtxsYK+~sKY5c(2 zrj9KMC1I#*E0K^863U)otJvyRA|)~s&Yop2vaJGFqk{k@5FaM`(OTn&=Rg&N4S&%m z;C#1nfT&3%gnvXsv_wbr#6XP1L^xt*YuIyaEqk6dv1Zo7TG_hIs1u1I(ZmY>F(ej# z<6-x%XB*fi_}|R7!0!tJy+S@C*X44$L?r^ZAh%Z7)|Em&qs{&lf)@bV1_gs5abd4> z^Ob^u7UDIb_w+i56ZuMUg+e#qe$8$WV&G9h~blYu||XKEnC z>p1;Tm$SYOI&=U{$N)iNA~mfo89|araueI=+gv1-q=98?!(tonX@!!$C=*Z{UVK_a z(%YJY2THQ=+9ozzEbrf;e4y|7|3h9mWH>lNB$o^!L&^Olk32wzv6t93ww>)@FSA$J zs~gD(l21kg!geBs#KvA@ue0|-HVSrx9c7L?gdpr2zXpjxyO8h-h(N$Pu~^7HiLCUS zw>w!XL|eAOBLvT?-hF4apc;al1y2g1O@NqMF(MQ5RsjVLnmwDJ;B>Vi5-3v}jqSEg z6jQ&{fNdiWk%y7CnUt|Nnu(pg=|>0AlnnAM$FKi9sR9Hb6WGpXGLh{9Usw#Ud~PSH zCbdwDcCmiV#L312jbv7IG1lScMVd&Q%K2^CvdVl(lw zciSr-BVZxHYh^u~$$a)6a}@Y#dz>r@EMqDAfR*|ymXnpJYYSNcWbbA>TgZI!6m;AJ z`Kdr?m&4aM{moaCwSX~b7g+-n^kaM3d#&Vo(gZL2*hj*QV}1F>X+n0l*?YpQ0?S|tHpfWYD7%;h z@HakAP6qgQiXCg`-^b)rU^!XAcDJ$I-{VX2HB|U4KRL@zwAll49 zuulUvfm|Wqbp(2Hja+A^*l9uIgxp|2p@k$<{J^?_LD>HTxgFH`C-O7^!^7uCo90Vv83?3COO`h7l*%IUThgw=Zc7fggl&k!@MH z4pEhRh|>k`t9zW7Teek9OMQ?LUr(BNl*mBt9d)vyW8G?slE&{W!)b`>hL ztg4xIV;9;hdeT0E^ZMryj3&`!UkG-QT>@Ag%N$}TH0iU&L7|X;;tZMvdPphtm7mckCLw&i=)2uS#!=k>k$uMj1Q)Veh=1;w5j@Qb z7s$KRQ8&?9%RAkfiK%7k%*v^rN*_%LVtH2I^t67N>6uAsNtu-y{p=ZhdRlsBdPYi8 zW(u6uOh`>m&rGYVNKGlWyK4HSl{gB7(Z+~>{d%YPgRAV&cVApu6iGGX;?e;XGuCJs_K2|Gi96e9O}hku$KmkBcTUlinHq>HSU+-ll193dp|>0 z(bZlo^WtzXmJ7YrV)w$VzaXNCuDf%X^>hQ>=*1CUtn^|P3^U;Sp!J-jFCsbJN?#Hp z_ew}3jnoi(x&Qnd6DkP_z31M)ckaD4OsY~xTB164>6M(8-gb^D3zsV*lyG#_##HfS z3tY@W9!Oc2*L!$3oEa(@9b>DnsNi|?$r^6QLH)F=6W_4ypri zwl%;ROHFZ}Fj%&;y7FY#Zryv_?L4k+!j|qm1BY>^dMAV@_DKR7Mon~hcvzW_7MpOW znw^Byl!e1^DXD%UTT*($d*6P+`J=u%}_24KLPvv0(#kL@o4PDjX=Z0EVz$UTpMY4st!c z1SE<3?^|9B!2U-gAPL}JNSFtsUl1kGXT%A|mUcMZi-BJcipiJT?nlY+&4L>ynd+{j%7XNcJg{8O5PqkowF*1AG@Ogv%AJ zWDD6wc92)dn`9Sxo4iXdlHb4wBFPHL(~>4hv*abody)?%dnEfL2P6k2A4!f# zj!C|eoR?gXT#{UoT$Nmt{7dq^NCkat23g?trqHsoB$w;>lp zE{FUxK{h?){lR{Z&Yv>!HheE#y z{XQ%_tV>w8upVK(!V`$*_NfJsq|x?AfsA!d?#B7j`=AvXn|g zq+wE-R4$E>4v^k2EtZawj**s1$4kqkc4>vQPTDB-O6N%DO6N;gOIxM8r3a*!q_?EE 
zr9Vr5kzpB?sbyN3US^cV$P#3IWXZBrS%29Bvf;9PS%IuVRwr}GJhG{>M%e<{O4(Xj zldMIyPIf_dJ)DM1!>!@n!VAMkh0hLuHvGBp=fj)BTf<)r-xj_j{FU&x!!Lz@CpXBg z@*H`d+$n!VK1=SEKPI0iUm)Kue@Fhle7Ag`{DAzR{HXj3`FZ&T`6c-k`3?C^`7QZv z`Ok`Y#XSm}Vw|E(VOLZr>J+mSUd0^6T*Z9FLdD~XC5knQwTdQ1i(;K(gJP3ni{eGa zPQ^aOH;Nx4)DejhSrH`>6%o}DwGnj@t_V-W%!pYL-iSF7b0g+Qv_u??_$1;FrBrE9 zMk%`}Q=}}HqPFK!Q&Q~r}KCWD%T&-NAT&rwS zZdGnm?ohs>d|$a+xmUSgc}jUk`HAu~t4R~4uV zRYj`tstVOa)fCly)l${ds@1ABsNIsfb%r`iJy@Nq9;(h$4^xj&PgXBbzo6c$KB7LVKCb>s{f+v(`l9-> z`k(6GHK7{0CPJgqXf#$$yrzq$o2IuWQIn)e(G1ksG~+b9#-*92Y0xxkm}a)-G0i;9 zGR-PYi)M%BP0c>dhnhp0BbsBH6Pk;fA2hc#KWcu`Qf;VKs+DUav?^`9wzoD}o2u=r zP1ojV^R&aXBeaFuB5jFww6qqr)*jQI z(4NwMsXeFtR(nx!j#FOnOVIVvCF@dk_v-R= z1-e3Ak*-8HURS2G>ne1-Zmw>X?rq&c-Iuzrb>HaD>n`Xn>8|Lm>aOYjrN?@z57CF| zWqP?jLa)+m^g6vkpQIn4x9O|&Q}s*rE&6x$r}S6#xAZ^if71V9P#AOuvmwq9Z|Gu3 zH1shf8?p@d82THA7={}14Aq7P!ve!1!(ziy!!pAP!%D+bhV_PxhRuc-3|kG`3_A?3 z7+y2%HGFKiZqyi)jKhpGjLVH{jm^eZ;|Aj<;}+xF#)HPg#-qj)##6>K#?OtHjaQA= zj5mxojkiq5L`@+ksVUr~FqutJCaWpd6mRNcN-^~{rJJ%$_nPiA4KwAN3QRUrv1ycP zw23!OG)*$qn4G35rbkV4Omj{1O^Z!SP0LIxOiiY(rdLg`o8C0-GJRm$W7=mrU^-|z zZ8~SVWV&LyYPw;%X}V>)%@Hn)lW}rR&1pG3XXN6!L@tR-;nKK%Tn0Co%jJf0dE78= z1UHf!&sA`fx#`?YZWia|=5Y(SMciU;8TSmgp4-H2;kI(yxE4# zMc#ns~A+bwTdPFuco)bMUdO`G#=(AS2Rd2OedszEf`&$QC2U&Bh!>sw%2dzcc5^I^&ZmqI< ztc}*0)>+os)`ix^)+ekhtShb0S(~gcS>Lkmw(hm=w;r?}x1O?oZ2ipoh4q5X`(qEr9*#X4dp!1J?9Xvg zaYN$j;u_+-af{-X#61=FTwF`szPJN%C*nSj`zr2i+_|``ao6K+#NCXi@zQvu=ywq! 
N{?o65{4f-+{(saei_ri8 literal 0 HcmV?d00001 diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index 59e059bccb6cb2981fd990ea9bd6f32d29f12f3c..ed553b16cf088ab26b69e4031e98d5c6428917c7 100644 GIT binary patch literal 95062 zcmdSC2Y3@l*EYN}D`}gwYIZ0gV0ty~jX*%QW$fS<%Ql#%2wQ-ITU-bPNOn?4Z>0Co zLV8a~NFcrUg!JBf@BN>%tCd%lEmr-HG=s7`Hmu-qG0FvS3{6vLk}^ogH=K0`2uH8|lZ@<2qa0TnyTH)y+=x z+YHC>jGIYd5}Aq2BxW)*g_+7sW0INaObT-_Gn1Lc9Ky_I<}*u}rAz~}l4)d`nHHv< z>0pjx)-dat4Gdw9V>U7;F{d%7GiNjBFy}KDGnX@)neEKA%#F;g%$>|V%x>m!=4s{` z<^|?O<`w2u=40j)=2PY~=5yu?=1b&2PUJ#vlzNQ)=qL0S`WyX& z{>2u|VIEtt6T7e*AB0EZQ8)>Y#^Z4o&c-=77w6#__+Yk{UC1tF18f7kf<1z5X4}{f zb~U?}UC$oN9><=@p3I)cp2?oWp2uFuUcz3^Ud3)@uV$}hZ(wg`Z)5Ld?`H31_plGL zkFbxkPqELkFR(AOud;8jZ?o^QAF`jYpR-@F-?BfjKeNBFf3p9W7!zxWXPRl6ZOS)!O~s~prgGC^rfSoC(?Zh{)8VFi z({fXzsmau8YBzP6)|l3rjxiC_38s@wr<%?%oozbTbb;w&(`BYBO@+8shnR<(4>Bj2$C@XYC!434Q_Sh+EOV~;VDlknkGar1*LqnQP5;=EY{ed6_wAUTI!sZZRKe?ld1|KH9v&teB5CZ#17`KHYqld6W5k^F`)M z%~zN=o41*Fn6ES6XuidKyZJ8jJ?8t&_nRLwKWcu${IvNw^NZ%a=GV+`n%^GTt)DGSxEOl4i-Y z;#AyF7A94L$wMhoMFiNX{iSx6N! 
zglr*Cm?_K_@&&I@EX)(ig~NnuVZN|XSRx!Q)CL;=dA-OcE!Fl1*|*Zs`DNm^4BfC5@5B zOOvFj(sU_J%9L`X8PY6ij#MBONhMOLR3TMKHPQlUk+f6_NDa~o=?JM=YLhyo)zVsN zy>zT}oU~DTNP0|qTzXo1MtVVdQF=vsReDo;OL|XwU;0@3MEXMdQuLbzHOrc3onbx1 zI@?-cEwmO}ORQzqa_d6tBI{!766;c{-+H(;U~RUxSX-@a)+4R$)(&f@Rk0H5an|Fl zCs#f$?tb45YTOY7KXnn}~u=Nq^3)UB{FInHU zzGr>k`nB~N>$lcFt$$hnw*F%iY@$uFS#3jY!)(KC6KoT0lWdc1S+;Cjj;+8}X!F{N zY*jX&t=e|DEnr(_tGBh<+H6PK+HJ?!j?Q+|-w(D%y+jiRS zw%udfWqZu_xa|qsUfV0SS8cD^Ubnqrd(-x*?K9iwwl8eI*nYMBX8T=cWRq-`1zD6G zvQti!50b~o6XdCKnw&0Y%QNLe*zEHkMzF6KWZ}qpJU%-zr=p2eY^c?`wsi<_B-r%+IQRc*dMdMYJbiCy8R9N zoA$TtAK5>)e{KK9{*(O=`+p9z!{Xo^Hiz4BkYl_f*)hxEbu4i#b@&~JI|7bnj(SIf zBj{M}Sm9`P9O*d9akS$E$BB-Oj*}ecI5s)XbzJ1Q)UnmE!*R3Y7RT+5yB)h74>}%k zJnneX@tosb$9scXC-I?ypaArDloq5g~&PwND&MK$RS?#QG);i}q7dY#ji=BSw zGH1}a(%Itdbgp-9a315_=sd}Jvh!@`InGVaOPrTFFLPe*yxn<+^G@eo&YjM?o%c9* zIq!Ae=X}KZoO7@973aIo4;>dcKa!K2pEYC+JXT!k*LtH?FiRqQHp9qOue&37$u)w%qxm98dNo2%2c#qgg2uA5!Ax$buDc0KBP%=NhIdDjcB z7hSKpUU$9i`q1@}>vPxlt{+@Ky8d+i<@(#5;7)WO;2z>0>K^7E?mo~x!hMi?q&w4{ z<<55JxO3fk?iucb-80>@+y(AJ_o41;ca6K&J>R{^-Ry30x4PThN4neH9qvwdmz%hc zb06Ffw6G!q|j~3DXi%6S5NyPAEv2mr$3m zFkw-`;)Ep$OA}fWS`*q5j!bAz=t$^HI67fn0!cV7;nalF5>8LplyFhP6$#rCwkKSl zaC5>f2~Q+EnebG?(+STcJe%-b!t)6)B)pjLQo`PZ*Aw1KcqifggpU%wNcb}0+k}4; z{!3&MQ6f%c6HSS7qCL@(=uAvZJRosg;`qb~i4zkiB_=0MPfSnDP0UN2lUSJOO)N<~ zG;v;HMWQdUI;)X;e@%Y4ziKi!S zO1vcT(!{Na+Y)a`yd&|>#JduA){JZEYHE6!5g3t?7%O9ET=UC)(^m!8b}XcShNAx3 zarI4sj*cyijgb|suv-`h<5Wz9{Y7cVt)?^t?j-}`e{c|YkN&qX~$w!Ea%b=x#`5vX}|mm^3Dx$zU=SS+Oe)#kq~i zW^$NZCQorG83g}B*c+g(nG5P$8-mjdgUh;BtO&NxYwVmJ2|ayople02EZE%IzE&^J z@5s^A)c>aN}`hBcg$Fmyav3P+!kzKp4_auY(`2-yV{nsn*v=e^(%eB4%iD<1((mtSzezS z3@poO2;`-{#Q2%RnEKh{Q)~R@-27#BC9>@&l<}F_yVV|PnjUEJEeOh`TP@fkJM&O;O;*A{y9&L}j zhU{QYWPMLn@x~7hZ+S2yJtGid-&rcjTI=PqBCmK(^+qIlZsv57gCnQ+xr%4BUs zT)+{WA<`N3Vx&d3jTG51D(u!nhwnXr|)pLu|JP{~xX zmE5h&!^|Vhqsk2BSb`pioNJy^-J0ch6fSRU3et%rrKW?f87YB==Ejzk(xOs2_0T!A z`)`GG#)z9BEm>VV^o2=V&??)5s~cOpIzp?J_F(~pB!0)V81jDYvw}cdU|C~RV`pQqL(}m) 
zCdCk+-l48trF2Q3-a&JrovRyzN5w|=1)Jz{8Env3u+r9gHA0AEh;DdwpnXL!wD5=Q z9;O+R-__XE;0t!rP-8{NNZol~G5;_VZgx1CubFR{Z<+6y@0lN%ADN$+pP65nUzy*S z-GDOO69LzQ_-sZyqtD-}wma+p#DF#j_DAqF9Y5sOU7 zj4X&lJQ9$IBxFT4Br86pR++CXQ0kP0$|7a4vP4;`_?5$zfU-=fR~nQcG|<7S;nO4h zsK*ZxA8IZy=?pecFAKDFOmD34(RpSN+O?JCrH!35uJ8;(xiHWfp#7$kHk%sBL((fo9M5&%O+84F%HMhOBYhOHA+}aHD&EP!f3oH+`H_i*L ztzQ{vq_OavLDZ$Br8C%0E9B{FXl(U6vIhY%-&5ds%o>E^TpExS(6~F;KDdG_%4orZ z={EBNO?103xDm6j?xw!3qyhO#8t?iY`GfFUexQEU3fk>j8p>!~Oy{?mg8(TG2AVoo z(w-NZ)6f_+j+w9-ja8~Qqwz|OGI>!+M`2?}eN!vlLGilO6f`YTO8+7irN{mv8|B9S zVkSBy_80l6F!mS4=+M|-l%vYnUsR*o*k3F}i(`LrI9e9_i{)r#>@S*7OYAS&QD^Kg z)}W(fe{l>_Vt;W0+8FzbQ_<=0#c9fl&FBneC9DF@!rE#dtd+HNt%Nzg<0VKTpmWf9 zThJzSuF|NqD(!l^U4SlTTwBnE=py9^Wz`mR3A$8iQks>PB^0Klfey~b<&7{t9rWLT zSQc2((%9Jro5|7Lt8+ww-D+d!1m`gDe_G&q5!$i^0!+Q|ZD>bC;n$#Rl{V!_rKNK0 zoJ~)3E25jw%}c}as;9j@uy!l`>=ve;?i|&BYE`@q-LVCJ_M*GcPINb|_C079 zx)J?Cpa;=I${JuR5@B%r%bNZ7UNni5TU6Ebuec{M@UP;!6}5KszcE;=vlwx zAWb6_g7u7lwxZ|I^GtnTVst}58@Q*kR?02t#Vz3MUX0VLxfi`j!%FlDdKJBfUPo^z zib9m*l;f2XwxhSu+vpwiE_zQnQQ4@RrJPOB{ey`s_c2iHCGv|EvGx*I#tMi-0d9=o z(eLOputcAuFJU-;p`5fCeWje-+i?C4hV%F62jvvyjPBw5Gx`li)Gz2)7EfMq6hI1<$O9KFW8EQ;o{LpCfJH}|ED@KEccNmc)v+t^pfvS&HE>Sk<_DsOZ%!I4)L_7&k##8WAJWaV& zxlFlSxk9;8x$0^>9jD+_^?w=4W`gb|Xt)0Vd!UnyEog0OqLF;CAxSkfiJDLIWu0r2 z3TQ%gMQc0VXHmQKz8oo}em5>|HE9sg))ZI^u?8*3Af}g58))jHNd$vbXhw?RGY!S) zQyrz8#)1YAt&_||=iTf?L-8zp2%e4SU=Pm61-KAyUla1(CEEw~l8 z;UjT7?!cY63$MmU;Wc+uGB3_cbsnBe2^@%RLMBHoBk!YAWX@TvGTd^$b@ zpNY@HXXA75CVVbF51)@Oz!&0+@WuENd?~&RUyiT9SK_PiX1oP&#oO?9d^O&Iuff;i z>+tpX27DvF3Ezxw!MEbu@a^~xd?&sO@5FcGd+;uNFTM}&#(VJn_yPPNeh5E|AHk2} z$MEC$3H&5}3O|jX!O!C7@bmZu{33n{zl`_dSMaO&HT*h$1HXyi!f)eu@Vodu{678w ze~3TAALCE(r}#7cIsO8FiNC^M<8Sb{_&fYP{sI4pf5JcGU+}N^H~c&P1OJKt!hhp` z@W1#!mSGW#S(Y`iX4b-TEYAw8$V#l0wXrg5XC17Qb+K+XflXu&V27|n*WGAtc*(vN)b{d<^PG?itR5p!GXEWGLHjB+>bJ$!q zkDb9D%+6$Iv4^m;**UC-&1VbPLe|R`v2)pCwuC*DoyV54Wo$WH!B(<|u~n>(tyZ=u z+m!9f)yfX#8s%E$I^}xh2IWTOCgo=37UfpuHsyBZ4&_eeE@h{3w{nlNOSxCMPuZ>P 
zQSMhBP##nsQXWltNG{L1_e~6O=(vCP7&QWfPP`P%c4v z1kE7mV1i~6G>f1^2%1gM9D+Oqw2s)M^g&;!EaRePt&T}#k)1YJ+i4Fugt&`kv0OwcU^-Ad4H1l>;19R%G;&|L)WBNZ99q+bm8x&0A1_glMYMnGB^(a8iBY&!1;rFiGol_uhV&Gn>^L1=80 zP)%iJMOC%8&|g{VsV=IhDhs1Fg2I746>2@DC53*EvG^tsoQOOf9l=hhvd`zO_E&le z=K0G!bGtl5(n1QCL$@?a!|%FD&)y zn(d*Ago^W)d-6-Y{)%#cp_i6d=&$nnDr%|u>;kae}dS6|cv0;vhBUX`rs8Ni7 z*!UP|#hxl?zAA4)MOmfKUsw`0-~><^8B(cegM=S#pZuDV(n5c&r>ewLULCIGNg#V* zPuaNz1^!xZm5=sdy;i4!U`kIxwfzdI9R(HT)!y=I^-m9Vr{6cfq$u18&xmPJs0s5+ zstYO#!|i-F$WH88&|Gi1x5`uPh0ak_Qd&|zmloBnbS@~(>8TWUPDyz|X-%QmU+neJ zhVuD~sw&E8_bMr+9aigX5e~cn(5Cl9^Hf%rmK1nkSo^&TXl=ngeotiy?S!Rux}Pov z-SnQiz5)*wr9DjRqkdWiFEyj0igrEKv~Yu52KwU>oFFIp8tL}GWySjSP*uOLWG*eZ zyt<^i)LTaX469rTD%0Ys=mW!FRRe`qda8?cW4D0jxE`7n)y3W_)w^1eMWu#Ou^lwV z_Ryd{tStd=K(lBK6mHIIKw$_f47N43u7xWRbRbk$l~hKcUJpVeB7{7p)!wRdXz{sK z6*ZN*#W(dPSuvk>5?%6EkUTIBW|^Vy+yR1<;s{oHtIA4zP&e9K&}cgRPS8q<&?@v+ zmR8h3)A&m(w2q=n?*i!w@uVv&eAU!#bnfy|59&I*L1%b`jy_LCxa5KOauE)B801FA z!~=Kup+)qv9|PHh2-!l~Y=(8^Nst&8A)#Am>|xJ<*!T#sGEYH;ZvmaQ!``a)RL%8< zCyVDnB{@Px@4`MhLxU)ta09TZVQK}BVq zzp~ilqvH&^r!M+Bh>nX8RlnCE)hSqAu5^vJKx1eO4PSL#_iFYoNR5q<>b+_ofI?(< z_IWEk>YNy1@y8%IF#@@yvbdt$OV6F+`uwvP=G6A4@FgfjcIf#Yx)k=N^9|^XK{OlQ zKu>{TR--=Gr{t=da(@jC8w$Nex+lK}`EhaNL$YeG4G+qnK;u9(FWBA^Y)Yd0->#=vc#?>xFj6qfjC_*mzIfNyRo9SY!Ly)jHOC{62D zqN}mbTUG0&y-+U*;xC^cqBlC(heJ@4vqv>(+@e)ppJ5D5X-WX$(cQ&FOZxNau-3gg zqzATlK`0QW;aPq~sW!{&!oxw>-HlBRDX;KXmW0s`0)Yd%1)xHHx?WY6l!s-KKxRU> z3^>m}zofdD&NQ^@o~lAWZP+Rrn05PZEa)V5>(DuqMw$NVy2>!@1Q1E+7ExnRb&b-k zp9~Ts3=-k_Hs^&%Z zcn@fdiK(Hra9yN)Vc+twmWHb}7vv3HomPuZn;_!%d0}s+26ticLqXi&vF`Ok?dpCn z-J|L)PzJJw9-y~CM9Wt8DBq{;BI&A3{jZKPx|gdg(J_4>KCIgvKHBV|UTW-owIF7g zha$lu`biy#8oHp``Dl}s6xI0+A)RS)4?}Cp!%Y`!nEQJeI^R>JhO#{dz%mds_>;Qd z(6=iiEDeI}cvMM`j{_^zIUcqTb)guXZgSPd&lpjfR)WfOqe?-Q7dm%Ic~J%3deHqe zcn~%nR6oC@+*76RI!voTceqiPuA_7gqEoZF_~}j30&+=4Iqw2*K~1%~mkw`gbm=2O z`k*+{w1;%>>rI^?I@KuJZ8h~hO!3+VlqL!2>8P7P zdV;ae(3iEXpSHsL{KZ8y*z{C;be;1-XQEL@pSkt;uc)Gw#y-Z~sp%q68e>%IHeXja 
zHsqxsA2H@=s~Iv$etKn_Zl8-vy65;SKxLG%2wg=h$`~}7HiPicSi;75WE)70i;z;s zE?DZPkwJ8y*Z~?54y~-or>TZ;Y#hTk*MZI?V=1r+pyNs%EqXcLa+*0X#Bn!*R>Xjw zUq$C7>i4Lze+!6CM7|)rbWW-abgqOPU+H`Z_d<(J4GrfaSDf2HX?zc*?g1KWk-7_1 z#`I9pI)z?N#2R=H$dBzIudlAL3b+pxM)gpr@-B>tem@A$>>=!3pr&Xb#aK{7lVUW4 zH5BCc)#TIUo4U3cBDjZQA%&7X14DWgkVf>diTXa&J?LQ8N7)k~*|W{mR_eu}PlJ*h zRjcnjXs)Rm61Q|87oM`71A%md0IW)WH7i2XJrG;dsEV#M9*6^|Zjq;?w1&p|x>YZN z{y0>ty`k6gS)m;X9c*eW*kk723mPLMG(r;US{Sh)yaux4A zIn&dUl9N0Q4fM#NBgq$_SA`mn4z|;cbt&DC`RN%1Z3Q}7X*RyPWFZ}^kff!_S^BpM z>d&Imiut-JUjX*(C~P$gIXBo6Y*(-M>d1xO{F=FHQ^3*&0ZFO1)*JTk*8>7ukef|+ zrVtrYL^_Gl%s*t?bj!X2u$ld}ER<4K^Q8q)NzdHyCgVpy%IX^llHV|y(me`2^zax` za;9GZVov`c!eNZ+EgH!{^Q&7-y=uP$TK)jh)FGxfj7zYGU`g5rCB5KYMD* zt19TBOld_$W!Sf)K))YjT=Ok7a4i4oEj0$vVvNbIj?UKRBzQ_mU&gd26*UD`(D*8J z^kUp%n8(NQOs@!)r8P7)SL2I7ngmEOdR}ZKeG^;|fiX39eM$o@E4l(JXtEU=sDPeL z`b&H?p9^PJa5_jo&^S>@J-Mo?N1%6loYv|-3 z&avtGnV>(dkKzmH#6yer)zS4*Ke{&OfM#+ZnkD6Gd{1M2L$1y|15{`9p{i!Q)V!cR zIcs5UcTj1b1vt?IHL~&{-_aF}PNS9K?2&m6C`T_hF_r1uONS?2vMMV|;5bXoPuBSh z=n-akpcM26k4C(7$kRbWgQ`%TtuPe0`hB!lbZ<_~MSz#nhm*Qt0Fee>ky~DK34rv~ zyfHOP`%`?#x9ZU`O~Mq_lt)A><_Z9b9_}%HT1-$`jufyrf(4+!wH3?Znj}2%mM>TB+ z07fs@hRzxRs5Oo{)vACre@=rVeUD&X4QPk-+dK`;XdsPO^{TD~tmtsHcgu`e^Sz$1 z-_`?I^hluh>)YB}>)}a0+!G0DhjuS&1ocB<<7mVwaDu>a-t+rsHrwIS$~XTVsBp9WJsYRkqTneDn&m*02y^P>^DJm+H)D#MZjFl`X_R?!{^hy+57W2`_H{t@1`Ah(g-r4s9*pp8)inMj09VlvH z^on#2V3!WSXsu7HZBtlLpdOQHI|tZ_&<#}B9p?dZ+_sM44g<2ctdgEy=T{Wg>7(Vs zf!YauRJF2*Ui(ucOiuwEFVZ9j^{~OtO8_}~==8(8G(af~=Sa+#1CDoap3J8wu3%YY zqRV_0K$Z*+Qm0AiXVIfHo&od8=#a^lIFN+zF6#2V=9I?4|dO=nPOyPkF+#z}*9Nbq_mrXnHC` z&r=P~372p$lrWGPEKUh@HLI=we}s|u0P;YFUmWCe?|jIG=}q?_z(!|h;!ik+o=vYp zYu7w$^fa;gk$tScx`m??qqY~Pnfh@39|z=t%;4erhsGVfic}S@_EUg1$U*0$=?Xfa zD{ANkaD8k?59?>4gunnhu(qnx;5&42ik`yh9)Zpf4mzUO`xl_NC4(zYuY|rsR1YBF zwjOLtqKkSNiYgmmQR?akbGwri|3{Hqtr>;a4FpU z-aZ(q2Dp0T=|+ACfP)$@HRUQGU0A}$B<4>5Z6Hgk!G|gu-2N=1YeqP=_W8crM_+l= zgvaw&`|3XRtpRPiVL-jFgnRUFfY)n}jigG zP~DwcwOID806mNe0oSRyV;U%x=pMHS19MHdpV5smO(bdS7tJcGFJP)G3M%yDR*QAe 
zmK*ksVK1pWQ*D_FjSPz&3g~qOiST{c@=zLr-Ve}@cOx3q1)#mgx3NL#n=pEmPs5Hv zdI>`V*6mCL$X*wv2uQ;UqShW>H~8s?;rPfh6aW|WwkR0@lL9RbNi_YVCjiyKq5{(^ z2->08Sa{eBGT+vy6iR3f0$!@X$Wc)ViYk{46;s{LW2% zOh}UaOK&-q(16l-pUW~45Q_(kSn8q4|KgG|IAk*1|FTR0)ENUr)eioQ zCvBEwz&WIE96e)5-=O(v1_W*=g`dmlBO?{CX7(G4PBvBAJU|ypdhfKPG(6d40Meja z!FW=t+m;Q8*?rp<(XMpkOke+m=hi&H$Qd983{ZON5eAtFAn9-n|jK@EMKLT^#fJ3rwndZC#7!4*S0XElBF=r8ZS zlW!>oJkQ|pVs!a=05@}Ra1p6mOF1CT8XQuOzI7Nt9Wpqmo?WaOu+l1gp3uQR?OOT3 zeAqpEXWntWWZ^1e(^;`(lu{ zhQCrSXDva$cKv#Ijis%RkgcWxlZQTKucLR!^bWfOkSh9t6ca_gtPgLuL$GSE4Zk(E z91h^K_6c0KvmT%dqoLGGZPW=R9UWb89-*13*OG1zC^7|~~Lom8lhCT_W5G_rBT(+;sTB@F|oyPg9 z6`*qm3?0g+>Q!$Cl*|F6%q`Hfq?Rr~h|w5Yv(la&4$zXrfmyhb)!@uiO>cSV=S7w^ zfai_2sQ-B7zJelkF48Nx4j^NUl>S3%vA@18GG;I>#{l@;eX*BrBp`}5oR(V%fW{az z{kK@X``CTE%W?uB7VnFhp#%!OW*9z?vz!E|`_xzvBda-Qy|GT+XG25LcR>X`Sk_U` z0Mr-*G3-!Zu)eFElIBOvL-fHAsJ33biZ5xrTF$+DfD4@<$U@it2^Gg7z(J| zZv~XigX`P+b$iQYfWB|dYIIT+y_chV=1PF>ZPMtwS&h&%=q#wzk6JBT0K9y^d8L-V zaH97VDD9N%CH@aO)K6P^KNex(fC$=CfUNMzUH?m@*^cHM3g$z@&k~E67M-B1sIjvGW&H`)@*NJawi*%YU4O zG8J+Rhd9i!oQX3N^d>=X5%e}e?-2AZLGN9SQ#l^k3ONa9s62)5>uiM|5~LCq_QeqR z|BR}zf6R$S-}XjW$R*O*hC6_u55ltzHyoz6K4u%$-P{P3?Qj@(5I2$=#U*j0xiQ>W zZX7qBo4`%vCUKLwDcn?U8kfvX=Tf*-E{#j)GPq1Gi_7M6xLhueo53B-&E#frhj6pG zIh=>f=L)z&&dU{XbGc%!ggca*$CYwrTsc?4RdR=MRh*Bj=4!ZFZa%kwtK$}Oi@3$y z5^gEy=MLur+%m47Yv6+1a&85;l56CS;8t->Tr=0gwQ_CTkz701!F6(7+-mM9ZVk7V zJDOX^t>-py$8g7T3P-r(xZ}AKxD&aJ+)3QY+$r3t+-cnD+!@@N+*#b&+&SDP?p*FX z?tJb7?n3S&?qco|?o#eD?sD!5?n>?|ZZo%q+sbX@wsTi=JGg7OYq{&V>$w}a8@Zdf zo4H%KTe;h~+qpZqJGr~Ko!s5rJ=`wtUhY0_H@AnopL>9Nkb8)Gn0thKlzWVOoO^QJ!2QVm#Qn_u!u`tq#{JIy!Trho#r@6w z!~M(s$1^3jyC$!8Jt5kYi% z`h=iQ3Hpqn&k6d1pf3sfilDCv`i7uy3Hpwp?+N;WpdShPiJ+eeq7%rk1pP+P?*#op z(4PeTMGzhD{}A*qLH`lV5R3?>w3jTwCW6fbTL|U|<_Q)E773OJwi0Y3SSHv`u!CSH z!7hT`1Sb%jNbms!44Z)zIFsNkg0l(EAvl-dJc4Hsd@#W?37$pp zAq3ARcn-lHg7XP3Ah?iVFTq6w&n38+;1YrlC3qger39A|TuyKW!IcCbMsO9uK7y+W zt|7RV;Q0hEAh?d;g#<4mcrn3C2wqCCpWwp@4iLPI;Cg}^2o4gwoZuA%uOzsU;3EiL 
zMQ{_r%>=g)+)8j8!ABC@PH+doodkCgyqe&n2wp?*T7r)zcpbs(3En{PF$5n=utG4Q zoTCIEPw)u@pGfdVf=?p&WP(p2_*8;VBlvWJ&mj0rg3luOY=X}rcoV_r5_}%P=M#JZ z!50#I5y2M|d_HP3Eo5S{RBTi z@PhZ2B={$Se1oJyv?f#+`d^9 z)z?qFnyBrS8j)9Xv3*b@@@mp`0Mv-Qnm+9ZH6pL(MF&WY$gA1U!B8XeYPOSBq`%aN zyqeQY3oFn>Tu%~3{U#RulV$+v6*vPmBCn<~2SJUA(A34G zM&#AJPT$msyqckjEf?VsHQmxbYD8Ymp7chI$Zv;DNT1Y*{52ppAZkSZdQce@H6njg zv{5mr5&2s|tY6fK{2d@V5NbqzCusCVjmYl;sR2+U^1DGHvO9NEUBPjyK7T#{f)fWu zjmSS7!<^oz5&6eJA+kgFMUBWm2|E3!M&zFX`GHU)^3Q`tKd2G;mq4&T)QJ2my+InN z5&73auz%Eu{97PvAR>!JjmWZ(8j=4Gbb6&m6flVNMvW+#L83p@hyo9ihW@A>&_^=E2@(hy zc~JM>3X3?6o!CoPIsT6 z9caMRh=Q7C8wfR`a1a#Re`-V_sc(5BEtD`8lNwPt6yzdE z@Oq&}6v{w4f=SIFO?yZ;C6rJJqJyAD6nvoAZ)!xLHp-;#SLTuL4uv`pjqph~;h%vR zMpz7T5q|HM)8;r@)$Uiq5hKD6x&xp_6qfZ;SVS>~$H<|TP6+l=nARGRJQ2c5P>Aq= z_F}yUHKMQzcA=?EsjKB*CfBSB|?)QCbSDD|HjQ8)_ZBgPzUwO*+a zg`+{GpVWxL1`zIx8c|R{Y9Q2z!ttOH;m|&*5rvJQGf-+o;S|t{7|`*k5rxx1v?tA8 zAJmA#S)eo^YD8fZsPvy2Q8*vu`^j`DTm-`VgXvJXG!_!Qvjo4mF(9Tx;R-rbFRrK-wp!L*Y4qh<@rAWehy_2}gQi_L&|`hr)}17X9+C zceMCShr(XKizWt%iKmlkMKB!-uMMDneKH*iZw{b-aG+A9(rUmfw3tkX!aIQ1k4_5S z4|RmNOozhz02a+C73HiROozfpptlc9hr*`-5QA?He%i=LDhjoT!*nQo0T|J5>7$I- z?r$64mw>1FBa3G=m5<@F&&C- zP>m+ri8NYGVD-v$C>{Vf(E~LSCp7i-WI7awfpYY66H~b#Oo!r#{@|&P=K95SD2@WW zePlWm#{fuQ%^Pd)D~|6^@%jlg{74=U*7aaI6ej^l^l*>q7ch?m=47>fQp`rbrb0O5d$MZZ%l_`CLqNhtTB)xm=47p(2qZEbbZ5uACu`&oB=rT znH~2C9HmP4shhixJMlaX#0JT*+9@C*%0ciV&=}@c!tmtsHcgsS}sXoFoJR}!u z04#bW^k66qX~S_G%QU0ep08gs3Tv=IW)pTAe8L$Mi< zqfE)-dlcDl4nGVI{0C6Cl-iW9Z*TiNz6i)=~>ZsK^ zev=!p`^9u9o(v`QO6<|2<-*;yn?0;bC5?^EbSRz%1q`IOg$szsbSRz)ki9b2#Irff z;}gYnD4sJ=SNE_}hmOm1D4qu;3}gn2Qv%I=L^2(U7XtD?hF=`yNTx&a5`c})&h)GI zMKc|Wm+xc!bzZ+HrbF>6Kpw~p9X^9Q6Hb_P`nL5qr>?4T|C@#Vly3zcLHFqyl_3{Cc}&!hv`tfdmoHc zsRZ@yeJGhH-V1<(8ZR-J4#hoyHjpLN;6v5Co=k`0gZpZqk?Bx;WMAFagXvIw8~~&D zwuVk)$SbI!U9U`s;!}NAHz5%ekZjs`__Re%Dz74>=##`}#)m?Cy1}+}c zq4?gu6`+SPG{^u7#b~BO@xy_+Cfv{ZVmcH*8MNj7V>%Q+hXQ(CK_Yw~wmg)E&tfb4ZqihwlqX|<0;F&&CO0O0?c=}`O`O8K8M9g4s0pCWo; 
zIu!qeBKkW(2f=hG{sRRJnqw~p)1kxw;{IkjlvqIBccw$h0yz7J=}-~?Ykx2uN>)G` zbSoH7qC-KIWCz53V>*;vfU%EEhf*Sd44&yw8VdS@V>*-$1l|8R)1fpHirIfmhtg=k z+iy&V(l~(IA54eRL_pdvOo!4GfZ9Jyhf*?N4VvjtN(J?OU^ ziZfALWof3;$d}kV)1j0H;QNK?P?`x)F<9FM&2%Wu2E-WTdVL_qV>*=b0q=jobSQZN zIR+C~AC~suP>jWNC=~~nH?{;D)~~Ou zDD?R?HW8XU(&K&tl0#xh!ah7iJSQD5oe;r$D4is2VVHv_=!kP$c zAuLZ=k+4?6%7k?g)@>noCu}NV z(+Qgi=4_X)mUc+jNY_f&N!Lp^NH{*08m#`NQ_7cKgPS|aPy+)b*GP8h@m^x-5vq*YWVU$FI|MNSJS(Ch` zp58J}ZfNWXP*R-cWld{mq@;9o2I^P&m$wI+gR`3gT`l!1eelDXtu0l-<+E~@*QcfD zX65DPrl(F&C5u3?IMA_jR$69uPEKZKdR9()R#t9$PHs+Wnn6ZaPYW)u&kAOx8jX5N zHHt-aQ5Ym!VM=Xd@Th{;md^IpCi+w1)0>U(&uj23UBzO-mll=MA5xvy*qPG(x3;cj zO^qEZ=_kI<_F$m7s0TpcN>_{mLt`0V}wguZ$Dw_hG%Uj!< zJ5m}eSGKkUD}2-2LLV8Ve1-E20v!#jQhaMWI)lwArH#wl1MO>5)M?|W*7j8$DMhVa zEe&eX)74oAK20eO1{#9x9Vz9$s;-vO*4DPj!ST0Ng+vQ#W1qa!8I(A?NU4e40b z+1l2lhlEGBq4&@WbF1mh5rJ++8v2m`!5o>BmztK9my?&4nVyrDk(Qm2l^0`9n4); zA5m$CX4F9g7+$T*jtJJ%E~`02o4rd~mbXSWaiRu1iXO@J)0-)!{D6hK6_ zz&NcY3`)6c(muj$C`)wtp?iGNh-dbU;rj+v5 zO6y_PDyxsMIfTt6Y#w1}Y_rx_YteWs-I*Lr*qI95q|5@o@1d3Ljm>m*FKn&vYNl~R zbz^goZW5Z?=zH%TU)45tG%jlj7OV`k2kJY6?N#*m;Dbww8ykZ3UXPXsq0hK`DEWd- zbZQSaL~2oX^4aU(^L={9f_16WlCrYXHq^pjQgwoG>oV&Ks7SrF!5XwKC+uv(&LOOa zu=(4pE3J*RBC7~nK-fa4i1+`rBHR&aj?IYj8GU z%L!WnHK{Tly}N}kuwEJ$^fK$^6!c2MRuR?* zpw$}aeOK(x{yhXr`%K-s+`NowNg3H;OSfCE1JIcL|9a~Uny+gKJ3r)WwI6=Ca@dr` z>oU?)DOP$;YGxSicI(}Mc8B#&>s{8Jgsmg&Lc%U0?BZ?Kd#t-Ov?YXH`hVYY^A;X{ z`l=(;mdj1g3fJIK>r-)^{IvBMtpv7TE zu)e9G)f2YCfVQio`Pi!$ugghGO#`&N^f1~7)=%TM)o0evH8eW(S9GJzCgl%(use*F zozdM#zq9@v*S24*zf!c{3EN26BLHoc)}8j$cip&S(Ymx8wP$9g(hilDmyt#73>*2c zjRn|P^N`JCGo$e~jc>X*ubJ`M_qgx#HE^=C&nh z^*)lY?S^`<|MIPQ+ZV4(&rPRolAfI#9!9nUZApL@tB=}7+s04>Y0j&Yuw8(*S~YOq z!u)qHePPMEwA4J9sWMV?ayC#eWMpT26MMYg>(NA%Ml2sBMdEi>ZZ6340u2j|Z?5v@W#!>0^X< z!V~6#by;9ww+kC=jd3B5u&ttywC`*r>`4H5vI;rpS8wIgO^a!JX3tGar*%)u?w*Zp#I`Z6BTurO ztl^zO*fYEFW^b4`t+F^~Tc2e+FRpdx+b*EhzKF1N%{T|pHff%G0;Hsc?;+Gn%;xjLQAA&XJ=9iGqN*`OTF!0+XHcNAGAHBS$HX7FYC5& z-oh;v37Umz>EY?j_N47Oie!7r_O$I8+p~ndg0OTsyo#`!x7nVzy`Uj&A?(%=(*I!~ 
z=Xs{d)e=;FoR-&JgSTw&gD+x^rVnf%QXhXz*zJV98fvgZ>jS&SJp0zywCm^OLf21E zhqxgrKu~^W-FZatgf3S-AySdEU&lqMVE@!_GzKHK1Fffb#MS^KxkaPtMHD zD4_pI_a^72dQy|~^K{rtLBc*n*oO)G2w@*3>|=y|oUl(^BOk_$LgVFXxkj#)=QE=S`=nxpA@LMppC;@x zgngF&yQQu((7qzrNprso8#}yBjVoY>M?I>2OjF2>^}&*k`Hk`sf6xo3@E=snBt zaA_ZV+Ur*~cGCSG-2sO`)If>?&5ccKDZi}|fWCtdHUX#a8|5Qpy03hmurCny#jSF)+#QFcUFO84!p@Jo%9(X)5Ke0tXC3$_K?1N0}H z{f^1KOKWAQgMaKCUj3mork+U}kC~*{{rn91oLKeQB%dqOwf!x^zD?M7=(IjZ!(t9g zs!@MwSZ~TpH75v z1)BYRM6F(uZ)YU=4*Aac<-Y0kj3iLu#DQh>sHUScHFYn-tjTOSc;l--;RlWwH-2hbW=_qxmae9zmjzL>+T;c0^wI^) z7PaH*A(6Vc+~?_}A1v$Y40b4(j%~Ki*HvF13^oKCUUs@f_W{GeqE*4Q9SiB7p;EUb zBq}Dwyk*Ex#i9s`R9o&Vq6bJ+vY@r8wSDhFBS(ozqsNS;pZgk*4$@MW7c~V|bkKjp zgf(`~1g-K;n?vCgo_6W!>XTjV=%}Uy!17z08eW<(anj_4nyD>Rae?;NAOL~PmPu16 zwszC&{-)1dim%Zw>8KleNA<-U2rwZR5;CZlcf;;cie+gn>Z6~yq2lNrv8 zWs;e6CWrAd^O!1T5wnbGWsYPzm@ej6W+QVZb1`!na|Lr1vz6J->|m~CZei|b9%PMqgL^Ws$IvfR2JL*D5qqEQ^v;*CP z?n4i1UdBlKlbId*GxP`j(Ma}FZNc0r-@T;VSJu@Tpc8o^?S5P3o$@_QJ@qsEOCKfo z$`8>W;G?s~Zh4Qa$PdVL8KHB>=Y;)&P9KE*lCWQ0Egy$7CJpCAdX?nW=Ae*qLhDo()WZS<58R|8_D-<&v+M$zoU9%Q zy_GUe8AI5gl%dKvWu!73=JN1m5BX~?GW>b}=Crkd@jLlGYVn|8WjIWObjt6DlCSIQ9;KcTQ$9@kK}9<~c?&kohvQ-xxYdDW-B&&6s(1(}#r*RBYws?= zqsX>~-|vJfJn3o!k|046V6fos?h@Ps1PGEq0>L30cXv;MyUXAXgC`+4%-|B-{r;;v z9SAd=GvD{zd%yQS&wbDPn_<$`wbx#2RdrdDHXJTjs**o{`8)*+7A%^lP=QJn@)XOT zzi^&P6^oWHQlVI7pF+hd{l^tlDpavh;R3}gzeLkuG^7{as|+TC*Ql!rG@z4yNYH{GQ0-cN?fkp(m#yhZ`Nujp z>ct(wUj54RsE#imkG=GVLF}tbSe5?TPlZah`nkh`SrDiQijq}F-XOdaJ*(HXQ;^Uk~Ne!RFH}$6=Q^q%QKCAxAX#*E0TDp;;h45Z#Xl!U=XliI?XfBns zQb{KjH>tQw#bc?VrJ=w;|NfM=IGkF;cwrf0UIRt+xRuG=$9$d@I@>7402D`)~t{qp!+=`sXaOvgnOuJ8$;yVC#a0!Tx+$>AOPS z&u+wV9{TsU8uT^C-zKxaOFw1MJ5)c!qDvVn(0<@z-xv}ylW4#S{ygL3pRjuSnydrt z_tn_36ez4;T4VPz$Z;LaFw`)dQ&Gb(snFIVq>?=>kV7B_w;ivCln>EY;z;ip19yQU z3}X%Bq>@7_IU@`c3=^gDn^f|JPr1W- z{QNG*8|E7pbKvc?!m!Y=h+nd-Y@fo0V+OA6 zEn_Vw41X9-8crEb>kAZ-3cHkIQYkK#61@8TfAN!!pL&z{XVDtj9%5wJmllY^1$B5SqH|;^`zqDE@MR@V4%-OtF?W*+}Ro9c~Sop5tfnKkBdX>sh 
zukQ={2GsR+EcMjz{Cn%XkV?6*K*vk34R7_`N{#P}9X9PvY#CPZE}`t$)J&aEg%d!rx+R5Y(r-ex=f% z!%+Po@b61a1)VqgcKcscmx_oc=u2O0Gz2O73p!jF(wP2A-3(J;qP!(Np5^L#EjYooO>+E{IzHeQ>cP1Gi7 zlez3`JHyvM?h>8tb0%AnlU6RV>n}OhE|vCD@zXe_gf>UCkJ0993wX;y(Og@sg=-O9 zD&c6+mkTl>L|L8-q=EWJh3!H?ExsC2fi!j1iyS624z>L?XZwnmXiNEKM8b)9;#@>( zOSEPBg=?;lCw?VHTcNGgRuRE!ZH=~8TSwad^L3Zhnp8STg^TT`zh8N=$Ffn|6rcSF zZKH^v`4(*}_X-jM{JGU5wQbsVJ;Yxx@{d=-3xfTH7N- z6?u5L7rkk}OCXi5_AvKq`&ihvW%~N}iqQ6p_CNf0l&!H3YQ3e>o%VI~?$NNwdQsZ% z+=f`C9hXXweN}&GCv7(*;xB_Z))}dt(oXA>;$Fp1Fa0o*qvy|RvFx_BbJ}?=N{iNF zvfb}?##bC{r!|QVSM^31G>v7NDLAGFUA+DGk^RKlbpr7|?}K`s5BaI~jCkm%{<<(c2sZ$@EsqPgQ+isp^v zC(fdMBQ{?DUbdAR6|NE)RjCXQ3%+1XZcHIk8B-cliRMNZqpQ(i)Yus~&JY~iOY85K zAM{gI+Y;UV0s_DPn9bAH+jToS-uwNvFd>zZQkg0h4(z9p#e{RHM5)O8Pc^I*q>p0{ z-0ZYviP2)T8q*llN@bK(II7~Xe-hUrjP6Dc=8@i*!I-i6_mLafVv@>esf>}zSS|n- z_RQ1b?k92xB&V6V^}1 zez}YzGH+vcBPW03r80p)nl|jL_nBw@_?NYh&OZ9fVYWk@v5q(8v;TCm{imG5mFl2h zP9R#_SE@WY%koUr2xZJezS;lJK8M1_BK(>r;!Bk1@4<3(!Pdw&Z98UaFh!LMrsLA71G@kiTum@o!C7oxUhP z(=Wo=Pq_a+youKmmbV{UCCX;}8FCSy_$zP5=Gy1Q><8={LaQJDqpj2Q#qk$>jqQJY zp`-mj9O(`)cGb^B3hS4QjNPQNG%TlmZ9z`cBaJ}PzuWx%npf6x z({d+z?-=9wpWZvc$OgYwD(f7BCtOXl58k6(q7_Ut>L>ih>BbpS*&vmT5&9>H{ZcW% zsMl6m{ZlFa<4V6?XTR@5^)e;A`om{dq9sb_`!C-(`1^OY{(dFTxX`#l-wGBP7aPNk z5ynX465~?iG9wAzB9*OD*(R0kQrRJuol^N-D!Zhzd!=!uah1rVpC1|58P^*(a6Me% z1SyJ7MBAB>RNhK8l~irl#G7}sT^_Yxgwda0uqWL1Db7a}U!Dr?pkI5`=WBn7OGu6F zw*>9~?c2LucaAHY|L{i5%sco>u3ztNy=)(R_G^}&@jt4kznyPv1@=n3htanb`{$&8 zaKe>Jx=0_}@pFAmF~&W{y~cg)(f9MYbkKN+PdxqJm;FkwK9xGQvzfG=`~w4ehu8zN z9Z>Y`Wq&1I5BtO9e|t$PXY|jby;3?4(v zQaNDzaNJMv_Q%L-MBaPyMszkW>!aJ`?}>LbLr7)o{XiS6`Sb1V>po{)5S|!~VfkFww`= zFMhhxxuCuVAN^L{_ce^O|1@X(Pn~w?4*(h88$XEVQaL4+(_!NOrtf>$KMB7Wzy9+> zI1$Eg`iI^>J)e`PKTJ+^$f=~OMDwKVyZBBpMGX_S?3FOBNgG5*^K5hhJv`@dN<{*%IFGFkYfFqx$i z9d5EpB}V@&^|EhC@%LJOwShZU;b!vCSK%&|3*n~pQi%=A*|0VTa*o$MP1!h$H)S?u zF=ds?MX6kp%H>Ftm$Q#4yHu{&`s%g5kI$KCSlc1BFSmCBJK6r2aFdL1kIzgKWSd+G8}I#XV$Tur!{Wy)_V&c%CE0aHO!AyZ*f5mQkU*FxE(u1n>H 
zRBlS;PpSMRm0QbAB}^qvrA(!bH%(wkF7!|__>zU-;=$2q_l~Q19UOj!P`2nkJs_`se2*P4!I;oV-k2uDdIhd-{RaeQxdW zC5t2JUDNS6e8ED+xqnz7zfWPFmn>3Pf1KTC&<{I4Q*%=b$BvH^xrYfmK6^FVvKuq` zncA7!n>v{MO&z7edEjHI#7X6eRGv!Z*{^nEjy===!TnY9Jo;Hg{EcVtgx$D)gUViY zM;Y?^+S08*ZT7?McH?{7?RMpqJr(2h#^iM2P1vABM~3+e=ub@ZHTi^lC#K1!8Qf|x zO)*V1O*2iG$~&pNm&ymJd|YChX`1C+(8NLXC#gE=J6kmc_bh7pcZ|QuX*-~G-1AKM zByVot%=Kf0W(@*5b+#?+*mf72mg>WWna$sJPD@1|W+O(NB# z2}i%Cy`}?+4?s-^O^2kaNL39BOvqx^a>H``c-Uz=ZaVqX+fJEIOEsBPlk=QoLPE2a z8?__R`=U%S`d%|yswu-cG~n_L*)wun6B5)d{&t#P5=S8}o36xPYPH|aF@gH%o5=lswV_fzmsOi!h%N!3X3eB#)fIcvGGixUO^ z%JlZ9_q{W{m#SH+7RSB39}n@emg}4N{a?(@tLCI;(w0W5 zX{DM@s%}zsm#T+U(@QmjR5MC7lkFJ-b1HLcvy0i)Y!I2uMzcxJU23U%N;N>L-KBa? zsu%SiT#@QkJ*ofx8G@g0%l{95ir~jb9^(7NoXO0Mp_OJ&b7pfEb5?UUvzOW1oZXy5 zs+pylMXFh)noX)+QuUT3rV%GREtQps8owdwfJ(gzqzBileshh z>ym1*R7*&;q*TjDwSw*ctMEM8|GeWIQ~YsGaPM~g^4LEw95oL$56};A{^?6M=7Hux zwy(6PrKDP#p69#C`T>sq<(Ojc?O!e7%P~J3(wK*vM;Kkrqu61qm8HsI;8dxWrLzB< z=W=E~`6JBZ%@d?bJ623M;4yc$oph0N{buOcCXO$m+fUTYGi)bRY9;##mHn{?`&m`$ z(zdgzB>Cdcs{ZAniR}Z_yh!gGN?uJr3`z6-a$kP?6{-S!C(Q8-hk1#4rTsW$x_P;I zh3z=xqf~21wdNA@D)VaRg66eSttHj^w&M`rFmCzVE=}0>586K;i8xBO$-JF)n>U-c zn75kw=&mhQHmJH%t@l6apu-XELGxjKr^2B^gMaZsZa!f?sW1MAR2znyPf68wzvdTD zbD7Waxotjgj*@C)sWwgc+%{h@U;63u_n?`NJC0SF=ucnC(La7#n{Sx^`f2G?=G(SI zlxF(UBaSCp`o5V5F%~cWzF&K6exmR4Y)2Yqjt*MJA4!>C=+V72zmjSzskZ(e-COgA zpT_&)sKSx9uNJ4D7W&~3BH@~lC8=m`Np2*$oj40u+y4DZjwPvnCCB2Lbd|n9C;jq^ z>i5GYYtbx5$0plRs_hasSzFIr(y#$q(pos&V1L-*R~umb8HXj4eZ%wrZyR2ss|l7I zKU_^vJO6k!!J?m%T9Pxdgk7~IzolS8gIL%-cZpB8r6}pP6tfhUYFDXtOGvk+l%;Iq zO#b+2UwpbPl`K{Nwh&Kbn54q{UY7eRQlxpx& zi?5}jrIDqvrHNESq&h~b3#7VVst5GX(O*4-mvHs!*NY@P+wfoRCUAh9_=3Fct04dC zF+EEgOGoajTH0FtEbT1qEgdZWQtc(x-ck*fY9Fcgm1@6bmQI$=7XF$C3*V8IYJaIp zsgBeShDO`(tN!9~yT3n5r$6A4aIHMyK7;<4=083!_aC3~``a^t|NZGcM?rgA`taoF&!S`XB6PGIRA<{?#)&|NrAn z`r}vn6Hh-r-t@otjM$G4azxu7WU^e)A7qj$HzxzWKeA-GqW{ICX@c7E_s50$+cqJd z8{lCB{XV&4ExYw6OZ2r^Zt&EI%GDUhQzI6Bk4rsA``)qK(;M=xROf|T?n`z4f7&u1 zTjCOZBwL5<+iC8{q7xjJE 
z61~0n_dcE&(L3xvKdbTo^phgLd_*NaLsr$Af=4E;Nv+AOd_XRh>N2S=kF=(=rm}K! zze1|3q`KF()v~X(U2RCTpS6E=!uE69gPcLVd90YP7XSFf%fEe?#CE7)HEEy2tz6>Z zrs+@5XgESnYt0}`ORdQX+TH45<+{akTjZ;yx<;yNms&GgxhE+ySve73Csn@bpkJrh zsF(i#^iwH(akWDc{>H)hr&6prc`C(fw&v2Ot8zPaD7Q@#J$|4+hf<)BJu~@u%Eam; z)lEM=WnwL4bhQ?-7S)f;xWmcLaI;i7SNh+4D8yQVheE6+t);A`t(=f{w`I% z79D?pW-V_&Z`!8sO!a3#GWh)SGazk3c($&U<6Tv(Rk>}Y?vN@sUdJ|ze_*3PAuY9I z3A25@EA8ub{FGCB{e8O~qx~;DvtfG_=zG$C_1W|5W2S_|Do67*v$p5TkF~kwwY8k9j-d}r^@vpY>^UaY<5E4rl^?6WwWGBY{|>Nr;h){~9R%kl zC#8By|Kn+?p3(n(R^QO{P4@qrk66TinkPJBVeR|F!xPp4KmNzy?~hnm<&Q56PxOd| zb(D1s#~=3dI4d9j=i?94trK{}!aC79$;uG`CvGuPjZHXKw@$OpOnew_>}Q>=KVqRq z>!)IU+WvSdW}Rj_-oL^t3gJmR@S*80W{Y_!xh9g2K@yYpokNcC)Ut zu9xa1sa{U>l*E634&pyQN0Ix#Jtgtu->B!fsPPZqezpE?-POo;-;6K73XvRc$bwub zj1u7MsHr{)k(y;vyCMxTgA4Sj^B^DcqX>$jBub+p27t5V)Uz-L^RNJm5ROPJ#TIM> z7w=Q=!d@J}VI0E={E06@xTFN*yRd$jj*u9NNtlhfn2&{6jFni8wb+2o*ortIT={~t zYhCoiND!YZ@wpP8EAhD!pDXdXE(h!3OXI?o^|-DFvAG__ar}W(ID>PDLJVSY30H6* z&+rM%!}XgG1}Bt3RWv{g_ zUEBltH9f>zyu*8Z6vCVg$&mu7kPh^@*&XDKEBV6gg90doA}9^!VlIzLr~+R!LK842 z^J35oX7Xy@2x2x8JCgxxs7J@ps(?9Oi!F?09U^{k#{J9?o zd2uH%?&lDN7%+eLJD_*miN~Fo-JgJ-acBE<|0aZoGZZ9+8RWx*e0X>u12Tbmd*nlY z6haXcLuFJ$4b(h40dw_Wt{y`mF$|M24Kpwsb3x8L$fF0j z^;ie`&to&T;vnb=4|>Am51hgoP=S%fnG>&MOwIl zny1f+T*wO_6hIX)$Mg-+7){X}ozM+|=mF}Qp1G#)hXJ6b>6uUZ{Wt;Yll~&8P5OtR zF6pUD`j;Tq^l$MV#G8ToWJm>fWCn3%AeIb;Q53~N{xc9$2J)VPJZB)548)RwSTY1- zC&*cbvxq?~E`jYN!%fg98OT?LM|dGb#?)ZEj9F0;tUn{`%@_vel5rwdVjB+N5U58+ z>XGpTPJ%HqGDgNLxCZKyk#RCIPA2*+QyFvzdC5fmGCjZ>Fz-wsL4RhVKRum5Z9FrA zcs#w39pu5Y2uh+f%AyjektaRrNz9&2(G`=xIG*8%#8Rxn8mz+xkR#6vAWl!>^dwGC z`p5Gvh{=B5Sgo@9$KLT=<&?-cxG~xxhF!<8}vZtevnv(EjWkEASam_D>J>C znc8P&{h3*RX4apD-poSXv#?!eNec3ng&br_2e!*B8Ic*;;Eh^f{4C5f%T_RtEUYhU z5+S_k5wGGPPhRB7i#&OiM@3WybMdN+ly`3%NLE%1kKO_%-5?8{Lmi$=!5`} zXD{mM6$EnNMGm}rgB*B~1Fr!XgdrdwUc)c~qc8^JFagZLYYL`e24-On=3xOAAsmrd zise{|)mV%5*oe*8itX5mUD$(tIDkVqf@3&=lQ@mDIFD#tz(rifRb0nS{Ds@Ni~D$p z$9RHgcmcL!uXmshUSEWuOu{<}sHb-du#I^eV1gCY*4qQr);lw(t#@|(hCJ{=LD2Kw 
z#Zd}nQ2~`f&wJNI9n=T=1Menij#g-k_UMQJbVCr>KX~^>Ukt!tNDRj)jKu^@1~u`X z3F_fJAJoD-0!y(1)WDnV$(#Ip)9c>k-uriucklfm=iWy_zP(R@Tzf}>JbPaPIrhE* z^6PyE~6*W*B_27%fXoi+(gLd!-+h6vs2t-fxLLc$Aj~v9BgBWvM2DQvd z4RbPH&K$^(f@p<~Aip_hVKE}Wd~-5SF5`>ei1Rn%`^|)MsE(TGgdPaSMDYIK=HLMS zz$rYzTQL7z%qXrL3UW4)SWPmqvpbna#8A31+tRv3?EW-*M!#PCZIX-}TEpcQD{n)P zue@zQym_Z!8a7}jcHt8K0=&aIejnNcgU>^C#f%oS-jH9>( z=97>7`LGTja_5r{Wl;?^z;@vi2x{On6N?ZIa_4g#)WC;)_ajk$=#QV>HCVkklkMc5}4VSK_DMcu*pMX66w z#xL3c4bcPrFaYzg1j|4SMTwy(F%*4?PoT!ds72_{ye6*{6b z=79W_U=Ahd%M!e=#03z43H^PG#I-ab1oNvVD!Rlus<<=79DFAs84z7{587KpVx zb1Kgm8l5qBTHN>Pp_|h?-7d zel=5reN#uszipgpnAH(})G}*0z9L)Xs!f=m>IA zn;O)n-)d9m+VplEdb>_CR0DlohaA+IfrVI%>$rypLeyn0b(u?D=2CYorh#Sap27uO z6r!FRvLGAisd@uIjP=eyf1O&@&jE5+pB&Z?#SoCk`sbj(epQGD)TKdYP?rW>(F-iw z;2=(dx%!d^U-IBf9(+5Y2N=`$5Qxc_F&lazFML3r8qzlnr+{%9(lZU|nMSS*9_|G${s_Yf=&PWs^uO6{2Yhm_SXM zGLNR@qbYN4dKT2G=@lWGk*{WXz&6pWHwGgNClG}gA)32`d^BfH&8cPco(RG3V9w2t z2+<-rj4-1a+MxqhViUIDoe(VroIx#H)+2MYOG0$c1m@BCH!zpZ zeZlyh>6y+^h!G+n8|bTmeCUD@&|d*-uoc_!S%@wwlA$h|qB&Tm3(It2nXcrcD|PCc z8op?W)}XJtZo(F@o^GtC8|&%T0L-&nE3Cyf>=2^60vC{v?!?obc)Al$ced&7#M_;@ z1?EHnkki27n1D(63y%;dL{Mo|L3PwZ9ngb8^n4IG2pWjNAa6nA!8(FiN6>tb>!1}_ zjdj?F-?0b#aR^6o9Iu4vkp=Z3F%eTR9rR(3xnP|===mP>bPxKc#{nD$>*x^;YSn|< z_PBxDpuRmG;xRr5(UY9^q|QBygE{rA3Fgz2F?%)!_2@}1dy>nZ9YH_zWL`aqr6;lU zoQ5S>j#VJvJ=fzkz6cRa-GYrU!-{mEcY>K$FntzG4+Z~*y6B32V2t1q7>%)DuEC5E z%oxFp5zH9D%sF^7sBJJg3ueB-CvXa9K@SFBz(u?hB82fnN`U=D$VjZl7HkLk57~_) zIEDv!Aw+0)5L;*-kfYFoC<1a7S_*9Qp%qXW)j;k-$z3S93vGxdXby50%DAEJK@LL$ z&<#Nd20a+s7Xv`wgi`BJdM|Von13k!6FM2wFcWh?-9r~40!y(1%qesoHiEf@GPh9X z7D{i0(p#bADU>{glBZDe6iS{#$x|qK3Z+j%>61|UB$Qqcy@UIBgeQ28S9psL_>6Bt z^l^rY&z!O>Fjhx7hd|+GaQy9fi5@k>xl~5HmP#g8&i^gb%mS}@^ z@JDBKMId^j7y6(-1|bZ?K<)dC!FWu&2gMRC?9ILPv8?YJMuoJtn4+n7s z$8i#8a2_$Zh%2~`KXDuP@DOo$hL?DQ_xOabLiBZlf@DYu7icge4cw3(nUDou$bnqQ zi~J~rq9}pVD2Ixug6gP+x@dq#Xo?nS4L@{1Cv-t~^gswg(GLSL1Vb?bqcIK>F$L2x z3v;mmixG)sScx@Qk4@N$9oU7vIDo@AhCgr`=MarpT*fur#4X bo4yufR`!$*7( 
z{GG~3f}}`+)G)vVE7HLO8Ic*;kR87v4}4G%MNk~2P!<(X8P!k|bx@!Ug0y?Cc`p;*u&_DFxC^sdcs&wSZDM^2Zl2FF_iupO8*RH z{Gn^H9>g$|7={wV&~HKvWBg$$krM?_2n|3V3~PnK7zN@OMoxy2lVRj!80#6vdWNx{ z;YpAhuE>j`C=TW^yc+_s0OVl!3jBd+P}kufgc#ur1>VR9axtPR>Va5BbOrMn5sF1% z9V1rZAgIlV)3^g_JK~uTBZ*^VTDXCnj-($)mH~4b*#R9f5%kGO`efuL?7|-00DV33 zp%A08A~%@Rs77cFw%Ji5!23r{##Zdbeh|YbVi-jXqct%8Xb%(yjma9mVMS(@;bH|$j4afI(90EW9$wv{@6pfjW|3NVq7X%Kpf+!%Q)UYj=GHN3yERa z0`fPGIgFz($La5TF2s1^AJ6;76aV;|T1`=q9gIDt0DRFBtuYkiFah+*6#8V!Lm{T-0y&ym1mtz91hGzKPE#3U z>S-aS5z{nRkke_ba~kWM#yY1FhyKXmv~{=(YB%k<5YyRqrnBu#F9X&!om@|!h;v}h z)2|9KgZ;-0Bh07{`eH^y5Z{cMV6HQ+f%VQH)|qa|f^47;GdrUTR$>#jfceaPiPu8R zDhjrnSryO^%zxH!9KtD_5n?udGMhQgHh~ys6T@s`m^~U(Fb&6X9??R~aYib*fS#H| zKhCL!5nz6ECgUK;+Z^&Xm-Wu|KnAo!S9Hg6Y``Y`g-3`JVxBkhAwQVkyq*ZbVldBn ztMM4jZ{AxW=Cgm8Pmbp^&-wJo{J{vr9vlU+&!?9cBu7egLJtIEJNAQk7km|BA-PzX z0##8D97eQ70LWv+@6caAB19y0iA)9R z64?l?!LpHSuocWTk~}OS4@=0ylKN;4#$2)%#I%Glml}``?jTP~>6@hkz&K0knWgm1 zGG1Sn8ClUE!$AF)#o#(_3b8yF3ZV#CW;x3&XPM5buU>LTqe~ z_VC9Z9K~@VHhCf^$nPcz#@{p^(YS^iLTu*s%>_W6Hupq-36d~6?%377=dxt(=xXPr9?pf7j0qZ`;4?C1m5w_`sJ3b8W< zn8!{Fn9I&~VEmo*%+B4|E5z@vNDDVKL@Ur=zt6-XgySBb;e`;pilYK5fn|2F%r2JM zMLu>>r(JJ_*j*Y`P!06e?)g{<*0Y=S>}EZCN`ZOqsft-xj0il%JCKjP#Iu)p_7cxt zw&}gZyO+7`Gb24RA`pGh4<`|WSRwXjhYzUt{`#PA_P0bRhJn29AAwPrj~ zui^&&!X4ZP+r)lqzW+I1fw}G{&i$Y9O^5@|P>~#|KyD8hVF5WlK&=jBfG4t|HT*zL z4s=2n&>IJOAOysEfO#Dlh#?pXVm>e$<1i6ZK>P>D;eokW0CIVN?dQNUtOVQ9f%Vvg zEjWNfID+Fs9ITGUXbSRlkenY}0djtDJ9c8X5QmaL13h(!IUQn7hgkLyaUY@w4^fLl zJ;4}<7~>FQ9Ab<^jB&Uc8lefAfm$3Uw!_=71HWUB5Jwo}2xA;!j3bP3gfWh=?2!O; z1^GEbevS~=k;6C&)_;WBAMFq7d2|fO&r$Molv*CW3+i|DA>xEM#yXC%>@o6sY%nBF z;|i{Ux*z*Xh~w=L1m<*{dK{-7$9IA8j{kvEpl?p3gBP+RCvt=3PlSP*ofr<{`lB>z zpcd+&J~m=6_TwPP*U2jI1^GHjzD_pBbc7=kOR++TQ#nu=MNu53FbtD16};!vEHJlI z%TVmw2PXKsV>&XU`+1&>hs` zEOR|O9+SX0XNmbNF`o?wm`=8~bnohjA2VK|e$hZxr!Hy#e`(B41JDD>@lc zf_z0AU;^<+6MuA05N|Z`Mw6##@)S*;qRCTqBao+P@)X?x5uPT~^C@r66Mk4GTa7s&C25BQ9)Lc}_O*kj3YtOs%+ zKMJ8JN}x2VfF6mZM`Fo$EcuT0MN1HGY$%8~b|^-GxMPVsmUv@lfqg(M`Ho$La1d`S 
zIgZ_jQy_`4#2kASH$d#M#2ib^vCr`mZ}3iti{$%a3Zy|MWPulm{bDW<`$b~ENbDC& zp)AUy2AaSRf#`$&pf@gtVHn0^5~hOKFU|tJaxns{u>%Ld_IHt9zW4`DBNmr&4a9$u z_%GfDIle?}mkOc<*p@DJ2jgC1yi06Dm&Sl`FEQ?=*_elgpvIRXL5(l1!!8^_6v)#h z#<|2em#Fb2YJ7=2U3vv-eCa(t;foNLlR|?Vyg`mG=LL1XTm;2Io-UU`P1FYafy)gr z3gqGPOc2{;V!M0}*YPKq;ZMYO3tq`{#9~*m7HHC=T{%$37+GX5Z4rt zgKPBXwN!A05f-EcHMr&hdg7W7s-rD>gZW>h&#vvlF(Ixe0qeQ0fw^AK4(fKDx?QKo zuNOrblm~OZUKOnG`asOW2JFOP=<8z+*H7adqHzJY@DOo$2DY>7AMhF9gt*}h=5!+u zT3{3=gZ1B_FK=uDW8Bz{eK?2{IEAyIS8v3Cez|cIcflAp7~=+G++d8GjB%5B-b@Vx zOdwx3sqIZ_dy_feEQt!JjB21yZ#Dq6z1bARcC!^af;!)%wl@b0@z-t8pMNp8zrF}@ zi@DsQF1P5*Th!nd{cXsy+vMck=I+~^A`EMRRgtA55AyZZ!zAjL5KkJy!95$ z<90G=Fe5$a$=j^|HZ{3TytjGZZ9jAXao+BXo}mA3kHL6M!cEzPd}j?_R?Tyv93x1bL?f;vW5cFDa8;?>$=bK_o>@`>UN*H z-Di&Xnd5!le?JI)F$iH8h7n-C_sRMFxmW<|dw(fbfU)lHz)73}+s^%4xQ|D8f|q!M z_h8-+nC}Dn;Q=u{$cW5fz7Lr1gWtd$A27!UCBb(5pgby}3IZ@5tFaF^!8{*)!dD?4 zQuBujk|8Brz*rCIfrnYZI1d@;A-Q;%8|3vNd3{J9JgkUz=m+Nca6SG2IecUUu|6W! zN37!!>v%-0k66beVthpJJR+x$$mt_;`iOi!Y7IZMM^A);oIV)YWQeB4uLoxox&NMLlmxpTt9jSa{GuJJR%2=1(F~&3^0LSeVi6&4FYqDV@`3CKt1B9N8D`81AP#;3Tv?eo3Rz!!93!q zM;!6T-N0YCgZp4Uac`l|=QF+u@x&Pl=%pv=z+9eWgeS6tetMDz`H&xFP!W~EoSxK1 zT{J*L&|6PBqBFXp2ZGTX%H@?W~6}|(jyZJp(skAG|GYbJ*8$( ztAm<7r4OIhhcAf#DY<_di)TVSV_nb4!?QLR0&4VZA5MWdpAqM?3%Gw1$Jtm{oa6hL7V12uTl0@UD5PxQrLkf%3eF#(gX1lw^Gm+=Pg z@DX2xcq@hh|{iAKIfM$khjW=0ga=FbpFx2IDaiv#=21Sc2tPiEY>g zV)<|YM{xqDa0Y)tkMA8m7fDG^k@qeUeK9)ooR74dpkB{{J$JQVp zAA6x624V###pStA3}U$j8AFt8>qo2YVe6MK2d{D)Zi1j`a}&rRX}AlMGG*_ zCu;nO9DVAD0FeJr^62&O4MTaX>h zD{*}#uCMg_SLXbcTz}0Ca`ct4zcSue^7M5rj^YGPg8uk=4wrEaH$lIAeI&#;*7?m1 z)c;#81cUi}TYyzyUEelfGnmJ>9rzPZ@Eqjt8~OW2{=R(@L<<*KkQVO9fNaQ)-;f9S z&;lVChv}GwxtNcY*nwTxivu`}WB3E7aSbO@S#Ow0lGQ>mZ278|e`JFp9Tu^+^&M&nQ11--2jw@TdV zD-gH(UN|MC2a}rN3F?=$7)qiH%A*phq6VmK(t7YkXLLn(^gswg(GT=q(!rp{NvUyC z@}HDkC#Akgi7hEHBqLAB3ZN{=MKV9Iu4Mf&0h2Ke%sClzPBtH`BN_EdM!k|T|76TJ z*&eXYWCw8s$8i#DcgfC!K28=3`X;$EG-L;3C$9$PkbDTng8fHw`XV{ob@IiC1Y;#X zjVrj0KXDuP@BnY{8Q+9c3f7-OMQRvef)!~|5iLNTQjnh%jG1C6$WIFTF$LqLV7wH| 
zuo7#q9-FWg^kNEnF$Hx^K|CqWAsWmr#bsOt@uYYyoKiA&%B(1hR-mU+Qje75FdfV@ zCG$+lJX11mO2$jccqth#e{FWU$jJP&~vFfpd*MW^+-^I)JJg>%rW&dyaaVl z&GwS|qi}Lz3>ObDf0xY22J+yN6QxlFHBkrk!I&;>!Q5RK$E7QJfL?G3MPEov#5BwV zJ@2v*;aGxYpgu0-#AOfmVn65w7ka@Z8uWq-z2HJGxZDACa$ycG%)y1cxqKB)uH?%# zDae;A^Kdo7f;7mA9LR;dD2O5`j*_T_x?oPO4bcS5!!;Pa!8}|CU^1A8EAwz=9SE{)=3rnB2IgR(Ck@6v#h?$hWmt(dp!V7(>;q$IjHNM_ z_6JVmEErE?{Te-Lq!*3VK#h#l#5fw`K#v)xU^=LeaRsP@aR+u`FAm@kSf7zP7;l3* z80jbDQ@p@yP!r=j;bc-_f)(lD0cv9M1obfafO(pVfL=0{1hJWFq7LeVelj&idyorL z0J7g#B0t9Z;(H8ZqQ5S0w|24C;@uQOq}L1Aa~|7crTnR^q{2-TB8rhtz{#&fSg(m z;~1!!B^tzNA)gjFSlkBK0cR&r^bi8+{$h1iQrcqyFHxF9{4cbZC|UTNxpvC=d|3ozF-e&_(|mS!;M z-!#lI4RcHri_5qM>XC-IrX}xb$$MJzo;DR+VFY8P%>?F~HXEozTI!INIj3cAX-9#a zrrnPHID+Fii8HtZ>YbK)r+tMt!YN%!q=o_HG#&X&M~|kfg+?I$bi|*I_|tVl7jy$T zOE(bYEZtBfpOg!&y6{|{f<3g?rx0j#{AuwzuO-;jk91}x39vSaMsd_e z0H}>S>vw0p?$pRV5^Jy)8?Y0M;l3NJ+x;j`APUsdom#rz#yvbl9G>9?=wA;fC`g8s zNDVh+1hIHz1NHPE4<7lDA0F sQmn!ntOIlNU=AMCB7G)g2YE?f29-GD4fPU)-sUz(}cLI3~& literal 95250 zcmdSC2YeGp(+7OJC+V7Wk|jBGFuerh-Uv;$Wvt*9%Ql!2Mc4uy+~PtBkldz_-bqgg zHKg}SLV6*+kluSCz4zqXxzov#CAq=#z2Eod_db9$^WWLo-Pzf>y}y>WHa4_(q^7>Y zAO{>(FilJ| z)5f$jtC=;-dS(Mdm=lY$yTQkpnqVB02;OLW9u|G!zX(!_f#d5{*Jh zXf&FLCZWkF1!bYbkPFR4rO1nFkq-sXN^}fbjW(bY(P`)$bS}COU5PfMtI!s-6>Ue? 
zqZ`nT=vH(m+JhcI527d0Ui1`t8a;zvL@%R#=za77`Vf7DK1N@n{pdUN8~Ppnf&N5) zVHO*(5erzxR&2vV@i06bkHDjG8cxR#@@-^!`{a}z&^}A z#y-hD%|6Gz$nIlbW8Y-oVc%yzVn1cSV83F&Wq)9QW`ASW0$TzqR#fEu?GQ$yuD#LukLcUOGjtl(7}gn%GZ4c`hEoiu8_qJEYq-F0k>OIq6^5$}+YCDm*BGuh++?`b zaEIY;!*0VK!$XEg4Nn-JGCXT|!SJ%-Rl^&Gw+-(ZJ~Vt{_}sAH@QvYn!%v1^4SyK^ zHvDJAMx&88N=Dh3V00P>8HXB27?X@+jT4NMjZ=*&#&lzrG1qvQahB0#EHutF9&Ri( zRv5j;8e^?-vC(H-Y77`x7*`sbjmH=}jH`{u8aEgfv8qYDFZ@kcWiScsd zX5&`l4&yH4b;cWww-|3X-etVkc)#&M<0Hn$jeCvH7@s%3WPHW=y74XJyT%WU9~(b2 zerf#L_?_`b<1fbFjei;cH6fG1#F<2s#bh%%Ooy0;n1-81nZ}sLn1xxprW;JRm>w`aXnM%>u;~%gqo&79Pn(`Gy=Z#F z^rq=8)7z$xO`n)PHGOIN*7Tj}d()q$zf6Cd{xSW_Vb0DuI476L9l{Ob26Mx?QT%j% zCO?NS;EQ+Wo!%wNV|$#3De^H=lN@;C4|^SAML^7ruf@elA1^N;aQ@=x>6@h|fG_}BP1`FHsD z`H%Qd`7ijd_;2|i_@DXT_&@o71V&&5lOPCY!7A8=L}9QnOc*JQ7RCt^g(*U^kSb&d z*+QN$Lzpe(3vQuUm?xA8M+jBId|{!mL^w*Q6P5`LLZi?kvgiD1hgsX&Y!cO5D;d{3QG;{2}};{3l}3DDt8t%3^}(6bFez#SvnXI98k>P8O$% zDPp>qCFY8UiL*qPSSZdF4;M?t3ehXph_&Kk(I+ky1L6vCrPwSUBX)?Z#bd<{q9UFs zZWK=w&lJxQ&lfKgFA*;nH;Y@v9pWzWI`KyF7V&oRF7aORe(^!^5%FG4VOkqW2EuY zBnbXY~<{Wdbd4_qW z*=5c*7n$dpz2+)&wYkPT-@L$FYhGwxW?pVyVQw%VZC+_^G&h;onvXTFGp{#qFdt_= z-h77nO!Hahv(1;8FE?LdzS4ZH`8xCU=DW@JnC~?|Zhpf2q*f1(qK8-veL5Jvc|I3aWmQ9umEf-m~S+-ktSZ=Z0YPrpF zyX8U4Lzagv&sm zBjmC2c==Fys+=sl^1br?@*epC`9b+n z`9=8^`BnK%`7QZ9`F;5(`DghT`B(Wj`FHsb`A_*T`EM(-4zUik4zmuoj8ZN1KVlgwFfw%%^N!+MYPe(N6V!`4Tv zPgtL{K4X2s`l9tE>#No`t#4UBvwm*dcH3&(I@|HK z6K$v1&a|Ct+hn`Mw%NAbcD?Ne+l{t6ZFkuovpsHm!uF(Xuk9(@+qQRX@7mt8y>I)# z_Mz<~+sC#qZToFM+J3VAW&1n9l)xqM3C@JXgb@kj6S5Mr6LJ!A6Y>&V3Hb?!C(KJY zB4K{Ql7za1`h-Bj%7kMQ)+L;na7M!A3ELCyOSnH_Pr?HU4<4w9m57w$HJ%No8u72 zAjfdWILB1SbjM+i8IGBbS&rF`az};Z2uG#E>!@;6J8B$@9ZMW_j(SIQbk;i?olVY8 z=PKuM&W+AfoToZBJFjwXac*^Pb8dI;aPD+o?cC+O!Fj9mZs$GDhn=X~G!vGWt>m(FjTKRJJP{+Vb(MB>QAQHe>3qZ7v@ zj!hhwn3|ZDn4XxCn3tHJSd@5pVp(EkVol=0#6^j|#G?}ZiGjq1#G?~eC9Y0flXz0% z$%z{iPf0vA@$AG45-(4@B5_CJ&cy2zZ%Di)@vg-C5+AJ|*WB6IxQ`JSk&zfPV`c2~ z%e>Q82G+JOq<;oa05#+48vX6<+ZYQYD_CK-F*YVaF%b59rP*h5dJ5h1TiRB(xBBY> 
zg)McRO@ZbPpKYA0AS*LHKf53~qcA5cIWwoAC^;`9Ej2l<$W>TSRG8r|$j;JRK7<*` zOxVQ?Vg@rqn4!!tW;ioKF)Aj7Q+P#CL`B-gjAD|Q(aacTEHjQ7ub34JVecU9-GqId zuwM}NOJy>(CBH4;U)kEy(A?4Ps_ST2KQkg7f!VV^l zNoO*cOhs0#icLw_!DKTzOfHkB*p&=|{~_#l=xgSJx|aIDw8FsB&gIJkZSxvBrbR+e zo9pjf9w-eowY06(oAcRnG&S{qGb-9z;Ed<(Yz3pc>qp4=Y!ft1I@~eZ^0sug)ddQu zMLyfao&-x;mg~mo-JZkDVa6MRfa1gl_X`1 zGESj_B}^$(#*{M^%n?i_<7KLtYGsl#MLATNsw6AZloTaZNmJ65$@`cEbXwOk3zBz1MD2$S*IDNM1Ym(Q1Hy6XnK{eqQUm^_IraX$)R!3_a}?uemM+v*NwdExut583puM`xO9yxx{km>J znYUa$P=_0}Ts2_OKnzGr&&|rq%}q}Y$7@vah715NFFj{j-qO^Zj0n7ARJ>sW!wclr z*VW}l;H^^eMhpP2E-h_YMpj*YB;Gm|Z`1(r^3wg8f!w@h%OdPkRJ_pxz^hA3_xtPe z0)Ys;lU2O21Hhx>kyoD`$cgOl=_=m%f#EF+WTa>KBkVgz#hW+)yt>?FOVe`wd07-s z+da-_E?_2X*`iF=R>Vcj#Xg&(%v;*o;a}PqC~TnNxTV?Owst#n5pxMsx46vP^_h-w z8FRUYp?|QMxhnF5ZOrz_53Xi*MSgG{bA99oH!(LyesCLed*laqF?UCPu$#Fr@`DGM zN0!N^$iwCdI@pRI6NLt}sjl9cLpx@V;L z>zf*yQ%Z_TXy~CawCit$G~z`5YnmF5T4{Vv318qZsK_}!n=2MF-LqEG5?v%x_D<$K z=6&V^=0oNqWsc%fmMQJZaj>f;t9ysOF=-oGMO$E1LrZ6SaJSMvEP#~6XPX*B-lu(5 z;BWOWZD?%hXb7}xIzHQ^7~<2~)xE2PZt2t7X<4>RFrTLSYn^9}PY^Bwa& z^8@oE^Aqzk^9%DU^BeO!^9S=M^B42ClCKmfg^F7#QsyefibpwInWvN}rAnDnu2d*T zD3t*7FY_N_5JDKS$bgKJ66z{7mR6W3>wEw*owv~Ra?<`rCOQ1$kSff&|cTrLQhbANJZ(fzsN?pvA>vsX2t#@9~H*_q8J?>`-?JE5&MfOR1^D)g=lf?FOEV>V}G#> zt%&_aBWjNQMH}jf{lyw|Z0s+NLrUx~PC^@Fe{niG6TUb@S-usWrL2HmAfd3P$_smC z4c#jt#<#x=B?NRH+O!Rwk1kLeloq8;AGeFprHp+Wx)@!e9IdR}hAu;wD~(E%(!7Mi zc(fU64Tz=w<;@Kpop6{O-L*SMG}xs!c1~ap1OKNbo|m9)+aSTz8{dI; zMKpd5x>jjbj!~K`#?Co^Zr=Q)z)X|fp`cJKkx1l?>qT7`Y zWy34zE_64#hj#m3v>V-r?yqfE^8>ok)OIv9(E@_MsdZ6(up~kcg34-TmC~uKKS+t; zSl1rc?jd{seP8eDL+DXv!Z!3UdPG^HtlfqlLys%RD(jTVHQHueqa`A=6rnEWjOYw$ zNhmpmlvH&ndKNwBvyIR+f+<+{{AW9Q9=*WS^(ICS1ayGAD{HOXf?nDN&hEiDy_>I~ zH))v!y^3B#ucJ2wq8?zv*Fy#uEA#9jKa7>w z1{2lzNg({Xd`|l~a||wxOTV zFUsl483%3Y)gPs<;oSxmCEL;@f3V0o=X3lhNmi5DO;57^uHbS|GU{o*D5V%b*wF;2@u_6TO0jr zVK1ixpik2zwI4}AL3&K0W!tzNjDQ#$CZ@Mj<8SPYY-nsjOJgI={R8!pEv-!|pe5Ml zEp2L(!O#-Jr(muPX|o~^ZQ6>bD_fPem#Ncc;#qihEv@9WceK%?ld{cc8;P{ul(#~r z-@bmmyUdke;`ZfxstU>r-D;Gvotu@~| 
z;AMC@UV$6%(Rd|p#7(#vx8PQM3~s~ixC3|MRd_XCgV*9?@jAR7Z@|alL<16r$cr(5V zZ^2vfHoP70z&r8Pco)6~UyHB9*W(-Tjrb;fGrk4iif_ZW<2&%3_%3`mz6alncjNo; z{df<406&Ny!Vlv|@T2%K{5XCBKZ*C^r|{GG8T>4M4nL1yz%SyL@XL4~eg(gZU&F8C zH}ISIE&Miq2fvHo!|&q{@Q3&#{4xFne~LfDpW`p^mv}$^3V)5i!QbNV@b~x!{3HGe z|BQdZzvAEU@Awb=C;kinjsL;_;{RBNMJ#4n*1#HB6U(tYE3hIfv1Zo7%B+>Ou?ei5 zb+Ar0kv)VR#13YMutV8l>~MAjJCYs6Cb6U0G3;1&96O$!z)oZ*v6I;;?4j&bHkqBq zrm(4O8k^2$u$gQYo6Y91xojRgojr`5!Omo7v9sAZtc%TO3)n)|%@(n9*<#ki9?s5V zOW0Djj4fv?*dy3V*2`A0)ocwrpIyM#vJ2To>|%BadnD^qb}CmZyOe8`YnAJi>y;an z8}! zQ29vtSouWxRQXK#T=_!zQrWM3rF^Y?qkOA;r+lycp!}%(r2MS>qWr4-ru?q_q5P@* zrTne@qx`G)M01Wh1lB0-Y~noQ6X zf(|8UDnZEvO(Q6Upj3j=2udd?gP=@;vIxp1D2Je2g7OHOPS9Zl%^+wdL9+;&P0$>I zTm!S|IYAW!9YIhfL0*EY2&yKihM@Tb zEg-0tpoIi2B4{x|O9(oWARj?T5#%RmDM57v)e{sTXc|w zCa8s=R)UTpsEwd@f;tH5Bxn^us|i{|&{~3yC1@Q%>j~OG&~XGEPmn?oA?O5xP9*3g zf=(uABSEJSbSgoo5p+61XApEIL1z(kHbLhQbS^>X5p+I57Z9|GpbH7Qh@guJx`d!h z3A&7+%L%%IpeqU5Owd&XZ6RnYLE8x0PS6g5b`o?oLAwaLhM;Q+x{jdh3A%xx8wt9J zpqmN0g`isrx{aXQ3A%%zI|;grpt}jWhoE~2+D*`X1l>>2o={9?>mU-3LN2&52sM|^ zwg8R9uEIi3m8ZPSRpRp&&hrKXIJ*pl6QhJH3yMP`4Inb8y9foTEHAIpaT-BrY?M%S zMMZgKmAlYaQR1p9Dz7XJp|*g+uaxNTx;Lpi z8oMUgkzjMd4XnJ(SLmkg75XaO-ty|o0(Yn*>p&qb)Km;f+8A{`b-bs{TjeS#@m0Cz zhSZM(^+S79hpxG+3W|N@m4)s~-B1FGLxLM_DDzpr-dkH5J}@W65i8F>JY4J)5E~x@ zt=Lrw!&m7pC@-z>`U*WE15O8(s2xAQ+EY^Kt8rC&TxC_EUY-TA!@6e&a|;T5HSS6; zotAp9&IQ4g?t<$06;eA2%FC+UWmW2*F6vI7cfO}6Gzl+=X;JqaL>uReKz3sHhUU7< z+?B2>H%yKqPl=~&E^P`D7ftC>P@29Tl+X#Q zO|}RJUIA#+x}&)&DoQ*BE?BER_X65maF5Sb;qjH1mDK8%Tm`!6-F3YME-FfAm^Md! 
zv( zxGMU>@KshrqZO{IV%^wlKyzF-&GM>ZcctoGt;wR2uvKwAXpHTqL48=`0dK&tXaf`) znVUdi5Go9`HnyyV+YfXhR8@K^B2aGyq2Uoiu97NuWf_e4+{*Im3fx_^s zq{9}rue<;fLn0(}>%wQ)%OEyBLafwPQ0`qo!*ZlPlX=e_1?V=Vk072C@8O}^;HzRymXzx^wdS)1JQ92 zqU!fLq#A+^47m{Ze}!uOyMIicsf=vLU1&QG8-2GPQIJ$>K{QH}as56P9)Wxi^f zHWa#xbWi>Y^5f#j2W8b+8(NfqfW|O1FVNN;XiTCfc&pnswyw1rR=U< z-H;BFVe?T-0ZIxZgt9Q1t%`uM5@&k zN=?CFNl|0ye5Pn!>#q<%#&v;Ix|S&1=Yz%hs;w{ z>kCWi3`@HiT3r?zx>(CR(9O{Ku1YnP?Y00`fLNG6sr$p`c147xD?xTVs-RDb{ma!D z566evU=B_Xx$5Q@o>3Z_L1kLFN=y@7E2!{@;pYJJiRqCe>!!e*c zG+dYNqcjH5(5!BL`jB*hTvE84dx5*4x=JkuhYmHm^lFeE5l5QNkgjvR;aCtoG+eaH zYU+Ck@!A2D771u@3>~u#8$fY%xMIks74zl-NQ7UND3iL@Vz>S6L=cVeNta3C!Dl1L zMfkl-P78#ztHJb~9@rvQ#A%>AHr)C!-Ri1}Y8vNgam5u{XJ_`%SVS}G_^E}V(0V?n zhsLzg2xe3I_?-_55gyQHL#ZorvR(*s5woJJ7hOi_sF#5Bgz!GYT-J_$+79pa6&KND z(^citbuI^;iQzhWG*7QG#J8Lb?I<#D%zR>XpyUrFN<^?TIXzY9bsB5wd* za89c5cdURKU&(w(_kx>EHBILtcbt1cX?!=Ot_2!vle!;N#&lEBCWYQk#2)w{$dBqK zU+G>LQ}_`Op3zO%y+EzFK*g}2n%1~zMjB?A&s&{OYia6M6_$`a9t$a0yXha&UO*b& z%_i!~;AqpesV|ymK(hM~s7~*}KhJ}b168SSXwcG871Uwru`CpUUIKyiFag+Pd}@({ zR&O9xrb!arI9#x;Q{5t$r=*&u?z&a4fc`jCqrC~Qm8XIS47zsI)UI0~ejPMMMrZ^j z)U7QdzP|;sb<_q%?9Ey&HL2Te%yC?btXw2%+QGu^Tu0c=KJEelqN)$(Tn zbka39bY}Pkkg|G5f?77jMtbC+&oEqJ6*j}~05PX;5TUF@^%hM`VEEM&qu#Z@04=|t zXzCKvyQZ#$kgNU$yv)Ag71q)%i0&Bb>9indkrDNyf1a}7vaf_1d-$nJM>F)Jf7;Ni zrBE6a=F;O5&6~>S!viF}e?|`Q`Y=gh0D}{O79GmmB~bfwmy}lkyl$KbU{iW`7OXU1 zNmaS7X92y8-u1wUN;ib*;6AGZ*Z?5AZvb?3=vlLrF0g!BAPzn2Gdcjn)jNh32+Av} zJf)t6+DfI~RTGP_Jji$mz@_vK2Yo7{XL4G;2|W@p4guxdzA0A~)4ik6>nZ}TP++WR z=DBNW;iOPk&jj`8;f+);@WBpPO@n`^WTxxqfd158nlGS%hc@l4rTeA+B-%I~G?RPL z^pvUTI!)cfN^i!QpgO%5RkeVnmihGH+`Y3UGj91(4;C@HY*G!u0AiF66X`(L>lV> zC;qU+!KtK$C*6|efUq#GdntnU@Ccq#Y1wxPq%UQ3>sAW`5Dj5h|UG3SjaIE=mSIBS216cG*pf~DU+gj@2WqP>w5!4QzUepejdgprG zWufi!1i(2Ws)OBdU_sF!NpnTGVn9!3VZ}u{r|CT{FWuGYJPwBA&^$RA;G;)lzP}Cb zr6g6f(3kM&HDzsJA;X{`#q`-E9JNY(!IUF}b}FDnFWVl_D%^$ivXUn>24?_5^cciO zs8S0Xx)D&%a938ASBA_v8^Gp8`7Q|7PS>WZu`#JJm4(wUoL0xCu)IKh>ZP3=;3Ps1P$73* 
z49IcEI)*#Kklm#f^kH;U!F`?33RZYWd_$lNbfSfxJoAqKZ zy*@-EKrwx26N&=&_1D$i?9`#@Ll*jYG|V}n79M~W`V+z8v_N;Ws&ep02>D?^?$7dz zgIwmG54A9T=pF;u=;}=TfD<;e>CI;C)@HR{CN@5Kfc;mGa5OM#XK`Ao5B2|PK<-Zj z5A{E|?&!^;%22nT1GE7yIxj6(&;?yyP49E-Ydd;bzX&b(`#FKNU7aT1!Fy2jp_=Xy zm<*w$BYMBz2hA-RSaW(O^b?}`00J)G!LcN|sn?*X(tb9j?tah*Exk|==RO*>XkDzV zO1~6md=sDtwCd@NIqkYTjaBrX2F>I{Gv^%ujZWj^Pw~*qsVE@>2jD(0ZvZQvvD55_(Kx`~uMW zv!#alP(_2woTYTn2$j~pI#B!QOB>ap_5AIDx=(#|fW9=SyYB}8j6U0jO`5Q}f;zeB zd5>=La6qH$M|(CDmXH73Yj;(=klWxeUgN2(s-{(!E;!w~-}+`<2(8#tNN)#(0QI>N zn$dp(UXL?6(mHM1_LONa&XgAw(TgwTMYPmjP#Jpo@DE@{S7v(fAT7_NF6eq#uUltg z0IA2VFUGN1I~1tTy5Uk@0j=@6=uxoi8Jmd(*dC8a@nC(0)!N$c)*&WdOUVjmBV8%nhVfN3{_pv>rKLeHK3<4LEu`oc1#l`Psrou{<$VJ z&*;IJ7Lv66i&mA@7h0;y3(ED!ttRV$Ee|_4hPVmw7_5bp~MM z^b-RXD1GV?0+|gU>F`Kay-!n7HaDq=KAeR@R@kjvm{IDr8v1?2^$Sxz=uhiSAA%yi zhEA2?+OqBhH|W-?#9qrN@vlbkK%WbHDZ}pCr7ixp`j~T_-gc)?TSIf4X6V`!fJ?vy z)%0Z(dW(YI`3ZHg7@El+STl5TR?&ADePvx&>`n6k&owZ-7*oCs;ARXAE}~RxIs%Yp z4h*T=+^PboSp$RWKE>t(R$95&6@2hdr&d027IsB1%_|_GjcP(|4}Roeat)oPkBx#;WW zwe${|K4Fgnr1Cx>#Y9o>-c!qTSasHhUL7{o0r<=V0@v+a22h33Q0k>N>I6@FdnY`P z(9G0(NskAVRvX@Ngul>r)ODTF)BuS2{YETxmzG!3mkwwXbJf_YTi6JACH=;O7e-6y zrJmq>U39MueiAAnnpyz4^gxldQa#-}!{b#OKF78I$mNblrv02yPY^c_;m z{ql$0DSI&*h>!*kVS_*EjOJ6pfT1=-z`?}K6c&iGMxg5#Rp<$uz*6Z8HS$6 znNA1P0~##Ik=2^BK3HcRu%W^1yP%vtSk_U`1=JV|G2~Ehpsurx(%nbJA^QGKbtSyF zsb0u|dul3hR~)_opglc!ws&ZH!HO2o=!v20O)S$z0K4FTVCjucFy0NX`$48mpMbjQ zg=Batq5I`hXvKTbTA}ZGSInnxSasz)S3m<52d#n9d2oGOziw~33eXR1Si_xEN$=(8 zp4kS_Jq3;4hcz6UCY=Qp`lD9UP5>`EXkMwIFD&Wz6in9u_HZERPNqK-(a;>IU%$Sh zywDqbnX8L*#B@DK4vHZO=kOqTo9RZ=O%c@Lrdv&vTzseLHq-5N6t+w{2UNlJxiqW*but7$JmdwU>5G(Ahn5KYgSo+s!jf}RV}A(~z?y+Zj3 zO)s1F5%e@c&ulZj3M5I-9yBsieHy%DdOxxubdBjlg%UzNuW}*2(Kjwc(`S?%(DXS$ zFNKjIn)aK%GJOriORo?_`|&dHAr{5rL+m$kdkSzOzV_P_Gj{3y_`&oGHY~dYzy*2zpbQJU=)W>1$4L$1y|`6iI^E)6lCle5U`X4>*RP zw?Yhw9LtH6A(1n1M$W`>9M1^^y-m2tf zXl@KQmK(>7=O%Cyxk=n)ZVGoOHt7BCqq&t_BiF<=b1hsecMR9YwR0U@C%1}Q z&8^|qa>sJ(xb@ry?l|svPT>f50(T;J5_dAUkvoMul{<|)ojZd&lRJw$n>&X)mphL; 
zpSytD#9hc;#9ho?!d=Q;#$C=`!ClF1=C0zla9g=;+;(mUx0Ab?+r?eOUCUj^UC-UX z-N@a<-OSy>-OAm@-Ok;?-O1g>-Ob&@-OKIf?&I#~_HYky4{{H24|9)jk8+Q3k8@9O zPjY*?r?{uNXSip%=eXy&7q}O>m$;X?ecUVDtK4hc>)adMo7`L6+uS?cyWD%+``icI zhulZp$J{5}r`%`U=iC?Em)w5tEADIV8}3`~JMMe#2kuAiC+=tN7w%W?H|}@t5AILy zFYa&dAMRi7Kc3+ck9n3i@J8Omb3D%ryvR$unYZvVZ{=-#0&nLXypvDl58(&#gZUx+ zP<|LcoFBoDNC_j}?=BM!~d@7&Dr}G(nCZEM; z^ErGjL7x!xDM6nRL}S$#1bs=+euBOt=xc($A?RCzz9Z;+f_@O+v z-w2|C;72*E=M9!BtRf=3WMlHgGUClNfF;4uV`C3qac z;|ZQX@I-gTDFh!%@Kl1637$r93c;xarxBb^F#QpxOoFor&L%jA;9P?92%b*x zVFb@0cqYNK2%b&w9D-d0=M!8&a3R5Nf{O^AOK>s49)b@icpkwe1eX$AMsPX76$Bqa za3#TBf~yFwCb)*+`2;T@xR&6B1TP|ZF~Lg+K9XP`!ABA7CwM8rbp+QF93XfZ!OIC= zL2v`XM-#k~;6{R*2yQ00h2U0#k0H2?;C6yL2<{|!6~U_sUPJI&f{!J59l`4f-azni z1RqbZLNK8;qy(Qx@JR%pOz=j6Pa*hJf=?s(bb`+y_)LP&BKT~A&ms6+g3lxPe1b0^ zcoV@F5_}QC7ZZF5!Iu(z8Nrtmd6rY6MPTB_Y%CD;QI)^pWrnuKnau4?coccHG8RuVoHmeG zt6h=Ab6wnw^t}-MJ`k_guKLTP$g8EPzVayYYKbY5b}o$5E+&s6uU3tE=TYR<`cG`R z2#2V}oWAiW@@j3SCmuy!t+4dUqsXh}m45Lk@;5<6WrGX;cgzMg9&D>l2S6 ze>aHshewg$4H~`iDDrzisvkUx{6nA+Ii0(BvEX5Dux7_U3W5{+$D_zU5yPCGcog}k zKp}EM_r{~hKMOj2=TYQe0Qvs#DDp3ZMjv<-`By=(FFcC;8$CgW@hI|dgJ9oy6#4f+ zHjK3_7LOwTAqa)>(e=)w$bSMVedJN(KL_D3{<1E5L-le%7>^>qA4L1fqsV^)N(13h z~X?=q|RlXvuIMMgG@r*gX-f@qd7D4?K$e-yjf102qTuk^c{5 z`p2UvV9@E2M^P|>NKZV90uK^>;ZYPMkPMrT+T;I78aY7*p)e9VO(=>-QAhx}uz4F| zAc@PPC^$hS%#HMFi~4S47>}Yb2xP)o?0Vo)6o!IqPS>2E6R6)jioys0=?{;hkOUfi z=TQ{K_AVdJAtg)z`LL-@yVV1aqA(f6!#vitUqtaJ3R6KgYzF8f5PtDPNa@yo&pe7k zI*9kmqbOv7SXdm2&!Z^hf@s(jjKiZS9M;Xyo_Q37S=|f`-k0%pTL3N)3-c#+f7slP zpr#WFK{kTZtv4P;VJ@gd@cH$bM^QK&bR!t^V)7^or63oR;Q3cCH^Q4|6_G#1fJ*sIRLolaQMLu1-#M6yr_D?uT`1KRuW-FOs*W{``R6^t3j#nJc`1xARn>j=&1F`qbO_ul|J$)3JM7K#-k{l z2vYswQ4}_UMubCqXpi$D=5m1ESqY{CeS06wU{we(@*@7lKOP zc@%|9Kz?jD8bX~I#v9EhEL;u>eI#BKHiPiNAYK%<#zLZ3sNnZJ`bE4b>;R;`5HAY5 zK(hOo(M$4`A*!1g{<#j6dLUjDZUljW6E6z4fPR077lqqFqi@8E!d)QSFXBbvUQp>P z@uF~lFVYdji^79(O$QpI2;xQI5s>XO@uKiJh)2JvelUm^g}s0sO;9l);zi+^eu4E& zyeK>mVA1cI#jz|{QjSHuD7*wn2SmImyaEu>Z(c?j1MeP%Qo|4#PB-F3;dMZZei5-} 
zwD`n}!drkB%~KK+Pv`cEAYK&S?MMH5C0-Oh=tuwhK)fh?40wH*q~QJFM2JhgD0~KB z(F9jf&gw?ID0~Te2SB_id<_6G=Q1~U`~u1cO1voi4k&$C=3s(Og&&i6QTPjxqRGBub|wb#qVO;1 z_hF4|zExjAi$QD|OqN9iXff7gY_db`xDi2hC>r8;rbmQW#ET*aNHJz!Y$W{<9BGXh zyFWUcRPV%#q6H8JOuQ)CKs|bRyBQFd3r=)^es9ctv4|JNLqKyt#Eaq(P>tsPi8NZR zc=brUC=Lgl=!F`I6AXRbi5JCDpd7v3#8mDB@uE1UFL>%Ze0?Hb6vqSJ0TM5YlK`Z* z;f-}z77y)9^ZFxe_z6DX&+A6KC{6>A=;a>M$1#Z)#WX;O-j^Z~qKOy9Owc%caPYw$4qF4m yS;(r)bpXJj1WMbQIz2TZ&umH^B_AYK&90Vw+A_%J_) zUX|(-@uFA>fYIA^JV0&Njz_#GRs-6>Azl<009JIm+OuWB;Z)xV3wy6!Tm)dzE1?@< zVo*ECc%Xtr5-*BJ0?z-8cv18NeDr7pnL+wVyeQTKTJ*B*0j(SHqPQFoqQ@XMLNCON z;?V%sBdf^3h!@2s0FFM{hw-Vz1MZc0QEUb5{?L|bD+IDXUysC#Vmlzm9qWD)FN&-B zYbOMK)yiIo7sa)J9KCe<;N1w~MR7gg^hg!bZIr@18Jl=fJRTr>q`!#|NneHa0>NZ7 zE1-D70azNFcu_nVAp64-71sHf#EasofY_f&AC9Q==)@*o6wd(cs;J#M{*Z@Z_lbB> zJR4f*k!Pga$c3hB7nxb5%3&Lucu_nL8tBh#3pEgrcv0L0kUbLU#Ird>D-=b%C|=xO zS9h~hhmK3UC|(9F^e2MFX@OQgB8eBpD*?Gb%P$UcB=MrS1z@AAGkuzU(Zq}5_5!ZzSwJbBdHC0BwiHv_077_AoN1KD82@GJ-s>vD831p(UqAVJQ#;~QG5rG zdffUV&f#IJP5Y6-*u;zC`vBYH5h)&Q&%}%3M*!Soz7-ExJq3qY>EaPDik}`>19}=m zlMG;Vj3!30(luUqlu!$EX0Z1JXeuUX)S+>fjJBN*RDPVB$q78`KYgcu~p&fP+E2D9r$*7|c*n6pq?1ODmP( z^o>0eFG{lk{GbpoO8Ed4gG6q?#EX&}5Myxt^@13Wcu^__y#EF9qBIYXV-SP&Vre%@ z$5_ORQW-!WDDk3n1fU!U@uE}(2r&jDL}?ew`1(Y=D9s1F1LE@QPP`~B1jraGrSB%j zCtj460Qf;BUX+dk&=_l`?-q9>UX&aSv2=-asdSlixpakerLf*hz$)LfEN$HtBZh4(U$mF6nOR9_e0bw{)L$zqChs zfUuc_ok7?_!p2PQtDs?0Ui;PuLR)dkSGs zC+t~-J&&-P2zv=(FDL9(%H(~_0!CtLnT5NT``-+)qnb|oxnVIQXIq6whx#>B%IjL!3GP-(NU|C&OAS*T8sJ*IDETXYH787A% zpnYXWOKVC=Q3?H>)OigZg@LA)ZvU!j2&^t>Y3^uiX{0|eKCLMn{y7bvrJGGG_z-Lt zWJ+u2(#D4N74#EtM_a((RN2wg>TRi88R(d{A{_c<4cbWSK?8-RKNOnZxojCVIs*B1 z4Vj}40tSj4%iwo3Sixu8aFEA8H!~|YJ3TKqJtHG4Ju^KmH#K~U=)<3$=FiT}N{bl& z4>g-4pY8BIcD0`q;4`fO%Rw2Xh^h6JhHRzDVINJ8F|~r@6aoCb2Aptk=4=G?FB-JN zXS+eS8E!B%)KQxJmge>pm4tJezqK_57F3|QqdlcCuqx2l(i&(>sc7_fENf|NYENmX zSkclPDECfl4Sp0R~SCeU8LGR3>Ly(7?+Qqr)r&EK{*MGYIPTiRB(rxdkxHrK07 zPgA1~e40`m@Ye_0+EdEBm7UEcEiJ8)i{nqN3yCx*)w=-9y6q|c`lg0vYDn(^LtFJK 
z9d2NCB>I0E`k?Qj%g)HkixDH~B1y~jr-m;Qvq3Xz2+hJ` zbvZ^5i?K)|I~_hN%z}nLY~Yc?*qsjKF0G5`w5%C5q94nv zWhw9)w+Can7QH>q%UU9a*zC}NN5$p74em5yTLSErZS>#8^w&vN``ZGp&iaNHceDC& z#C~HQtbva{khNP?S`j0Bn@4ESV|}*U^;z(LxprMOJ+59?jq0Re0uea^W3--(r#Bz; zo`f&qc=NfUrM^9-wV|;gxsiqvx+|y9Pf`jRA%p4m1$;ZLE^N0pPt@9+cmQ)eP`4~I zeZX@(&7Zz3FDoMXF;CTuqI}&seRPCAx3#=1a_*&RXonvB{XHWmJ1>Jquhi_cjGWBW z%)GSh*sD0R-oG?r6=!QUB_Evi5D~Qw)1XrhVpI&z!pw6t{Iq^#VRio2j?T6K9VB(~ z7ij;C93{5~n{m+gFX|M5p(M=aVsj}oVTajcKHNOdTte6^!e$dThp@Rj%w^_sG~Rp! zVe<$(U8yGQVZdcQxT3A0iSEgTEp?qubhoc+XbRBOzNwYIOx^9Pnuhjd;R;~&+b^TE_G^BR(9Hk z8u&}<2E8MT%tt{-7Mqutk2KSB&rHJ3BJ6C!&e>u1o0rm#)DzZ4*nH?n!T)JTxV_Ie zUHqxmk-VG@3)W?(=7e{p+1w6YX)(8&k1@9q)=k(V!p3dtMl-q6+@;hUYC)cO0m*&QZqwnH<)jYi*}p&b`5PlVHbp< zDYG6yb5#%Kre}qacAFoJ+u4WA4{JyZ3A?BZ$@RcA<1s@PuggqN%~hS47DC%=el9NB z^X3;cv?YW+G7N3Er|I~s7q81nOHBi`yz~&-Yv#A(_VOL`yBgY2g!Ol!%_e1!e7Gls zmYvZxS3WU+8P~S`=C3H)H-xPtY(1a_v|)XquJguSi`J#(s52xpl@4rLUPczRGi2mX z=HCG}W<2=A{3nI|o3P6Xy8>VvR9J5BZ5ItXR2_=+)Kt1I({j?o)};koxOn}x@D@Sq z_e#PxhV^^>{ivxDY>35OT^ehd6xTtM zEmJfHts?B|Fb8dTWOP1>>7X=Ac3iX^OD=WLbi%GB?6H8hP94G7gYHht|q-D@) zk(mi=Av7&4b1d$-phcFsnu!|-dtA7Q4=maE_M&xa-_z+COrK+xGK&|$Vg+hTm8F_m zNY}bTSOQ=tXj6C3Gsg+XI^Z%BCd^BS~hDoo=?~d!faf>f9TO~YvV&V zj9_+Q*=f0sBE?>;*IRC&_T5C-3kiD>AYH84_uNY<<(r_lFcN7wnd+XFoe{p(Tkf#j z8y9!Cj)xxyAP#Cp5VxhlS6K5{&wLC>Fe1@=B5cWzy+N@c) zd(3lhe@!P&PA*KG^mMQ=Jv)cS(XI*klI1nPjTJ{NuUp=rxNi}53t_hc?lu*d+xvuZ z_b=)yOwY>AqJcCAcBC%M4=f)u6Si4Cw0uO^9fZAlo8=SBr-Z$Rus26kpDp{fymu$P z|DsnP>F?Uos&jURt0*r!Gd(#wE7zTzndZt#cI6bfle2OQvhv)SX+=31Sz#wR%Xb>k zt|*|q{KC8(I(w2cGcyY4|D?NhJ5~_(CWYY%yOXdt?30bMiIH%Q%*njZ zmQd#P=wc;-=H(qLC{oyGHU7rVfQ{Tj*>-LS`_y6HrdS7g@2$)$uccX9^xr< zm$WSJ-l6TXO-^9y;(kxrnhI;Gx@!fUCp%=i-EEcWMt93rnLZe~wGYmdhf?Rs!w7p@ zEa!bE)2At#e|8b}_6YybZtD$CkS8(rt@1>|-nmttOxU}W<|VX)W%M}&t@hArdgM<8 z>1xyD9L9c)oFb>nX>z)pA!o{2GCdmIL)d!>yPL505%zw>hIV&j!*VzuQIBdL)3Rwp zUBJ^mzoCsDg4)|Dk81cA`poj#?Aph!wz?G!9rUzB&yS%GHIO2IQ$yog%GDYUKwr@g zGy>sBRExB7Th|uo2rO!7g(_`Jd)_p9*vzY6)lsk7l&pO|Ju4?Oo0cO}GgI^GbF=-q 
zft<{oyd3&;CoerOEp=H&T25YOeO*R+wZE+?JF`Zs`POQGWKT`kom(!`jdQzPAs-=E z67~tgK1tZU+vO^`TCO4NQ-uADu%E*ONu?v@s_ST2MSuOTGSJ%6)&V~a+Zce#v$wMe z&Y{{MdIQVhFiKrEI7%h>+t=E}jYO+z=x7Y63sqi>Htm#`$VbXP`6$^>*ry5m5@BB@ z?3;vrFZ82ca=p9)Ob^J*WSTwFnEEVXpW7}s$VbaG^?RPMFA(-cFnvOl9TAq&Q*R6W zrXgjg>{&3MK40?&TK#Q)`eVmF+vJ|5wKmknKXwkU{_q!5$0Uu%Oj7K5-XX7v)t|NU zvGO{?zD(GCgnfmE^*I_Ab3{_L`kTD^P!gFQ^`iZ;Q9eaJm9Vc7_I1L(0s1x#pmKT~ zL(Z1Zj|Fsryh*;0uym7oo3QUtpxNI?^y+2#GDeawm#>&#=AA~L3;4?uuzx9iJkj2f zn)(XDtifnHY{r6dt7y5OPMQVdf(75&aei1m^y8{=^kJc$K^wQ+$njR&&|$;JjXyLk zGpBl7b7y1YK0%bs7I{G#y$=A_DxL|r=_G)8|Fhap*>S=@wU=m+Rjg9Y^)+u zxbzI&$ZhFU=)beFsbFCPU7IbNn2mzX!051CJx&zLU|LMD`ehN97E97;x+Xa@425>$pN zP&HbDjzR&{hC0!)=p1xD+J){#_oIh3FJq+r$;=M^8T^C(mLmJEwqb6TuUb;(E$!^^ z(?DKGr{8vYv%H0=qke{e>8oU$d@cPAIT|%~$UEib@-F!rHFmsD*bivP{pf1B z0Z)-{kZ;6O=nq1%&(oigV?QSBr^;3IKN?WR^t|$`*0$2P-ir{mi3&~Z83}~E9cy7@ z>O~T2f2!om@?CTl-Ywq)bH0Oq3KgAakD0&6MA%O>PwkfP)0`66ChunIT5L9^iVkKy z<7d_~N%Vg^n4~h%Tg;@<|E4n;Opa~;;_%A77N#FUo3=8t!otrN(E_IY7}|85bQM!9 zZISmf1&oI&VhZKwWEzO4Dl|WsM%ewzU}c;#QW;9vuL=7tg!0h+26>;B7=CpS3&P<@(fk+3uo z{+X~e5&o61zY+HLYvld%SMt~LH;gQQ!i<6>=nulu-1jeHv?(l=reV^oOg@rMzxg!B zZCR~8)}>sN^n>y?`nW7mKOdg)LgcCPFYUUYLAUi;pcL~g#|gT?EGAJYG!U>j{t?41({jtdHKokyyE|9?=HZj$hJoB?}RFXCEZ=! 
zoe+Z{2@b)X!QF=79w0agBoG|3ad#Ws-3A%l-7N_+xVua6?_b^NkT5gnocBKWKJWY8 z=brQYn2@fnz4ltGs>>2`acmQqt4z_dWph<5Q?_E+0_F1s7A#ko-`Cm=A2p`nxS=@H85%LdV8$I|Z3sW?f+St`k-qDVzuX>b$iopTyY2F+kL zSVVe*yTL;$$*JWcsdSS{FR2QtrjAM@RX3>`qO$1+*!F)awhT|haALyWgW8AntQj2H zp`HCW#+`pKi_hrb(D;8-HlliP@30=-`AueeR{plo^}2H(uX}HOl|g*Kch{fcv9CK} zz4qafR4Pd$ai_mSyU-+SXsWNKd`PPv?b_*ObA)OitV2kI-mR>oVu#Kl`YBlx`(LXE zw+iVTzjrreHslroD-8aI07Di-Rzo&Jc0&$BPD3uKxJV_1R8mSMl~htoC5=>ErQ#+P z!wRw(sArYD7BCbv6fzVheTq@fsh-u+QfVlaMp9{|@A{2;@*FYR=cu2g*;A|^)wc*D zlWn-utY=;9Dav~MDT_Eu@BBb9RVd2qj6_j4eN%7gg8=D&cOAy1%~ocF)c7ul!~TL$KO7F ze8vB12*)a_8mjB7s3sNj0z(a{SfaAktr6TI)baYShI&FKq|$hl3gk#q*5{}TT{IW^}`DNv%ZNvL|gFeFYHH9`cX*I zLg`luY+D_#RN;{t{VOzj@8u@mhsm)LlPl{;^wY;&`oIo$9Q`_SjqN_=8)H zrE|5;6%p1WymdnP`x&Ir{{E_zVUS_4lfPl8R5D2=GpEXmKmS@WhYg2(+|qKZAb)|P zTq?_#H?SaAr3w|)FZBoZk1>okj1j2~V-4dB;|&uG6AhD`bFzIn!g8Dv->v*Q8QdDp{pcOe$HT7V(=6?Tzj~`pgk!ng_T>uMmaA)f#c8X4(adnba8NXsN)f3PjS~Nxo(r(&?x^9|-(R>| zXgIE??C-8=+2486aEf1b|8rCPC_&QgaG~L}zPkT-`Np}}*S3j2Knh4`K%QpJZQt-6 zZJRsDw#Dg}brK!e8tf15Dn;zQ7_0A_E=uK>sQ&E4`r2=u>jw|{^b7kugW2D4soU_U z;U*V+4Ob1<4A%`eq*6vIWu;P1D&-d&ZW(S?rFQ;!EW0t>8tU(?V#2?ipXzPPx7Me z?Oaddh`@HyH7uO>yC+%1wsw72YtP|lr%m&J?ApI^J%dlB?^3D0z&R%$+5~6fcrYTY za|pX>o_5fuA3sf_YUI}80%LNi)Lg(tS*4b3aW0o8WJcdNN7x23rs3a{H@X@*OsFlD zdI^`Fj3%Rnf9Bn&8O>7pRVsBB8m&e?>w=_Gmus4=?8p8?-^l9M)#q3vxLs(6?mgOs zB-ANW>A_iTC$f&_51;x{X}}fBgsNsLJ$!4D_hm71yKnw{{gS=ybitUz zm`k5!PN_6rU}XQ%l+&|PEBKD=fcJ&&gC0&5vp^W@Q~2@T;gMg*rc_-Y96bxozDBUiGRxGpLQ!v zxQOUDCez32B9$4X}ZvUK=N@uBrI6l_wz1Er!DPtRBh_S7)ow2=XGHg@*Oj{9>7>*rsZ`G>dIA4zj$`nd5t2k*uc#*@ZV z#?!_##L1tER+f# z4*FSS{KF!jC@a%$|R}$rhji%RJO*pBekCTbqmKD+)bWD>$KdkUSB3JUzxo1uO=sa zV4BjIGW@(qCVi19KNblv@tL}mwX8B_Gi5jBFy+)2;KOyQRHjK~x>RQH>W}}&&ntd* z^=yxuH{&yoT!Z*0P8-DElW@da%v91AjH!e^!c5|g&r-I=)oWC$U9XCs zAIHMwOchz!R9-K@Y)bKC;h+w+f*ea#H`PpN2vaSo%#CuKwVUdg>gxNkrxbsIi7mzU zb7!{02vb8-WBsLf6nTNEiBx_ZVVIhmc*=8usfAP)EU-n#Zp84@Pbt)m!_)>-NqT=rv;A*SI!FEUbJWciOp#+W8kXH#~)&U6ZOw$)cJw7%rOsk0-N 
z*`|5_P!`7$i%sje2xwYjT54KmT5eilT4`EkT5VcmS}T=RQdupPHB#a0by8U`mEWbZ zK`QM3H?1`Bk#5?6O{UHIWh;^1v`s3T^<(ob`X5K7a$LW5rC-K6sqfx1v)4)3PbchS zxVO|fxNAg6n@ZtfJ-Twtt$+Cab+Mq<`cLN74vXlZx2*lP=U!{F{~)i`zFF!&tH=*4R?(j?U~Y~!ylDE9i*}|< zrpu-)QrRw*9a7o3$aK|o&2(KVyQIPa!@;Of$2~{RXKDu3<{o&v@ZhfYa>n1uAU8n7xFJTiq?{8il&OxT5`=rOQEGCY5)4ZavGyl4oT%OCqaL` zvuuk+OQX5Q=YFBaLqzs(8Z;wU)Di;xc@tTrnKVrg@t+s@+dIV3nuYWo)~u$-n!9sh z=fWh=(f%}}{lO=x9J2?JR`X2MxeGNf(K^xJ&+DD6`ItC}J3+t3H>>6swXk-4S85rx z%n6?sn!i*|C45?FS+#6QPIk4NT7GueS}rZOmPZTJ@@ibvJ0q2|QelAeQn?_N=%r5n zT0!TW{8>aRsvi%=uz%z~&!zH0-$m*>2>mKUtccpp%ufzYlif!9o&NtkoYU#d0DPluwd$&P)*lyHtBB<1dj%G!N{>Pp$`sv})Tbf2+2Ew(~2kzSd}=)3YR$A3qIEsC#gOjyl?QWITWckiTT$VOPTfUg zt*v3A)?VwNpV8j2ow?tZ3OAGUCY-?}i9Gk;rQCj*+CENar%j8sE?TG-rg2cgg*=YZ zA4}y)!b!dsp>@}<)p87f-*&M!V?n=MP5yfE*-yVZS-(S@kg#ISjB5O8yVhIlqrdr~ zR37o>hIQNPmGjH%c)5Z7+VM5pwPVLO2id>*)b>rza7wh%??SPDTR-}765^L6v8Bud zzuW(pe1e?R4bdoD({_dz8k{ldRB2v+0i2#%bfV3ED($k~Uc?uWeiJ8>zgN z3P+*urSd^4aZ>rXf-QNPHeH*+e`jj5_)~wJR{12A&rfPdsGokYFZ%tU8z9`?* zZ!g==75_TCmUwu_k=S2P?g|CQ-&59BXpQF6^Y$)Uo%k>7oTki=e;7mCkobko_P;pN zy;a+;AJy|ATXTW7L#j^k7cR8jPE!_Xd$hgUKB+oORh4RrgsT_YLG4J=%Msd9?U+=P zNmbELF7>;$3A;i2Wxd`l6JO8JPHX3WTEThkf>e`B)y4iGsQs#){k@ZOCVB5Ajf0fL z7hJWg+BK=Blxix+;0d>h?JMY7I>`!dYxg)D*Y0R{rOM55*M<7tQr}DJ-I3l_*`Id( z`XyWYV_8(tJ7J0V{dSU9OXm4EKLGpJUmcL>-qAnYQgT%5x%QFM1nq_PQhTMn*4}7u zwRhTk?SmF4Rg+XTshXu~k*Zay?o#!TYFep!O4Vzn_DR31t$o$LY2VF4e^$UuvAmh` zBB}aFHG@>8R7b==D=<1LTjCq)_JZoOPADpe^Z!46QsC!@ApZ6iUVOgICbOBX!%SX% z7nsRwI=w?oe}99(oR&*{W>2%1RMSh&ELDH0@=5`7L31Hq+p@}Do-{m;0rXD)57#DSZ+jJd42oVmQYg1Mqpvr09aRI^Jphg5S)HP=#eWhZ}g zRa7_EkZNvr>1rPRk3v!{^51+lPd_(J*gO(GFZA0-|L&4q{=)iCq9nd#XKr9_#1XH# zp;QAGm>Wwq@4vWYXKrrRufCXDNHyOAbFftN|HE}RbBLLTq!*gon%haWfK&_WU$(V< zSs*G~?W6}zj+P5Ecl!}ZxKs=O=*AvqF2^Lg(PZvz?)zieeo`&^W7&cHs)i)X4mOWt zPi7us9%>$D9&R3C9%&wB9&H|D9xK)2QY|6Xl2R=t)nBAqTB>EFT2`v%q*{KZdAxH@ z^F;F`^JMcB{+uMzn`cP1g1#TCB-JWXtt!=OwlAvadoyl@{+CbO|KHUQ$94R~S3mxv z3rCK&Sz_k)hNFR(OSR&UR#;_T9p4J}YZ&Hr`ZWxxR!(xY!n{epTHzknieCs58DcyC 
z;Vgqw6aB#vqlmw@qPgp5nfiLn+c<+%tN%EIH1FaJGT{;oS6v*T?K24zIA2JWyWRSZrYpC-X{diZNBcj0 za`>-LO&wFbXy!+O7nv`aFPpDOwZ2pvNVVZ2^HuXT^L42f_`d#W`0h< zw#b`GwV70#FEPI~ztRuW%x|U2p01_sq*iUE?-&2OPf7V9n>K~`jX?3Iq~_0@lA70PYh2!hiKb>M)k{c#kQdszXK2iH{DG@N2uTrF-EgT*LS_WJFm+DWRN6HYNLX4@H`+AjW#&o}Vz&oEm=aG9)G{Ar>k zt;Lg5OSOYkxhb8qQT(Z8z5>QH@k^NR*;m`xwpz#AGTOh{C4Q{`!r7(mBEXM~CppD z+%QUfc&r_@bhMpt4$<#__iUN?1kVy`;h-hS(&sG{le5cV`qGnECt13urO!`GCpw>W zggwwQh+`*9l!eozky0I%R-nqQK05=4~q&hv(RXfWD%SLvo+$59gQmOuC+o#U>Uw)3%vW-V{ zEZZ$REITc`EW0gxq&icov!yy$s`I6~P^yc6>{|Eh&yh-XmQ?=5W226J;}P3~o+{5M zmH@DDG?+8*Up7yX+@J}rORms|3eM?Nh#ZI67a z%l>yh@|onJA23r{L8uyBO7YJugIR9EXe z-2c*(?3Q;Ho?nQ6lHC#~)invXe=VOa-?<-V`C|EM`6ku1Qe7w2^^2^+Ij5D+?BDe* z?1X*@@_+atc54bIZcQmwe&RZQ;?^`)3(vt=U9E0bgVks?Sv9L!svD)cNvfNr`iE4v zNOh}JxA73W)!pi0O>5b2^|E^F53#G;rMg?HNA;Fek4g3T|FnnL9h3C8X6Hfmzx@Rd z)*M!j&#k$ox?Ze1gjIG;R z+*-m~(pt*;i?y^=_eyo2RQF5ufK(4k_0T_U-G9FsJHDG_^L2bP$wuq=W|B=c{+reY z_U-cUzu7M9=dvB8@zc%;HzfYkCzLHWZ9mMWF0<#Mt=@QCE;#v@CzSQ)j1oVgZ0%(2 zsy~`@)!M}xYT#xJ=j^AY%DuO;i>=+P;m+Bu97&y(Y7D`ve>%=S5}vfR?e_J3exk>e z9c|FtI)GJM`&j#0`&s)-^_)~WVZ0#K=>I{FBRhf}X&s$#fMey8J2w6R$2y+tgVqUF zKD{qW^>V_ojdhB3deY+$qt(jCQ;<|I=|?MaMB<|r>m2L+pO(&G z>7~}?1Y=z$)vN#hd1&hz>)Iq&X{^6nH}ZD-qr+BqEH@l;*`m*7t96@HZ%UQhV~)x1 zwC?#Sx3X(%g!}-q5Gmk^jL3n4ChB_TwOq;5bg=6la79%Fp*h;2 z3z(OoFX&OjD2xGfG@JnA7#PREI0nWsdLt{cBPVjB6MCT!`e6Ww*?1JkaT2G6FcE_( zEvT!>2kFoR?a>jP5sFpVitX5m-S{komIA4e25zW^hG>MwXo?W@#UPBwY|O!2%m;ba z)`DDX`Gf<|7oTOhX@xz%3bHOQ~_P6)Fzl0iXom|=kx z9>@%T@XH0bIUvjhQ39n<8f8%h%*9*-wNVEx5RBGfPF!viW_rO)Ud_8e%ofJA7@>hN zE%d6TIXWQ{)P%DlVd;kfh{9-$1-Y?I#AHmxbg(U0-V0&P0OGI?#4IewDUcg0d9jig zYYZ;pGXBIh+yJ?<-oZUQz#}}tGrSPOojBcz)15wWrw`nHkPd#x2=e7luH4C$J3Z!} z6S)zHd?Tq%8{Oopv+$yC>`Lq{lp&izo4Tu|BUbu#6Y; z^pY5YVHklan2Lp1f@N3<>flBHcu@zh-Pnr*I0W+N6%F#@MP9tF;yP}E`Fp(tz2ikZ zUc~J6Q3!8(#+&WaJ2l*31oiYz5AxwnKD@Ia8*+eodlyGZlm`9dT^_%pF6yHZnxGk& zvp0G1X0G1h=nnGcJs8Z@o4IVu}CcSrI5BA|S=m~Fn z!aEk1a0S%f`!Sw^x_iIE8zFoI=vyBZDUb?j;0a#zp%;8IAQSvS&3y`@Fp8lBN}&#z 
zqfbk;LL0P21bU$l`hmLoFjt?U7!GRc!+d;BA_mmQ=MJci&pS{TAL`=sO$c9N^>u~< z;`OCIz8(lbE)bV5vG|rjIaC1o_a!D@^6pEXeTl`FSbT}acK{B7ocaEVo4Ad;U_0@p zFMa6~U-ISq9$$q>mllkdE)Qyf^`~RK>BfM$q??J&*pCx9g)^Wg>8MG%tGEuvO2=5~ z?t$8*r^nJ0clxfFk2P424Pbrgw_qDsU;0=OZ~AMvfm?VE>Y4r>$XWU?_$Gv(06Fuc zkNv2jUnSH=GqeJA@}o|EozMkg=mv7+Hx2Zj-$E?LQV@q9aro`PF3@)ws7D4@cp(EA zCxbt-AU6U*-()C&LZDAF^u$n%!$eF5`OCmq8P?%HV#*!<}me{$eI z0h2HV;mNE!JZLHsKF2Pk-j=&piEiV=wmOAP(aw zj^iZAqd$4{KaXgTNB>K>f~&ZWo4Ad;xQ~Z;jHh^xmw1i0c#k-I!WVqw#|H(Rp&&U@ zAQjTU4Mu3Nfcgb^ff@w(fqn{L1p(}H19Bn{@}VG#pg2mQ49bHZ52%V7sEr`hMi$19=Xh zZv)727AKJ3EG{6oS?J{~U=<6&Q;g77yf!xT80w|1PD2dW2hl;3z>ZpY} zsE3AVg60TD8?-}5bU{}{AQHXN4+Aj>LoouQF%A8tbqDo3Rx; zup9eu5Jzwvr*IY*5R1#WiW|6%dw7T^c#cku@2TBPG(n01Z~8g*Vb6 z12Q8EvLhD)kspOn6eaKr%Ax`)qZ(?WHX4C^X6=t*AU|2jL00CTbrlZbG?-V`_xLVE zHtL=&JqR`%<7TUiW@rKOnvHyB`we?=6yzow{gRzN$UdGQm5~D#3d5Iw}G35OdjGy^A^qbhoX9ONH@rC0~*nm-2DaZ`u_ z&PWY%Q6Lj?f>;Vv0`no48@7YKE_evvg(#E)%&Aab6a(9Ap2``5ksM; zVEjTKgeYtR;}@nrg&Dta6;uOtE!-91n1orF17avl426lI@FTniH7-JJig>~s#8IRI z$W4(j^gX?vII3Ku>|x)iSzgq*YH({lF30WOXfpy zlmv4x$=pkJ#Z=HwCFf&5PT-UfrP9C(VlPFkrI>3e>R*bOOA&J^VlK5AyRjFsxPe<- zE^|kEWI$#7iXe1He+&e%{=%Gp*?<^Ot6y#kQ92nIyR;kfpa_bgIohEEwu3$?eMpEh zE+9u`G?3Ra%|NVWm{S?XD6;~@ROT_rX<61;mUWh8on?um>Vf%JY>9bTj+LNKDl(^v&xNSu4`Qf943*lT3&OA% zYp@R2@c@s6s7y~)rXMS3MlhIP<<6K3@>ZF=Rc5`FKj5PfRZ5`}s$ek2U>tVgFpdgQ z)fuVb3g%a}25O-nm}k}DI0EKZ^}G<(*gsSw$JLl;HF~64V>HEYSO{XTMlV;pi~B-U zFNf-=iOHA^;;nuK17fT_1-xFHTGf6Ia#))j z{#pVRKpuac244Sl0jSHb4?tb&a$FJmZ`5V^G%2bEY~0<=*tEj&VgyFvEQq(opF*@OgkMkwzhNO3|DU?QU48joX26JkC4Bv!k!aRKC`3-NRzo-V}Gg>AYE@pfTuU0xs#003upS$P2up@Epx$A* zK;MKFK^?RJc?}CjYjg$k2xI&(32GcR9HTH5GcXHtK@Gy_ldz?T6{4#Xh_fr}>`Gp{ zuEJLAz;5irK^(>v+`&UU0k!G+5?_Vr<_!9?TMD?s2n`mH%WehH2=sNgP6$OeM4}gn zzZ-SvHVg|v9NpI9cWeYPbfZSy$Zt3D-|dVL;c1ZpxxxCwi=Y@vqC6^r{D#*+Ei?di z3MVh&%s+e`$aVN;Yyo40pF=d*uEUvc_%+-F^A2aO;ji%yAMg>M@l6O?Kt%YX4A^cX z#)0@FSXTt=h+rKNtRsS+j(85%)!hUyAtGzySJVYDMm9z>P~%AI8%fS0*@hxJBMjl_ z0rD0}-Xf`cq{I+V_eka!ITjNz8Po6^=)K5!Ait4IupHz$k~kyDbL1wFhsf>Ng}pd{ 
z!#IYMAn%dq5ra$k6W4JIckuv^@eIr{@(tdDTt$AtcOiPxD?RCzp7cu3)Nlj2=xKom z$VpFM_#qPlkPSJJ2l-GC)VpVKlmdD0NzHpw^PbeaCpGU`8$qZKdaq|w(C0l{Aq3Q^ zC%xD+6y4ArJ<$jK5rx4RhLIS9@tB0En1NZCiv?JWWmt(dSdWeP1KY3@d$1pea1U(jz1Mkrg?R8+lOx zg;5M8Q5xk?5mitfwNMB3&=5_~9KmRVcIb#M=!ytLqBr_sAO>M5Mqo6?VIrnrI%Z-H z=3^0-Vg*)X9X4Pywqgf%V;>IU2#(_v&f)@MaT!-}1GjMx5Ag)g@d|J80iW5u`Lkp+M)wG zBMjl_fnMl~0gxDi;TVOnn1IQch8b7@^4a?kPJ{gPCI`Kld+#?w^l^ndm{*@ts08ZX zryaV2ar?~15-bCG?L$8M+!CU%3ydH)ed(9J^hsal)R%hqrOthcsV{ZxOa1!(4(ijF z_xHUd1Z^Suk>7r)krM?$ANH$<<{%IKCSexni+;Ot1jj(!{l4Iv5dF)bDypMDhJmsB z@4{gm#Y=p|XCVeqrvcPw0OJj4iV!f@0pxA~IU7LE25i6%u#N#&aSsoK7?>RN!ax)9 zpa_bAbqovz`5H*>2hPS^5bwaVIFA>I1AP|d0Y9)!MwJ7xN6`;atS5@~M6sTzF_?0sDX^Zb=(BG8{`6NFo=8%DuGgH zgDwaIxfrwn^v@v1A9M~EKn#P4VK6Za&IHCEoEtSz9}N)+`e3lcLaYLD3??Uo$;n`H zGMM!YVLd}w&yXw#L_X9(6Ep+!7%~Bqun**5$VoiM2T<3cz6d~8R7DWT#n4XZ4q_QP z9?WOxOdJ5~7 zk2m-NdSsM0GK2h%ssiRQihb57@;a(N$j2z^I_eJ)$Ed4d{80~t80`WhG~_`M5XWfh zGMe{~rY@srV+oew63E|Z<}il79K-v@SU~(^c;6V}AJYQu(Gkpl4DTN^7e{dp7Z4}J zSSKVy4irEkGz4=W%iPBfz;KMjdTax=97_$xKEn$k#(5zV{80flK`qDi!C(vpb05dt z$8E(m&`;wY3NhXd9`FQxG`>74g7=IchY2_X<~;riJ_<2`noUrU8--C6wLshx8e=?W zU?%o~ZD9g^Jb`#8yhof66WOm!%m(H?k$5N4yAvCs9~gTgbDp>yzhfgVg8jk7+d@pr z4{|(-{lKIskef-PaSE{@*2&CiGGk262x6K{E+>b8bxvlTlUe6v;+R7IrzAr$ltTrK zz(h<2>zZ;4cZ8Vg59T~I5Y5mQ?Ll3q(ic-#fcU090dt*}7p!+0u}%v`PxQtn?7|*= z6JojxQiAzRuY@WX1NN)ar{gr3|Mbg3%t(t2$b_a~PBYqp7-kT|3}To;eP>YL84rc{ z%@+a4iUw$jR+xlYn1d@|e!txlVx|Yk+f4E{ll9K*ig0YfUhKygA!ey?K_Rf+&nkny z7=mG7ezOkaDBgj2&K4*Q<~O?<*f-54$FrH|YsT{HvR)0~|+ zgd;-CO#>^4cWw}xqB$63?h=rTxzu3pdr;?j#X)b+tBB#Cuji42dC|Cr8$!(Yfj_c< zxy)xS^O?*1V>l0%U62|UxT7J0(HivBg6$y2g|6WBh16;`#UWB{)( z@&a{P)DqNX(MoIt%Pw|8Dlpf@l{2Sk0VQE2xCpsDl|;fJJzSS9l}DnnL&mWk4<0jK@SUpEY-IPl&adkqdc1 z-PaBQ%dU+Ey|I?wSXU6GP#V)P4~)0&1>*2Yi1oy>p19XH!4j+maj)m~-<^;QRY1JI z*Tp()#daY!Xz+$FdSC#^?}k_~{)SsZY|M(h$PZrMI2_bz<6)e^IUzQ&o=vQ06YJU3 z6d|AoHxbLGE!c+F_>8YYY%UDuw7D#Lf-yG_0=3&r?KV@pKfI6${^*DZ5aS=L>yQ07 zD8v@#wuL_3LY=qNL<5kEEiCl1N(xlCxzI? 
z`nFl&f#zTy+d6=`Y}*3H-%ihLH^Ge7ps%)vVg)vU{@VToZ}DD;9mP=|6)_Ie@f%oX z2g~eWnVsZgCw1Cc4U;enb3kA1e1$hc>|#B;SkErjvuh%l=dRg!ig)-R#O`va4)U>^ zcy<%dZsOU)Hob><_b|6T?a>Y7bk6~t#AzY+`XL)~pdUtJ6h>n#7;owrr>)xxN zPxg}Ez4t(@dzsVT=OE_2%xy0*?fnGmPMXEOWJr#bNCN{j5ZAu6@J2dhKxQ;UOOWS% zZP5XpLEZL+qX&ASF9twj2<8;VbVP{5kr<4jAU}u6&*5m?!X4ZLH9W#Pj#EXaUN@JCj#{4sj$7&SW<1>!nh6cte!RZ#Zpf?V63x@bv78x^K2J%1wC@M2YQ0KpB)AAdX~JNT?z7fmYB|x*Ry|syq+bmXUXeX z@_P0-$m`h{+{O#U;WNGoan1<}$j><=%&>yk&v}6!JeL#n+PSizzUSz-bJb824bT|W z=UfZ4LK_hOxjq<<37{tDrr|ft#u6;YDlmt0%;6l{?ztT}3gSI?9pw4kUC&p*L)yaKVGC-(E?{{nfx;0AAGMiyj8 zE(C&nUnl|klcDC0@JV?M?kJF+ydiV_<&FNDnvASiB5(zFhGL^9`Hm) zkf&(IiKed6)HRyAM%M)UifHN@O3 zgB(R~$4(r=IZ)f^2cV|W&+roDC;AJ%3lZZC1umerF~lB2u42elOkNa2Nt8x8keirV zsDpZFfW~MF;*JSNe~bdX7(@PIe#30c1HBkSFUG6|y%@6*#2!PAW2kM+72LsnJi=4F z0JV&v4`aRwLHR{2`HodVZDWZyHY_rfJ>@^U3EV0KDd+c+(!W(=M z;$lje;fHL<33}sVJ`_YL(1#bxgV--t0ljjuE}EeuA`pq*=!bzAfzcQT;=f4z7pGz_ z$oEBZd~rWc;53N)B5_|N?u$1;&My-8#fNwTdgbDKAuf^YOT>K19pw2^I%Gfqi2oAt zU&@W*Am5jML0R;~5DWvcT_UzidqIsaoy7&j;xhik2O;=UT&Avz4{NX;28?lyF|IMjHO9Ec7}q}H3%(0+ z-5KQTI<>t{ZLc%u>p74Q1yKa_>GiUpw%03z*sj+=eKZ5Ly&fvW%?qGEZ!))=&%j)6 zQkPrw zV%@jC3c>Wm?UW$)+l+CWcyAX&B~a_zL8ylYXo()^1?qphKgjRxK^Ovh@;2MT?a>&A z37CUke54`LGAC{0D<4RhlijZ zcc{l5)_K<%DqKJx-c18H7?B?2%Cprix^zO6%g+|;=M<__lWl%@!oq3>U)n|+dC)HptD**K zqYmhqhi%Xaq3DM0Ah!?6?L%_=a4?2~Ts}O8TOdCV$JWUh~x+au=oh`BxT zfERq>hfH8@kC@vd=JtrWJz{Q;%7EA&5!<6GV0(Mi3jM)6AFaU&T*Mn87*;$^i8Nqt zk2P444jGUc0mz0NC;-NITn#Nj?>ruasbD^jH(?9tjmPxH<2`tVPeME)k59SO&&=!dOoj>j`5$IfV0|hEJH^lPkCj z#((k_??Ep;O%CS#G&RWaQ|9@UetDV~jQ_L{$mi3_V4hEFg8Dw4j`{clJ8%w+@r-_a z#=M?Ui)W?L0g+&vde#Tz{@E~$#2AdjQf$OdkfUe&LHy5-g7rR&26K3J8CP)~55QcW zeG%fh1;qV4J8~fq#QvPvpOd%e)aZFRR0Q*QUKQ;@-#uTBeK?L2I0feM{2FfJ4(@^R zo_`kNMGBCs7v$=N35@;19gO{gF<;PsFUZ#m`tJqve^CQucyi*-1N>!7b*>SKQs z;*}E=_=0g?fFOH5ljB29TFmKx(*w9KE%G9KEG>Z>inezMyB{QjfRH@9hbk#yLdeBCdcw zeal#HU*I(u=Pl#Br510$;JXm-oI#JgGb119!FTk;yMdUAW4HxkeMhYCSjRio@s3#E zv5xn|_}&|SpoZ_M;rkrOjl9T@GN_3l)Cc)~-wfpXeJivqcILsF&*Uj 
zJ$ZgV2TQRA+i?JgL7eZ&!~4^?2ah$*B!(kHw0{BaicI6%qMO(m`~h7upPuL z$4YF$ZZMa)gP>M%r*IY*5QB@LK5>um1k5S!4c;LRpM?16f;4c032YA^)506f?_+ip zMlqB`X;7<=6;K)V&;ZQoBXj!5oIbWdFj}K4dY~8jU?7<5$H5qiNmvHv^l=T=gPMJ$ zW*@gnd8ToV4feD=O-T!@260Z+fVewr=LyL{AXoXJY$29doe(yKo%j^)v7NdQ z*88nG8i4hFYmLqzkKg)%e1DsO$(V+DV1C~i>l<_WwgH>56+5sCO&1|!TM58vtI@67*u9T3-d;`vS--MFwgHX zxP&{PzTY2#alez}?=SIMIPr@#oJ0XM#RUA0y*P%`IEQFl#65fxPEN^Sga>>PfNaQ# z+$e_%sElf;iC-}k<1igFF$eRp2urX5o3Rx;a1*!j0>tX{4spWC*$Hki!3=j~0&zHJ zMGn+LLv+P>%*Hw}hBIS0Glnx`IPb>fv=ZAQVXLt!>aV8e$55g&# z2F6WR3{?<}9+-%wVExHhZ?dB}0dkov2A6OPjFIdvSa-7Lcm;Bt>@&UzCnYtQtHN9r z3&^?RiHsoU3OQHuARnlsQU(=4EJ}6!in?fkM(7OUQwCu;Mqw-_VJc>T+9=EMJBVL7 zfWtV3(>Mp_p~T`5$cOS>IH?L;kO~G6vr2tbD+;3?7(-o+ZD3t0>rz>ldJ$JZ?p5lp z-obsae)XwvO6~;KpFBBIfV?CpFUc7rxdrqrJi3ramx3T(muet)F67OH?Zc%7sIN;XA`pq*7yyYOU@Vu(AdfDyFcg#wXoYGjqT+-A-D-d6r_UMFA zbVYv*z(7ch!$gqlG}AF33$X*cK!2p6KhjXAG%>h@Tc9`65LcRKc!{sV$(8)Nl3!ON z%y5S%e2@+Fw`(4dYuEfJgd!*gVs;I~NRTgA^5aTATyKK8yFLbExiXe3wRQaj#^mG4 z$&LEDkr%fTXbaZkMqS)`pcjT=GN^|e^>AbTZq&n#dbm*!x2<5lZo9D;`$4a|9l;CX zWXOc_2trc?qYc`jBceduhA|iq@@ZI#18L6jn2$-*tc#O==NRJtri}9LpGBK9P6@Fk& zrT|bE6Lm4=K|T}&H8HUrnrebEO^j)(k49*M<{$^A)@TcQ%rqGEkBRx3wu62#-4{+8 zwb4Al`Zd<8v2AJ8NvnX0sDe6R42>SpShvR9v{s;RHRh->N39>|S$-FulQtA1Fbb0~ z6LT;hi?9S6um!}T?E*E`$b)ti$8iBSaSspi1oW2n2JaCkoXk#egBk9im(A&r0hvMm z%=y8*%;d{l3gu7%mB8H0^oW^Sn5SU{R%0EA&%7C1LGPJ&V;>IUBCdd%nQ!10?tpxn zAK@vUgLuuKg_FgCj3Bp`@@S0ypbssJ!8jJiv5*T3xv=cTW&DZjVEvZ6_=qp~E}X2+ zP~igBYb}dfpjKAuWNnBhXpUgCMpr~261_12b3k3J)Wy0KYztQAYh}Jx;;uoScEAd!~$NEh;xjTV*x@SOUkbC!R$c^GCg)%4y`of*@+!@b(Ip|4uwgvY; zunjxG`0gig8s`uV^6q|FIC(IZhZ{^_Ob`0k!wWuOOpicNcaK6Siqc?Q59;nw8Emf} zfk|7dK|PkRRE!Q9i5`?QboRycVw zh9_fq8lb@na^UHQTwwm5%-@swdoq8|68Ht=-?JuuMO`#N8?X)V`wE>ryPyyHV;}}$ zJSJfZreiUdf%rXFV;gpY*gf~*Fvy)Jx$`74@>&mK@gf$lJ=l*!IE}L)k6tmj2;%bkAe_7{ zD2&=@hL&iJw&;LP=nls8?gw(>Ein$0F%7?AE*5~?c+*eb^piJnc#{wBgP^9~CqX^E ziNiY@)YF@KdOyQUyumvVmk+t}Ay+=0AYVS@%O@LhA`kMRAc) z&=I7WdQ`%J=AOb4~~SqsjG_5Uf(_WxQrXH4`zC!at5KY$wW Ag8%>k diff --git a/Podfile 
b/Podfile index 74ae2606..fd799652 100755 --- a/Podfile +++ b/Podfile @@ -2,9 +2,9 @@ source 'https://github.com/CocoaPods/Specs.git' platform :ios,'7.0' target 'LFLiveKit' do - + pod 'pili-librtmp', '~> 1.0.3.1' + pod 'LMGPUImage', '~> 0.1.9' end -pod 'pili-librtmp', '~> 1.0.3.1' -pod 'LMGPUImage', '~> 0.1.9' + From d749db1ddc2db07c231f35b940a4dd9e2ea63d9b Mon Sep 17 00:00:00 2001 From: chenliming Date: Fri, 29 Jul 2016 17:43:33 +0800 Subject: [PATCH 06/39] update version --- LFLiveKit.podspec | 3 +- LFLiveKit.xcodeproj/project.pbxproj | 81 + .../UserInterfaceState.xcuserstate | Bin 10998 -> 13633 bytes LFLiveKit/Info.plist | 2 +- LFLiveKit/publish/pili-librtmp/amf.c | 1037 ++++ LFLiveKit/publish/pili-librtmp/amf.h | 180 + LFLiveKit/publish/pili-librtmp/bytes.h | 91 + LFLiveKit/publish/pili-librtmp/dh.h | 345 ++ LFLiveKit/publish/pili-librtmp/dhgroups.h | 198 + LFLiveKit/publish/pili-librtmp/error.c | 26 + LFLiveKit/publish/pili-librtmp/error.h | 46 + LFLiveKit/publish/pili-librtmp/handshake.h | 1034 ++++ LFLiveKit/publish/pili-librtmp/hashswf.c | 626 +++ LFLiveKit/publish/pili-librtmp/http.h | 49 + LFLiveKit/publish/pili-librtmp/log.c | 209 + LFLiveKit/publish/pili-librtmp/log.h | 68 + LFLiveKit/publish/pili-librtmp/parseurl.c | 312 ++ LFLiveKit/publish/pili-librtmp/rtmp.c | 4331 +++++++++++++++++ LFLiveKit/publish/pili-librtmp/rtmp.h | 365 ++ LFLiveKit/publish/pili-librtmp/rtmp_sys.h | 123 + .../UserInterfaceState.xcuserstate | Bin 10986 -> 10986 bytes Podfile | 1 - 22 files changed, 9123 insertions(+), 4 deletions(-) create mode 100755 LFLiveKit/publish/pili-librtmp/amf.c create mode 100755 LFLiveKit/publish/pili-librtmp/amf.h create mode 100755 LFLiveKit/publish/pili-librtmp/bytes.h create mode 100755 LFLiveKit/publish/pili-librtmp/dh.h create mode 100755 LFLiveKit/publish/pili-librtmp/dhgroups.h create mode 100755 LFLiveKit/publish/pili-librtmp/error.c create mode 100755 LFLiveKit/publish/pili-librtmp/error.h create mode 100755 
LFLiveKit/publish/pili-librtmp/handshake.h create mode 100755 LFLiveKit/publish/pili-librtmp/hashswf.c create mode 100755 LFLiveKit/publish/pili-librtmp/http.h create mode 100755 LFLiveKit/publish/pili-librtmp/log.c create mode 100755 LFLiveKit/publish/pili-librtmp/log.h create mode 100755 LFLiveKit/publish/pili-librtmp/parseurl.c create mode 100755 LFLiveKit/publish/pili-librtmp/rtmp.c create mode 100755 LFLiveKit/publish/pili-librtmp/rtmp.h create mode 100755 LFLiveKit/publish/pili-librtmp/rtmp_sys.h diff --git a/LFLiveKit.podspec b/LFLiveKit.podspec index f8fa0105..1415f6fe 100644 --- a/LFLiveKit.podspec +++ b/LFLiveKit.podspec @@ -2,7 +2,7 @@ Pod::Spec.new do |s| s.name = "LFLiveKit" - s.version = "1.9.0" + s.version = "1.9.2" s.summary = "LaiFeng ios Live. LFLiveKit." s.homepage = "https://github.com/chenliming777" s.license = { :type => "MIT", :file => "LICENSE" } @@ -19,5 +19,4 @@ Pod::Spec.new do |s| s.requires_arc = true s.dependency 'LMGPUImage', '~> 0.1.9' - s.dependency "pili-librtmp", '1.0.3' end diff --git a/LFLiveKit.xcodeproj/project.pbxproj b/LFLiveKit.xcodeproj/project.pbxproj index afab372f..ce8ef189 100644 --- a/LFLiveKit.xcodeproj/project.pbxproj +++ b/LFLiveKit.xcodeproj/project.pbxproj @@ -46,6 +46,22 @@ 84001FFD1D0017680026C63F /* AudioToolbox.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 84001FFC1D0017680026C63F /* AudioToolbox.framework */; }; 84001FFF1D00176C0026C63F /* VideoToolbox.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 84001FFE1D00176C0026C63F /* VideoToolbox.framework */; }; 840020011D0017850026C63F /* libz.tbd in Frameworks */ = {isa = PBXBuildFile; fileRef = 840020001D0017850026C63F /* libz.tbd */; }; + 849005B01D4B5C8400D2A3D5 /* amf.c in Sources */ = {isa = PBXBuildFile; fileRef = 849005A01D4B5C8400D2A3D5 /* amf.c */; }; + 849005B11D4B5C8400D2A3D5 /* amf.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005A11D4B5C8400D2A3D5 /* amf.h */; }; + 849005B21D4B5C8400D2A3D5 /* bytes.h in Headers */ = 
{isa = PBXBuildFile; fileRef = 849005A21D4B5C8400D2A3D5 /* bytes.h */; }; + 849005B31D4B5C8400D2A3D5 /* dh.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005A31D4B5C8400D2A3D5 /* dh.h */; }; + 849005B41D4B5C8400D2A3D5 /* dhgroups.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005A41D4B5C8400D2A3D5 /* dhgroups.h */; }; + 849005B51D4B5C8400D2A3D5 /* error.c in Sources */ = {isa = PBXBuildFile; fileRef = 849005A51D4B5C8400D2A3D5 /* error.c */; }; + 849005B61D4B5C8400D2A3D5 /* error.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005A61D4B5C8400D2A3D5 /* error.h */; }; + 849005B71D4B5C8400D2A3D5 /* handshake.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005A71D4B5C8400D2A3D5 /* handshake.h */; }; + 849005B81D4B5C8400D2A3D5 /* hashswf.c in Sources */ = {isa = PBXBuildFile; fileRef = 849005A81D4B5C8400D2A3D5 /* hashswf.c */; }; + 849005B91D4B5C8400D2A3D5 /* http.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005A91D4B5C8400D2A3D5 /* http.h */; }; + 849005BA1D4B5C8400D2A3D5 /* log.c in Sources */ = {isa = PBXBuildFile; fileRef = 849005AA1D4B5C8400D2A3D5 /* log.c */; }; + 849005BB1D4B5C8400D2A3D5 /* log.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005AB1D4B5C8400D2A3D5 /* log.h */; }; + 849005BC1D4B5C8400D2A3D5 /* parseurl.c in Sources */ = {isa = PBXBuildFile; fileRef = 849005AC1D4B5C8400D2A3D5 /* parseurl.c */; }; + 849005BD1D4B5C8400D2A3D5 /* rtmp.c in Sources */ = {isa = PBXBuildFile; fileRef = 849005AD1D4B5C8400D2A3D5 /* rtmp.c */; }; + 849005BE1D4B5C8400D2A3D5 /* rtmp.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005AE1D4B5C8400D2A3D5 /* rtmp.h */; }; + 849005BF1D4B5C8400D2A3D5 /* rtmp_sys.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005AF1D4B5C8400D2A3D5 /* rtmp_sys.h */; }; AD7F89B4621A7EFEBEA72D49 /* libPods-LFLiveKit.a in Frameworks */ = {isa = PBXBuildFile; fileRef = B8CB02D2A92EA1F5A262F154 /* libPods-LFLiveKit.a */; }; B289F1DB1D3DE77F00D9C7A5 /* LFStreamingBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 
B289F1D41D3DE77F00D9C7A5 /* LFStreamingBuffer.h */; }; B289F1DC1D3DE77F00D9C7A5 /* LFStreamingBuffer.m in Sources */ = {isa = PBXBuildFile; fileRef = B289F1D51D3DE77F00D9C7A5 /* LFStreamingBuffer.m */; }; @@ -120,6 +136,22 @@ 84001FFC1D0017680026C63F /* AudioToolbox.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AudioToolbox.framework; path = System/Library/Frameworks/AudioToolbox.framework; sourceTree = SDKROOT; }; 84001FFE1D00176C0026C63F /* VideoToolbox.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = VideoToolbox.framework; path = System/Library/Frameworks/VideoToolbox.framework; sourceTree = SDKROOT; }; 840020001D0017850026C63F /* libz.tbd */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.text-based-dylib-definition"; name = libz.tbd; path = usr/lib/libz.tbd; sourceTree = SDKROOT; }; + 849005A01D4B5C8400D2A3D5 /* amf.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = amf.c; sourceTree = ""; }; + 849005A11D4B5C8400D2A3D5 /* amf.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = amf.h; sourceTree = ""; }; + 849005A21D4B5C8400D2A3D5 /* bytes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bytes.h; sourceTree = ""; }; + 849005A31D4B5C8400D2A3D5 /* dh.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dh.h; sourceTree = ""; }; + 849005A41D4B5C8400D2A3D5 /* dhgroups.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dhgroups.h; sourceTree = ""; }; + 849005A51D4B5C8400D2A3D5 /* error.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = error.c; sourceTree = ""; }; + 849005A61D4B5C8400D2A3D5 /* error.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = ""; }; + 
849005A71D4B5C8400D2A3D5 /* handshake.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = handshake.h; sourceTree = ""; }; + 849005A81D4B5C8400D2A3D5 /* hashswf.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = hashswf.c; sourceTree = ""; }; + 849005A91D4B5C8400D2A3D5 /* http.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = http.h; sourceTree = ""; }; + 849005AA1D4B5C8400D2A3D5 /* log.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = log.c; sourceTree = ""; }; + 849005AB1D4B5C8400D2A3D5 /* log.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; + 849005AC1D4B5C8400D2A3D5 /* parseurl.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = parseurl.c; sourceTree = ""; }; + 849005AD1D4B5C8400D2A3D5 /* rtmp.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = rtmp.c; sourceTree = ""; }; + 849005AE1D4B5C8400D2A3D5 /* rtmp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rtmp.h; sourceTree = ""; }; + 849005AF1D4B5C8400D2A3D5 /* rtmp_sys.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rtmp_sys.h; sourceTree = ""; }; A17586B27CD6843997425CCF /* Pods-LFLiveKit.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-LFLiveKit.debug.xcconfig"; path = "Pods/Target Support Files/Pods-LFLiveKit/Pods-LFLiveKit.debug.xcconfig"; sourceTree = ""; }; B289F1D41D3DE77F00D9C7A5 /* LFStreamingBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LFStreamingBuffer.h; path = LFLiveKit/publish/LFStreamingBuffer.h; sourceTree = SOURCE_ROOT; }; B289F1D51D3DE77F00D9C7A5 /* LFStreamingBuffer.m */ 
= {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = LFStreamingBuffer.m; path = LFLiveKit/publish/LFStreamingBuffer.m; sourceTree = SOURCE_ROOT; }; @@ -298,6 +330,7 @@ 84001FC91D0016380026C63F /* publish */ = { isa = PBXGroup; children = ( + 8490059F1D4B5C8400D2A3D5 /* pili-librtmp */, B289F1D41D3DE77F00D9C7A5 /* LFStreamingBuffer.h */, B289F1D51D3DE77F00D9C7A5 /* LFStreamingBuffer.m */, B289F1D61D3DE77F00D9C7A5 /* LFStreamRtmpSocket.h */, @@ -310,6 +343,30 @@ path = upload; sourceTree = ""; }; + 8490059F1D4B5C8400D2A3D5 /* pili-librtmp */ = { + isa = PBXGroup; + children = ( + 849005A01D4B5C8400D2A3D5 /* amf.c */, + 849005A11D4B5C8400D2A3D5 /* amf.h */, + 849005A21D4B5C8400D2A3D5 /* bytes.h */, + 849005A31D4B5C8400D2A3D5 /* dh.h */, + 849005A41D4B5C8400D2A3D5 /* dhgroups.h */, + 849005A51D4B5C8400D2A3D5 /* error.c */, + 849005A61D4B5C8400D2A3D5 /* error.h */, + 849005A71D4B5C8400D2A3D5 /* handshake.h */, + 849005A81D4B5C8400D2A3D5 /* hashswf.c */, + 849005A91D4B5C8400D2A3D5 /* http.h */, + 849005AA1D4B5C8400D2A3D5 /* log.c */, + 849005AB1D4B5C8400D2A3D5 /* log.h */, + 849005AC1D4B5C8400D2A3D5 /* parseurl.c */, + 849005AD1D4B5C8400D2A3D5 /* rtmp.c */, + 849005AE1D4B5C8400D2A3D5 /* rtmp.h */, + 849005AF1D4B5C8400D2A3D5 /* rtmp_sys.h */, + ); + name = "pili-librtmp"; + path = "LFLiveKit/publish/pili-librtmp"; + sourceTree = SOURCE_ROOT; + }; B2CD14611D45F18B008082E8 /* H264 */ = { isa = PBXGroup; children = ( @@ -342,12 +399,16 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( + 849005B21D4B5C8400D2A3D5 /* bytes.h in Headers */, 84001FDB1D0016380026C63F /* LFLiveAudioConfiguration.h in Headers */, B289F1DD1D3DE77F00D9C7A5 /* LFStreamRtmpSocket.h in Headers */, 84001FDD1D0016380026C63F /* LFLiveVideoConfiguration.h in Headers */, B2CD14701D45F18B008082E8 /* LFMP4Atom.h in Headers */, + 849005B61D4B5C8400D2A3D5 /* error.h in Headers */, 84001FE31D0016380026C63F /* LFLiveSession.h in Headers */, 
B289F1DB1D3DE77F00D9C7A5 /* LFStreamingBuffer.h in Headers */, + 849005B71D4B5C8400D2A3D5 /* handshake.h in Headers */, + 849005BB1D4B5C8400D2A3D5 /* log.h in Headers */, 84001FEB1D0016380026C63F /* LFLiveStreamInfo.h in Headers */, 84001FE91D0016380026C63F /* LFLiveDebug.h in Headers */, 84001FE71D0016380026C63F /* LFFrame.h in Headers */, @@ -357,16 +418,22 @@ 84001FDF1D0016380026C63F /* LFGPUImageBeautyFilter.h in Headers */, 84001FD31D0016380026C63F /* LFVideoCapture.h in Headers */, 84001FD11D0016380026C63F /* LFAudioCapture.h in Headers */, + 849005B91D4B5C8400D2A3D5 /* http.h in Headers */, 84001FE11D0016380026C63F /* LFGPUImageEmptyFilter.h in Headers */, + 849005B31D4B5C8400D2A3D5 /* dh.h in Headers */, 84001FDA1D0016380026C63F /* LFVideoEncoding.h in Headers */, 84001FE51D0016380026C63F /* LFAudioFrame.h in Headers */, 84001FED1D0016380026C63F /* LFVideoFrame.h in Headers */, B2CD14741D45F18B008082E8 /* LFVideoEncoder.h in Headers */, B2CD14731D45F18B008082E8 /* LFNALUnit.h in Headers */, + 849005BF1D4B5C8400D2A3D5 /* rtmp_sys.h in Headers */, + 849005BE1D4B5C8400D2A3D5 /* rtmp.h in Headers */, 84001FD81D0016380026C63F /* LFHardwareVideoEncoder.h in Headers */, + 849005B41D4B5C8400D2A3D5 /* dhgroups.h in Headers */, B289F1DF1D3DE77F00D9C7A5 /* LFStreamSocket.h in Headers */, 84001FD51D0016380026C63F /* LFAudioEncoding.h in Headers */, B2CD146D1D45F18B008082E8 /* LFAVEncoder.h in Headers */, + 849005B11D4B5C8400D2A3D5 /* amf.h in Headers */, 84001F8E1D0015D10026C63F /* LFLiveKit.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; @@ -505,24 +572,30 @@ 84001FE21D0016380026C63F /* LFGPUImageEmptyFilter.m in Sources */, 84001FE41D0016380026C63F /* LFLiveSession.m in Sources */, B2CD14711D45F18B008082E8 /* LFMP4Atom.m in Sources */, + 849005B81D4B5C8400D2A3D5 /* hashswf.c in Sources */, 84001FE61D0016380026C63F /* LFAudioFrame.m in Sources */, 84001FDC1D0016380026C63F /* LFLiveAudioConfiguration.m in Sources */, 84001FD41D0016380026C63F /* 
LFVideoCapture.m in Sources */, 84001FE81D0016380026C63F /* LFFrame.m in Sources */, B2CD14721D45F18B008082E8 /* LFNALUnit.cpp in Sources */, B289F1DC1D3DE77F00D9C7A5 /* LFStreamingBuffer.m in Sources */, + 849005B51D4B5C8400D2A3D5 /* error.c in Sources */, B289F1E11D3DE77F00D9C7A5 /* NSMutableArray+LFAdd.m in Sources */, B2CD14771D45F18B008082E8 /* LFH264VideoEncoder.mm in Sources */, + 849005BC1D4B5C8400D2A3D5 /* parseurl.c in Sources */, 84001FDE1D0016380026C63F /* LFLiveVideoConfiguration.m in Sources */, 84001FD21D0016380026C63F /* LFAudioCapture.m in Sources */, B2CD14751D45F18B008082E8 /* LFVideoEncoder.m in Sources */, B2CD146F1D45F18B008082E8 /* LICENSE.markdown in Sources */, B289F1DE1D3DE77F00D9C7A5 /* LFStreamRtmpSocket.m in Sources */, + 849005BA1D4B5C8400D2A3D5 /* log.c in Sources */, 84001FD91D0016380026C63F /* LFHardwareVideoEncoder.m in Sources */, 84001FEC1D0016380026C63F /* LFLiveStreamInfo.m in Sources */, + 849005B01D4B5C8400D2A3D5 /* amf.c in Sources */, 84001FEA1D0016380026C63F /* LFLiveDebug.m in Sources */, 84001FEE1D0016380026C63F /* LFVideoFrame.m in Sources */, 84001FD71D0016380026C63F /* LFHardwareAudioEncoder.m in Sources */, + 849005BD1D4B5C8400D2A3D5 /* rtmp.c in Sources */, 84001FE01D0016380026C63F /* LFGPUImageBeautyFilter.m in Sources */, B2CD146E1D45F18B008082E8 /* LFAVEncoder.mm in Sources */, ); @@ -650,6 +723,10 @@ INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; IPHONEOS_DEPLOYMENT_TARGET = 8.0; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/LFLiveKit/publish/libpili-librtmp", + ); PRODUCT_BUNDLE_IDENTIFIER = com.youku.LFLiveKit.LFLiveKit; PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; @@ -669,6 +746,10 @@ INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; IPHONEOS_DEPLOYMENT_TARGET = 8.0; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + LIBRARY_SEARCH_PATHS = ( + 
"$(inherited)", + "$(PROJECT_DIR)/LFLiveKit/publish/libpili-librtmp", + ); PRODUCT_BUNDLE_IDENTIFIER = com.youku.LFLiveKit.LFLiveKit; PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; diff --git a/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index d66153a351e54cd1bb6ba0a25615c42c75b13342..b8b1809ad3b584a4c3ba316040f5c736785d2e1f 100644 GIT binary patch literal 13633 zcmbt)34BvU*Y_+pY0{)=(lkxdB~6+%sVyzt7X+azAf*(#(6ZIEy|j_EDM`wv=tM+B z6cG_r5NXSf2)K*L<^t}@A|mb};`+F+kNVE!Hf;mN*YEw>Uz?k`GiPSbIrBfuoW>5P z!{bd)KLH>>00Rz)fS9|PTU=+E!@E5WS9_hs)p#x6KozJ4HJ}zu26dnwOaW7Y z9W;U#-~t_BHgE$Em;-JAH-ekMVsIx|0qz1T!B(&hYzI5QLtrO(80-SO!5;7kcoaMV z_JU`@LGT=S5gY-hz&qeHco&=j?}7KhS#S<~0lox3fFHq6;7{-ulF$_>73HCPG#nM7Vl*0!LFK3t zRiheIi>9Fl)P$N*2bzuC$b-CyARoF3EkZY=CFl;c9NmfTLHDC>)PpvnhtN*+FnR<% zg`Pz(pcl~*^agqpy@gJrv*>g5HTn*HkA6VEp#NfkIV{1^I0nb!IBdZ2*oaLy6{q2$ zI2~u;JbWb{j<3cQxC&R}X?QxGfoI}t@ht4Z9r${@5Z{1r#02~BO?VN$6R*H`;g$Ga zybeErd+5Th8jzIr_*^HNI@u&0XYZ<5tA#b(q{4VJyYmsub$Uh znw)lzryGO;1wjON10_%qj+`Tw2BjsdG`~0_zbrSkBsVuNHM=0YEVZaGJ3qBJuc)vv zKR3UiG&8qB89BD3bh67m%hO?R;!9jjoo#%(H!yJ&FaYZopanXh2hkt~#DX{?B4QFk zBt%L=iEImq2S#9`E6@WANF;I+M#AY@LP#{pBS|wDbw_)%Yi^m_-o`g5O|)TxyP0?M z&3&_0+Z&5r?OwOb>7)m_L3!1{B?Y}~$SEu;%+JcsNX;wAC`rvO%qmJP%FWM8%___( z&Mhe_OV7&7XiyFrNI;mre}bBJ$LvmiY%|@aW2S?5Hz?z2_rfMG#f!n0)S%Qeg8pq( zyIjsjyL+ss*5Pq9I_Zvd{qO9JMM1Ch*Ik#NpEtyqnUlA0i1E6N?DX^@#)6#ug$s}Q z<(dvMyMUhZN^uNigB*|x@}>wPZKuqt6CQaFm3*PwZJ*ywJLK0@R88=sTtG&{KnbvJ z16P9KU<9}dj0B@VAt(aHL`hU6f~ZL((U2&jCAw{(l*(u{7z4(FtLalY7*F(6PBA2w zTtTYo(+tu;k{Xmzg&q&ZT6?>~dT?_ml@hm|>;e5KvnI-PzM_P_=zfwV!XE-x#mq8{h)rp;?|HS-;A*R=s&P6IQ5 zwF^uqab2K+7#J^9CAHOkOCIax+dP8on!wC%&|8O7KXSI2mnM%m36+b7htu^vw+PtUZf zig!}e%s2PFYET+4Gp5Sx?ridQy7|C%?*#)?HjoG&1RFpX{pHp0XhVn@PxsrYkCnM*I{pO=9`5bvSCaSJw@}Y=XX$8 z(6EM}JyYtvL;J|FuC$H83!RK2(#5N3P)5*qef>wfU7Z~s2}SMeuA@+r&QK*Yr(lSY 
zVqySP9s`eWAj1R!KM9_qO4KX=D&E@zo&-;W{F;g?`bE&=XL`FcJF_3m+(2wX`=J5t z&-@-%p`ChXka4=xPE*%=qDECT47{%;FpFRdrE#hao z+i{?BP55P(hL^FYWs?_)X~hEA29Y_5GSYl#`^% z9i5HTXKoD;{Tsr-5+NcJiKX`i6v2?b1+lS7ffq6u);B^ys`^K$U<9yk*wEio1GNF< zggR0~lKff(W5CQF7z^W|fz*)%yqn44NaXBzQXj}oEbb9}!eO5%UMae^JJk%i>YHx0H zv=8l*QUB8=1_X|TV<}R>QLqpe!D3hfOJNxt4abmch@CW&CeloJGLy8BR*F<`95uqF zpcGDkm2e`QL=S<38eu*CeVV*M-X!m^1EFNFQo=WOwzTkWVPR2yyBtqJw}GCO_GY`g znQfkR78pQ5qE?8Dgzr_Ijcv5|6yckHpi)gol<+g{oldWR27$b>x2cj1_joBRO`=wk z_s|DBvqlWX0t@OhjZ>U{AAy~efRtvyYp71Z26Al|w3AtEGl|0Nz5c}ICI}GVVGAQL zlQ_FzD`_K1)#L4s_P|>w>;Mrxuno3D7ilLh($NEFQ*L_5Y|6nWnI>HkG#$efzKXgo z?JfStK6^*~1`620eS3l8;`Y(JtBv=%=L_mRpE_`G0dW&WU{MR*b zDy+%BT57Wj{ZTD5Q^5kE@(|7oeZyRo3cstZ4?y7{?_vV)O6O7evf#PjIhh)GS2L-+ zL8)zWwWZlRI-Ige|6dT%GPFzMSe?numFabNn7+${<}EgtnqihqJk| z)$ZXv0txCo4$?b-_<>E8yP5(@JXrTiH$TVW>hx6G-7S2t`&-O{tOliZ5bC{a8Q43( znC>OqTQ`-4W57};oI;Od)6Jv zvo0gEswNioRk|GQ-f{eVs^U4NB^d<;rG>>bJT1*BC@9M)&B-mwD4?(MY50^;mY7pJTJ1ZVKcV=38KcT!!2?eIlx%?{{ zl3ud1tlX0H{OtVF!qSqwf`WqL?Ci|Kf`X!wTETw{D9h*Yi(q^DHT)*H;XC-f&>$es zPvG$`_%pee409=?-Wl=Uc^ZWm`}0%t=~E?rYNlB#Bh6*;^wR}8Y1XQVJqKp|9sUtK z!C&z2paz5x4sH-5>LxIw?_UB6rQnBTWIa)wKnfI<6}5C<3WbWWC1^pA=T=y$;G> zuf45fYIARLfx-&uA{)qqWOIWub0&qmVMB-hds92qT=?v`X}i|lR^Pi7q(d>(0YZ8d zO}a@>H;P4ZWFy%`l4=EDs1=SU^`!+Z9C%v$-6_Avj4UWI5X=Ms>SE;RK~`h~`Ttut z8d=d8@8ZFOE+VjzWRxN>_#{e0Ln%AcQ3lFHStuLjAeyUDUQBI9|AA-GuD?ecHIe@0h1?{zrRh=CjF1I&L(Bgpn(t`RNN0TYn>(Dqf1*utzu#p?6-^M6-|+wV`&hpFB@qBu7Y6T{%C~pWvyVRx1yf)pf%`T@)CKq&kI0n(fWaQ8$F0NkeA6T#L~xR^Qh~Bq8zjdZ4DZ-4Q(f{k=Kc( zUm$c$$3UZxcA?$DVE-t3Jm9{dC&*iYMLx|IxfeY{j**l7i`E75hhruAl$H-q zg|%2mLFFqSjQ7C=A58bbDj%$7Cpof+W*}yDFc;P1za^kSWHW`A-XK{}B!7hFDRa3g zxO%1umkCu(tvtK(h!nm~>TIX@;&rt9!;c1Kdb!=>Eq1lh$chr|4~2zcMHI%H{R3&* zoSGSmPV`&^Y=CeVK|x9w~{zJaqh?DRYI2osq z@5moMDEGlAw$=Zu5emdcf%I?ia?+~&pco9jxnBqE16?VekH#!$G6vq9Wx~`RSm8i=)>ZA>cjrVeAq!9 z#z7kcq#AG|C55lScJg2HS2u3L%{~ZxP!h}_ho3=z(wxjcyWo|sX4YXKZ`=Jt|87v0 z{qxZNlv!^ka;VeM*g^}2h^rI5N*)=&x>C5CiRP3?QT20h;UD`x3}=q$2gk# 
zLZ>ruO_6xfpg{H@c$8myaNTxQ9gE^JvN9G z3tx{nusI&|L1h>2@F@&EVSTy&1o?13e`j9+C-c*O^! zyYOp181rxB_ZE}iF&~Vjh=orIp*$$Xr|=PcT3|&V5Len;_=?W9M&3>IJ{aeNIv+In zwYT$J#VCQn@8S1_n5}Qr#d_0&KfoU{HnYg?vc3M9*&pMx^lk!w;)BL6e9i|=LiV5K zbA%|eKfDSs=X3l$h}eq1z+d98@Ynbo{2%-+{>}%@K1h`)(Fd(QX!F4&AH2c`legj@ zXnsBs|BQdZzvABj?UF)$1bSeH_+Y9JrupDd`rT6R_2&w!O2$obcuJj)7NIU6N@y;j z`NSrEtY@-=Mr1t8BSsE<5l}pe=t8sKfArD>E4}~hYZQo-+1ngWnk3TXL2Om%d$y4rb{rywn>L0Ql|$J^{*Q>xHDEGI8Jw;-<|Jv+UiIX~B)&*xEG z$ji@6FUTy&NS~RNkynu2+?17BV|TaZX4g6@8MS)hBX>w3vwd))k6v+5G)bp25B6+&y&#${s_JZG-nk&aD!zpkqcAt;k|7g1 zdT%1wK@Fx>?eIEelLLL6nrpYAdSBXh=wV())Us`$U`}KGbMRFw^!b#{!Ti}FD=bCA814WAl?iwzEYvh{zm~piaj-#;QgXKOrelur5ZCnf2N>gKP z6hg?0K3L&{l{7>4A_J0ye?IX-PT_y)MAt+u1Pi!>@h=FHSf?uQd{(ITzw~6Tv%lvt zZZjyh69x_(Q zs>aZowqdlgEmQf$l=AVTD{IEKQTM^`Gji8~nOz`^o;IQ0gu9Vd3fxYkmR0l?^uN@_ zCAL9Sio1ndQsWP{{aU352_VHk$w*ow`U9;EmC%Sjn%0DxXvmvHBitdhF0=+a@qBzM zz75}wm(zOBm3S3ijW^*fcpKhwJ{fd_Ipa(3;IUT7S8k+rz!az0IBG&T#K@ z=eh5=-$h6iB2tRtMCqbTQMM>olrI`48ZNp@G)hz?su4{V)r+Qzri&Uxc2Sdv7qy5S zqMJnbh`K~?iOz}DVyieuTr4gXmx-r|9b%`rT|8Ut5qFB0idTx)i0>1x6+a-}F5V^H zBYsr;lz6XrpLoCcRq==7kHnvgzZd@kHV zXNV_+ge(bJ8nP_pj*wL$t3&P$xj$rG$i9#_Lw=9|iCUtOXeD|{j3iDHFEL5Xl0-?q zWSC^Q#X_3s4+$LEs*(rHJa$53}G*l{+CP-7HL!@caJZXt^ zoOGgeigc=Ux^$+rMe2~cr5zm z`_NxPe+&IXMgsvEClkv;WGb0KW|k$&Y_cJ;G+DYVQ#MRCT6VRpTsA>AQC20pM%E8Fl0PSZL4HL3lKd6#bL$siWe0}6{i%Z6=xLh zD?U_wtT?OqRPmYO$8ZoH6FxM2T(~2=GyKl*b>UmWw}tNr-xhLT{7m_|@@M5A%0HEVt2mWd zB~gW{(o}h>64hAMIMsO7BvrMlR#m66tJ+mQ)gskm)e_ZG)iTu`sykJ8sqR*7R&7;n zS3RV9ShZXAi0U!b6RM|FdsSyue?{mcvLhNJ7Dn6~@o2>B5ho))i8vc^F5-N|4-r2` z{1owvnp2C^VzorAQb($z)H?NOb+x)iU8}BB^Xi%E7PUh?M?F_PPrX3BM18Azsd|}u zm3oc(KJ{AlL+U5g2i32tUsu1WKBhhqDUFPXjE}TLS|gJpQzM5)W<+L14vQ>|tc|=T z(h<2daz*6s$QL8eL|%yeJo1akuQX5-sWEGEHIp>enp#brW{PH-X1QjK=6=mO&3era z%@dk^n*ExCn!}n`H77McYJQ8-MVX?~qSB)>qq3t$L{&xAMYTuGjp~lt5_K%3-KKp+ zyH9&W`=RzL?N8cYw7+Tp(Eh0t>m<5Rom{8TDRpVOak_Th65R^jUAnt<8+1LoO}eeR z?Yf6_2X%*aZ|IKcPUueQPV3I-&gnkaeX09e_aEJV^-z!XB7LY{u2<-ldc8hDKUANs 
z&(-JaN9afD3-!hN@%mbQlfGHc>s$0*{apQg{q_1AXkF4${c`;Z{Yw2ReV2Z-eye`F zewTia{!#to`UCo-`jh&1_3!CF(4WzO2#9bX% z9#<1LIj%l#T3kb%J#KB>^Ksu8LJe9&v?10IZ!j6mhIB)wA={8^m}Qt}SY%jiSYlXe zSZ27xaIfKh!#cxy!v;gQVWVNEVYlHC!()aQ3`Yzv8D25GW_a6h)^Ne_h2bm1cZMGf zKN)_Bm&cpp)8ez@bK>*jhsTeMFN!aTpBP^iUlTt${>J#*%P7@LgCjH`?f7&jRoF+OA5Z#-mt-gw0Lrt!G(ZR0z} zGsgFg=Z)VQe>a7gLQP>NrAcklm=a7Dlht&EX@+T*sogZ&3!1&rjJeMOy4Dl5_AdC32_P5gv^AzgyMwq1U|u;usC6D!q$Wx2@fai zNq8*biG=48UPw5S@Uq!pPBEvMGtAlMJad7$#yrDpH#eJSn&+BVm{*$bG2d%mYu;$y zX@1pw-2A;oV$oQ1mKck{VzQVm8I~(8V=VQSX_f{{qlLG$T4q_=Ewe2*Sr%KCSe9CD zx7=x2X}QO;#&Vyf$MUe{h~)#z4~ddQV`4#KX`(N2N#gRvdlNS$K9smC@sY&G6Za(^ zNIabQLgJCc->n*Jnl;Z_WG%Ihv5vD=SSMPmtk+mutk+uGtR2?5)&ibI(2Zd%owKduQ9QKYD#=kkA!$ z| z&}XO;Eks|Uuh1&=HCl_lL6gyUXdBv&cA{PA5IT&Gprhy*I*)!u7tlp?3H^qyqFd-T zdVu~xPta40Fvbe3#2lI!z-p|=25iBRI10z$#<&TN!_9FDZh_n4b~p=n!Pz($zlJ?{ zARdJCu^*S>GCT^8#$)hX7;q5(2fvNq!SCV^@yB=`UVzK-db|ODgE!)D@h1Eo-i){4 zt#}9CgZJSB_(yyMpTuYJWqbu+#eqBcF208!`Ic*sC9h>RuU$ape=1PG8InMfv)$z%$7hfE_g z$Sg94d`9Mz3bL3iAzzTM$a=DYd_#7SodL3o>?V82Ub3J3NRE-?|b!k0XpEjTkX(QU0HlcB}1#L-Rp{-~;noYaXZnQh? zL33zN`UcIT1E_}%rv-EbEu^FA82T2SKqu0VXo!k4=`8v&ol8HX^Xccbk}eJ1ByDI2 zl0!}CL}@cfgZMRTsTU7ON3l!LE2tG}joP5LC>5oF21L+;4)kCE;}VpC>Y(;06Lnx? 
zCon-XcpV18J8VpV`2L;RwQryA^A{DT6?;8-8V!5S8Coi?UF3dn4(hu5u`v$#GxG2AH^<1Jy9=|i(W&$Q6JP7^+T_N87yE08`!}C zPKbo4W#|o*7xskx4?_9Ki(()e>Q;G^0L`HX?&tk7PPZQ!)GwU$l!j84` z4lErq#9QU=QB;gdke{tziptO^G&*Fkc8+ob0vE&{M{l9A?A16l9!+4cOHmL_WaH!z zShG91Ax47%X;5vosZ1fr5{Icl9cCv6Sl)y;_#R3qN28cs^}Y5+EeZEk_2P@c&WsJ5~R?WhQAD{5BRjsAyX7ot6AFZv$sgA`~1E#Z}gXg@lD z4x%5R6=XnH=mzniC}hKbm>nqFOs^1Xd}_=XIvdRw#1 z@p|Y~c%2jICv+1146UIJw1w1#D274t3#!BTlK|tdIOhH!h#ooTthd) zIA4c$<>)4~XR6yYmZs_`;;t3nU+SoOy92&5Gkd&X2^9yLQL&sN0( z7MY7!1KrD6-m+ZH(})>Gg(XD=-b`PC-&-6;r&_rVhmF`&9pRekR%{PXw?WTx?0{ZX zJG2gpW*fvV?1o(E^L&Fi7T2%keFNMOUW4AudlsGQgmEt8$_tB?l+Ai0}6b&9#x z>RoY<|EJoMsSbo>cWU7Rgc>pB8W(!w*HLUG?t}Z{evl7d7+i__<2P^~41v)wh5-{) zxb+O&9B+Ylkl&mCqV~;`MLpk%7Z;$|#dt6tf`?)s9)^d*Q1HPp7!Cz6Vlf_p3vm&O z!EdsHRtP2FXEZDem$E#KK8)h%k)FbQZ+<7gcSK3Jti8d6xxNw~%fB9M**uL9&V*+! zRM)*|CJ99=qe3~#xX@*V!&0-Qv3NYvmg8|yRE{UWNQU_;rNx0K;>qEVPl7kg@f0YA z_&!;gS-vuFXP>`Y2$arV_&q%B`NZi^3duDVXW&rnsWmTV<2lb~%!N@eW_*U{R}+rG z*PsWkz$;h>z?BT1MR+k@f|ufDcsc$8-eMq)g>f(*CO`lHg3B04U*fOuDmJdhYuM*n z2Gc~C1XI{J75>9M-)2$ClG{RM@|ht{VGrPKc>4mFT%9>P@h(=Np4F}#ufG!S#Jf>S zuk0N5P|dHs&(y+hdn#1Xh^%FL8T+Sg*E-2L=hK5i$ufc<-`pY5PyWkB97EWWu!i7@LG0GlYyQR zZ$CB`dkZ{%#zEFh%V9yDCLwgdRW~%i$#tm;CTUEXu)y`|kySw&Kd*hHDQSjc7m;|9 zKoUt3Nrp;T2#a7bELlXFlN8c|v?Q; z8}cgY49nrG=kX)iq&vf(bS2&33s_M>dXOAg31714gsZwzj1A-uc)8|~zNCMx32%@* zSOs5Km8Yq-Rr5&^id{~;WH1>*h7uncMuw9DGJ+Js8u%})g>|qVHo!Nq5x#{@%gIRc zCMhN*#7{~|85u=JlQHm}9yY@k*b3WVJM4sAup9QUjgHqwn&O*hBqYQq#izAP%uHyX zo|u%_EIBPLF=Wv?EasHhBqbzeJ|i2MO8!%;rI2^Yd$k4A$p^Is9}!kY z!sm<{8y}O|)q<-0BXjZga`GwcfXPLgkX4t|lYCCtf#>BJm1JSHAa`ujMo~@2*H~Ig zmc49g1v`(BmGC{NkCIj7YqFZrZw>h`SxeT1R_LOY`{94E501V>xD0pNcb)qsr1z;` zV{#+e%sK=4mTV&5!2vi3KU9(}WGmSQKf)n64Dq?uy)UZ7EUZS zaqI;7sWvF5$eD1yora&v$uID8?R>jXm0Kr6^`l#9$#3LJSo1QRDkoRrG$YKI=;$bN zGwjmua0b-J7*Jo6J7hJvOYX6Q?j>@8JPdss?J&YQKyo+on6-) zLkH5q|56-6hf-e<(;$`vv6Oi^y{@ATEutm=()80(S{B5zAeINQ0^;3mZ&y_?cGHTA zAt>rFWj7)^j*ee*qizuyp{x#9fI|Q0{j{CO@2R8#8bm3%>=wYDYJxI}zRM0kY{ygR 
zRN9fgP2Yh(gP0Fu6+8-Jbr1`S>8t19LN-nwp-7iNf zAkSJlUC4?%T|`a?u@Qb^@p-8ct*uIoee7tJozvMn#+#p3JZLEEMa75cm-MS%Iew4d z3mD0f1-Vc|cB^ZN+Ok4Civ33LKAFva8K@+S*e?SsNZ?CWV^))s?B@RuyYs6VGqsGB zMrx*3cHeKv?)q_zSc!~J&Do8=BfHbDq^Ic#^ly8!6 zk#Cdlknfcrlb@FVB0nepRen`|U4B!3OMXY;P$Vn5C~_5j6#W$a6~h$66$Oey#VExz z#Y{!HV!2|a;w#11ioJ?MisOnCii?Vyid%|1ihGI&iYH2>BuYs@8KIObMP+?ulCq7m zv$Cf$SJ_+HSNXcqs~n>ADTgaZD2tTSl*^R|l)ou2D{m|BDDQEMW}K9hb4rfqqPY58 z6Rs(jz$I~MTqf6%dzI_Tb?0)pUfdwAj0*N1$+g+kYC1s!LQ`M;{Pz_Y&s|Kq^sESl?s!CL) zs!voaRQpvYRlljOsIIAQsD4-7R^3%SRbw?(OVu*9Ld~gF>N@H~b$hi>{fTR;8j)OXeQ)eqHwsUHhcAwqBn4TQ!*oDfJ5l7wbLhL9XVk0q5j29EdR$_+OUhE)t5<82%#r|TR=n)5tL&afYffx|q7H5fb z#d+d&30&0r9YSR6H)863>Wd#q;8QE!JwZTCHAd)S9(ctzGNXHrB>z zXZheftu0BDZq;IBA(YMsM(znt3 z^t1K5^$!eALqkIoLsLVdA=%K}kZNde=wRp+FmyKbHuN{-89au;hM|UGh62M|hG~Yc z4I2%64EqfS4L=%=8BQ2Z8crEb8!j2{8f8X>ku$1{f>AW;j0U60XffK1O^pf0Bx5sU zim|1!m9dR6)tGK6jjN3ZjE_x5Q@p9Ksnj&lG|4pCG}RO`%`^pOnP!_7 zniiQBo0gi^n%0@to4zsaHtjL(HSIGUF&#JkWct~xFdNM#v)ODl*EQEO*EcsbCz#us zQ_X4S40D#btGT;5$DD62G>*ky0Tjo3F zd*(+LY^h^uW9ew=Zs}>s4On_xyq3Y1Ar_yd*z%EOj^!K650b^|KAMjkV3N&9{}?Ds78xfu*+PwhgxJwjXTgY`@wr z+J3WLv0by>u>EekZO3+Mm)d1^g`Kmj?1Eji>+BADJ$stHw|#_tqJ6G?jeU##u>(8g z4y_~75$mYuXy9n%NOUASQXH>1T02HM-g7K;taNN}Y;~idJ9Ce&>oN=6W zoOfIaIIcNvIDU8Bc3PYroF&e2&PmQ`&biKVXQgwIbBXgy=PKtK=Q`&G=QigK=l9Oz z&NI%l&hyR-&TGz_&fCs=&IisXkvNizG)G28Mn}3MVXELh9&0|`W&Z~R4r=QF diff --git a/LFLiveKit/Info.plist b/LFLiveKit/Info.plist index 6e2e047f..ce0bd8b0 100644 --- a/LFLiveKit/Info.plist +++ b/LFLiveKit/Info.plist @@ -15,7 +15,7 @@ CFBundlePackageType FMWK CFBundleShortVersionString - 1.9.0 + 1.9.2 CFBundleSignature ???? 
CFBundleVersion diff --git a/LFLiveKit/publish/pili-librtmp/amf.c b/LFLiveKit/publish/pili-librtmp/amf.c new file mode 100755 index 00000000..18630ce2 --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/amf.c @@ -0,0 +1,1037 @@ +/* + * Copyright (C) 2005-2008 Team XBMC + * http://www.xbmc.org + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * http://www.gnu.org/copyleft/lgpl.html + */ + +#include +#include +#include + +#include "amf.h" +#include "bytes.h" +#include "log.h" +#include "rtmp_sys.h" + +static const AMFObjectProperty AMFProp_Invalid = {{0, 0}, AMF_INVALID}; +static const AVal AV_empty = {0, 0}; + +/* Data is Big-Endian */ +unsigned short + AMF_DecodeInt16(const char *data) { + unsigned char *c = (unsigned char *)data; + unsigned short val; + val = (c[0] << 8) | c[1]; + return val; +} + +unsigned int + AMF_DecodeInt24(const char *data) { + unsigned char *c = (unsigned char *)data; + unsigned int val; + val = (c[0] << 16) | (c[1] << 8) | c[2]; + return val; +} + +unsigned int + AMF_DecodeInt32(const char *data) { + unsigned char *c = (unsigned char *)data; + unsigned int val; + val = (c[0] << 24) | (c[1] << 16) | (c[2] << 8) | c[3]; + return val; +} + +void AMF_DecodeString(const char *data, AVal *bv) { + bv->av_len = AMF_DecodeInt16(data); + bv->av_val = (bv->av_len > 0) ? (char *)data + 2 : NULL; +} + +void AMF_DecodeLongString(const char *data, AVal *bv) { + bv->av_len = AMF_DecodeInt32(data); + bv->av_val = (bv->av_len > 0) ? 
(char *)data + 4 : NULL; +} + +double + AMF_DecodeNumber(const char *data) { + double dVal; +#if __FLOAT_WORD_ORDER == __BYTE_ORDER +#if __BYTE_ORDER == __BIG_ENDIAN + memcpy(&dVal, data, 8); +#elif __BYTE_ORDER == __LITTLE_ENDIAN + unsigned char *ci, *co; + ci = (unsigned char *)data; + co = (unsigned char *)&dVal; + co[0] = ci[7]; + co[1] = ci[6]; + co[2] = ci[5]; + co[3] = ci[4]; + co[4] = ci[3]; + co[5] = ci[2]; + co[6] = ci[1]; + co[7] = ci[0]; +#endif +#else +#if __BYTE_ORDER == __LITTLE_ENDIAN /* __FLOAT_WORD_ORER == __BIG_ENDIAN */ + unsigned char *ci, *co; + ci = (unsigned char *)data; + co = (unsigned char *)&dVal; + co[0] = ci[3]; + co[1] = ci[2]; + co[2] = ci[1]; + co[3] = ci[0]; + co[4] = ci[7]; + co[5] = ci[6]; + co[6] = ci[5]; + co[7] = ci[4]; +#else /* __BYTE_ORDER == __BIG_ENDIAN && __FLOAT_WORD_ORER == __LITTLE_ENDIAN */ + unsigned char *ci, *co; + ci = (unsigned char *)data; + co = (unsigned char *)&dVal; + co[0] = ci[4]; + co[1] = ci[5]; + co[2] = ci[6]; + co[3] = ci[7]; + co[4] = ci[0]; + co[5] = ci[1]; + co[6] = ci[2]; + co[7] = ci[3]; +#endif +#endif + return dVal; +} + +int AMF_DecodeBoolean(const char *data) { + return *data != 0; +} + +char * + AMF_EncodeInt16(char *output, char *outend, short nVal) { + if (output + 2 > outend) + return NULL; + + output[1] = nVal & 0xff; + output[0] = nVal >> 8; + return output + 2; +} + +char * + AMF_EncodeInt24(char *output, char *outend, int nVal) { + if (output + 3 > outend) + return NULL; + + output[2] = nVal & 0xff; + output[1] = nVal >> 8; + output[0] = nVal >> 16; + return output + 3; +} + +char * + AMF_EncodeInt32(char *output, char *outend, int nVal) { + if (output + 4 > outend) + return NULL; + + output[3] = nVal & 0xff; + output[2] = nVal >> 8; + output[1] = nVal >> 16; + output[0] = nVal >> 24; + return output + 4; +} + +char * + AMF_EncodeString(char *output, char *outend, const AVal *bv) { + if ((bv->av_len < 65536 && output + 1 + 2 + bv->av_len > outend) || + output + 1 + 4 + bv->av_len > 
outend) + return NULL; + + if (bv->av_len < 65536) { + *output++ = AMF_STRING; + + output = AMF_EncodeInt16(output, outend, bv->av_len); + } else { + *output++ = AMF_LONG_STRING; + + output = AMF_EncodeInt32(output, outend, bv->av_len); + } + memcpy(output, bv->av_val, bv->av_len); + output += bv->av_len; + + return output; +} + +char * + AMF_EncodeNumber(char *output, char *outend, double dVal) { + if (output + 1 + 8 > outend) + return NULL; + + *output++ = AMF_NUMBER; /* type: Number */ + +#if __FLOAT_WORD_ORDER == __BYTE_ORDER +#if __BYTE_ORDER == __BIG_ENDIAN + memcpy(output, &dVal, 8); +#elif __BYTE_ORDER == __LITTLE_ENDIAN + { + unsigned char *ci, *co; + ci = (unsigned char *)&dVal; + co = (unsigned char *)output; + co[0] = ci[7]; + co[1] = ci[6]; + co[2] = ci[5]; + co[3] = ci[4]; + co[4] = ci[3]; + co[5] = ci[2]; + co[6] = ci[1]; + co[7] = ci[0]; + } +#endif +#else +#if __BYTE_ORDER == __LITTLE_ENDIAN /* __FLOAT_WORD_ORER == __BIG_ENDIAN */ + { + unsigned char *ci, *co; + ci = (unsigned char *)&dVal; + co = (unsigned char *)output; + co[0] = ci[3]; + co[1] = ci[2]; + co[2] = ci[1]; + co[3] = ci[0]; + co[4] = ci[7]; + co[5] = ci[6]; + co[6] = ci[5]; + co[7] = ci[4]; + } +#else /* __BYTE_ORDER == __BIG_ENDIAN && __FLOAT_WORD_ORER == __LITTLE_ENDIAN */ + { + unsigned char *ci, *co; + ci = (unsigned char *)&dVal; + co = (unsigned char *)output; + co[0] = ci[4]; + co[1] = ci[5]; + co[2] = ci[6]; + co[3] = ci[7]; + co[4] = ci[0]; + co[5] = ci[1]; + co[6] = ci[2]; + co[7] = ci[3]; + } +#endif +#endif + + return output + 8; +} + +char * + AMF_EncodeBoolean(char *output, char *outend, int bVal) { + if (output + 2 > outend) + return NULL; + + *output++ = AMF_BOOLEAN; + + *output++ = bVal ? 
0x01 : 0x00; + + return output; +} + +char * + AMF_EncodeNamedString(char *output, char *outend, const AVal *strName, const AVal *strValue) { + if (output + 2 + strName->av_len > outend) + return NULL; + output = AMF_EncodeInt16(output, outend, strName->av_len); + + memcpy(output, strName->av_val, strName->av_len); + output += strName->av_len; + + return AMF_EncodeString(output, outend, strValue); +} + +char * + AMF_EncodeNamedNumber(char *output, char *outend, const AVal *strName, double dVal) { + if (output + 2 + strName->av_len > outend) + return NULL; + output = AMF_EncodeInt16(output, outend, strName->av_len); + + memcpy(output, strName->av_val, strName->av_len); + output += strName->av_len; + + return AMF_EncodeNumber(output, outend, dVal); +} + +char * + AMF_EncodeNamedBoolean(char *output, char *outend, const AVal *strName, int bVal) { + if (output + 2 + strName->av_len > outend) + return NULL; + output = AMF_EncodeInt16(output, outend, strName->av_len); + + memcpy(output, strName->av_val, strName->av_len); + output += strName->av_len; + + return AMF_EncodeBoolean(output, outend, bVal); +} + +void AMFProp_GetName(AMFObjectProperty *prop, AVal *name) { + *name = prop->p_name; +} + +void AMFProp_SetName(AMFObjectProperty *prop, AVal *name) { + prop->p_name = *name; +} + +AMFDataType + AMFProp_GetType(AMFObjectProperty *prop) { + return prop->p_type; +} + +double + AMFProp_GetNumber(AMFObjectProperty *prop) { + return prop->p_vu.p_number; +} + +int AMFProp_GetBoolean(AMFObjectProperty *prop) { + return prop->p_vu.p_number != 0; +} + +void AMFProp_GetString(AMFObjectProperty *prop, AVal *str) { + *str = prop->p_vu.p_aval; +} + +void AMFProp_GetObject(AMFObjectProperty *prop, AMFObject *obj) { + *obj = prop->p_vu.p_object; +} + +int AMFProp_IsValid(AMFObjectProperty *prop) { + return prop->p_type != AMF_INVALID; +} + +char * + AMFProp_Encode(AMFObjectProperty *prop, char *pBuffer, char *pBufEnd) { + if (prop->p_type == AMF_INVALID) + return NULL; + + if 
(prop->p_type != AMF_NULL && pBuffer + prop->p_name.av_len + 2 + 1 >= pBufEnd) + return NULL; + + if (prop->p_type != AMF_NULL && prop->p_name.av_len) { + *pBuffer++ = prop->p_name.av_len >> 8; + *pBuffer++ = prop->p_name.av_len & 0xff; + memcpy(pBuffer, prop->p_name.av_val, prop->p_name.av_len); + pBuffer += prop->p_name.av_len; + } + + switch (prop->p_type) { + case AMF_NUMBER: + pBuffer = AMF_EncodeNumber(pBuffer, pBufEnd, prop->p_vu.p_number); + break; + + case AMF_BOOLEAN: + pBuffer = AMF_EncodeBoolean(pBuffer, pBufEnd, prop->p_vu.p_number != 0); + break; + + case AMF_STRING: + pBuffer = AMF_EncodeString(pBuffer, pBufEnd, &prop->p_vu.p_aval); + break; + + case AMF_NULL: + if (pBuffer + 1 >= pBufEnd) + return NULL; + *pBuffer++ = AMF_NULL; + break; + + case AMF_OBJECT: + pBuffer = AMF_Encode(&prop->p_vu.p_object, pBuffer, pBufEnd); + break; + + default: + RTMP_Log(RTMP_LOGERROR, "%s, invalid type. %d", __FUNCTION__, prop->p_type); + pBuffer = NULL; + }; + + return pBuffer; +} + +#define AMF3_INTEGER_MAX 268435455 +#define AMF3_INTEGER_MIN -268435456 + +int AMF3ReadInteger(const char *data, int32_t *valp) { + int i = 0; + int32_t val = 0; + + while (i <= 2) { /* handle first 3 bytes */ + if (data[i] & 0x80) { /* byte used */ + val <<= 7; /* shift up */ + val |= (data[i] & 0x7f); /* add bits */ + i++; + } else { + break; + } + } + + if (i > 2) { /* use 4th byte, all 8bits */ + val <<= 8; + val |= data[3]; + + /* range check */ + if (val > AMF3_INTEGER_MAX) + val -= (1 << 29); + } else { /* use 7bits of last unparsed byte (0xxxxxxx) */ + val <<= 7; + val |= data[i]; + } + + *valp = val; + + return i > 2 ? 
4 : i + 1; +} + +int AMF3ReadString(const char *data, AVal *str) { + int32_t ref = 0; + int len; + assert(str != 0); + + len = AMF3ReadInteger(data, &ref); + data += len; + + if ((ref & 0x1) == 0) { /* reference: 0xxx */ + uint32_t refIndex = (ref >> 1); + RTMP_Log(RTMP_LOGDEBUG, + "%s, string reference, index: %d, not supported, ignoring!", + __FUNCTION__, refIndex); + return len; + } else { + uint32_t nSize = (ref >> 1); + + str->av_val = (char *)data; + str->av_len = nSize; + + return len + nSize; + } + return len; +} + +int AMF3Prop_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize, + int bDecodeName) { + int nOriginalSize = nSize; + AMF3DataType type; + + prop->p_name.av_len = 0; + prop->p_name.av_val = NULL; + + if (nSize == 0 || !pBuffer) { + RTMP_Log(RTMP_LOGDEBUG, "empty buffer/no buffer pointer!"); + return -1; + } + + /* decode name */ + if (bDecodeName) { + AVal name; + int nRes = AMF3ReadString(pBuffer, &name); + + if (name.av_len <= 0) + return nRes; + + prop->p_name = name; + pBuffer += nRes; + nSize -= nRes; + } + + /* decode */ + type = *pBuffer++; + nSize--; + + switch (type) { + case AMF3_UNDEFINED: + case AMF3_NULL: + prop->p_type = AMF_NULL; + break; + case AMF3_FALSE: + prop->p_type = AMF_BOOLEAN; + prop->p_vu.p_number = 0.0; + break; + case AMF3_TRUE: + prop->p_type = AMF_BOOLEAN; + prop->p_vu.p_number = 1.0; + break; + case AMF3_INTEGER: { + int32_t res = 0; + int len = AMF3ReadInteger(pBuffer, &res); + prop->p_vu.p_number = (double)res; + prop->p_type = AMF_NUMBER; + nSize -= len; + break; + } + case AMF3_DOUBLE: + if (nSize < 8) + return -1; + prop->p_vu.p_number = AMF_DecodeNumber(pBuffer); + prop->p_type = AMF_NUMBER; + nSize -= 8; + break; + case AMF3_STRING: + case AMF3_XML_DOC: + case AMF3_XML: { + int len = AMF3ReadString(pBuffer, &prop->p_vu.p_aval); + prop->p_type = AMF_STRING; + nSize -= len; + break; + } + case AMF3_DATE: { + int32_t res = 0; + int len = AMF3ReadInteger(pBuffer, &res); + + nSize -= len; + pBuffer += 
len; + + if ((res & 0x1) == 0) { /* reference */ + uint32_t nIndex = (res >> 1); + RTMP_Log(RTMP_LOGDEBUG, "AMF3_DATE reference: %d, not supported!", nIndex); + } else { + if (nSize < 8) + return -1; + + prop->p_vu.p_number = AMF_DecodeNumber(pBuffer); + nSize -= 8; + prop->p_type = AMF_NUMBER; + } + break; + } + case AMF3_OBJECT: { + int nRes = AMF3_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE); + if (nRes == -1) + return -1; + nSize -= nRes; + prop->p_type = AMF_OBJECT; + break; + } + case AMF3_ARRAY: + case AMF3_BYTE_ARRAY: + default: + RTMP_Log(RTMP_LOGDEBUG, "%s - AMF3 unknown/unsupported datatype 0x%02x, @0x%08X", + __FUNCTION__, (unsigned char)(*pBuffer), pBuffer); + return -1; + } + + return nOriginalSize - nSize; +} + +int AMFProp_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize, + int bDecodeName) { + int nOriginalSize = nSize; + int nRes; + + prop->p_name.av_len = 0; + prop->p_name.av_val = NULL; + + if (nSize == 0 || !pBuffer) { + RTMP_Log(RTMP_LOGDEBUG, "%s: Empty buffer/no buffer pointer!", __FUNCTION__); + return -1; + } + + if (bDecodeName && nSize < 4) { /* at least name (length + at least 1 byte) and 1 byte of data */ + RTMP_Log(RTMP_LOGDEBUG, + "%s: Not enough data for decoding with name, less than 4 bytes!", + __FUNCTION__); + return -1; + } + + if (bDecodeName) { + unsigned short nNameSize = AMF_DecodeInt16(pBuffer); + if (nNameSize > nSize - 2) { + RTMP_Log(RTMP_LOGDEBUG, + "%s: Name size out of range: namesize (%d) > len (%d) - 2", + __FUNCTION__, nNameSize, nSize); + return -1; + } + + AMF_DecodeString(pBuffer, &prop->p_name); + nSize -= 2 + nNameSize; + pBuffer += 2 + nNameSize; + } + + if (nSize == 0) { + return -1; + } + + nSize--; + + prop->p_type = *pBuffer++; + switch (prop->p_type) { + case AMF_NUMBER: + if (nSize < 8) + return -1; + prop->p_vu.p_number = AMF_DecodeNumber(pBuffer); + nSize -= 8; + break; + case AMF_BOOLEAN: + if (nSize < 1) + return -1; + prop->p_vu.p_number = (double)AMF_DecodeBoolean(pBuffer); + 
nSize--; + break; + case AMF_STRING: { + unsigned short nStringSize = AMF_DecodeInt16(pBuffer); + + if (nSize < (long)nStringSize + 2) + return -1; + AMF_DecodeString(pBuffer, &prop->p_vu.p_aval); + nSize -= (2 + nStringSize); + break; + } + case AMF_OBJECT: { + int nRes = AMF_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE); + if (nRes == -1) + return -1; + nSize -= nRes; + break; + } + case AMF_MOVIECLIP: { + RTMP_Log(RTMP_LOGERROR, "AMF_MOVIECLIP reserved!"); + return -1; + break; + } + case AMF_NULL: + case AMF_UNDEFINED: + case AMF_UNSUPPORTED: + prop->p_type = AMF_NULL; + break; + case AMF_REFERENCE: { + RTMP_Log(RTMP_LOGERROR, "AMF_REFERENCE not supported!"); + return -1; + break; + } + case AMF_ECMA_ARRAY: { + nSize -= 4; + + /* next comes the rest, mixed array has a final 0x000009 mark and names, so its an object */ + nRes = AMF_Decode(&prop->p_vu.p_object, pBuffer + 4, nSize, TRUE); + if (nRes == -1) + return -1; + nSize -= nRes; + prop->p_type = AMF_OBJECT; + break; + } + case AMF_OBJECT_END: { + return -1; + break; + } + case AMF_STRICT_ARRAY: { + unsigned int nArrayLen = AMF_DecodeInt32(pBuffer); + nSize -= 4; + + nRes = AMF_DecodeArray(&prop->p_vu.p_object, pBuffer + 4, nSize, + nArrayLen, FALSE); + if (nRes == -1) + return -1; + nSize -= nRes; + prop->p_type = AMF_OBJECT; + break; + } + case AMF_DATE: { + RTMP_Log(RTMP_LOGDEBUG, "AMF_DATE"); + + if (nSize < 10) + return -1; + + prop->p_vu.p_number = AMF_DecodeNumber(pBuffer); + prop->p_UTCoffset = AMF_DecodeInt16(pBuffer + 8); + + nSize -= 10; + break; + } + case AMF_LONG_STRING: { + unsigned int nStringSize = AMF_DecodeInt32(pBuffer); + if (nSize < (long)nStringSize + 4) + return -1; + AMF_DecodeLongString(pBuffer, &prop->p_vu.p_aval); + nSize -= (4 + nStringSize); + prop->p_type = AMF_STRING; + break; + } + case AMF_RECORDSET: { + RTMP_Log(RTMP_LOGERROR, "AMF_RECORDSET reserved!"); + return -1; + break; + } + case AMF_XML_DOC: { + RTMP_Log(RTMP_LOGERROR, "AMF_XML_DOC not supported!"); + return 
-1; + break; + } + case AMF_TYPED_OBJECT: { + RTMP_Log(RTMP_LOGERROR, "AMF_TYPED_OBJECT not supported!"); + return -1; + break; + } + case AMF_AVMPLUS: { + int nRes = AMF3_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE); + if (nRes == -1) + return -1; + nSize -= nRes; + prop->p_type = AMF_OBJECT; + break; + } + default: + RTMP_Log(RTMP_LOGDEBUG, "%s - unknown datatype 0x%02x, @0x%08X", __FUNCTION__, + prop->p_type, pBuffer - 1); + return -1; + } + + return nOriginalSize - nSize; +} + +void AMFProp_Dump(AMFObjectProperty *prop) { + char strRes[256]; + char str[256]; + AVal name; + + if (prop->p_type == AMF_INVALID) { + RTMP_Log(RTMP_LOGDEBUG, "Property: INVALID"); + return; + } + + if (prop->p_type == AMF_NULL) { + RTMP_Log(RTMP_LOGDEBUG, "Property: NULL"); + return; + } + + if (prop->p_name.av_len) { + name = prop->p_name; + } else { + name.av_val = "no-name."; + name.av_len = sizeof("no-name.") - 1; + } + if (name.av_len > 18) + name.av_len = 18; + + snprintf(strRes, 255, "Name: %18.*s, ", name.av_len, name.av_val); + + if (prop->p_type == AMF_OBJECT) { + RTMP_Log(RTMP_LOGDEBUG, "Property: <%sOBJECT>", strRes); + AMF_Dump(&prop->p_vu.p_object); + return; + } + + switch (prop->p_type) { + case AMF_NUMBER: + snprintf(str, 255, "NUMBER:\t%.2f", prop->p_vu.p_number); + break; + case AMF_BOOLEAN: + snprintf(str, 255, "BOOLEAN:\t%s", + prop->p_vu.p_number != 0.0 ? 
"TRUE" : "FALSE"); + break; + case AMF_STRING: + snprintf(str, 255, "STRING:\t%.*s", prop->p_vu.p_aval.av_len, + prop->p_vu.p_aval.av_val); + break; + case AMF_DATE: + snprintf(str, 255, "DATE:\ttimestamp: %.2f, UTC offset: %d", + prop->p_vu.p_number, prop->p_UTCoffset); + break; + default: + snprintf(str, 255, "INVALID TYPE 0x%02x", (unsigned char)prop->p_type); + } + + RTMP_Log(RTMP_LOGDEBUG, "Property: <%s%s>", strRes, str); +} + +void AMFProp_Reset(AMFObjectProperty *prop) { + if (prop->p_type == AMF_OBJECT) + AMF_Reset(&prop->p_vu.p_object); + else { + prop->p_vu.p_aval.av_len = 0; + prop->p_vu.p_aval.av_val = NULL; + } + prop->p_type = AMF_INVALID; +} + +/* AMFObject */ + +char * + AMF_Encode(AMFObject *obj, char *pBuffer, char *pBufEnd) { + int i; + + if (pBuffer + 4 >= pBufEnd) + return NULL; + + *pBuffer++ = AMF_OBJECT; + + for (i = 0; i < obj->o_num; i++) { + char *res = AMFProp_Encode(&obj->o_props[i], pBuffer, pBufEnd); + if (res == NULL) { + RTMP_Log(RTMP_LOGERROR, "AMF_Encode - failed to encode property in index %d", + i); + break; + } else { + pBuffer = res; + } + } + + if (pBuffer + 3 >= pBufEnd) + return NULL; /* no room for the end marker */ + + pBuffer = AMF_EncodeInt24(pBuffer, pBufEnd, AMF_OBJECT_END); + + return pBuffer; +} + +int AMF_DecodeArray(AMFObject *obj, const char *pBuffer, int nSize, + int nArrayLen, int bDecodeName) { + int nOriginalSize = nSize; + int bError = FALSE; + + obj->o_num = 0; + obj->o_props = NULL; + while (nArrayLen > 0) { + AMFObjectProperty prop; + int nRes; + nArrayLen--; + + nRes = AMFProp_Decode(&prop, pBuffer, nSize, bDecodeName); + if (nRes == -1) + bError = TRUE; + else { + nSize -= nRes; + pBuffer += nRes; + AMF_AddProp(obj, &prop); + } + } + if (bError) + return -1; + + return nOriginalSize - nSize; +} + +int AMF3_Decode(AMFObject *obj, const char *pBuffer, int nSize, int bAMFData) { + int nOriginalSize = nSize; + int32_t ref; + int len; + + obj->o_num = 0; + obj->o_props = NULL; + if (bAMFData) { + if 
(*pBuffer != AMF3_OBJECT) + RTMP_Log(RTMP_LOGERROR, + "AMF3 Object encapsulated in AMF stream does not start with AMF3_OBJECT!"); + pBuffer++; + nSize--; + } + + ref = 0; + len = AMF3ReadInteger(pBuffer, &ref); + pBuffer += len; + nSize -= len; + + if ((ref & 1) == 0) { /* object reference, 0xxx */ + uint32_t objectIndex = (ref >> 1); + + RTMP_Log(RTMP_LOGDEBUG, "Object reference, index: %d", objectIndex); + } else /* object instance */ + { + int32_t classRef = (ref >> 1); + + AMF3ClassDef cd = {{0, 0}}; + AMFObjectProperty prop; + + if ((classRef & 0x1) == 0) { /* class reference */ + uint32_t classIndex = (classRef >> 1); + RTMP_Log(RTMP_LOGDEBUG, "Class reference: %d", classIndex); + } else { + int32_t classExtRef = (classRef >> 1); + int i; + + cd.cd_externalizable = (classExtRef & 0x1) == 1; + cd.cd_dynamic = ((classExtRef >> 1) & 0x1) == 1; + + cd.cd_num = classExtRef >> 2; + + /* class name */ + + len = AMF3ReadString(pBuffer, &cd.cd_name); + nSize -= len; + pBuffer += len; + + /*std::string str = className; */ + + RTMP_Log(RTMP_LOGDEBUG, + "Class name: %s, externalizable: %d, dynamic: %d, classMembers: %d", + cd.cd_name.av_val, cd.cd_externalizable, cd.cd_dynamic, + cd.cd_num); + + for (i = 0; i < cd.cd_num; i++) { + AVal memberName; + len = AMF3ReadString(pBuffer, &memberName); + RTMP_Log(RTMP_LOGDEBUG, "Member: %s", memberName.av_val); + AMF3CD_AddProp(&cd, &memberName); + nSize -= len; + pBuffer += len; + } + } + + /* add as referencable object */ + + if (cd.cd_externalizable) { + int nRes; + AVal name = AVC("DEFAULT_ATTRIBUTE"); + + RTMP_Log(RTMP_LOGDEBUG, "Externalizable, TODO check"); + + nRes = AMF3Prop_Decode(&prop, pBuffer, nSize, FALSE); + if (nRes == -1) + RTMP_Log(RTMP_LOGDEBUG, "%s, failed to decode AMF3 property!", + __FUNCTION__); + else { + nSize -= nRes; + pBuffer += nRes; + } + + AMFProp_SetName(&prop, &name); + AMF_AddProp(obj, &prop); + } else { + int nRes, i; + for (i = 0; i < cd.cd_num; i++) /* non-dynamic */ + { + nRes = 
AMF3Prop_Decode(&prop, pBuffer, nSize, FALSE); + if (nRes == -1) + RTMP_Log(RTMP_LOGDEBUG, "%s, failed to decode AMF3 property!", + __FUNCTION__); + + AMFProp_SetName(&prop, AMF3CD_GetProp(&cd, i)); + AMF_AddProp(obj, &prop); + + pBuffer += nRes; + nSize -= nRes; + } + if (cd.cd_dynamic) { + int len = 0; + + do { + nRes = AMF3Prop_Decode(&prop, pBuffer, nSize, TRUE); + AMF_AddProp(obj, &prop); + + pBuffer += nRes; + nSize -= nRes; + + len = prop.p_name.av_len; + } while (len > 0); + } + } + RTMP_Log(RTMP_LOGDEBUG, "class object!"); + } + return nOriginalSize - nSize; +} + +int AMF_Decode(AMFObject *obj, const char *pBuffer, int nSize, int bDecodeName) { + int nOriginalSize = nSize; + int bError = FALSE; /* if there is an error while decoding - try to at least find the end mark AMF_OBJECT_END */ + + obj->o_num = 0; + obj->o_props = NULL; + while (nSize > 0) { + AMFObjectProperty prop; + int nRes; + + if (nSize >= 3 && AMF_DecodeInt24(pBuffer) == AMF_OBJECT_END) { + nSize -= 3; + bError = FALSE; + break; + } + + if (bError) { + RTMP_Log(RTMP_LOGERROR, + "DECODING ERROR, IGNORING BYTES UNTIL NEXT KNOWN PATTERN!"); + nSize--; + pBuffer++; + continue; + } + + nRes = AMFProp_Decode(&prop, pBuffer, nSize, bDecodeName); + if (nRes == -1) + bError = TRUE; + else { + nSize -= nRes; + pBuffer += nRes; + AMF_AddProp(obj, &prop); + } + } + + if (bError) + return -1; + + return nOriginalSize - nSize; +} + +void AMF_AddProp(AMFObject *obj, const AMFObjectProperty *prop) { + if (!(obj->o_num & 0x0f)) + obj->o_props = + realloc(obj->o_props, (obj->o_num + 16) * sizeof(AMFObjectProperty)); + obj->o_props[obj->o_num++] = *prop; +} + +int AMF_CountProp(AMFObject *obj) { + return obj->o_num; +} + +AMFObjectProperty * + AMF_GetProp(AMFObject *obj, const AVal *name, int nIndex) { + if (nIndex >= 0) { + if (nIndex <= obj->o_num) + return &obj->o_props[nIndex]; + } else { + int n; + for (n = 0; n < obj->o_num; n++) { + if (AVMATCH(&obj->o_props[n].p_name, name)) + return &obj->o_props[n]; 
+ } + } + + return (AMFObjectProperty *)&AMFProp_Invalid; +} + +void AMF_Dump(AMFObject *obj) { + int n; + RTMP_Log(RTMP_LOGDEBUG, "(object begin)"); + for (n = 0; n < obj->o_num; n++) { + AMFProp_Dump(&obj->o_props[n]); + } + RTMP_Log(RTMP_LOGDEBUG, "(object end)"); +} + +void AMF_Reset(AMFObject *obj) { + int n; + for (n = 0; n < obj->o_num; n++) { + AMFProp_Reset(&obj->o_props[n]); + } + free(obj->o_props); + obj->o_props = NULL; + obj->o_num = 0; +} + +/* AMF3ClassDefinition */ + +void AMF3CD_AddProp(AMF3ClassDef *cd, AVal *prop) { + if (!(cd->cd_num & 0x0f)) + cd->cd_props = realloc(cd->cd_props, (cd->cd_num + 16) * sizeof(AVal)); + cd->cd_props[cd->cd_num++] = *prop; +} + +AVal * + AMF3CD_GetProp(AMF3ClassDef *cd, int nIndex) { + if (nIndex >= cd->cd_num) + return (AVal *)&AV_empty; + return &cd->cd_props[nIndex]; +} diff --git a/LFLiveKit/publish/pili-librtmp/amf.h b/LFLiveKit/publish/pili-librtmp/amf.h new file mode 100755 index 00000000..77f93e84 --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/amf.h @@ -0,0 +1,180 @@ +#ifndef __AMF_H__ +#define __AMF_H__ +/* + * Copyright (C) 2005-2008 Team XBMC + * http://www.xbmc.org + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. 
If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +#include + +#ifndef TRUE +#define TRUE 1 +#define FALSE 0 +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + AMF_NUMBER = 0, + AMF_BOOLEAN, + AMF_STRING, + AMF_OBJECT, + AMF_MOVIECLIP, /* reserved, not used */ + AMF_NULL, + AMF_UNDEFINED, + AMF_REFERENCE, + AMF_ECMA_ARRAY, + AMF_OBJECT_END, + AMF_STRICT_ARRAY, + AMF_DATE, + AMF_LONG_STRING, + AMF_UNSUPPORTED, + AMF_RECORDSET, /* reserved, not used */ + AMF_XML_DOC, + AMF_TYPED_OBJECT, + AMF_AVMPLUS, /* switch to AMF3 */ + AMF_INVALID = 0xff +} AMFDataType; + +typedef enum { + AMF3_UNDEFINED = 0, + AMF3_NULL, + AMF3_FALSE, + AMF3_TRUE, + AMF3_INTEGER, + AMF3_DOUBLE, + AMF3_STRING, + AMF3_XML_DOC, + AMF3_DATE, + AMF3_ARRAY, + AMF3_OBJECT, + AMF3_XML, + AMF3_BYTE_ARRAY +} AMF3DataType; + +typedef struct AVal { + char *av_val; + int av_len; +} AVal; +#define AVC(str) \ + { str, sizeof(str) - 1 } +#define AVMATCH(a1, a2) \ + ((a1)->av_len == (a2)->av_len && \ + !memcmp((a1)->av_val, (a2)->av_val, (a1)->av_len)) + +struct AMFObjectProperty; + +typedef struct AMFObject { + int o_num; + struct AMFObjectProperty *o_props; +} AMFObject; + +typedef struct AMFObjectProperty { + AVal p_name; + AMFDataType p_type; + union { + double p_number; + AVal p_aval; + AMFObject p_object; + } p_vu; + int16_t p_UTCoffset; +} AMFObjectProperty; + +char *AMF_EncodeString(char *output, char *outend, const AVal *str); +char *AMF_EncodeNumber(char *output, char *outend, double dVal); +char *AMF_EncodeInt16(char *output, char *outend, short nVal); +char *AMF_EncodeInt24(char *output, char *outend, int nVal); +char *AMF_EncodeInt32(char *output, char *outend, int nVal); +char *AMF_EncodeBoolean(char *output, char *outend, int bVal); + +/* Shortcuts for AMFProp_Encode */ +char *AMF_EncodeNamedString(char *output, char *outend, const AVal *name, + const AVal 
*value); +char *AMF_EncodeNamedNumber(char *output, char *outend, const AVal *name, + double dVal); +char *AMF_EncodeNamedBoolean(char *output, char *outend, const AVal *name, + int bVal); + +unsigned short AMF_DecodeInt16(const char *data); +unsigned int AMF_DecodeInt24(const char *data); +unsigned int AMF_DecodeInt32(const char *data); +void AMF_DecodeString(const char *data, AVal *str); +void AMF_DecodeLongString(const char *data, AVal *str); +int AMF_DecodeBoolean(const char *data); +double AMF_DecodeNumber(const char *data); + +char *AMF_Encode(AMFObject *obj, char *pBuffer, char *pBufEnd); +int AMF_Decode(AMFObject *obj, const char *pBuffer, int nSize, int bDecodeName); +int AMF_DecodeArray(AMFObject *obj, const char *pBuffer, int nSize, + int nArrayLen, int bDecodeName); +int AMF3_Decode(AMFObject *obj, const char *pBuffer, int nSize, + int bDecodeName); +void AMF_Dump(AMFObject *obj); +void AMF_Reset(AMFObject *obj); + +void AMF_AddProp(AMFObject *obj, const AMFObjectProperty *prop); +int AMF_CountProp(AMFObject *obj); +AMFObjectProperty *AMF_GetProp(AMFObject *obj, const AVal *name, int nIndex); + +AMFDataType AMFProp_GetType(AMFObjectProperty *prop); +void AMFProp_SetNumber(AMFObjectProperty *prop, double dval); +void AMFProp_SetBoolean(AMFObjectProperty *prop, int bflag); +void AMFProp_SetString(AMFObjectProperty *prop, AVal *str); +void AMFProp_SetObject(AMFObjectProperty *prop, AMFObject *obj); + +void AMFProp_GetName(AMFObjectProperty *prop, AVal *name); +void AMFProp_SetName(AMFObjectProperty *prop, AVal *name); +double AMFProp_GetNumber(AMFObjectProperty *prop); +int AMFProp_GetBoolean(AMFObjectProperty *prop); +void AMFProp_GetString(AMFObjectProperty *prop, AVal *str); +void AMFProp_GetObject(AMFObjectProperty *prop, AMFObject *obj); + +int AMFProp_IsValid(AMFObjectProperty *prop); + +char *AMFProp_Encode(AMFObjectProperty *prop, char *pBuffer, char *pBufEnd); +int AMF3Prop_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize, + int 
bDecodeName); +int AMFProp_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize, + int bDecodeName); + +void AMFProp_Dump(AMFObjectProperty *prop); +void AMFProp_Reset(AMFObjectProperty *prop); + +typedef struct AMF3ClassDef { + AVal cd_name; + char cd_externalizable; + char cd_dynamic; + int cd_num; + AVal *cd_props; +} AMF3ClassDef; + +void AMF3CD_AddProp(AMF3ClassDef *cd, AVal *prop); +AVal *AMF3CD_GetProp(AMF3ClassDef *cd, int idx); + +#ifdef __cplusplus +} +#endif + +#endif /* __AMF_H__ */ diff --git a/LFLiveKit/publish/pili-librtmp/bytes.h b/LFLiveKit/publish/pili-librtmp/bytes.h new file mode 100755 index 00000000..87221cf1 --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/bytes.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2005-2008 Team XBMC + * http://www.xbmc.org + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * http://www.gnu.org/copyleft/lgpl.html + */ + +#ifndef __BYTES_H__ +#define __BYTES_H__ + +#include + +#ifdef _WIN32 +/* Windows is little endian only */ +#define __LITTLE_ENDIAN 1234 +#define __BIG_ENDIAN 4321 +#define __BYTE_ORDER __LITTLE_ENDIAN +#define __FLOAT_WORD_ORDER __BYTE_ORDER + +typedef unsigned char uint8_t; + +#else /* !_WIN32 */ + +#include + +#if defined(BYTE_ORDER) && !defined(__BYTE_ORDER) +#define __BYTE_ORDER BYTE_ORDER +#endif + +#if defined(BIG_ENDIAN) && !defined(__BIG_ENDIAN) +#define __BIG_ENDIAN BIG_ENDIAN +#endif + +#if defined(LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN) +#define __LITTLE_ENDIAN LITTLE_ENDIAN +#endif + +#endif /* !_WIN32 */ + +/* define default endianness */ +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN 1234 +#endif + +#ifndef __BIG_ENDIAN +#define __BIG_ENDIAN 4321 +#endif + +#ifndef __BYTE_ORDER +#warning "Byte order not defined on your system, assuming little endian!" +#define __BYTE_ORDER __LITTLE_ENDIAN +#endif + +/* ok, we assume to have the same float word order and byte order if float word + * order is not defined */ +#ifndef __FLOAT_WORD_ORDER +#warning "Float word order not defined, assuming the same as byte order!" +#define __FLOAT_WORD_ORDER __BYTE_ORDER +#endif + +#if !defined(__BYTE_ORDER) || !defined(__FLOAT_WORD_ORDER) +#error "Undefined byte or float word order!" +#endif + +#if __FLOAT_WORD_ORDER != __BIG_ENDIAN && __FLOAT_WORD_ORDER != __LITTLE_ENDIAN +#error "Unknown/unsupported float word order!" +#endif + +#if __BYTE_ORDER != __BIG_ENDIAN && __BYTE_ORDER != __LITTLE_ENDIAN +#error "Unknown/unsupported byte order!" +#endif + +#endif diff --git a/LFLiveKit/publish/pili-librtmp/dh.h b/LFLiveKit/publish/pili-librtmp/dh.h new file mode 100755 index 00000000..d7aeb5a5 --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/dh.h @@ -0,0 +1,345 @@ +/* RTMPDump - Diffie-Hellmann Key Exchange + * Copyright (C) 2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. 
+ * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +#include +#include +#include +#include +#include + +#ifdef USE_POLARSSL +#include +typedef mpi *MP_t; +#define MP_new(m) \ + m = malloc(sizeof(mpi)); \ + mpi_init(m, NULL) +#define MP_set_w(mpi, w) mpi_lset(mpi, w) +#define MP_cmp(u, v) mpi_cmp_mpi(u, v) +#define MP_set(u, v) mpi_copy(u, v) +#define MP_sub_w(mpi, w) mpi_sub_int(mpi, mpi, w) +#define MP_cmp_1(mpi) mpi_cmp_int(mpi, 1) +#define MP_modexp(r, y, q, p) mpi_exp_mod(r, y, q, p, NULL) +#define MP_free(mpi) \ + mpi_free(mpi, NULL); \ + free(mpi) +#define MP_gethex(u, hex, res) \ + MP_new(u); \ + res = mpi_read_string(u, 16, hex) == 0 +#define MP_bytes(u) mpi_size(u) +#define MP_setbin(u, buf, len) mpi_write_binary(u, buf, len) +#define MP_getbin(u, buf, len) \ + MP_new(u); \ + mpi_read_binary(u, buf, len) + +typedef struct MDH { + MP_t p; + MP_t g; + MP_t pub_key; + MP_t priv_key; + long length; + dhm_context ctx; +} MDH; + +#define MDH_new() calloc(1, sizeof(MDH)) +#define MDH_free(vp) \ + { \ + MDH *dh = vp; \ + dhm_free(&dh->ctx); \ + MP_free(dh->p); \ + MP_free(dh->g); \ + MP_free(dh->pub_key); \ + MP_free(dh->priv_key); \ + free(dh); \ + } + +static int MDH_generate_key(MDH *dh) { + unsigned char out[2]; + 
MP_set(&dh->ctx.P, dh->p); + MP_set(&dh->ctx.G, dh->g); + dh->ctx.len = 128; + dhm_make_public(&dh->ctx, 1024, out, 1, havege_rand, &RTMP_TLS_ctx->hs); + MP_new(dh->pub_key); + MP_new(dh->priv_key); + MP_set(dh->pub_key, &dh->ctx.GX); + MP_set(dh->priv_key, &dh->ctx.X); + return 1; +} + +static int MDH_compute_key(uint8_t *secret, size_t len, MP_t pub, MDH *dh) { + int n = len; + MP_set(&dh->ctx.GY, pub); + dhm_calc_secret(&dh->ctx, secret, &n); + return 0; +} + +#elif defined(USE_GNUTLS) +#include +typedef gcry_mpi_t MP_t; +#define MP_new(m) m = gcry_mpi_new(1) +#define MP_set_w(mpi, w) gcry_mpi_set_ui(mpi, w) +#define MP_cmp(u, v) gcry_mpi_cmp(u, v) +#define MP_set(u, v) gcry_mpi_set(u, v) +#define MP_sub_w(mpi, w) gcry_mpi_sub_ui(mpi, mpi, w) +#define MP_cmp_1(mpi) gcry_mpi_cmp_ui(mpi, 1) +#define MP_modexp(r, y, q, p) gcry_mpi_powm(r, y, q, p) +#define MP_free(mpi) gcry_mpi_release(mpi) +#define MP_gethex(u, hex, res) \ + res = (gcry_mpi_scan(&u, GCRYMPI_FMT_HEX, hex, 0, 0) == 0) +#define MP_bytes(u) (gcry_mpi_get_nbits(u) + 7) / 8 +#define MP_setbin(u, buf, len) \ + gcry_mpi_print(GCRYMPI_FMT_USG, buf, len, NULL, u) +#define MP_getbin(u, buf, len) \ + gcry_mpi_scan(&u, GCRYMPI_FMT_USG, buf, len, NULL) + +typedef struct MDH { + MP_t p; + MP_t g; + MP_t pub_key; + MP_t priv_key; + long length; +} MDH; + +#define MDH_new() calloc(1, sizeof(MDH)) +#define MDH_free(dh) \ + do { \ + MP_free(((MDH *)(dh))->p); \ + MP_free(((MDH *)(dh))->g); \ + MP_free(((MDH *)(dh))->pub_key); \ + MP_free(((MDH *)(dh))->priv_key); \ + free(dh); \ + } while (0) + +extern MP_t gnutls_calc_dh_secret(MP_t *priv, MP_t g, MP_t p); +extern MP_t gnutls_calc_dh_key(MP_t y, MP_t x, MP_t p); + +#define MDH_generate_key(dh) \ + (dh->pub_key = gnutls_calc_dh_secret(&dh->priv_key, dh->g, dh->p)) +static int MDH_compute_key(uint8_t *secret, size_t len, MP_t pub, MDH *dh) { + MP_t sec = gnutls_calc_dh_key(pub, dh->priv_key, dh->p); + if (sec) { + MP_setbin(sec, secret, len); + MP_free(sec); + return 
0; + } else + return -1; +} + +#else /* USE_OPENSSL */ +#include +#include + +typedef BIGNUM *MP_t; +#define MP_new(m) m = BN_new() +#define MP_set_w(mpi, w) BN_set_word(mpi, w) +#define MP_cmp(u, v) BN_cmp(u, v) +#define MP_set(u, v) BN_copy(u, v) +#define MP_sub_w(mpi, w) BN_sub_word(mpi, w) +#define MP_cmp_1(mpi) BN_cmp(mpi, BN_value_one()) +#define MP_modexp(r, y, q, p) \ + do { \ + BN_CTX *ctx = BN_CTX_new(); \ + BN_mod_exp(r, y, q, p, ctx); \ + BN_CTX_free(ctx); \ + } while (0) +#define MP_free(mpi) BN_free(mpi) +#define MP_gethex(u, hex, res) res = BN_hex2bn(&u, hex) +#define MP_bytes(u) BN_num_bytes(u) +#define MP_setbin(u, buf, len) BN_bn2bin(u, buf) +#define MP_getbin(u, buf, len) u = BN_bin2bn(buf, len, 0) + +#define MDH DH +#define MDH_new() DH_new() +#define MDH_free(dh) DH_free(dh) +#define MDH_generate_key(dh) DH_generate_key(dh) +#define MDH_compute_key(secret, seclen, pub, dh) DH_compute_key(secret, pub, dh) + +#endif + +#include "dhgroups.h" +#include "log.h" + +/* RFC 2631, Section 2.1.5, http://www.ietf.org/rfc/rfc2631.txt */ +static int isValidPublicKey(MP_t y, MP_t p, MP_t q) { + int ret = TRUE; + MP_t bn; + assert(y); + + MP_new(bn); + assert(bn); + + /* y must lie in [2,p-1] */ + MP_set_w(bn, 1); + if (MP_cmp(y, bn) < 0) { + RTMP_Log(RTMP_LOGERROR, "DH public key must be at least 2"); + ret = FALSE; + goto failed; + } + + /* bn = p-2 */ + MP_set(bn, p); + MP_sub_w(bn, 1); + if (MP_cmp(y, bn) > 0) { + RTMP_Log(RTMP_LOGERROR, "DH public key must be at most p-2"); + ret = FALSE; + goto failed; + } + + /* Verify with Sophie-Germain prime + * + * This is a nice test to make sure the public key position is calculated + * correctly. This test will fail in about 50% of the cases if applied to + * random data. 
+ */ + if (q) { + /* y must fulfill y^q mod p = 1 */ + MP_modexp(bn, y, q, p); + + if (MP_cmp_1(bn) != 0) { + RTMP_Log(RTMP_LOGWARNING, "DH public key does not fulfill y^q mod p = 1"); + } + } + +failed: + MP_free(bn); + return ret; +} + +static MDH *DHInit(int nKeyBits) { + size_t res; + MDH *dh = MDH_new(); + + if (!dh) + goto failed; + + MP_new(dh->g); + + if (!dh->g) + goto failed; + + MP_gethex(dh->p, P1024, res); /* prime P1024, see dhgroups.h */ + if (!res) { + goto failed; + } + + MP_set_w(dh->g, 2); /* base 2 */ + + dh->length = nKeyBits; + return dh; + +failed: + if (dh) + MDH_free(dh); + + return 0; +} + +static int DHGenerateKey(MDH *dh) { + size_t res = 0; + if (!dh) + return 0; + + while (!res) { + MP_t q1 = NULL; + + if (!MDH_generate_key(dh)) + return 0; + + MP_gethex(q1, Q1024, res); + assert(res); + + res = isValidPublicKey(dh->pub_key, dh->p, q1); + if (!res) { + MP_free(dh->pub_key); + MP_free(dh->priv_key); + dh->pub_key = dh->priv_key = 0; + } + + MP_free(q1); + } + return 1; +} + +/* fill pubkey with the public key in BIG ENDIAN order + * 00 00 00 00 00 x1 x2 x3 ..... 
+ */ + +static int DHGetPublicKey(MDH *dh, uint8_t *pubkey, size_t nPubkeyLen) { + int len; + if (!dh || !dh->pub_key) + return 0; + + len = MP_bytes(dh->pub_key); + if (len <= 0 || len > (int)nPubkeyLen) + return 0; + + memset(pubkey, 0, nPubkeyLen); + MP_setbin(dh->pub_key, pubkey + (nPubkeyLen - len), len); + return 1; +} + +#if 0 /* unused */ +static int +DHGetPrivateKey(MDH *dh, uint8_t *privkey, size_t nPrivkeyLen) +{ + if (!dh || !dh->priv_key) + return 0; + + int len = MP_bytes(dh->priv_key); + if (len <= 0 || len > (int) nPrivkeyLen) + return 0; + + memset(privkey, 0, nPrivkeyLen); + MP_setbin(dh->priv_key, privkey + (nPrivkeyLen - len), len); + return 1; +} +#endif + +/* computes the shared secret key from the private MDH value and the + * other party's public key (pubkey) + */ +static int DHComputeSharedSecretKey(MDH *dh, uint8_t *pubkey, size_t nPubkeyLen, + uint8_t *secret) { + MP_t q1 = NULL, pubkeyBn = NULL; + size_t len; + int res; + + if (!dh || !secret || nPubkeyLen >= INT_MAX) + return -1; + + MP_getbin(pubkeyBn, pubkey, nPubkeyLen); + if (!pubkeyBn) + return -1; + + MP_gethex(q1, Q1024, len); + assert(len); + + if (isValidPublicKey(pubkeyBn, dh->p, q1)) + res = MDH_compute_key(secret, nPubkeyLen, pubkeyBn, dh); + else + res = -1; + + MP_free(q1); + MP_free(pubkeyBn); + + return res; +} diff --git a/LFLiveKit/publish/pili-librtmp/dhgroups.h b/LFLiveKit/publish/pili-librtmp/dhgroups.h new file mode 100755 index 00000000..f3d0293f --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/dhgroups.h @@ -0,0 +1,198 @@ +/* librtmp - Diffie-Hellmann Key Exchange + * Copyright (C) 2009 Andrej Stepanchuk + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. 
+ * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +/* from RFC 3526, see http://www.ietf.org/rfc/rfc3526.txt */ + +/* 2^768 - 2 ^704 - 1 + 2^64 * { [2^638 pi] + 149686 } */ +#define P768 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A63A3620FFFFFFFFFFFFFFFF" + +/* 2^1024 - 2^960 - 1 + 2^64 * { [2^894 pi] + 129093 } */ +#define P1024 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381" \ + "FFFFFFFFFFFFFFFF" + +/* Group morder largest prime factor: */ +#define Q1024 \ + "7FFFFFFFFFFFFFFFE487ED5110B4611A62633145C06E0E68" \ + "948127044533E63A0105DF531D89CD9128A5043CC71A026E" \ + "F7CA8CD9E69D218D98158536F92F8A1BA7F09AB6B6A8E122" \ + "F242DABB312F3F637A262174D31BF6B585FFAE5B7A035BF6" \ + "F71C35FDAD44CFD2D74F9208BE258FF324943328F67329C0" \ + "FFFFFFFFFFFFFFFF" + +/* 2^1536 - 2^1472 - 1 + 2^64 * { [2^1406 pi] + 741804 } */ +#define P1536 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ + 
"C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF" + +/* 2^2048 - 2^1984 - 1 + 2^64 * { [2^1918 pi] + 124476 } */ +#define P2048 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ + "15728E5A8AACAA68FFFFFFFFFFFFFFFF" + +/* 2^3072 - 2^3008 - 1 + 2^64 * { [2^2942 pi] + 1690314 } */ +#define P3072 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" \ + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" \ + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" \ + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" \ + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" \ + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF" + +/* 2^4096 - 2^4032 - 1 + 2^64 * { [2^3966 pi] + 240904 } */ +#define P4096 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + 
"EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" \ + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" \ + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" \ + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" \ + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" \ + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" \ + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" \ + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" \ + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" \ + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" \ + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" \ + "FFFFFFFFFFFFFFFF" + +/* 2^6144 - 2^6080 - 1 + 2^64 * { [2^6014 pi] + 929484 } */ +#define P6144 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" \ + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" \ + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" \ + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" \ + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" \ + 
"43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" \ + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" \ + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" \ + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" \ + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" \ + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" \ + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" \ + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" \ + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" \ + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" \ + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" \ + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" \ + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" \ + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" \ + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" \ + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" \ + "12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF" + +/* 2^8192 - 2^8128 - 1 + 2^64 * { [2^8062 pi] + 4743158 } */ +#define P8192 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" \ + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" \ + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" \ + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" \ + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" \ + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" \ + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" \ + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" 
\ + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" \ + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" \ + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" \ + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" \ + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" \ + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" \ + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" \ + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" \ + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" \ + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" \ + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" \ + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" \ + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" \ + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" \ + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" \ + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" \ + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" \ + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" \ + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" \ + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" \ + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" \ + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" \ + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" \ + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" \ + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF" diff --git a/LFLiveKit/publish/pili-librtmp/error.c b/LFLiveKit/publish/pili-librtmp/error.c new file mode 100755 index 00000000..b47913df --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/error.c @@ -0,0 +1,26 @@ +#include "error.h" +#include +#include + +void RTMPError_Alloc(RTMPError *error, size_t msg_size) { + RTMPError_Free(error); + + error->code = 0; + error->message = (char *)malloc(msg_size + 1); + memset(error->message, 0, msg_size); +} + +void RTMPError_Free(RTMPError *error) { + if (error) { + if (error->message) { + free(error->message); + error->message = NULL; + } + } +} + +void RTMPError_Message(RTMPError *error, 
int code, const char *message) { + RTMPError_Alloc(error, strlen(message)); + error->code = code; + strcpy(error->message, message); +} diff --git a/LFLiveKit/publish/pili-librtmp/error.h b/LFLiveKit/publish/pili-librtmp/error.h new file mode 100755 index 00000000..6a8dcbe4 --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/error.h @@ -0,0 +1,46 @@ +#ifndef __ERROR_H__ +#define __ERROR_H__ + +#include + +typedef struct RTMPError { + int code; + char *message; +} RTMPError; + +void RTMPError_Alloc(RTMPError *error, size_t msg_size); +void RTMPError_Free(RTMPError *error); +void RTMPError_Message(RTMPError *error, int code, const char *message); + +// error defines +enum { + RTMPErrorUnknow = -1, // "Unknow error" + RTMPErrorUnknowOption = -999, // "Unknown option %s" + RTMPErrorAccessDNSFailed = -1000, // "Failed to access the DNS. (addr: %s)" + RTMPErrorFailedToConnectSocket = + -1001, // "Failed to connect socket. %d (%s)" + RTMPErrorSocksNegotiationFailed = -1002, // "Socks negotiation failed" + RTMPErrorFailedToCreateSocket = + -1003, // "Failed to create socket. %d (%s)" + RTMPErrorHandshakeFailed = -1004, // "Handshake failed" + RTMPErrorRTMPConnectFailed = -1005, // "RTMP connect failed" + RTMPErrorSendFailed = -1006, // "Send error %d (%s), (%d bytes)" + RTMPErrorServerRequestedClose = -1007, // "RTMP server requested close" + RTMPErrorNetStreamFailed = -1008, // "NetStream failed" + RTMPErrorNetStreamPlayFailed = -1009, // "NetStream play failed" + RTMPErrorNetStreamPlayStreamNotFound = + -1010, // "NetStream play stream not found" + RTMPErrorNetConnectionConnectInvalidApp = + -1011, // "NetConnection connect invalip app" + RTMPErrorSanityFailed = + -1012, // "Sanity failed. 
Trying to send header of type: 0x%02X" + RTMPErrorSocketClosedByPeer = -1013, // "RTMP socket closed by peer" + RTMPErrorRTMPConnectStreamFailed = -1014, // "RTMP connect stream failed" + RTMPErrorSocketTimeout = -1015, // "RTMP socket timeout" + + // SSL errors + RTMPErrorTLSConnectFailed = -1200, // "TLS_Connect failed" + RTMPErrorNoSSLOrTLSSupport = -1201, // "No SSL/TLS support" +}; + +#endif diff --git a/LFLiveKit/publish/pili-librtmp/handshake.h b/LFLiveKit/publish/pili-librtmp/handshake.h new file mode 100755 index 00000000..f791cf74 --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/handshake.h @@ -0,0 +1,1034 @@ +/* + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * Copyright (C) 2010 + * 2a665470ced7adb7156fcef47f8199a6371c117b8a79e399a2771e0b36384090 + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * http://www.gnu.org/copyleft/lgpl.html + */ + +/* This file is #included in rtmp.c, it is not meant to be compiled alone */ + +#ifdef USE_POLARSSL +#include +#include +#ifndef SHA256_DIGEST_LENGTH +#define SHA256_DIGEST_LENGTH 32 +#endif +#define HMAC_CTX sha2_context +#define HMAC_setup(ctx, key, len) \ + sha2_hmac_starts(&ctx, (unsigned char *)key, len, 0) +#define HMAC_crunch(ctx, buf, len) sha2_hmac_update(&ctx, buf, len) +#define HMAC_finish(ctx, dig, dlen) \ + dlen = SHA256_DIGEST_LENGTH; \ + sha2_hmac_finish(&ctx, dig) + +typedef arc4_context *RC4_handle; +#define RC4_alloc(h) *h = malloc(sizeof(arc4_context)) +#define RC4_setkey(h, l, k) arc4_setup(h, k, l) +#define RC4_encrypt(h, l, d) \ + arc4_crypt(h, l, (unsigned char *)d, (unsigned char *)d) +#define RC4_encrypt2(h, l, s, d) \ + arc4_crypt(h, l, (unsigned char *)s, (unsigned char *)d) +#define RC4_free(h) free(h) + +#elif defined(USE_GNUTLS) +#include +#ifndef SHA256_DIGEST_LENGTH +#define SHA256_DIGEST_LENGTH 32 +#endif +#define HMAC_CTX gcry_md_hd_t +#define HMAC_setup(ctx, key, len) \ + gcry_md_open(&ctx, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC); \ + gcry_md_setkey(ctx, key, len) +#define HMAC_crunch(ctx, buf, len) gcry_md_write(ctx, buf, len) +#define HMAC_finish(ctx, dig, dlen) \ + dlen = SHA256_DIGEST_LENGTH; \ + memcpy(dig, gcry_md_read(ctx, 0), dlen); \ + gcry_md_close(ctx) + +typedef gcry_cipher_hd_t RC4_handle; +#define RC4_alloc(h) \ + gcry_cipher_open(h, GCRY_CIPHER_ARCFOUR, GCRY_CIPHER_MODE_STREAM, 0) +#define RC4_setkey(h, l, k) gcry_cipher_setkey(h, k, l) +#define RC4_encrypt(h, l, d) gcry_cipher_encrypt(h, (void *)d, l, NULL, 0) +#define RC4_encrypt2(h, l, s, d) \ + gcry_cipher_encrypt(h, (void *)d, l, (void *)s, l) +#define RC4_free(h) gcry_cipher_close(h) + +#else /* USE_OPENSSL */ +#include +#include +#include +#if OPENSSL_VERSION_NUMBER < 0x0090800 || !defined(SHA256_DIGEST_LENGTH) +#error Your OpenSSL is too old, need 0.9.8 or newer with SHA256 +#endif +#define HMAC_setup(ctx, key, 
len) \ + HMAC_CTX_init(&ctx); \ + HMAC_Init_ex(&ctx, key, len, EVP_sha256(), 0) +#define HMAC_crunch(ctx, buf, len) HMAC_Update(&ctx, buf, len) +#define HMAC_finish(ctx, dig, dlen) \ + HMAC_Final(&ctx, dig, &dlen); \ + HMAC_CTX_cleanup(&ctx) + +typedef RC4_KEY *RC4_handle; +#define RC4_alloc(h) *h = malloc(sizeof(RC4_KEY)) +#define RC4_setkey(h, l, k) RC4_set_key(h, l, k) +#define RC4_encrypt(h, l, d) RC4(h, l, (uint8_t *)d, (uint8_t *)d) +#define RC4_encrypt2(h, l, s, d) RC4(h, l, (uint8_t *)s, (uint8_t *)d) +#define RC4_free(h) free(h) +#endif + +#define FP10 + +#include "dh.h" + +static const uint8_t GenuineFMSKey[] = { + 0x47, 0x65, 0x6e, 0x75, 0x69, 0x6e, 0x65, 0x20, 0x41, 0x64, 0x6f, + 0x62, 0x65, 0x20, 0x46, 0x6c, 0x61, 0x73, 0x68, 0x20, 0x4d, 0x65, + 0x64, 0x69, 0x61, 0x20, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, + 0x30, 0x30, 0x31, /* Genuine Adobe Flash Media Server 001 */ + + 0xf0, 0xee, 0xc2, 0x4a, 0x80, 0x68, 0xbe, 0xe8, 0x2e, 0x00, 0xd0, + 0xd1, 0x02, 0x9e, 0x7e, 0x57, 0x6e, 0xec, 0x5d, 0x2d, 0x29, 0x80, + 0x6f, 0xab, 0x93, 0xb8, 0xe6, 0x36, 0xcf, 0xeb, 0x31, 0xae}; /* 68 */ + +static const uint8_t GenuineFPKey[] = { + 0x47, 0x65, 0x6E, 0x75, 0x69, 0x6E, 0x65, 0x20, 0x41, 0x64, 0x6F, + 0x62, 0x65, 0x20, 0x46, 0x6C, 0x61, 0x73, 0x68, 0x20, 0x50, 0x6C, + 0x61, 0x79, 0x65, 0x72, 0x20, 0x30, 0x30, 0x31, /* Genuine Adobe Flash + Player 001 */ + 0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, + 0xD1, 0x02, 0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, + 0x6F, 0xAB, 0x93, 0xB8, 0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE}; /* 62 */ + +static void InitRC4Encryption(uint8_t *secretKey, uint8_t *pubKeyIn, + uint8_t *pubKeyOut, RC4_handle *rc4keyIn, + RC4_handle *rc4keyOut) { + uint8_t digest[SHA256_DIGEST_LENGTH]; + unsigned int digestLen = 0; + HMAC_CTX ctx; + + RC4_alloc(rc4keyIn); + RC4_alloc(rc4keyOut); + + HMAC_setup(ctx, secretKey, 128); + HMAC_crunch(ctx, pubKeyIn, 128); + HMAC_finish(ctx, digest, digestLen); + + 
RTMP_Log(RTMP_LOGDEBUG, "RC4 Out Key: "); + RTMP_LogHex(RTMP_LOGDEBUG, digest, 16); + + RC4_setkey(*rc4keyOut, 16, digest); + + HMAC_setup(ctx, secretKey, 128); + HMAC_crunch(ctx, pubKeyOut, 128); + HMAC_finish(ctx, digest, digestLen); + + RTMP_Log(RTMP_LOGDEBUG, "RC4 In Key: "); + RTMP_LogHex(RTMP_LOGDEBUG, digest, 16); + + RC4_setkey(*rc4keyIn, 16, digest); +} + +typedef unsigned int(getoff)(uint8_t *buf, unsigned int len); + +static unsigned int GetDHOffset2(uint8_t *handshake, unsigned int len) { + unsigned int offset = 0; + uint8_t *ptr = handshake + 768; + unsigned int res; + + assert(RTMP_SIG_SIZE <= len); + + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + + res = (offset % 632) + 8; + + if (res + 128 > 767) { + RTMP_Log(RTMP_LOGERROR, + "%s: Couldn't calculate correct DH offset (got %d), exiting!", + __FUNCTION__, res); + exit(1); + } + return res; +} + +static unsigned int GetDigestOffset2(uint8_t *handshake, unsigned int len) { + unsigned int offset = 0; + uint8_t *ptr = handshake + 772; + unsigned int res; + + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + + res = (offset % 728) + 776; + + if (res + 32 > 1535) { + RTMP_Log(RTMP_LOGERROR, + "%s: Couldn't calculate correct digest offset (got %d), exiting", + __FUNCTION__, res); + exit(1); + } + return res; +} + +static unsigned int GetDHOffset1(uint8_t *handshake, unsigned int len) { + unsigned int offset = 0; + uint8_t *ptr = handshake + 1532; + unsigned int res; + + assert(RTMP_SIG_SIZE <= len); + + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + + res = (offset % 632) + 772; + + if (res + 128 > 1531) { + RTMP_Log(RTMP_LOGERROR, + "%s: Couldn't calculate DH offset (got %d), exiting!", + __FUNCTION__, res); + exit(1); + } + + return res; +} + +static unsigned int GetDigestOffset1(uint8_t *handshake, unsigned int len) { + unsigned 
int offset = 0; + uint8_t *ptr = handshake + 8; + unsigned int res; + + assert(12 <= len); + + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + + res = (offset % 728) + 12; + + if (res + 32 > 771) { + RTMP_Log(RTMP_LOGERROR, + "%s: Couldn't calculate digest offset (got %d), exiting!", + __FUNCTION__, res); + exit(1); + } + + return res; +} + +static getoff *digoff[] = {GetDigestOffset1, GetDigestOffset2}; +static getoff *dhoff[] = {GetDHOffset1, GetDHOffset2}; + +static void HMACsha256(const uint8_t *message, size_t messageLen, + const uint8_t *key, size_t keylen, uint8_t *digest) { + unsigned int digestLen; + HMAC_CTX ctx; + + HMAC_setup(ctx, key, keylen); + HMAC_crunch(ctx, message, messageLen); + HMAC_finish(ctx, digest, digestLen); + + assert(digestLen == 32); +} + +static void CalculateDigest(unsigned int digestPos, uint8_t *handshakeMessage, + const uint8_t *key, size_t keyLen, + uint8_t *digest) { + const int messageLen = RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH; + uint8_t message[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH]; + + memcpy(message, handshakeMessage, digestPos); + memcpy(message + digestPos, + &handshakeMessage[digestPos + SHA256_DIGEST_LENGTH], + messageLen - digestPos); + + HMACsha256(message, messageLen, key, keyLen, digest); +} + +static int VerifyDigest(unsigned int digestPos, uint8_t *handshakeMessage, + const uint8_t *key, size_t keyLen) { + uint8_t calcDigest[SHA256_DIGEST_LENGTH]; + + CalculateDigest(digestPos, handshakeMessage, key, keyLen, calcDigest); + + return memcmp(&handshakeMessage[digestPos], calcDigest, + SHA256_DIGEST_LENGTH) == 0; +} + +/* handshake + * + * Type = [1 bytes] plain: 0x03, encrypted: 0x06, 0x08, 0x09 + * -------------------------------------------------------------------- [1536 + * bytes] + * Uptime = [4 bytes] big endian unsigned number, uptime + * Version = [4 bytes] each byte represents a version number, e.g. + * 9.0.124.0 + * ... 
+ * + */ + +static const uint32_t rtmpe8_keys[16][4] = { + {0xbff034b2, 0x11d9081f, 0xccdfb795, 0x748de732}, + {0x086a5eb6, 0x1743090e, 0x6ef05ab8, 0xfe5a39e2}, + {0x7b10956f, 0x76ce0521, 0x2388a73a, 0x440149a1}, + {0xa943f317, 0xebf11bb2, 0xa691a5ee, 0x17f36339}, + {0x7a30e00a, 0xb529e22c, 0xa087aea5, 0xc0cb79ac}, + {0xbdce0c23, 0x2febdeff, 0x1cfaae16, 0x1123239d}, + {0x55dd3f7b, 0x77e7e62e, 0x9bb8c499, 0xc9481ee4}, + {0x407bb6b4, 0x71e89136, 0xa7aebf55, 0xca33b839}, + {0xfcf6bdc3, 0xb63c3697, 0x7ce4f825, 0x04d959b2}, + {0x28e091fd, 0x41954c4c, 0x7fb7db00, 0xe3a066f8}, + {0x57845b76, 0x4f251b03, 0x46d45bcd, 0xa2c30d29}, + {0x0acceef8, 0xda55b546, 0x03473452, 0x5863713b}, + {0xb82075dc, 0xa75f1fee, 0xd84268e8, 0xa72a44cc}, + {0x07cf6e9e, 0xa16d7b25, 0x9fa7ae6c, 0xd92f5629}, + {0xfeb1eae4, 0x8c8c3ce1, 0x4e0064a7, 0x6a387c2a}, + {0x893a9427, 0xcc3013a2, 0xf106385b, 0xa829f927}}; + +/* RTMPE type 8 uses XTEA on the regular signature + * http://en.wikipedia.org/wiki/XTEA + */ +static void rtmpe8_sig(uint8_t *in, uint8_t *out, int keyid) { + unsigned int i, num_rounds = 32; + uint32_t v0, v1, sum = 0, delta = 0x9E3779B9; + uint32_t const *k; + + v0 = in[0] | (in[1] << 8) | (in[2] << 16) | (in[3] << 24); + v1 = in[4] | (in[5] << 8) | (in[6] << 16) | (in[7] << 24); + k = rtmpe8_keys[keyid]; + + for (i = 0; i < num_rounds; i++) { + v0 += (((v1 << 4) ^ (v1 >> 5)) + v1) ^ (sum + k[sum & 3]); + sum += delta; + v1 += (((v0 << 4) ^ (v0 >> 5)) + v0) ^ (sum + k[(sum >> 11) & 3]); + } + + out[0] = v0; + v0 >>= 8; + out[1] = v0; + v0 >>= 8; + out[2] = v0; + v0 >>= 8; + out[3] = v0; + + out[4] = v1; + v1 >>= 8; + out[5] = v1; + v1 >>= 8; + out[6] = v1; + v1 >>= 8; + out[7] = v1; +} + +static int HandShake(RTMP *r, int FP9HandShake) { + int i, offalg = 0; + int dhposClient = 0; + int digestPosClient = 0; + int encrypted = r->Link.protocol & RTMP_FEATURE_ENC; + + RC4_handle keyIn = 0; + RC4_handle keyOut = 0; + + int32_t *ip; + uint32_t uptime; + + uint8_t clientbuf[RTMP_SIG_SIZE + 
4], *clientsig = clientbuf + 4; + uint8_t serversig[RTMP_SIG_SIZE], client2[RTMP_SIG_SIZE], *reply; + uint8_t type; + getoff *getdh = NULL, *getdig = NULL; + + if (encrypted || r->Link.SWFSize) + FP9HandShake = TRUE; + else + FP9HandShake = FALSE; + + r->Link.rc4keyIn = r->Link.rc4keyOut = 0; + + if (encrypted) { + clientsig[-1] = 0x06; /* 0x08 is RTMPE as well */ + offalg = 1; + } else + clientsig[-1] = 0x03; + + uptime = htonl(RTMP_GetTime()); + memcpy(clientsig, &uptime, 4); + + if (FP9HandShake) { + /* set version to at least 9.0.115.0 */ + if (encrypted) { + clientsig[4] = 128; + clientsig[6] = 3; + } else { + clientsig[4] = 10; + clientsig[6] = 45; + } + clientsig[5] = 0; + clientsig[7] = 2; + + RTMP_Log(RTMP_LOGDEBUG, "%s: Client type: %02X", __FUNCTION__, + clientsig[-1]); + getdig = digoff[offalg]; + getdh = dhoff[offalg]; + } else { + memset(&clientsig[4], 0, 4); + } + +/* generate random data */ +#ifdef _DEBUG + memset(clientsig + 8, 0, RTMP_SIG_SIZE - 8); +#else + ip = (int32_t *)(clientsig + 8); + for (i = 2; i < RTMP_SIG_SIZE / 4; i++) + *ip++ = rand(); +#endif + + /* set handshake digest */ + if (FP9HandShake) { + if (encrypted) { + /* generate Diffie-Hellmann parameters */ + r->Link.dh = DHInit(1024); + if (!r->Link.dh) { + RTMP_Log(RTMP_LOGERROR, "%s: Couldn't initialize Diffie-Hellmann!", + __FUNCTION__); + return FALSE; + } + + dhposClient = getdh(clientsig, RTMP_SIG_SIZE); + RTMP_Log(RTMP_LOGDEBUG, "%s: DH pubkey position: %d", __FUNCTION__, + dhposClient); + + if (!DHGenerateKey(r->Link.dh)) { + RTMP_Log(RTMP_LOGERROR, + "%s: Couldn't generate Diffie-Hellmann public key!", + __FUNCTION__); + return FALSE; + } + + if (!DHGetPublicKey(r->Link.dh, &clientsig[dhposClient], 128)) { + RTMP_Log(RTMP_LOGERROR, "%s: Couldn't write public key!", __FUNCTION__); + return FALSE; + } + } + + digestPosClient = + getdig(clientsig, RTMP_SIG_SIZE); /* reuse this value in verification */ + RTMP_Log(RTMP_LOGDEBUG, "%s: Client digest offset: %d", __FUNCTION__, + 
digestPosClient); + + CalculateDigest(digestPosClient, clientsig, GenuineFPKey, 30, + &clientsig[digestPosClient]); + + RTMP_Log(RTMP_LOGDEBUG, "%s: Initial client digest: ", __FUNCTION__); + RTMP_LogHex(RTMP_LOGDEBUG, clientsig + digestPosClient, + SHA256_DIGEST_LENGTH); + } + +#ifdef _DEBUG + RTMP_Log(RTMP_LOGDEBUG, "Clientsig: "); + RTMP_LogHex(RTMP_LOGDEBUG, clientsig, RTMP_SIG_SIZE); +#endif + + if (!WriteN(r, (char *)clientsig - 1, RTMP_SIG_SIZE + 1)) + return FALSE; + + if (ReadN(r, (char *)&type, 1) != 1) /* 0x03 or 0x06 */ + return FALSE; + + RTMP_Log(RTMP_LOGDEBUG, "%s: Type Answer : %02X", __FUNCTION__, type); + + if (type != clientsig[-1]) + RTMP_Log(RTMP_LOGWARNING, + "%s: Type mismatch: client sent %d, server answered %d", + __FUNCTION__, clientsig[-1], type); + + if (ReadN(r, (char *)serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) + return FALSE; + + /* decode server response */ + memcpy(&uptime, serversig, 4); + uptime = ntohl(uptime); + + RTMP_Log(RTMP_LOGDEBUG, "%s: Server Uptime : %d", __FUNCTION__, uptime); + RTMP_Log(RTMP_LOGDEBUG, "%s: FMS Version : %d.%d.%d.%d", __FUNCTION__, + serversig[4], serversig[5], serversig[6], serversig[7]); + + if (FP9HandShake && type == 3 && !serversig[4]) + FP9HandShake = FALSE; + +#ifdef _DEBUG + RTMP_Log(RTMP_LOGDEBUG, "Server signature:"); + RTMP_LogHex(RTMP_LOGDEBUG, serversig, RTMP_SIG_SIZE); +#endif + + if (FP9HandShake) { + uint8_t digestResp[SHA256_DIGEST_LENGTH]; + uint8_t *signatureResp = NULL; + + /* we have to use this signature now to find the correct algorithms for + * getting the digest and DH positions */ + int digestPosServer = getdig(serversig, RTMP_SIG_SIZE); + + if (!VerifyDigest(digestPosServer, serversig, GenuineFMSKey, 36)) { + RTMP_Log(RTMP_LOGWARNING, "Trying different position for server digest!"); + offalg ^= 1; + getdig = digoff[offalg]; + getdh = dhoff[offalg]; + digestPosServer = getdig(serversig, RTMP_SIG_SIZE); + + if (!VerifyDigest(digestPosServer, serversig, GenuineFMSKey, 36)) { + 
RTMP_Log( + RTMP_LOGERROR, + "Couldn't verify the server digest"); /* continuing anyway will + probably fail */ + return FALSE; + } + } + + /* generate SWFVerification token (SHA256 HMAC hash of decompressed SWF, key + * are the last 32 bytes of the server handshake) */ + if (r->Link.SWFSize) { + const char swfVerify[] = {0x01, 0x01}; + char *vend = r->Link.SWFVerificationResponse + + sizeof(r->Link.SWFVerificationResponse); + + memcpy(r->Link.SWFVerificationResponse, swfVerify, 2); + AMF_EncodeInt32(&r->Link.SWFVerificationResponse[2], vend, + r->Link.SWFSize); + AMF_EncodeInt32(&r->Link.SWFVerificationResponse[6], vend, + r->Link.SWFSize); + HMACsha256(r->Link.SWFHash, SHA256_DIGEST_LENGTH, + &serversig[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH], + SHA256_DIGEST_LENGTH, + (uint8_t *)&r->Link.SWFVerificationResponse[10]); + } + + /* do Diffie-Hellmann Key exchange for encrypted RTMP */ + if (encrypted) { + /* compute secret key */ + uint8_t secretKey[128] = {0}; + int len, dhposServer; + + dhposServer = getdh(serversig, RTMP_SIG_SIZE); + RTMP_Log(RTMP_LOGDEBUG, "%s: Server DH public key offset: %d", + __FUNCTION__, dhposServer); + len = DHComputeSharedSecretKey(r->Link.dh, &serversig[dhposServer], 128, + secretKey); + if (len < 0) { + RTMP_Log(RTMP_LOGDEBUG, "%s: Wrong secret key position!", __FUNCTION__); + return FALSE; + } + + RTMP_Log(RTMP_LOGDEBUG, "%s: Secret key: ", __FUNCTION__); + RTMP_LogHex(RTMP_LOGDEBUG, secretKey, 128); + + InitRC4Encryption(secretKey, (uint8_t *)&serversig[dhposServer], + (uint8_t *)&clientsig[dhposClient], &keyIn, &keyOut); + } + + reply = client2; +#ifdef _DEBUG + memset(reply, 0xff, RTMP_SIG_SIZE); +#else + ip = (int32_t *)reply; + for (i = 0; i < RTMP_SIG_SIZE / 4; i++) + *ip++ = rand(); +#endif + /* calculate response now */ + signatureResp = reply + RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH; + + HMACsha256(&serversig[digestPosServer], SHA256_DIGEST_LENGTH, GenuineFPKey, + sizeof(GenuineFPKey), digestResp); + HMACsha256(reply, 
RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH, digestResp, + SHA256_DIGEST_LENGTH, signatureResp); + + /* some info output */ + RTMP_Log(RTMP_LOGDEBUG, + "%s: Calculated digest key from secure key and server digest: ", + __FUNCTION__); + RTMP_LogHex(RTMP_LOGDEBUG, digestResp, SHA256_DIGEST_LENGTH); + +#ifdef FP10 + if (type == 8) { + uint8_t *dptr = digestResp; + uint8_t *sig = signatureResp; + /* encrypt signatureResp */ + for (i = 0; i < SHA256_DIGEST_LENGTH; i += 8) + rtmpe8_sig(sig + i, sig + i, dptr[i] % 15); + } +#if 0 + else if (type == 9)) + { + uint8_t *dptr = digestResp; + uint8_t *sig = signatureResp; + /* encrypt signatureResp */ + for (i=0; iLink.rc4keyIn = keyIn; + r->Link.rc4keyOut = keyOut; + + /* update the keystreams */ + if (r->Link.rc4keyIn) { + RC4_encrypt(r->Link.rc4keyIn, RTMP_SIG_SIZE, (uint8_t *)buff); + } + + if (r->Link.rc4keyOut) { + RC4_encrypt(r->Link.rc4keyOut, RTMP_SIG_SIZE, (uint8_t *)buff); + } + } + } else { + if (memcmp(serversig, clientsig, RTMP_SIG_SIZE) != 0) { + RTMP_Log(RTMP_LOGWARNING, "%s: client signature does not match!", + __FUNCTION__); + } + } + + RTMP_Log(RTMP_LOGDEBUG, "%s: Handshaking finished....", __FUNCTION__); + return TRUE; +} + +static int SHandShake(RTMP *r) { + int i, offalg = 0; + int dhposServer = 0; + int digestPosServer = 0; + RC4_handle keyIn = 0; + RC4_handle keyOut = 0; + int FP9HandShake = FALSE; + int encrypted; + int32_t *ip; + + uint8_t clientsig[RTMP_SIG_SIZE]; + uint8_t serverbuf[RTMP_SIG_SIZE + 4], *serversig = serverbuf + 4; + uint8_t type; + uint32_t uptime; + getoff *getdh = NULL, *getdig = NULL; + + if (ReadN(r, (char *)&type, 1) != 1) /* 0x03 or 0x06 */ + return FALSE; + + if (ReadN(r, (char *)clientsig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) + return FALSE; + + RTMP_Log(RTMP_LOGDEBUG, "%s: Type Requested : %02X", __FUNCTION__, type); + RTMP_LogHex(RTMP_LOGDEBUG2, clientsig, RTMP_SIG_SIZE); + + if (type == 3) { + encrypted = FALSE; + } else if (type == 6 || type == 8) { + offalg = 1; + encrypted = TRUE; 
+ FP9HandShake = TRUE; + r->Link.protocol |= RTMP_FEATURE_ENC; + /* use FP10 if client is capable */ + if (clientsig[4] == 128) + type = 8; + } else { + RTMP_Log(RTMP_LOGERROR, "%s: Unknown version %02x", __FUNCTION__, type); + return FALSE; + } + + if (!FP9HandShake && clientsig[4]) + FP9HandShake = TRUE; + + serversig[-1] = type; + + r->Link.rc4keyIn = r->Link.rc4keyOut = 0; + + uptime = htonl(RTMP_GetTime()); + memcpy(serversig, &uptime, 4); + + if (FP9HandShake) { + /* Server version */ + serversig[4] = 3; + serversig[5] = 5; + serversig[6] = 1; + serversig[7] = 1; + + getdig = digoff[offalg]; + getdh = dhoff[offalg]; + } else { + memset(&serversig[4], 0, 4); + } + +/* generate random data */ +#ifdef _DEBUG + memset(serversig + 8, 0, RTMP_SIG_SIZE - 8); +#else + ip = (int32_t *)(serversig + 8); + for (i = 2; i < RTMP_SIG_SIZE / 4; i++) + *ip++ = rand(); +#endif + + /* set handshake digest */ + if (FP9HandShake) { + if (encrypted) { + /* generate Diffie-Hellmann parameters */ + r->Link.dh = DHInit(1024); + if (!r->Link.dh) { + RTMP_Log(RTMP_LOGERROR, "%s: Couldn't initialize Diffie-Hellmann!", + __FUNCTION__); + return FALSE; + } + + dhposServer = getdh(serversig, RTMP_SIG_SIZE); + RTMP_Log(RTMP_LOGDEBUG, "%s: DH pubkey position: %d", __FUNCTION__, + dhposServer); + + if (!DHGenerateKey(r->Link.dh)) { + RTMP_Log(RTMP_LOGERROR, + "%s: Couldn't generate Diffie-Hellmann public key!", + __FUNCTION__); + return FALSE; + } + + if (!DHGetPublicKey(r->Link.dh, (uint8_t *)&serversig[dhposServer], + 128)) { + RTMP_Log(RTMP_LOGERROR, "%s: Couldn't write public key!", __FUNCTION__); + return FALSE; + } + } + + digestPosServer = + getdig(serversig, RTMP_SIG_SIZE); /* reuse this value in verification */ + RTMP_Log(RTMP_LOGDEBUG, "%s: Server digest offset: %d", __FUNCTION__, + digestPosServer); + + CalculateDigest(digestPosServer, serversig, GenuineFMSKey, 36, + &serversig[digestPosServer]); + + RTMP_Log(RTMP_LOGDEBUG, "%s: Initial server digest: ", __FUNCTION__); + 
RTMP_LogHex(RTMP_LOGDEBUG, serversig + digestPosServer, + SHA256_DIGEST_LENGTH); + } + + RTMP_Log(RTMP_LOGDEBUG2, "Serversig: "); + RTMP_LogHex(RTMP_LOGDEBUG2, serversig, RTMP_SIG_SIZE); + + if (!WriteN(r, (char *)serversig - 1, RTMP_SIG_SIZE + 1)) + return FALSE; + + /* decode client response */ + memcpy(&uptime, clientsig, 4); + uptime = ntohl(uptime); + + RTMP_Log(RTMP_LOGDEBUG, "%s: Client Uptime : %d", __FUNCTION__, uptime); + RTMP_Log(RTMP_LOGDEBUG, "%s: Player Version: %d.%d.%d.%d", __FUNCTION__, + clientsig[4], clientsig[5], clientsig[6], clientsig[7]); + + if (FP9HandShake) { + uint8_t digestResp[SHA256_DIGEST_LENGTH]; + uint8_t *signatureResp = NULL; + + /* we have to use this signature now to find the correct algorithms for + * getting the digest and DH positions */ + int digestPosClient = getdig(clientsig, RTMP_SIG_SIZE); + + if (!VerifyDigest(digestPosClient, clientsig, GenuineFPKey, 30)) { + RTMP_Log(RTMP_LOGWARNING, "Trying different position for client digest!"); + offalg ^= 1; + getdig = digoff[offalg]; + getdh = dhoff[offalg]; + + digestPosClient = getdig(clientsig, RTMP_SIG_SIZE); + + if (!VerifyDigest(digestPosClient, clientsig, GenuineFPKey, 30)) { + RTMP_Log( + RTMP_LOGERROR, + "Couldn't verify the client digest"); /* continuing anyway will + probably fail */ + return FALSE; + } + } + + /* generate SWFVerification token (SHA256 HMAC hash of decompressed SWF, key + * are the last 32 bytes of the server handshake) */ + if (r->Link.SWFSize) { + const char swfVerify[] = {0x01, 0x01}; + char *vend = r->Link.SWFVerificationResponse + + sizeof(r->Link.SWFVerificationResponse); + + memcpy(r->Link.SWFVerificationResponse, swfVerify, 2); + AMF_EncodeInt32(&r->Link.SWFVerificationResponse[2], vend, + r->Link.SWFSize); + AMF_EncodeInt32(&r->Link.SWFVerificationResponse[6], vend, + r->Link.SWFSize); + HMACsha256(r->Link.SWFHash, SHA256_DIGEST_LENGTH, + &serversig[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH], + SHA256_DIGEST_LENGTH, + (uint8_t 
*)&r->Link.SWFVerificationResponse[10]); + } + + /* do Diffie-Hellmann Key exchange for encrypted RTMP */ + if (encrypted) { + int dhposClient, len; + /* compute secret key */ + uint8_t secretKey[128] = {0}; + + dhposClient = getdh(clientsig, RTMP_SIG_SIZE); + RTMP_Log(RTMP_LOGDEBUG, "%s: Client DH public key offset: %d", + __FUNCTION__, dhposClient); + len = DHComputeSharedSecretKey( + r->Link.dh, (uint8_t *)&clientsig[dhposClient], 128, secretKey); + if (len < 0) { + RTMP_Log(RTMP_LOGDEBUG, "%s: Wrong secret key position!", __FUNCTION__); + return FALSE; + } + + RTMP_Log(RTMP_LOGDEBUG, "%s: Secret key: ", __FUNCTION__); + RTMP_LogHex(RTMP_LOGDEBUG, secretKey, 128); + + InitRC4Encryption(secretKey, (uint8_t *)&clientsig[dhposClient], + (uint8_t *)&serversig[dhposServer], &keyIn, &keyOut); + } + + /* calculate response now */ + signatureResp = clientsig + RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH; + + HMACsha256(&clientsig[digestPosClient], SHA256_DIGEST_LENGTH, GenuineFMSKey, + sizeof(GenuineFMSKey), digestResp); + HMACsha256(clientsig, RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH, digestResp, + SHA256_DIGEST_LENGTH, signatureResp); +#ifdef FP10 + if (type == 8) { + uint8_t *dptr = digestResp; + uint8_t *sig = signatureResp; + /* encrypt signatureResp */ + for (i = 0; i < SHA256_DIGEST_LENGTH; i += 8) + rtmpe8_sig(sig + i, sig + i, dptr[i] % 15); + } +#if 0 + else if (type == 9)) + { + uint8_t *dptr = digestResp; + uint8_t *sig = signatureResp; + /* encrypt signatureResp */ + for (i=0; iLink.rc4keyIn = keyIn; + r->Link.rc4keyOut = keyOut; + + /* update the keystreams */ + if (r->Link.rc4keyIn) { + RC4_encrypt(r->Link.rc4keyIn, RTMP_SIG_SIZE, (uint8_t *)buff); + } + + if (r->Link.rc4keyOut) { + RC4_encrypt(r->Link.rc4keyOut, RTMP_SIG_SIZE, (uint8_t *)buff); + } + } + } else { + if (memcmp(serversig, clientsig, RTMP_SIG_SIZE) != 0) { + RTMP_Log(RTMP_LOGWARNING, "%s: client signature does not match!", + __FUNCTION__); + } + } + + RTMP_Log(RTMP_LOGDEBUG, "%s: Handshaking 
finished....", __FUNCTION__); + return TRUE; +} diff --git a/LFLiveKit/publish/pili-librtmp/hashswf.c b/LFLiveKit/publish/pili-librtmp/hashswf.c new file mode 100755 index 00000000..28ceeb4a --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/hashswf.c @@ -0,0 +1,626 @@ +/* + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * http://www.gnu.org/copyleft/lgpl.html + */ + +#include +#include +#include +#include +#include + +#include "http.h" +#include "log.h" +#include "rtmp_sys.h" + +#ifdef CRYPTO +#ifdef USE_POLARSSL +#include +#ifndef SHA256_DIGEST_LENGTH +#define SHA256_DIGEST_LENGTH 32 +#endif +#define HMAC_CTX sha2_context +#define HMAC_setup(ctx, key, len) sha2_hmac_starts(&ctx, (unsigned char *)key, len, 0) +#define HMAC_crunch(ctx, buf, len) sha2_hmac_update(&ctx, buf, len) +#define HMAC_finish(ctx, dig, dlen) \ + dlen = SHA256_DIGEST_LENGTH; \ + sha2_hmac_finish(&ctx, dig) +#define HMAC_close(ctx) +#elif defined(USE_GNUTLS) +#include +#include +#ifndef SHA256_DIGEST_LENGTH +#define SHA256_DIGEST_LENGTH 32 +#endif +#define HMAC_CTX gcry_md_hd_t +#define HMAC_setup(ctx, key, len) \ + gcry_md_open(&ctx, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC); \ + gcry_md_setkey(ctx, key, len) +#define HMAC_crunch(ctx, buf, len) gcry_md_write(ctx, buf, len) +#define HMAC_finish(ctx, dig, dlen) \ + dlen = SHA256_DIGEST_LENGTH; \ + memcpy(dig, gcry_md_read(ctx, 0), dlen) +#define HMAC_close(ctx) gcry_md_close(ctx) +#else /* USE_OPENSSL */ +#include +#include +#include +#include +#define HMAC_setup(ctx, key, len) \ + HMAC_CTX_init(&ctx); \ + HMAC_Init_ex(&ctx, (unsigned char *)key, len, EVP_sha256(), 0) +#define HMAC_crunch(ctx, buf, len) HMAC_Update(&ctx, (unsigned char *)buf, len) +#define HMAC_finish(ctx, dig, dlen) HMAC_Final(&ctx, (unsigned char *)dig, &dlen); +#define HMAC_close(ctx) HMAC_CTX_cleanup(&ctx) +#endif + +extern void RTMP_TLS_Init(); +extern TLS_CTX RTMP_TLS_ctx; + +#endif /* CRYPTO */ + +#include + +#define AGENT "Mozilla/5.0" + +HTTPResult + HTTP_get(struct HTTP_ctx *http, const char *url, HTTP_read_callback *cb) { + char *host, *path; + char *p1, *p2; + char hbuf[256]; + int port = 80; +#ifdef CRYPTO + int ssl = 0; +#endif + int hlen, flen = 0; + int rc, i; + int len_known; + HTTPResult ret = HTTPRES_OK; + // struct sockaddr_in sa; + PILI_RTMPSockBuf sb = {0}; + + http->status = 
-1; + + // memset(&sa, 0, sizeof(struct sockaddr_in)); + // sa.sin_family = AF_INET; + + /* we only handle http here */ + if (strncasecmp(url, "http", 4)) + return HTTPRES_BAD_REQUEST; + + if (url[4] == 's') { +#ifdef CRYPTO + ssl = 1; + port = 443; + if (!RTMP_TLS_ctx) + RTMP_TLS_Init(); +#else + return HTTPRES_BAD_REQUEST; +#endif + } + + p1 = strchr(url + 4, ':'); + if (!p1 || strncmp(p1, "://", 3)) + return HTTPRES_BAD_REQUEST; + + host = p1 + 3; + path = strchr(host, '/'); + hlen = path - host; + strncpy(hbuf, host, hlen); + hbuf[hlen] = '\0'; + host = hbuf; + p1 = strrchr(host, ':'); + if (p1) { + *p1++ = '\0'; + port = atoi(p1); + } + + // sa.sin_addr.s_addr = inet_addr(host); + // if (sa.sin_addr.s_addr == INADDR_NONE) + // { + // struct hostent *hp = gethostbyname(host); + // if (!hp || !hp->h_addr) + // return HTTPRES_LOST_CONNECTION; + // sa.sin_addr = *(struct in_addr *)hp->h_addr; + // } + // sa.sin_port = htons(port); + struct addrinfo hints = {0}, *ai, *cur_ai; + char portstr[10]; + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + snprintf(portstr, sizeof(portstr), "%d", port); + ret = getaddrinfo(host, portstr, &hints, &ai); + if (ret != 0) { + return HTTPRES_LOST_CONNECTION; + } + + cur_ai = ai; + + sb.sb_socket = socket(cur_ai->ai_family, + cur_ai->ai_socktype, + cur_ai->ai_protocol); + // sb.sb_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); + if (sb.sb_socket == -1) { + freeaddrinfo(ai); + return HTTPRES_LOST_CONNECTION; + } + i = sprintf(sb.sb_buf, + "GET %s HTTP/1.0\r\nUser-Agent: %s\r\nHost: %s\r\nReferrer: %.*s\r\n", + path, AGENT, host, (int)(path - url + 1), url); + if (http->date[0]) + i += sprintf(sb.sb_buf + i, "If-Modified-Since: %s\r\n", http->date); + i += sprintf(sb.sb_buf + i, "\r\n"); + + if (cur_ai->ai_family == AF_INET6) { + struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)cur_ai->ai_addr; + in6->sin6_port = htons(port); + } + + if (connect(sb.sb_socket, cur_ai->ai_addr, cur_ai->ai_addrlen) < 0) { + ret = 
HTTPRES_LOST_CONNECTION; + goto leave; + } +#ifdef CRYPTO + if (ssl) { +#ifdef NO_SSL + RTMP_Log(RTMP_LOGERROR, "%s, No SSL/TLS support", __FUNCTION__); + ret = HTTPRES_BAD_REQUEST; + goto leave; +#else + TLS_client(RTMP_TLS_ctx, sb.sb_ssl); + TLS_setfd(sb.sb_ssl, sb.sb_socket); + if ((i = TLS_connect(sb.sb_ssl)) < 0) { + RTMP_Log(RTMP_LOGERROR, "%s, TLS_Connect failed", __FUNCTION__); + ret = HTTPRES_LOST_CONNECTION; + goto leave; + } +#endif + } +#endif + PILI_RTMPSockBuf_Send(&sb, sb.sb_buf, i); + +/* set timeout */ +#define HTTP_TIMEOUT 5 + { + SET_RCVTIMEO(tv, HTTP_TIMEOUT); + if (setsockopt(sb.sb_socket, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(tv))) { + RTMP_Log(RTMP_LOGERROR, "%s, Setting socket timeout to %ds failed!", + __FUNCTION__, HTTP_TIMEOUT); + } + } + + sb.sb_size = 0; + sb.sb_timedout = FALSE; + if (PILI_RTMPSockBuf_Fill(&sb, HTTP_TIMEOUT) < 1) { + ret = HTTPRES_LOST_CONNECTION; + goto leave; + } + if (strncmp(sb.sb_buf, "HTTP/1", 6)) { + ret = HTTPRES_BAD_REQUEST; + goto leave; + } + + p1 = strchr(sb.sb_buf, ' '); + rc = atoi(p1 + 1); + http->status = rc; + + if (rc >= 300) { + if (rc == 304) { + ret = HTTPRES_OK_NOT_MODIFIED; + goto leave; + } else if (rc == 404) + ret = HTTPRES_NOT_FOUND; + else if (rc >= 500) + ret = HTTPRES_SERVER_ERROR; + else if (rc >= 400) + ret = HTTPRES_BAD_REQUEST; + else + ret = HTTPRES_REDIRECTED; + } + + p1 = memchr(sb.sb_buf, '\n', sb.sb_size); + if (!p1) { + ret = HTTPRES_BAD_REQUEST; + goto leave; + } + sb.sb_start = p1 + 1; + sb.sb_size -= sb.sb_start - sb.sb_buf; + + while ((p2 = memchr(sb.sb_start, '\r', sb.sb_size))) { + if (*sb.sb_start == '\r') { + sb.sb_start += 2; + sb.sb_size -= 2; + break; + } else if (!strncasecmp(sb.sb_start, "Content-Length: ", sizeof("Content-Length: ") - 1)) { + flen = atoi(sb.sb_start + sizeof("Content-Length: ") - 1); + } else if (!strncasecmp(sb.sb_start, "Last-Modified: ", sizeof("Last-Modified: ") - 1)) { + *p2 = '\0'; + strcpy(http->date, sb.sb_start + 
sizeof("Last-Modified: ") - 1); + } + p2 += 2; + sb.sb_size -= p2 - sb.sb_start; + sb.sb_start = p2; + if (sb.sb_size < 1) { + if (PILI_RTMPSockBuf_Fill(&sb, HTTP_TIMEOUT) < 1) { + ret = HTTPRES_LOST_CONNECTION; + goto leave; + } + } + } + + len_known = flen > 0; + while ((!len_known || flen > 0) && + (sb.sb_size > 0 || PILI_RTMPSockBuf_Fill(&sb, HTTP_TIMEOUT) > 0)) { + cb(sb.sb_start, 1, sb.sb_size, http->data); + if (len_known) + flen -= sb.sb_size; + http->size += sb.sb_size; + sb.sb_size = 0; + } + + if (flen > 0) + ret = HTTPRES_LOST_CONNECTION; + +leave: + PILI_RTMPSockBuf_Close(&sb); + freeaddrinfo(ai); + return ret; +} + +#ifdef CRYPTO + +#define CHUNK 16384 + +struct info { + z_stream *zs; + HMAC_CTX ctx; + int first; + int zlib; + int size; +}; + +static size_t + swfcrunch(void *ptr, size_t size, size_t nmemb, void *stream) { + struct info *i = stream; + char *p = ptr; + size_t len = size * nmemb; + + if (i->first) { + i->first = 0; + /* compressed? */ + if (!strncmp(p, "CWS", 3)) { + *p = 'F'; + i->zlib = 1; + } + HMAC_crunch(i->ctx, (unsigned char *)p, 8); + p += 8; + len -= 8; + i->size = 8; + } + + if (i->zlib) { + unsigned char out[CHUNK]; + i->zs->next_in = (unsigned char *)p; + i->zs->avail_in = len; + do { + i->zs->avail_out = CHUNK; + i->zs->next_out = out; + inflate(i->zs, Z_NO_FLUSH); + len = CHUNK - i->zs->avail_out; + i->size += len; + HMAC_crunch(i->ctx, out, len); + } while (i->zs->avail_out == 0); + } else { + i->size += len; + HMAC_crunch(i->ctx, (unsigned char *)p, len); + } + return size * nmemb; +} + +static int tzoff; +static int tzchecked; + +#define JAN02_1980 318340800 + +static const char *monthtab[12] = {"Jan", "Feb", "Mar", + "Apr", "May", "Jun", + "Jul", "Aug", "Sep", + "Oct", "Nov", "Dec"}; +static const char *days[] = + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}; + +/* Parse an HTTP datestamp into Unix time */ +static time_t + make_unix_time(char *s) { + struct tm time; + int i, ysub = 1900, fmt = 0; + char *month; + 
char *n; + time_t res; + + if (s[3] != ' ') { + fmt = 1; + if (s[3] != ',') + ysub = 0; + } + for (n = s; *n; ++n) + if (*n == '-' || *n == ':') + *n = ' '; + + time.tm_mon = 0; + n = strchr(s, ' '); + if (fmt) { + /* Day, DD-MMM-YYYY HH:MM:SS GMT */ + time.tm_mday = strtol(n + 1, &n, 0); + month = n + 1; + n = strchr(month, ' '); + time.tm_year = strtol(n + 1, &n, 0); + time.tm_hour = strtol(n + 1, &n, 0); + time.tm_min = strtol(n + 1, &n, 0); + time.tm_sec = strtol(n + 1, NULL, 0); + } else { + /* Unix ctime() format. Does not conform to HTTP spec. */ + /* Day MMM DD HH:MM:SS YYYY */ + month = n + 1; + n = strchr(month, ' '); + while (isspace(*n)) + n++; + time.tm_mday = strtol(n, &n, 0); + time.tm_hour = strtol(n + 1, &n, 0); + time.tm_min = strtol(n + 1, &n, 0); + time.tm_sec = strtol(n + 1, &n, 0); + time.tm_year = strtol(n + 1, NULL, 0); + } + if (time.tm_year > 100) + time.tm_year -= ysub; + + for (i = 0; i < 12; i++) + if (!strncasecmp(month, monthtab[i], 3)) { + time.tm_mon = i; + break; + } + time.tm_isdst = 0; /* daylight saving is never in effect in GMT */ + + /* this is normally the value of extern int timezone, but some + * braindead C libraries don't provide it. + */ + if (!tzchecked) { + struct tm *tc; + time_t then = JAN02_1980; + tc = localtime(&then); + tzoff = (12 - tc->tm_hour) * 3600 + tc->tm_min * 60 + tc->tm_sec; + tzchecked = 1; + } + res = mktime(&time); + /* Unfortunately, mktime() assumes the input is in local time, + * not GMT, so we have to correct it here. + */ + if (res != -1) + res += tzoff; + return res; +} + +/* Convert a Unix time to a network time string + * Weekday, DD-MMM-YYYY HH:MM:SS GMT + */ +void strtime(time_t *t, char *s) { + struct tm *tm; + + tm = gmtime((time_t *)t); + sprintf(s, "%s, %02d %s %d %02d:%02d:%02d GMT", + days[tm->tm_wday], tm->tm_mday, monthtab[tm->tm_mon], + tm->tm_year + 1900, tm->tm_hour, tm->tm_min, tm->tm_sec); +} + +#define HEX2BIN(a) (((a)&0x40) ? 
((a)&0xf) + 9 : ((a)&0xf)) + +int RTMP_HashSWF(const char *url, unsigned int *size, unsigned char *hash, + int age) { + FILE *f = NULL; + char *path, date[64], cctim[64]; + long pos = 0; + time_t ctim = -1, cnow; + int i, got = 0, ret = 0; + unsigned int hlen; + struct info in = {0}; + struct HTTP_ctx http = {0}; + HTTPResult httpres; + z_stream zs = {0}; + AVal home, hpre; + + date[0] = '\0'; +#ifdef _WIN32 +#ifdef _XBOX + hpre.av_val = "Q:"; + hpre.av_len = 2; + home.av_val = "\\UserData"; +#else + hpre.av_val = getenv("HOMEDRIVE"); + hpre.av_len = strlen(hpre.av_val); + home.av_val = getenv("HOMEPATH"); +#endif +#define DIRSEP "\\" + +#else /* !_WIN32 */ + hpre.av_val = ""; + hpre.av_len = 0; + home.av_val = getenv("HOME"); +#define DIRSEP "/" +#endif + if (!home.av_val) + home.av_val = "."; + home.av_len = strlen(home.av_val); + + /* SWF hash info is cached in a fixed-format file. + * url: + * ctim: HTTP datestamp of when we last checked it. + * date: HTTP datestamp of the SWF's last modification. + * size: SWF size in hex + * hash: SWF hash in hex + * + * These fields must be present in this order. All fields + * besides URL are fixed size. 
+ */ + path = malloc(hpre.av_len + home.av_len + sizeof(DIRSEP ".swfinfo")); + sprintf(path, "%s%s" DIRSEP ".swfinfo", hpre.av_val, home.av_val); + + f = fopen(path, "r+"); + while (f) { + char buf[4096], *file, *p; + + file = strchr(url, '/'); + if (!file) + break; + file += 2; + file = strchr(file, '/'); + if (!file) + break; + file++; + hlen = file - url; + p = strrchr(file, '/'); + if (p) + file = p; + else + file--; + + while (fgets(buf, sizeof(buf), f)) { + char *r1; + + got = 0; + + if (strncmp(buf, "url: ", 5)) + continue; + if (strncmp(buf + 5, url, hlen)) + continue; + r1 = strrchr(buf, '/'); + i = strlen(r1); + r1[--i] = '\0'; + if (strncmp(r1, file, i)) + continue; + pos = ftell(f); + while (got < 4 && fgets(buf, sizeof(buf), f)) { + if (!strncmp(buf, "size: ", 6)) { + *size = strtol(buf + 6, NULL, 16); + got++; + } else if (!strncmp(buf, "hash: ", 6)) { + unsigned char *ptr = hash, *in = (unsigned char *)buf + 6; + int l = strlen((char *)in) - 1; + for (i = 0; i < l; i += 2) + *ptr++ = (HEX2BIN(in[i]) << 4) | HEX2BIN(in[i + 1]); + got++; + } else if (!strncmp(buf, "date: ", 6)) { + buf[strlen(buf) - 1] = '\0'; + strncpy(date, buf + 6, sizeof(date)); + got++; + } else if (!strncmp(buf, "ctim: ", 6)) { + buf[strlen(buf) - 1] = '\0'; + ctim = make_unix_time(buf + 6); + got++; + } else if (!strncmp(buf, "url: ", 5)) + break; + } + break; + } + break; + } + + cnow = time(NULL); + /* If we got a cache time, see if it's young enough to use directly */ + if (age && ctim > 0) { + ctim = cnow - ctim; + ctim /= 3600 * 24; /* seconds to days */ + if (ctim < age) /* ok, it's new enough */ + goto out; + } + + in.first = 1; + HMAC_setup(in.ctx, "Genuine Adobe Flash Player 001", 30); + inflateInit(&zs); + in.zs = &zs; + + http.date = date; + http.data = ∈ + + httpres = HTTP_get(&http, url, swfcrunch); + + inflateEnd(&zs); + + if (httpres != HTTPRES_OK && httpres != HTTPRES_OK_NOT_MODIFIED) { + ret = -1; + if (httpres == HTTPRES_LOST_CONNECTION) + 
RTMP_Log(RTMP_LOGERROR, "%s: connection lost while downloading swfurl %s", + __FUNCTION__, url); + else if (httpres == HTTPRES_NOT_FOUND) + RTMP_Log(RTMP_LOGERROR, "%s: swfurl %s not found", __FUNCTION__, url); + else + RTMP_Log(RTMP_LOGERROR, "%s: couldn't contact swfurl %s (HTTP error %d)", + __FUNCTION__, url, http.status); + } else { + if (got && pos) + fseek(f, pos, SEEK_SET); + else { + char *q; + if (!f) + f = fopen(path, "w"); + if (!f) { + int err = errno; + RTMP_Log(RTMP_LOGERROR, + "%s: couldn't open %s for writing, errno %d (%s)", + __FUNCTION__, path, err, strerror(err)); + ret = -1; + goto out; + } + fseek(f, 0, SEEK_END); + q = strchr(url, '?'); + if (q) + i = q - url; + else + i = strlen(url); + + fprintf(f, "url: %.*s\n", i, url); + } + strtime(&cnow, cctim); + fprintf(f, "ctim: %s\n", cctim); + + if (!in.first) { + HMAC_finish(in.ctx, hash, hlen); + *size = in.size; + + fprintf(f, "date: %s\n", date); + fprintf(f, "size: %08x\n", in.size); + fprintf(f, "hash: "); + for (i = 0; i < SHA256_DIGEST_LENGTH; i++) + fprintf(f, "%02x", hash[i]); + fprintf(f, "\n"); + } + } + HMAC_close(in.ctx); +out: + free(path); + if (f) + fclose(f); + return ret; +} +#else +int RTMP_HashSWF(const char *url, unsigned int *size, unsigned char *hash, + int age) { + return -1; +} +#endif diff --git a/LFLiveKit/publish/pili-librtmp/http.h b/LFLiveKit/publish/pili-librtmp/http.h new file mode 100755 index 00000000..1eb7a462 --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/http.h @@ -0,0 +1,49 @@ +#ifndef __RTMP_HTTP_H__ +#define __RTMP_HTTP_H__ +/* + * Copyright (C) 2010 Howard Chu + * Copyright (C) 2010 Antti Ajanki + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. 
+ * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +typedef enum { + HTTPRES_OK, /* result OK */ + HTTPRES_OK_NOT_MODIFIED, /* not modified since last request */ + HTTPRES_NOT_FOUND, /* not found */ + HTTPRES_BAD_REQUEST, /* client error */ + HTTPRES_SERVER_ERROR, /* server reported an error */ + HTTPRES_REDIRECTED, /* resource has been moved */ + HTTPRES_LOST_CONNECTION /* connection lost while waiting for data */ +} HTTPResult; + +struct HTTP_ctx { + char *date; + int size; + int status; + void *data; +}; + +typedef size_t(HTTP_read_callback)(void *ptr, size_t size, size_t nmemb, + void *stream); + +HTTPResult HTTP_get(struct HTTP_ctx *http, const char *url, + HTTP_read_callback *cb); + +#endif diff --git a/LFLiveKit/publish/pili-librtmp/log.c b/LFLiveKit/publish/pili-librtmp/log.c new file mode 100755 index 00000000..d3934366 --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/log.c @@ -0,0 +1,209 @@ +/* + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +#include +#include +#include +#include +#include + +#include "log.h" +#include "rtmp_sys.h" + +#define MAX_PRINT_LEN 2048 + +RTMP_LogLevel RTMP_debuglevel = RTMP_LOGERROR; + +static int neednl; + +static FILE *fmsg; + +static RTMP_LogCallback rtmp_log_default, *cb = rtmp_log_default; + +static const char *levels[] = { + "CRIT", "ERROR", "WARNING", "INFO", + "DEBUG", "DEBUG2"}; + +static void rtmp_log_default(int level, const char *format, va_list vl) { + char str[MAX_PRINT_LEN] = ""; + + vsnprintf(str, MAX_PRINT_LEN - 1, format, vl); + + /* Filter out 'no-name' */ + if (RTMP_debuglevel < RTMP_LOGALL && strstr(str, "no-name") != NULL) + return; + + if (!fmsg) fmsg = stderr; + + if (level <= RTMP_debuglevel) { + if (neednl) { + putc('\n', fmsg); + neednl = 0; + } + fprintf(fmsg, "%s: %s\n", levels[level], str); +#ifdef _DEBUG + fflush(fmsg); +#endif + } +} + +void RTMP_LogSetOutput(FILE *file) { + fmsg = file; +} + +void RTMP_LogSetLevel(RTMP_LogLevel level) { + RTMP_debuglevel = level; +} + +void RTMP_LogSetCallback(RTMP_LogCallback *cbp) { + cb = cbp; +} + +RTMP_LogLevel RTMP_LogGetLevel() { + return RTMP_debuglevel; +} + +void RTMP_Log(int level, const char *format, ...) 
{ + va_list args; + va_start(args, format); + cb(level, format, args); + va_end(args); +} + +static const char hexdig[] = "0123456789abcdef"; + +void RTMP_LogHex(int level, const uint8_t *data, unsigned long len) { + unsigned long i; + char line[50], *ptr; + + if (level > RTMP_debuglevel) + return; + + ptr = line; + + for (i = 0; i < len; i++) { + *ptr++ = hexdig[0x0f & (data[i] >> 4)]; + *ptr++ = hexdig[0x0f & data[i]]; + if ((i & 0x0f) == 0x0f) { + *ptr = '\0'; + ptr = line; + RTMP_Log(level, "%s", line); + } else { + *ptr++ = ' '; + } + } + if (i & 0x0f) { + *ptr = '\0'; + RTMP_Log(level, "%s", line); + } +} + +void RTMP_LogHexString(int level, const uint8_t *data, unsigned long len) { +#define BP_OFFSET 9 +#define BP_GRAPH 60 +#define BP_LEN 80 + char line[BP_LEN]; + unsigned long i; + + if (!data || level > RTMP_debuglevel) + return; + + /* in case len is zero */ + line[0] = '\0'; + + for (i = 0; i < len; i++) { + int n = i % 16; + unsigned off; + + if (!n) { + if (i) RTMP_Log(level, "%s", line); + memset(line, ' ', sizeof(line) - 2); + line[sizeof(line) - 2] = '\0'; + + off = i % 0x0ffffU; + + line[2] = hexdig[0x0f & (off >> 12)]; + line[3] = hexdig[0x0f & (off >> 8)]; + line[4] = hexdig[0x0f & (off >> 4)]; + line[5] = hexdig[0x0f & off]; + line[6] = ':'; + } + + off = BP_OFFSET + n * 3 + ((n >= 8) ? 1 : 0); + line[off] = hexdig[0x0f & (data[i] >> 4)]; + line[off + 1] = hexdig[0x0f & data[i]]; + + off = BP_GRAPH + n + ((n >= 8) ? 1 : 0); + + if (isprint(data[i])) { + line[BP_GRAPH + n] = data[i]; + } else { + line[BP_GRAPH + n] = '.'; + } + } + + RTMP_Log(level, "%s", line); +} + +/* These should only be used by apps, never by the library itself */ +void RTMP_LogPrintf(const char *format, ...) 
{ + char str[MAX_PRINT_LEN] = ""; + int len; + va_list args; + va_start(args, format); + len = vsnprintf(str, MAX_PRINT_LEN - 1, format, args); + va_end(args); + + if (RTMP_debuglevel == RTMP_LOGCRIT) + return; + + if (!fmsg) fmsg = stderr; + + if (neednl) { + putc('\n', fmsg); + neednl = 0; + } + + if (len > MAX_PRINT_LEN - 1) + len = MAX_PRINT_LEN - 1; + fprintf(fmsg, "%s", str); + if (str[len - 1] == '\n') + fflush(fmsg); +} + +void RTMP_LogStatus(const char *format, ...) { + char str[MAX_PRINT_LEN] = ""; + va_list args; + va_start(args, format); + vsnprintf(str, MAX_PRINT_LEN - 1, format, args); + va_end(args); + + if (RTMP_debuglevel == RTMP_LOGCRIT) + return; + + if (!fmsg) fmsg = stderr; + + fprintf(fmsg, "%s", str); + fflush(fmsg); + neednl = 1; +} diff --git a/LFLiveKit/publish/pili-librtmp/log.h b/LFLiveKit/publish/pili-librtmp/log.h new file mode 100755 index 00000000..f7daf375 --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/log.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * http://www.gnu.org/copyleft/lgpl.html + */ + +#ifndef __RTMP_LOG_H__ +#define __RTMP_LOG_H__ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif +/* Enable this to get full debugging output */ +/* #define _DEBUG */ + +#ifdef _DEBUG +#undef NODEBUG +#endif + +typedef enum { + RTMP_LOGCRIT = 0, + RTMP_LOGERROR, + RTMP_LOGWARNING, + RTMP_LOGINFO, + RTMP_LOGDEBUG, + RTMP_LOGDEBUG2, + RTMP_LOGALL +} RTMP_LogLevel; + +extern RTMP_LogLevel RTMP_debuglevel; + +typedef void(RTMP_LogCallback)(int level, const char *fmt, va_list); +void RTMP_LogSetCallback(RTMP_LogCallback *cb); +void RTMP_LogSetOutput(FILE *file); +void RTMP_LogPrintf(const char *format, ...); +void RTMP_LogStatus(const char *format, ...); +void RTMP_Log(int level, const char *format, ...); +void RTMP_LogHex(int level, const uint8_t *data, unsigned long len); +void RTMP_LogHexString(int level, const uint8_t *data, unsigned long len); +void RTMP_LogSetLevel(RTMP_LogLevel lvl); +RTMP_LogLevel RTMP_LogGetLevel(void); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/LFLiveKit/publish/pili-librtmp/parseurl.c b/LFLiveKit/publish/pili-librtmp/parseurl.c new file mode 100755 index 00000000..0e50352b --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/parseurl.c @@ -0,0 +1,312 @@ +/* + * Copyright (C) 2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +#include +#include + +#include +#include + +#include "log.h" +#include "rtmp_sys.h" + +int PILI_RTMP_ParseURL2(const char *url, int *protocol, AVal *host, unsigned int *port, + AVal *playpath, AVal *app, AVal *domainName) { + char *p, *end, *col, *ques, *slash; + + RTMP_Log(RTMP_LOGDEBUG, "Parsing..."); + + *protocol = RTMP_PROTOCOL_RTMP; + *port = 0; + playpath->av_len = 0; + playpath->av_val = NULL; + app->av_len = 0; + app->av_val = NULL; + + /* Old School Parsing */ + + /* look for usual :// pattern */ + p = strstr(url, "://"); + if (!p) { + RTMP_Log(RTMP_LOGERROR, "RTMP URL: No :// in url!"); + return FALSE; + } + { + int len = (int)(p - url); + + if (len == 4 && strncasecmp(url, "rtmp", 4) == 0) + *protocol = RTMP_PROTOCOL_RTMP; + else if (len == 5 && strncasecmp(url, "rtmpt", 5) == 0) + *protocol = RTMP_PROTOCOL_RTMPT; + else if (len == 5 && strncasecmp(url, "rtmps", 5) == 0) + *protocol = RTMP_PROTOCOL_RTMPS; + else if (len == 5 && strncasecmp(url, "rtmpe", 5) == 0) + *protocol = RTMP_PROTOCOL_RTMPE; + else if (len == 5 && strncasecmp(url, "rtmfp", 5) == 0) + *protocol = RTMP_PROTOCOL_RTMFP; + else if (len == 6 && strncasecmp(url, "rtmpte", 6) == 0) + *protocol = RTMP_PROTOCOL_RTMPTE; + else if (len == 6 && strncasecmp(url, "rtmpts", 6) == 0) + *protocol = RTMP_PROTOCOL_RTMPTS; + else { + RTMP_Log(RTMP_LOGWARNING, "Unknown protocol!\n"); + goto parsehost; + } + } + + RTMP_Log(RTMP_LOGDEBUG, "Parsed protocol: %d", *protocol); + +parsehost: + /* let's get the hostname */ + p += 3; + + /* check for sudden death */ + if (*p == 0) { + RTMP_Log(RTMP_LOGWARNING, "No hostname in URL!"); + return FALSE; + } + + end = p + strlen(p); + col = strchr(p, ':'); + ques = strchr(p, 
'?'); + slash = strchr(p, '/'); + + { + int hostlen; + if (slash) + hostlen = slash - p; + else + hostlen = end - p; + if (col && col - p < hostlen) + hostlen = col - p; + + if (hostlen < 256) { + host->av_val = p; + host->av_len = hostlen; + RTMP_Log(RTMP_LOGDEBUG, "Parsed host : %.*s", hostlen, host->av_val); + } else { + RTMP_Log(RTMP_LOGWARNING, "Hostname exceeds 255 characters!"); + } + + p += hostlen; + } + + /* get the port number if available */ + if (*p == ':') { + unsigned int p2; + p++; + p2 = atoi(p); + if (p2 > 65535) { + RTMP_Log(RTMP_LOGWARNING, "Invalid port number!"); + } else { + *port = p2; + } + } + + if (!slash) { + RTMP_Log(RTMP_LOGWARNING, "No application or playpath in URL!"); + return TRUE; + } + p = slash + 1; + + /** parse domain + + * rtmp://host:[port]/app/...?domain=a.com + + * use domain to replace host + + */ + + if (domainName != NULL && ques != NULL) { + char *domain = strstr(ques, "domain="); + if (domain) { + end = domain - 1; + domain += 7; //skip "domain=" + char *domain_end = strchr(domain, '&'); + int host_len = 0; + if (domain_end) { + host_len = domain_end - domain; + } else { + host_len = strlen(domain); + } + if (host_len < 256) { + domainName->av_val = domain; + domainName->av_len = host_len; + RTMP_Log(RTMP_LOGDEBUG, "Parsed host and domain : %.*s", host_len, host->av_val); + } + } + } + + { + /* parse application + * + * rtmp://host[:port]/app[/appinstance][/...] + * application = app[/appinstance] + */ + + char *slash2, *slash3 = NULL; + int applen, appnamelen; + + slash2 = strchr(p, '/'); + if (slash2) + slash3 = strchr(slash2 + 1, '/'); + + applen = end - p; /* ondemand, pass all parameters as app */ + appnamelen = applen; /* ondemand length */ + + if (ques && strstr(p, "slist=")) { /* whatever it is, the '?' 
and slist= means we need to use everything as app and parse plapath from slist= */ + appnamelen = ques - p; + } else if (strncmp(p, "ondemand/", 9) == 0) { + /* app = ondemand/foobar, only pass app=ondemand */ + applen = 8; + appnamelen = 8; + } else { /* app!=ondemand, so app is app[/appinstance] */ + if (slash3) + appnamelen = slash3 - p; + else if (slash2) + appnamelen = slash2 - p; + + applen = appnamelen; + } + + app->av_val = p; + app->av_len = applen; + RTMP_Log(RTMP_LOGDEBUG, "Parsed app : %.*s", applen, p); + + p += appnamelen; + } + + if (*p == '/') + p++; + + if (end - p) { + AVal av = {p, end - p}; + PILI_RTMP_ParsePlaypath(&av, playpath); + } + + return TRUE; +} + +/* + * Extracts playpath from RTMP URL. playpath is the file part of the + * URL, i.e. the part that comes after rtmp://host:port/app/ + * + * Returns the stream name in a format understood by FMS. The name is + * the playpath part of the URL with formatting depending on the stream + * type: + * + * mp4 streams: prepend "mp4:", remove extension + * mp3 streams: prepend "mp3:", remove extension + * flv streams: remove extension + */ +void PILI_RTMP_ParsePlaypath(AVal *in, AVal *out) { + int addMP4 = 0; + int addMP3 = 0; + int subExt = 0; + const char *playpath = in->av_val; + const char *temp, *q, *ext = NULL; + const char *ppstart = playpath; + char *streamname, *destptr, *p; + + int pplen = in->av_len; + + out->av_val = NULL; + out->av_len = 0; + + if ((*ppstart == '?') && + (temp = strstr(ppstart, "slist=")) != 0) { + ppstart = temp + 6; + pplen = strlen(ppstart); + + temp = strchr(ppstart, '&'); + if (temp) { + pplen = temp - ppstart; + } + } + + q = strchr(ppstart, '?'); + if (pplen >= 4) { + if (q) + ext = q - 4; + else + ext = &ppstart[pplen - 4]; + if ((strncmp(ext, ".f4v", 4) == 0) || + (strncmp(ext, ".mp4", 4) == 0)) { + addMP4 = 1; + subExt = 1; + /* Only remove .flv from rtmp URL, not slist params */ + } else if ((ppstart == playpath) && + (strncmp(ext, ".flv", 4) == 0)) { + 
subExt = 1; + } else if (strncmp(ext, ".mp3", 4) == 0) { + addMP3 = 1; + subExt = 1; + } + } + + streamname = (char *)malloc((pplen + 4 + 1) * sizeof(char)); + if (!streamname) + return; + + destptr = streamname; + if (addMP4) { + if (strncmp(ppstart, "mp4:", 4)) { + strcpy(destptr, "mp4:"); + destptr += 4; + } else { + subExt = 0; + } + } else if (addMP3) { + if (strncmp(ppstart, "mp3:", 4)) { + strcpy(destptr, "mp3:"); + destptr += 4; + } else { + subExt = 0; + } + } + + for (p = (char *)ppstart; pplen > 0;) { + /* skip extension */ + if (subExt && p == ext) { + p += 4; + pplen -= 4; + continue; + } + if (*p == '%') { + unsigned int c; + sscanf(p + 1, "%02x", &c); + *destptr++ = c; + pplen -= 3; + p += 3; + } else { + *destptr++ = *p++; + pplen--; + } + } + *destptr = '\0'; + + out->av_val = streamname; + out->av_len = destptr - streamname; +} + +int PILI_RTMP_ParseURL(const char *url, int *protocol, AVal *host, + unsigned int *port, AVal *playpath, AVal *app) { + return PILI_RTMP_ParseURL2(url, protocol, host, port, playpath, app, NULL); +} diff --git a/LFLiveKit/publish/pili-librtmp/rtmp.c b/LFLiveKit/publish/pili-librtmp/rtmp.c new file mode 100755 index 00000000..c7e0df79 --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/rtmp.c @@ -0,0 +1,4331 @@ +/* + * Copyright (C) 2005-2008 Team XBMC + * http://www.xbmc.org + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * libPILI_RTMP is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * libPILI_RTMP is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with libPILI_RTMP see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +#include +#include +#include +#include +#include +#include + +#include "log.h" +#include "rtmp_sys.h" +#include "time.h" + +#ifdef CRYPTO +#ifdef USE_POLARSSL +#include +#elif defined(USE_GNUTLS) +#include +#else /* USE_OPENSSL */ +#include +#include +#endif +TLS_CTX RTMP_TLS_ctx; +#endif + +#define RTMP_SIG_SIZE 1536 +#define RTMP_LARGE_HEADER_SIZE 12 + +static const int packetSize[] = {12, 8, 4, 1}; + +int PILI_RTMP_ctrlC; +static char reqid[30]; + +const char PILI_RTMPProtocolStrings[][7] = { + "RTMP", + "RTMPT", + "RTMPE", + "RTMPTE", + "RTMPS", + "RTMPTS", + "", + "", + "RTMFP"}; + +const char PILI_RTMPProtocolStringsLower[][7] = { + "rtmp", + "rtmpt", + "rtmpe", + "rtmpte", + "rtmps", + "rtmpts", + "", + "", + "rtmfp"}; + +static const char *RTMPT_cmds[] = { + "open", + "send", + "idle", + "close"}; + +typedef enum { + RTMPT_OPEN = 0, + RTMPT_SEND, + RTMPT_IDLE, + RTMPT_CLOSE +} RTMPTCmd; + +static int DumpMetaData(AMFObject *obj); +static int HandShake(PILI_RTMP *r, int FP9HandShake, RTMPError *error); +static int SocksNegotiate(PILI_RTMP *r, RTMPError *error); + +static int SendConnectPacket(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error); +static int SendCheckBW(PILI_RTMP *r, RTMPError *error); +static int SendCheckBWResult(PILI_RTMP *r, double txn, RTMPError *error); +static int SendDeleteStream(PILI_RTMP *r, double dStreamId, RTMPError *error); +static int SendFCSubscribe(PILI_RTMP *r, AVal *subscribepath, RTMPError *error); +static int SendPlay(PILI_RTMP *r, RTMPError *error); +static int SendBytesReceived(PILI_RTMP *r, RTMPError *error); + +#if 0 /* unused */ +static int SendBGHasStream(PILI_RTMP *r, double dId, AVal *playpath); +#endif + +static int 
HandleInvoke(PILI_RTMP *r, const char *body, unsigned int nBodySize); +static int HandleMetadata(PILI_RTMP *r, char *body, unsigned int len); +static void HandleChangeChunkSize(PILI_RTMP *r, const PILI_RTMPPacket *packet); +static void HandleAudio(PILI_RTMP *r, const PILI_RTMPPacket *packet); +static void HandleVideo(PILI_RTMP *r, const PILI_RTMPPacket *packet); +static void HandleCtrl(PILI_RTMP *r, const PILI_RTMPPacket *packet); +static void HandleServerBW(PILI_RTMP *r, const PILI_RTMPPacket *packet); +static void HandleClientBW(PILI_RTMP *r, const PILI_RTMPPacket *packet); + +static int ReadN(PILI_RTMP *r, char *buffer, int n); +static int WriteN(PILI_RTMP *r, const char *buffer, int n, RTMPError *error); + +static void DecodeTEA(AVal *key, AVal *text); + +static int HTTP_Post(PILI_RTMP *r, RTMPTCmd cmd, const char *buf, int len); +static int HTTP_read(PILI_RTMP *r, int fill); + +#ifndef _WIN32 +static int clk_tck; +#endif + +#ifdef CRYPTO +#include "handshake.h" +#endif + +uint32_t + PILI_RTMP_GetTime() { +#ifdef _DEBUG + return 0; +#elif defined(_WIN32) + return timeGetTime(); +#else + struct tms t; + if (!clk_tck) clk_tck = sysconf(_SC_CLK_TCK); + return times(&t) * 1000 / clk_tck; +#endif +} + +void PILI_RTMP_UserInterrupt() { + PILI_RTMP_ctrlC = TRUE; +} + +void PILI_RTMPPacket_Reset(PILI_RTMPPacket *p) { + p->m_headerType = 0; + p->m_packetType = 0; + p->m_nChannel = 0; + p->m_nTimeStamp = 0; + p->m_nInfoField2 = 0; + p->m_hasAbsTimestamp = FALSE; + p->m_nBodySize = 0; + p->m_nBytesRead = 0; +} + +int PILI_RTMPPacket_Alloc(PILI_RTMPPacket *p, int nSize) { + char *ptr = calloc(1, nSize + RTMP_MAX_HEADER_SIZE); + if (!ptr) + return FALSE; + p->m_body = ptr + RTMP_MAX_HEADER_SIZE; + p->m_nBytesRead = 0; + return TRUE; +} + +void PILI_RTMPPacket_Free(PILI_RTMPPacket *p) { + if (p->m_body) { + free(p->m_body - RTMP_MAX_HEADER_SIZE); + p->m_body = NULL; + } +} + +void PILI_RTMPPacket_Dump(PILI_RTMPPacket *p) { + RTMP_Log(RTMP_LOGDEBUG, + "PILI_RTMP PACKET: 
packet type: 0x%02x. channel: 0x%02x. info 1: %d info 2: %d. Body size: %lu. body: 0x%02x", + p->m_packetType, p->m_nChannel, p->m_nTimeStamp, p->m_nInfoField2, + p->m_nBodySize, p->m_body ? (unsigned char)p->m_body[0] : 0); +} + +int PILI_RTMP_LibVersion() { + return RTMP_LIB_VERSION; +} + +void PILI_RTMP_TLS_Init() { +#ifdef CRYPTO +#ifdef USE_POLARSSL + /* Do this regardless of NO_SSL, we use havege for rtmpe too */ + RTMP_TLS_ctx = calloc(1, sizeof(struct tls_ctx)); + havege_init(&RTMP_TLS_ctx->hs); +#elif defined(USE_GNUTLS) && !defined(NO_SSL) + /* Technically we need to initialize libgcrypt ourselves if + * we're not going to call gnutls_global_init(). Ignoring this + * for now. + */ + gnutls_global_init(); + RTMP_TLS_ctx = malloc(sizeof(struct tls_ctx)); + gnutls_certificate_allocate_credentials(&RTMP_TLS_ctx->cred); + gnutls_priority_init(&RTMP_TLS_ctx->prios, "NORMAL", NULL); + gnutls_certificate_set_x509_trust_file(RTMP_TLS_ctx->cred, + "ca.pem", GNUTLS_X509_FMT_PEM); +#elif !defined(NO_SSL) /* USE_OPENSSL */ + /* libcrypto doesn't need anything special */ + SSL_load_error_strings(); + SSL_library_init(); + OpenSSL_add_all_digests(); + RTMP_TLS_ctx = SSL_CTX_new(SSLv23_method()); + SSL_CTX_set_options(RTMP_TLS_ctx, SSL_OP_ALL); + SSL_CTX_set_default_verify_paths(RTMP_TLS_ctx); +#endif +#endif +} + +PILI_RTMP * + PILI_RTMP_Alloc() { + return calloc(1, sizeof(PILI_RTMP)); +} + +void PILI_RTMP_Free(PILI_RTMP *r) { + r->m_errorCallback = NULL; + r->m_userData = NULL; + RTMPError_Free(r->m_error); + r->m_error = NULL; + + free(r); +} + +void PILI_RTMP_Init(PILI_RTMP *r) { +#ifdef CRYPTO + if (!RTMP_TLS_ctx) + RTMP_TLS_Init(); +#endif + + memset(r, 0, sizeof(PILI_RTMP)); + r->m_sb.sb_socket = -1; + r->m_inChunkSize = RTMP_DEFAULT_CHUNKSIZE; + r->m_outChunkSize = RTMP_DEFAULT_CHUNKSIZE; + r->m_nBufferMS = 30000; + r->m_nClientBW = 2500000; + r->m_nClientBW2 = 2; + r->m_nServerBW = 2500000; + r->m_fAudioCodecs = 3191.0; + r->m_fVideoCodecs = 252.0; + 
r->Link.timeout = 10; + r->Link.send_timeout = 10; + r->Link.swfAge = 30; + + r->m_errorCallback = NULL; + r->m_error = NULL; + r->m_userData = NULL; + r->m_is_closing = 0; + r->m_tcp_nodelay = 1; + + r->m_connCallback = NULL; + r->ip = 0; +} + +void PILI_RTMP_EnableWrite(PILI_RTMP *r) { + r->Link.protocol |= RTMP_FEATURE_WRITE; +} + +double + PILI_RTMP_GetDuration(PILI_RTMP *r) { + return r->m_fDuration; +} + +int PILI_RTMP_IsConnected(PILI_RTMP *r) { + return r->m_sb.sb_socket != -1; +} + +int PILI_RTMP_Socket(PILI_RTMP *r) { + return r->m_sb.sb_socket; +} + +int PILI_RTMP_IsTimedout(PILI_RTMP *r) { + return r->m_sb.sb_timedout; +} + +void PILI_RTMP_SetBufferMS(PILI_RTMP *r, int size) { + r->m_nBufferMS = size; +} + +void PILI_RTMP_UpdateBufferMS(PILI_RTMP *r, RTMPError *error) { + PILI_RTMP_SendCtrl(r, 3, r->m_stream_id, r->m_nBufferMS, error); +} + +#undef OSS +#ifdef _WIN32 +#define OSS "WIN" +#elif defined(__sun__) +#define OSS "SOL" +#elif defined(__APPLE__) +#define OSS "MAC" +#elif defined(__linux__) +#define OSS "LNX" +#else +#define OSS "GNU" +#endif +#define DEF_VERSTR OSS " 10,0,32,18" +static const char DEFAULT_FLASH_VER[] = DEF_VERSTR; +const AVal RTMP_DefaultFlashVer = + {(char *)DEFAULT_FLASH_VER, sizeof(DEFAULT_FLASH_VER) - 1}; + +void PILI_RTMP_SetupStream(PILI_RTMP *r, + int protocol, + AVal *host, + unsigned int port, + AVal *sockshost, + AVal *playpath, + AVal *tcUrl, + AVal *swfUrl, + AVal *pageUrl, + AVal *app, + AVal *auth, + AVal *swfSHA256Hash, + uint32_t swfSize, + AVal *flashVer, + AVal *subscribepath, + int dStart, + int dStop, int bLiveStream, long int timeout) { + RTMP_Log(RTMP_LOGDEBUG, "Protocol : %s", PILI_RTMPProtocolStrings[protocol & 7]); + RTMP_Log(RTMP_LOGDEBUG, "Hostname : %.*s", host->av_len, host->av_val); + RTMP_Log(RTMP_LOGDEBUG, "Port : %d", port); + RTMP_Log(RTMP_LOGDEBUG, "Playpath : %s", playpath->av_val); + + if (tcUrl && tcUrl->av_val) + RTMP_Log(RTMP_LOGDEBUG, "tcUrl : %s", tcUrl->av_val); + if (swfUrl && 
swfUrl->av_val) + RTMP_Log(RTMP_LOGDEBUG, "swfUrl : %s", swfUrl->av_val); + if (pageUrl && pageUrl->av_val) + RTMP_Log(RTMP_LOGDEBUG, "pageUrl : %s", pageUrl->av_val); + if (app && app->av_val) + RTMP_Log(RTMP_LOGDEBUG, "app : %.*s", app->av_len, app->av_val); + if (auth && auth->av_val) + RTMP_Log(RTMP_LOGDEBUG, "auth : %s", auth->av_val); + if (subscribepath && subscribepath->av_val) + RTMP_Log(RTMP_LOGDEBUG, "subscribepath : %s", subscribepath->av_val); + if (flashVer && flashVer->av_val) + RTMP_Log(RTMP_LOGDEBUG, "flashVer : %s", flashVer->av_val); + if (dStart > 0) + RTMP_Log(RTMP_LOGDEBUG, "StartTime : %d msec", dStart); + if (dStop > 0) + RTMP_Log(RTMP_LOGDEBUG, "StopTime : %d msec", dStop); + + RTMP_Log(RTMP_LOGDEBUG, "live : %s", bLiveStream ? "yes" : "no"); + RTMP_Log(RTMP_LOGDEBUG, "timeout : %d sec", timeout); + +#ifdef CRYPTO + if (swfSHA256Hash != NULL && swfSize > 0) { + memcpy(r->Link.SWFHash, swfSHA256Hash->av_val, sizeof(r->Link.SWFHash)); + r->Link.SWFSize = swfSize; + RTMP_Log(RTMP_LOGDEBUG, "SWFSHA256:"); + RTMP_LogHex(RTMP_LOGDEBUG, r->Link.SWFHash, sizeof(r->Link.SWFHash)); + RTMP_Log(RTMP_LOGDEBUG, "SWFSize : %lu", r->Link.SWFSize); + } else { + r->Link.SWFSize = 0; + } +#endif + + if (sockshost->av_len) { + const char *socksport = strchr(sockshost->av_val, ':'); + char *hostname = strdup(sockshost->av_val); + + if (socksport) + hostname[socksport - sockshost->av_val] = '\0'; + r->Link.sockshost.av_val = hostname; + r->Link.sockshost.av_len = strlen(hostname); + + r->Link.socksport = socksport ? 
atoi(socksport + 1) : 1080; + RTMP_Log(RTMP_LOGDEBUG, "Connecting via SOCKS proxy: %s:%d", r->Link.sockshost.av_val, + r->Link.socksport); + } else { + r->Link.sockshost.av_val = NULL; + r->Link.sockshost.av_len = 0; + r->Link.socksport = 0; + } + + if (tcUrl && tcUrl->av_len) + r->Link.tcUrl = *tcUrl; + if (swfUrl && swfUrl->av_len) + r->Link.swfUrl = *swfUrl; + if (pageUrl && pageUrl->av_len) + r->Link.pageUrl = *pageUrl; + if (app && app->av_len) + r->Link.app = *app; + if (auth && auth->av_len) { + r->Link.auth = *auth; + r->Link.lFlags |= RTMP_LF_AUTH; + } + if (flashVer && flashVer->av_len) + r->Link.flashVer = *flashVer; + else + r->Link.flashVer = RTMP_DefaultFlashVer; + if (subscribepath && subscribepath->av_len) + r->Link.subscribepath = *subscribepath; + r->Link.seekTime = dStart; + r->Link.stopTime = dStop; + if (bLiveStream) + r->Link.lFlags |= RTMP_LF_LIVE; + r->Link.timeout = timeout; + + r->Link.protocol = protocol; + r->Link.hostname = *host; + r->Link.port = port; + r->Link.playpath = *playpath; + + if (r->Link.port == 0) { + if (protocol & RTMP_FEATURE_SSL) + r->Link.port = 443; + else if (protocol & RTMP_FEATURE_HTTP) + r->Link.port = 80; + else + r->Link.port = 1935; + } +} + +enum { OPT_STR = 0, + OPT_INT, + OPT_BOOL, + OPT_CONN }; +static const char *optinfo[] = { + "string", "integer", "boolean", "AMF"}; + +#define OFF(x) offsetof(struct PILI_RTMP, x) + +static struct urlopt { + AVal name; + off_t off; + int otype; + int omisc; + char *use; +} options[] = { + {AVC("socks"), OFF(Link.sockshost), OPT_STR, 0, + "Use the specified SOCKS proxy"}, + {AVC("app"), OFF(Link.app), OPT_STR, 0, + "Name of target app on server"}, + {AVC("tcUrl"), OFF(Link.tcUrl), OPT_STR, 0, + "URL to played stream"}, + {AVC("pageUrl"), OFF(Link.pageUrl), OPT_STR, 0, + "URL of played media's web page"}, + {AVC("swfUrl"), OFF(Link.swfUrl), OPT_STR, 0, + "URL to player SWF file"}, + {AVC("flashver"), OFF(Link.flashVer), OPT_STR, 0, + "Flash version string (default " 
DEF_VERSTR ")"}, + {AVC("conn"), OFF(Link.extras), OPT_CONN, 0, + "Append arbitrary AMF data to Connect message"}, + {AVC("playpath"), OFF(Link.playpath), OPT_STR, 0, + "Path to target media on server"}, + {AVC("playlist"), OFF(Link.lFlags), OPT_BOOL, RTMP_LF_PLST, + "Set playlist before play command"}, + {AVC("live"), OFF(Link.lFlags), OPT_BOOL, RTMP_LF_LIVE, + "Stream is live, no seeking possible"}, + {AVC("subscribe"), OFF(Link.subscribepath), OPT_STR, 0, + "Stream to subscribe to"}, + {AVC("token"), OFF(Link.token), OPT_STR, 0, + "Key for SecureToken response"}, + {AVC("swfVfy"), OFF(Link.lFlags), OPT_BOOL, RTMP_LF_SWFV, + "Perform SWF Verification"}, + {AVC("swfAge"), OFF(Link.swfAge), OPT_INT, 0, + "Number of days to use cached SWF hash"}, + {AVC("start"), OFF(Link.seekTime), OPT_INT, 0, + "Stream start position in milliseconds"}, + {AVC("stop"), OFF(Link.stopTime), OPT_INT, 0, + "Stream stop position in milliseconds"}, + {AVC("buffer"), OFF(m_nBufferMS), OPT_INT, 0, + "Buffer time in milliseconds"}, + {AVC("timeout"), OFF(Link.timeout), OPT_INT, 0, + "Session timeout in seconds"}, + {{NULL, 0}, 0, 0}}; + +static const AVal truth[] = { + AVC("1"), + AVC("on"), + AVC("yes"), + AVC("true"), + {0, 0}}; + +static void RTMP_OptUsage() { + int i; + + RTMP_Log(RTMP_LOGERROR, "Valid PILI_RTMP options are:\n"); + for (i = 0; options[i].name.av_len; i++) { + RTMP_Log(RTMP_LOGERROR, "%10s %-7s %s\n", options[i].name.av_val, + optinfo[options[i].otype], options[i].use); + } +} + +static int + parseAMF(AMFObject *obj, AVal *av, int *depth) { + AMFObjectProperty prop = {{0, 0}}; + int i; + char *p, *arg = av->av_val; + + if (arg[1] == ':') { + p = (char *)arg + 2; + switch (arg[0]) { + case 'B': + prop.p_type = AMF_BOOLEAN; + prop.p_vu.p_number = atoi(p); + break; + case 'S': + prop.p_type = AMF_STRING; + prop.p_vu.p_aval.av_val = p; + prop.p_vu.p_aval.av_len = av->av_len - (p - arg); + break; + case 'N': + prop.p_type = AMF_NUMBER; + prop.p_vu.p_number = strtod(p, NULL); 
+ break; + case 'Z': + prop.p_type = AMF_NULL; + break; + case 'O': + i = atoi(p); + if (i) { + prop.p_type = AMF_OBJECT; + } else { + (*depth)--; + return 0; + } + break; + default: + return -1; + } + } else if (arg[2] == ':' && arg[0] == 'N') { + p = strchr(arg + 3, ':'); + if (!p || !*depth) + return -1; + prop.p_name.av_val = (char *)arg + 3; + prop.p_name.av_len = p - (arg + 3); + + p++; + switch (arg[1]) { + case 'B': + prop.p_type = AMF_BOOLEAN; + prop.p_vu.p_number = atoi(p); + break; + case 'S': + prop.p_type = AMF_STRING; + prop.p_vu.p_aval.av_val = p; + prop.p_vu.p_aval.av_len = av->av_len - (p - arg); + break; + case 'N': + prop.p_type = AMF_NUMBER; + prop.p_vu.p_number = strtod(p, NULL); + break; + case 'O': + prop.p_type = AMF_OBJECT; + break; + default: + return -1; + } + } else + return -1; + + if (*depth) { + AMFObject *o2; + for (i = 0; i < *depth; i++) { + o2 = &obj->o_props[obj->o_num - 1].p_vu.p_object; + obj = o2; + } + } + AMF_AddProp(obj, &prop); + if (prop.p_type == AMF_OBJECT) + (*depth)++; + return 0; +} + +int RTMP_SetOpt(PILI_RTMP *r, const AVal *opt, AVal *arg, RTMPError *error) { + int i; + void *v; + + for (i = 0; options[i].name.av_len; i++) { + if (opt->av_len != options[i].name.av_len) continue; + if (strcasecmp(opt->av_val, options[i].name.av_val)) continue; + v = (char *)r + options[i].off; + switch (options[i].otype) { + case OPT_STR: { + AVal *aptr = v; + *aptr = *arg; + } break; + case OPT_INT: { + long l = strtol(arg->av_val, NULL, 0); + *(int *)v = l; + } break; + case OPT_BOOL: { + int j, fl; + fl = *(int *)v; + for (j = 0; truth[j].av_len; j++) { + if (arg->av_len != truth[j].av_len) continue; + if (strcasecmp(arg->av_val, truth[j].av_val)) continue; + fl |= options[i].omisc; + break; + } + *(int *)v = fl; + } break; + case OPT_CONN: + if (parseAMF(&r->Link.extras, arg, &r->Link.edepth)) + return FALSE; + break; + } + break; + } + if (!options[i].name.av_len) { + if (error) { + char msg[100]; + memset(msg, 0, 100); + 
strcat(msg, "Unknown option "); + strcat(msg, opt->av_val); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorUnknowOption; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "Unknown option %s", opt->av_val); + RTMP_OptUsage(); + return FALSE; + } + + return TRUE; +} + +int PILI_RTMP_SetupURL(PILI_RTMP *r, const char *url, RTMPError *error) { + AVal opt, arg; + char *p1, *p2, *ptr = strchr(url, ' '); + int ret, len; + unsigned int port = 0; + + if (ptr) + *ptr = '\0'; + + len = (int)strlen(url); + ret = PILI_RTMP_ParseURL2(url, &r->Link.protocol, &r->Link.hostname, + &port, &r->Link.playpath0, &r->Link.app, &r->Link.domain); + if (!ret) + return ret; + r->Link.port = port; + r->Link.playpath = r->Link.playpath0; + + while (ptr) { + *ptr++ = '\0'; + p1 = ptr; + p2 = strchr(p1, '='); + if (!p2) + break; + opt.av_val = p1; + opt.av_len = p2 - p1; + *p2++ = '\0'; + arg.av_val = p2; + ptr = strchr(p2, ' '); + if (ptr) { + *ptr = '\0'; + arg.av_len = ptr - p2; + /* skip repeated spaces */ + while (ptr[1] == ' ') + *ptr++ = '\0'; + } else { + arg.av_len = strlen(p2); + } + + /* unescape */ + port = arg.av_len; + for (p1 = p2; port > 0;) { + if (*p1 == '\\') { + unsigned int c; + if (port < 3) + return FALSE; + sscanf(p1 + 1, "%02x", &c); + *p2++ = c; + port -= 3; + p1 += 3; + } else { + *p2++ = *p1++; + port--; + } + } + arg.av_len = p2 - arg.av_val; + + ret = RTMP_SetOpt(r, &opt, &arg, error); + if (!ret) + return ret; + } + + if (!r->Link.tcUrl.av_len) { + r->Link.tcUrl.av_val = url; + if (r->Link.app.av_len) { + AVal *domain = &r->Link.domain; + if (domain->av_len == 0 && r->Link.app.av_val < url + len) { + /* if app is part of original url, just use it */ + r->Link.tcUrl.av_len = r->Link.app.av_len + (r->Link.app.av_val - url); + } else { + if (domain->av_len == 0) { + domain = &r->Link.hostname; + } + if (r->Link.port == 0) { + r->Link.port = 1935; + } + len = domain->av_len + r->Link.app.av_len + sizeof("rtmpte://:65535/"); + 
r->Link.tcUrl.av_val = malloc(len); + r->Link.tcUrl.av_len = snprintf(r->Link.tcUrl.av_val, len, + "%s://%.*s:%d/%.*s", + PILI_RTMPProtocolStringsLower[r->Link.protocol], + domain->av_len, domain->av_val, + r->Link.port, + r->Link.app.av_len, r->Link.app.av_val); + r->Link.lFlags |= RTMP_LF_FTCU; + } + } else { + r->Link.tcUrl.av_len = strlen(url); + } + } + +#ifdef CRYPTO + if ((r->Link.lFlags & RTMP_LF_SWFV) && r->Link.swfUrl.av_len) + RTMP_HashSWF(r->Link.swfUrl.av_val, &r->Link.SWFSize, + (unsigned char *)r->Link.SWFHash, r->Link.swfAge); +#endif + + if (r->Link.port == 0) { + if (r->Link.protocol & RTMP_FEATURE_SSL) + r->Link.port = 443; + else if (r->Link.protocol & RTMP_FEATURE_HTTP) + r->Link.port = 80; + else + r->Link.port = 1935; + } + return TRUE; +} + +static int add_addr_info(PILI_RTMP *r, struct addrinfo *hints, struct addrinfo **ai, AVal *host, int port, RTMPError *error) { + char *hostname; + int ret = TRUE; + if (host->av_val[host->av_len]) { + hostname = malloc(host->av_len + 1); + memcpy(hostname, host->av_val, host->av_len); + hostname[host->av_len] = '\0'; + } else { + hostname = host->av_val; + } + + struct addrinfo *cur_ai; + char portstr[10]; + snprintf(portstr, sizeof(portstr), "%d", port); + int addrret = getaddrinfo(hostname, portstr, hints, ai); + if (addrret != 0) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "Problem accessing the DNS. addr: "); + strcat(msg, hostname); + + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorAccessDNSFailed; + strcpy(error->message, msg); + RTMP_Log(RTMP_LOGERROR, "Problem accessing the DNS. 
%d (addr: %s) (port: %s)", addrret, hostname, portstr); + ret = FALSE; + } + + if (hostname != host->av_val) { + free(hostname); + } + return ret; +} + +int PILI_RTMP_Connect0(PILI_RTMP *r, struct addrinfo *ai, unsigned short port, RTMPError *error) { + r->m_sb.sb_timedout = FALSE; + r->m_pausing = 0; + r->m_fDuration = 0.0; + + r->m_sb.sb_socket = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol); + if (ai->ai_family == AF_INET6) { + struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)ai->ai_addr; + in6->sin6_port = htons(port); + } + if (r->m_sb.sb_socket != -1) { +#ifdef RTMP_FEATURE_NONBLOCK + /* set socket non block */ + { + int flags = fcntl(r->m_sb.sb_socket, F_GETFL, 0); + if (fcntl(r->m_sb.sb_socket, F_SETFL, flags | O_NONBLOCK) < 0) { + RTMP_Log(RTMP_LOGERROR, "%s, set socket non block failed", __FUNCTION__); + PILI_RTMP_Close(r, NULL); + return FALSE; + } + } +#endif + if (connect(r->m_sb.sb_socket, ai->ai_addr, ai->ai_addrlen) < 0) { + int err = GetSockError(); +#ifdef RTMP_FEATURE_NONBLOCK + if ((err == EINTR && !PILI_RTMP_ctrlC) || + err == EINPROGRESS) { + SET_RCVTIMEO(tv, r->Link.timeout); + fd_set wfds; + while (1) { + FD_ZERO(&wfds); + FD_SET(r->m_sb.sb_socket, &wfds); + int ret = select(r->m_sb.sb_socket + 1, NULL, &wfds, NULL, &tv); + if (ret < 0) { + int sockerr = GetSockError(); + RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP connect select error %d, %s", __FUNCTION__, + sockerr, strerror(sockerr)); + if (sockerr == EINTR && !PILI_RTMP_ctrlC) + continue; + + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "PILI_RTMP connect select error. "); + strcat(msg, strerror(sockerr)); + RTMPError_Message(error, RTMPErrorFailedToConnectSocket, msg); + PILI_RTMP_Close(r, error); + RTMPError_Free(error); + return FALSE; + } else if (ret == 0) { + RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP connect error select timeout", __FUNCTION__); + RTMPError_Message(error, RTMPErrorSocketTimeout, "PILI_RTMP connect error. 
select timeout: "); + PILI_RTMP_Close(r, error); + RTMPError_Free(error); + return FALSE; + } else if (!FD_ISSET(r->m_sb.sb_socket, &wfds)) { + PILI_RTMP_Close(r, error); + RTMPError_Message(error, RTMPErrorFailedToConnectSocket, "PILI_RTMP connect error"); + RTMPError_Free(error); + return FALSE; + } else { + RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP connect success", __FUNCTION__); + break; + } + } + } else { +#endif + + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "Failed to connect socket. "); + strcat(msg, strerror(err)); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorFailedToConnectSocket; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "%s, failed to connect socket. %d (%s)", + __FUNCTION__, err, strerror(err)); + + PILI_RTMP_Close(r, NULL); + return FALSE; +#ifdef RTMP_FEATURE_NONBLOCK + } +#endif + } + + if (r->Link.socksport) { + RTMP_Log(RTMP_LOGDEBUG, "%s ... SOCKS negotiation", __FUNCTION__); + if (!SocksNegotiate(r, error)) { + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "Socks negotiation failed."); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorSocksNegotiationFailed; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "%s, SOCKS negotiation failed.", __FUNCTION__); + PILI_RTMP_Close(r, NULL); + return FALSE; + } + } + } else { + int err = GetSockError(); + + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "Failed to create socket. "); + strcat(msg, strerror(err)); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorFailedToCreateSocket; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "%s, failed to create socket. 
Error: %d (%s)", __FUNCTION__, err, strerror(err)); + + return FALSE; + } + +#if RTMP_FEATURE_NONBLOCK + +#else + /* set receive timeout */ + { + SET_RCVTIMEO(tv, r->Link.timeout); + if (setsockopt(r->m_sb.sb_socket, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(tv))) { + RTMP_Log(RTMP_LOGERROR, "%s, Setting socket recieve timeout to %ds failed!", + __FUNCTION__, r->Link.timeout); + } + } + + /* set send timeout*/ + { + struct timeval timeout; + timeout.tv_sec = r->Link.send_timeout; + timeout.tv_usec = 0; + + if (setsockopt(r->m_sb.sb_socket, SOL_SOCKET, SO_SNDTIMEO, (char *)&timeout, sizeof(timeout))) { + RTMP_Log(RTMP_LOGERROR, "%s, Setting socket send timeout to %ds failed!", + __FUNCTION__, r->Link.timeout); + } + } +#endif + + /* ignore sigpipe */ + int kOne = 1; +#ifdef __linux + setsockopt(r->m_sb.sb_socket, SOL_SOCKET, MSG_NOSIGNAL, &kOne, sizeof(kOne)); +#else + setsockopt(r->m_sb.sb_socket, SOL_SOCKET, SO_NOSIGPIPE, &kOne, sizeof(kOne)); +#endif + if (r->m_tcp_nodelay) { + int on = 1; + setsockopt(r->m_sb.sb_socket, IPPROTO_TCP, TCP_NODELAY, (char *)&on, sizeof(on)); + } + + return TRUE; +} + +int PILI_RTMP_Connect1(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error) { + if (r->Link.protocol & RTMP_FEATURE_SSL) { +#if defined(CRYPTO) && !defined(NO_SSL) + TLS_client(RTMP_TLS_ctx, r->m_sb.sb_ssl); + TLS_setfd(r->m_sb.sb_ssl, r->m_sb.sb_socket); + if (TLS_connect(r->m_sb.sb_ssl) < 0) { + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "TLS_Connect failed."); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorTLSConnectFailed; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "%s, TLS_Connect failed", __FUNCTION__); + PILI_RTMP_Close(r, NULL); + return FALSE; + } +#else + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "No SSL/TLS support."); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorNoSSLOrTLSSupport; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "%s, no 
SSL/TLS support", __FUNCTION__); + PILI_RTMP_Close(r, NULL); + return FALSE; + +#endif + } + if (r->Link.protocol & RTMP_FEATURE_HTTP) { + r->m_msgCounter = 1; + r->m_clientID.av_val = NULL; + r->m_clientID.av_len = 0; + HTTP_Post(r, RTMPT_OPEN, "", 1); + HTTP_read(r, 1); + r->m_msgCounter = 0; + } + RTMP_Log(RTMP_LOGDEBUG, "%s, ... connected, handshaking", __FUNCTION__); + if (!HandShake(r, TRUE, error)) { + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "Handshake failed."); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorHandshakeFailed; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "%s, handshake failed.", __FUNCTION__); + PILI_RTMP_Close(r, NULL); + return FALSE; + } + RTMP_Log(RTMP_LOGDEBUG, "%s, handshaked", __FUNCTION__); + + if (!SendConnectPacket(r, cp, error)) { + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "PILI_RTMP connect failed."); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorRTMPConnectFailed; + strcpy(error->message, msg); + } + RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP connect failed.", __FUNCTION__); + PILI_RTMP_Close(r, NULL); + return FALSE; + } + return TRUE; +} + +int PILI_RTMP_Connect(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error) { + //获取hub + char hub[5] = {0}; + if (r->Link.app.av_len>4) { + strncpy(hub, r->Link.app.av_val,4); + }else if(r->Link.app.av_len>0){ + strncpy(hub, r->Link.app.av_val,r->Link.app.av_len); + } + + if (strlen(hub)>0) { + time_t nowtime; + time ( &nowtime ); + char tempTime[20]={0}; + sprintf(tempTime,"%ld",nowtime); + reqid[0] = '\0'; + strncat(reqid, hub, strlen(hub)); + strncat(reqid, tempTime, strlen(tempTime)); + } + + struct PILI_CONNECTION_TIME conn_time; + if (!r->Link.hostname.av_len) + return FALSE; + + struct addrinfo hints = {0}, *ai, *cur_ai; + + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + + unsigned short port; + if (r->Link.socksport) { + port = r->Link.socksport; + /* Connect via 
SOCKS */ + if (!add_addr_info(r, &hints, &ai, &r->Link.sockshost, r->Link.socksport, error)) { + return FALSE; + } + } else { + port = r->Link.port; + /* Connect directly */ + if (!add_addr_info(r, &hints, &ai, &r->Link.hostname, r->Link.port, error)) { + return FALSE; + } + } + r->ip = 0; //useless for ipv6 + cur_ai = ai; + + int t1 = PILI_RTMP_GetTime(); + if (!PILI_RTMP_Connect0(r, cur_ai, port, error)) { + freeaddrinfo(ai); + return FALSE; + } + conn_time.connect_time = PILI_RTMP_GetTime() - t1; + r->m_bSendCounter = TRUE; + + int t2 = PILI_RTMP_GetTime(); + int ret = PILI_RTMP_Connect1(r, cp, error); + conn_time.handshake_time = PILI_RTMP_GetTime() - t2; + + if (r->m_connCallback != NULL) { + r->m_connCallback(&conn_time, r->m_userData); + } + freeaddrinfo(ai); + return ret; +} + +//useless +static int + SocksNegotiate(PILI_RTMP *r, RTMPError *error) { + // unsigned long addr; + // struct sockaddr_in service; + // memset(&service, 0, sizeof(struct sockaddr_in)); + // + // add_addr_info(r, &service, &r->Link.hostname, r->Link.port, error); + // addr = htonl(service.sin_addr.s_addr); + // + // { + // char packet[] = { + // 4, 1, /* SOCKS 4, connect */ + // (r->Link.port >> 8) & 0xFF, + // (r->Link.port) & 0xFF, + // (char)(addr >> 24) & 0xFF, (char)(addr >> 16) & 0xFF, + // (char)(addr >> 8) & 0xFF, (char)addr & 0xFF, + // 0 + // }; /* NULL terminate */ + // + // WriteN(r, packet, sizeof packet, error); + // + // if (ReadN(r, packet, 8) != 8) + // return FALSE; + // + // if (packet[0] == 0 && packet[1] == 90) + // { + // return TRUE; + // } + // else + // { + // RTMP_Log(RTMP_LOGERROR, "%s, SOCKS returned error code %d", packet[1]); + // return FALSE; + // } + // } + return 0; +} + +int PILI_RTMP_ConnectStream(PILI_RTMP *r, int seekTime, RTMPError *error) { + PILI_RTMPPacket packet = {0}; + + /* seekTime was already set by SetupStream / SetupURL. + * This is only needed by ReconnectStream. 
+ */ + if (seekTime > 0) + r->Link.seekTime = seekTime; + + r->m_mediaChannel = 0; + + while (!r->m_bPlaying && PILI_RTMP_IsConnected(r) && PILI_RTMP_ReadPacket(r, &packet)) { + if (RTMPPacket_IsReady(&packet)) { + if (!packet.m_nBodySize) + continue; + if ((packet.m_packetType == RTMP_PACKET_TYPE_AUDIO) || + (packet.m_packetType == RTMP_PACKET_TYPE_VIDEO) || + (packet.m_packetType == RTMP_PACKET_TYPE_INFO)) { + RTMP_Log(RTMP_LOGWARNING, "Received FLV packet before play()! Ignoring."); + PILI_RTMPPacket_Free(&packet); + continue; + } + + PILI_RTMP_ClientPacket(r, &packet); + PILI_RTMPPacket_Free(&packet); + } + } + + if (!r->m_bPlaying && error) { + char *msg = "PILI_RTMP connect stream failed."; + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorRTMPConnectStreamFailed; + strcpy(error->message, msg); + } + + return r->m_bPlaying; +} + +int PILI_RTMP_ReconnectStream(PILI_RTMP *r, int seekTime, RTMPError *error) { + PILI_RTMP_DeleteStream(r, error); + + PILI_RTMP_SendCreateStream(r, error); + + return PILI_RTMP_ConnectStream(r, seekTime, error); +} + +int PILI_RTMP_ToggleStream(PILI_RTMP *r, RTMPError *error) { + int res; + + if (!r->m_pausing) { + res = PILI_RTMP_SendPause(r, TRUE, r->m_pauseStamp, error); + if (!res) + return res; + + r->m_pausing = 1; + sleep(1); + } + res = PILI_RTMP_SendPause(r, FALSE, r->m_pauseStamp, error); + r->m_pausing = 3; + return res; +} + +void PILI_RTMP_DeleteStream(PILI_RTMP *r, RTMPError *error) { + if (r->m_stream_id < 0) + return; + + r->m_bPlaying = FALSE; + + SendDeleteStream(r, r->m_stream_id, error); + r->m_stream_id = -1; +} + +int PILI_RTMP_GetNextMediaPacket(PILI_RTMP *r, PILI_RTMPPacket *packet) { + int bHasMediaPacket = 0; + + while (!bHasMediaPacket && PILI_RTMP_IsConnected(r) && PILI_RTMP_ReadPacket(r, packet)) { + if (!RTMPPacket_IsReady(packet)) { + continue; + } + + bHasMediaPacket = PILI_RTMP_ClientPacket(r, packet); + + if (!bHasMediaPacket) { + PILI_RTMPPacket_Free(packet); + } else if (r->m_pausing 
== 3) { + if (packet->m_nTimeStamp <= r->m_mediaStamp) { + bHasMediaPacket = 0; +#ifdef _DEBUG + RTMP_Log(RTMP_LOGDEBUG, + "Skipped type: %02X, size: %d, TS: %d ms, abs TS: %d, pause: %d ms", + packet->m_packetType, packet->m_nBodySize, + packet->m_nTimeStamp, packet->m_hasAbsTimestamp, + r->m_mediaStamp); +#endif + continue; + } + r->m_pausing = 0; + } + } + + if (bHasMediaPacket) + r->m_bPlaying = TRUE; + else if (r->m_sb.sb_timedout && !r->m_pausing) + r->m_pauseStamp = r->m_channelTimestamp[r->m_mediaChannel]; + + return bHasMediaPacket; +} + +int PILI_RTMP_ClientPacket(PILI_RTMP *r, PILI_RTMPPacket *packet) { + int bHasMediaPacket = 0; + switch (packet->m_packetType) { + case 0x01: + /* chunk size */ + HandleChangeChunkSize(r, packet); + break; + + case 0x03: + /* bytes read report */ + RTMP_Log(RTMP_LOGDEBUG, "%s, received: bytes read report", __FUNCTION__); + break; + + case 0x04: + /* ctrl */ + HandleCtrl(r, packet); + break; + + case 0x05: + /* server bw */ + HandleServerBW(r, packet); + break; + + case 0x06: + /* client bw */ + HandleClientBW(r, packet); + break; + + case 0x08: + /* audio data */ + /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: audio %lu bytes", __FUNCTION__, packet.m_nBodySize); */ + HandleAudio(r, packet); + bHasMediaPacket = 1; + if (!r->m_mediaChannel) + r->m_mediaChannel = packet->m_nChannel; + if (!r->m_pausing) + r->m_mediaStamp = packet->m_nTimeStamp; + break; + + case 0x09: + /* video data */ + /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: video %lu bytes", __FUNCTION__, packet.m_nBodySize); */ + HandleVideo(r, packet); + bHasMediaPacket = 1; + if (!r->m_mediaChannel) + r->m_mediaChannel = packet->m_nChannel; + if (!r->m_pausing) + r->m_mediaStamp = packet->m_nTimeStamp; + break; + + case 0x0F: /* flex stream send */ + RTMP_Log(RTMP_LOGDEBUG, + "%s, flex stream send, size %lu bytes, not supported, ignoring", + __FUNCTION__, packet->m_nBodySize); + break; + + case 0x10: /* flex shared object */ + RTMP_Log(RTMP_LOGDEBUG, + "%s, flex shared 
object, size %lu bytes, not supported, ignoring", + __FUNCTION__, packet->m_nBodySize); + break; + + case 0x11: /* flex message */ + { + RTMP_Log(RTMP_LOGDEBUG, + "%s, flex message, size %lu bytes, not fully supported", + __FUNCTION__, packet->m_nBodySize); +/*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ + +/* some DEBUG code */ +#if 0 + RTMP_LIB_AMFObject obj; + int nRes = obj.Decode(packet.m_body+1, packet.m_nBodySize-1); + if(nRes < 0) { + RTMP_Log(RTMP_LOGERROR, "%s, error decoding AMF3 packet", __FUNCTION__); + /*return; */ + } + + obj.Dump(); +#endif + + if (HandleInvoke(r, packet->m_body + 1, packet->m_nBodySize - 1) == 1) + bHasMediaPacket = 2; + break; + } + case 0x12: + /* metadata (notify) */ + RTMP_Log(RTMP_LOGDEBUG, "%s, received: notify %lu bytes", __FUNCTION__, + packet->m_nBodySize); + if (HandleMetadata(r, packet->m_body, packet->m_nBodySize)) + bHasMediaPacket = 1; + break; + + case 0x13: + RTMP_Log(RTMP_LOGDEBUG, "%s, shared object, not supported, ignoring", + __FUNCTION__); + break; + + case 0x14: + /* invoke */ + RTMP_Log(RTMP_LOGDEBUG, "%s, received: invoke %lu bytes", __FUNCTION__, + packet->m_nBodySize); + /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ + + if (HandleInvoke(r, packet->m_body, packet->m_nBodySize) == 1) + bHasMediaPacket = 2; + break; + + case 0x16: { + /* go through FLV packets and handle metadata packets */ + unsigned int pos = 0; + uint32_t nTimeStamp = packet->m_nTimeStamp; + + while (pos + 11 < packet->m_nBodySize) { + uint32_t dataSize = AMF_DecodeInt24(packet->m_body + pos + 1); /* size without header (11) and prevTagSize (4) */ + + if (pos + 11 + dataSize + 4 > packet->m_nBodySize) { + RTMP_Log(RTMP_LOGWARNING, "Stream corrupt?!"); + break; + } + if (packet->m_body[pos] == 0x12) { + HandleMetadata(r, packet->m_body + pos + 11, dataSize); + } else if (packet->m_body[pos] == 8 || packet->m_body[pos] == 9) { + nTimeStamp = AMF_DecodeInt24(packet->m_body + pos + 4); + nTimeStamp |= (packet->m_body[pos + 7] << 
24); + } + pos += (11 + dataSize + 4); + } + if (!r->m_pausing) + r->m_mediaStamp = nTimeStamp; + + /* FLV tag(s) */ + /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: FLV tag(s) %lu bytes", __FUNCTION__, packet.m_nBodySize); */ + bHasMediaPacket = 1; + break; + } + default: + RTMP_Log(RTMP_LOGDEBUG, "%s, unknown packet type received: 0x%02x", __FUNCTION__, + packet->m_packetType); +#ifdef _DEBUG + RTMP_LogHex(RTMP_LOGDEBUG, packet->m_body, packet->m_nBodySize); +#endif + } + + return bHasMediaPacket; +} + +#ifdef _DEBUG +extern FILE *netstackdump; +extern FILE *netstackdump_read; +#endif + +static int + ReadN(PILI_RTMP *r, char *buffer, int n) { + int nOriginalSize = n; + int avail; + char *ptr; + + r->m_sb.sb_timedout = FALSE; + +#ifdef _DEBUG + memset(buffer, 0, n); +#endif + + ptr = buffer; + while (n > 0) { + int nBytes = 0, nRead; + if (r->Link.protocol & RTMP_FEATURE_HTTP) { + while (!r->m_resplen) { + if (r->m_sb.sb_size < 144) { + if (!r->m_unackd) + HTTP_Post(r, RTMPT_IDLE, "", 1); + if (PILI_RTMPSockBuf_Fill(&r->m_sb, r->Link.timeout) < 1) { + if (!r->m_sb.sb_timedout) { + PILI_RTMP_Close(r, NULL); + } else { + RTMPError error = {0}; + + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "PILI_RTMP socket timeout"); + RTMPError_Alloc(&error, strlen(msg)); + error.code = RTMPErrorSocketTimeout; + strcpy(error.message, msg); + + PILI_RTMP_Close(r, &error); + + RTMPError_Free(&error); + } + + return 0; + } + } + HTTP_read(r, 0); + } + if (r->m_resplen && !r->m_sb.sb_size) + PILI_RTMPSockBuf_Fill(&r->m_sb, r->Link.timeout); + avail = r->m_sb.sb_size; + if (avail > r->m_resplen) + avail = r->m_resplen; + } else { + avail = r->m_sb.sb_size; + if (avail == 0) { + if (PILI_RTMPSockBuf_Fill(&r->m_sb, r->Link.timeout) < 1) { + if (!r->m_sb.sb_timedout) { + PILI_RTMP_Close(r, NULL); + } else { + RTMPError error = {0}; + + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "PILI_RTMP socket timeout"); + RTMPError_Alloc(&error, strlen(msg)); + error.code = 
RTMPErrorSocketTimeout; + strcpy(error.message, msg); + + PILI_RTMP_Close(r, &error); + + RTMPError_Free(&error); + } + + return 0; + } + avail = r->m_sb.sb_size; + } + } + nRead = ((n < avail) ? n : avail); + if (nRead > 0) { + memcpy(ptr, r->m_sb.sb_start, nRead); + r->m_sb.sb_start += nRead; + r->m_sb.sb_size -= nRead; + nBytes = nRead; + r->m_nBytesIn += nRead; + if (r->m_bSendCounter && r->m_nBytesIn > r->m_nBytesInSent + r->m_nClientBW / 2) + SendBytesReceived(r, NULL); + } +/*RTMP_Log(RTMP_LOGDEBUG, "%s: %d bytes\n", __FUNCTION__, nBytes); */ +#ifdef _DEBUG + fwrite(ptr, 1, nBytes, netstackdump_read); +#endif + + if (nBytes == 0) { + RTMP_Log(RTMP_LOGDEBUG, "%s, PILI_RTMP socket closed by peer", __FUNCTION__); + /*goto again; */ + RTMPError error = {0}; + + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "PILI_RTMP socket closed by peer. "); + RTMPError_Alloc(&error, strlen(msg)); + error.code = RTMPErrorSocketClosedByPeer; + strcpy(error.message, msg); + + PILI_RTMP_Close(r, &error); + + RTMPError_Free(&error); + break; + } + + if (r->Link.protocol & RTMP_FEATURE_HTTP) + r->m_resplen -= nBytes; + +#ifdef CRYPTO + if (r->Link.rc4keyIn) { + RC4_encrypt(r->Link.rc4keyIn, nBytes, ptr); + } +#endif + + n -= nBytes; + ptr += nBytes; + } + + return nOriginalSize - n; +} + +static int + WriteN(PILI_RTMP *r, const char *buffer, int n, RTMPError *error) { + const char *ptr = buffer; +#ifdef CRYPTO + char *encrypted = 0; + char buf[RTMP_BUFFER_CACHE_SIZE]; + + if (r->Link.rc4keyOut) { + if (n > sizeof(buf)) + encrypted = (char *)malloc(n); + else + encrypted = (char *)buf; + ptr = encrypted; + RC4_encrypt2(r->Link.rc4keyOut, n, buffer, ptr); + } +#endif + +#ifdef RTMP_FEATURE_NONBLOCK + SET_RCVTIMEO(tv, r->Link.timeout); + fd_set wfds; +#endif + while (n > 0) { + +#ifdef RTMP_FEATURE_NONBLOCK + FD_ZERO(&wfds); + FD_SET(r->m_sb.sb_socket, &wfds); + int ret = select(r->m_sb.sb_socket + 1, NULL, &wfds, NULL, &tv); + if (ret < 0) { + int sockerr = GetSockError(); + 
RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP send select error %d, %s", __FUNCTION__, + sockerr, strerror(sockerr)); + if (sockerr == EINTR && !PILI_RTMP_ctrlC) + continue; + + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "PILI_RTMP send select error. "); + strcat(msg, strerror(sockerr)); + RTMPError_Message(error, RTMPErrorSendFailed, msg); + PILI_RTMP_Close(r, error); + RTMPError_Free(error); + n = 1; + break; + } else if (ret == 0) { + RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP send error select timeout", __FUNCTION__); + RTMPError_Message(error, RTMPErrorSocketTimeout, "PILI_RTMP send error. select timeout: "); + PILI_RTMP_Close(r, error); + RTMPError_Free(error); + n = 1; + break; + } else if (!FD_ISSET(r->m_sb.sb_socket, &wfds)) { + PILI_RTMP_Close(r, error); + RTMPError_Message(error, RTMPErrorSendFailed, "PILI_RTMP send error socket can not write"); + RTMPError_Free(error); + n = 1; + break; + } +#endif + int nBytes; + + if (r->Link.protocol & RTMP_FEATURE_HTTP) + nBytes = HTTP_Post(r, RTMPT_SEND, ptr, n); + else + nBytes = PILI_RTMPSockBuf_Send(&r->m_sb, ptr, n); + /*RTMP_Log(RTMP_LOGDEBUG, "%s: %d\n", __FUNCTION__, nBytes); */ + + if (nBytes < 0) { + int sockerr = GetSockError(); + RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP send error %d, %s, (%d bytes)", __FUNCTION__, + sockerr, strerror(sockerr), n); + + /* + Specify the receiving or sending timeouts until reporting an error. + The argument is a struct timeval. + If an input or output function blocks for this period of time, + and data has been sent or received, + the return value of that function will be the amount of data transferred; + if no data has been transferred and the timeout has been reached then -1 is returned + with errno set to EAGAIN or EWOULDBLOCK, or EINPROGRESS (for connect(2)) just as if the socket was specified to be nonblocking. + If the timeout is set to zero (the default) then the operation will never timeout. 
+ Timeouts only have effect for system calls that perform socket I/O (e.g., read(2), recvmsg(2), send(2), sendmsg(2)); + timeouts have no effect for select(2), poll(2), epoll_wait(2), and so on. + */ + if ((sockerr == EINTR && !PILI_RTMP_ctrlC ) || sockerr == EAGAIN) + continue; + +#ifdef RTMP_FEATURE_NONBLOCK + if (sockerr == EWOULDBLOCK || sockerr == EAGAIN) { + continue; + } else if (error) { +#else + if (error) { +#endif + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "PILI_RTMP send error. socket error: "); + strcat(msg, strerror(sockerr)); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorSendFailed; + strcpy(error->message, msg); + } + + PILI_RTMP_Close(r, error); + + RTMPError_Free(error); + + n = 1; + break; + } + + if (nBytes == 0) + break; + + n -= nBytes; + ptr += nBytes; + } + +#ifdef CRYPTO + if (encrypted && encrypted != buf) + free(encrypted); +#endif + + return n == 0; +} + +#define SAVC(x) static const AVal av_##x = AVC(#x) + +SAVC(app); +SAVC(connect); +SAVC(flashVer); +SAVC(swfUrl); +SAVC(pageUrl); +SAVC(tcUrl); +SAVC(fpad); +SAVC(capabilities); +SAVC(audioCodecs); +SAVC(videoCodecs); +SAVC(videoFunction); +SAVC(objectEncoding); +SAVC(secureToken); +SAVC(secureTokenResponse); +SAVC(type); +SAVC(nonprivate); +SAVC(xreqid); + +static int + SendConnectPacket(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[4096], *pend = pbuf + sizeof(pbuf); + char *enc; + + if (cp) + return PILI_RTMP_SendPacket(r, cp, TRUE, error); + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_connect); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_OBJECT; + + enc = AMF_EncodeNamedString(enc, 
pend, &av_app, &r->Link.app); + if (!enc) + return FALSE; + AVal requestId; + requestId.av_val = reqid; + requestId.av_len = (int)strlen(reqid); + + if (requestId.av_len){ + enc = AMF_EncodeNamedString(enc,pend,&av_xreqid,&requestId); + if (!enc) + return FALSE; + } + + if (r->Link.protocol & RTMP_FEATURE_WRITE) { + enc = AMF_EncodeNamedString(enc, pend, &av_type, &av_nonprivate); + if (!enc) + return FALSE; + } + if (r->Link.flashVer.av_len) { + enc = AMF_EncodeNamedString(enc, pend, &av_flashVer, &r->Link.flashVer); + if (!enc) + return FALSE; + } + if (r->Link.swfUrl.av_len) { + enc = AMF_EncodeNamedString(enc, pend, &av_swfUrl, &r->Link.swfUrl); + if (!enc) + return FALSE; + } + if (r->Link.tcUrl.av_len) { + enc = AMF_EncodeNamedString(enc, pend, &av_tcUrl, &r->Link.tcUrl); + if (!enc) + return FALSE; + } + if (!(r->Link.protocol & RTMP_FEATURE_WRITE)) { + enc = AMF_EncodeNamedBoolean(enc, pend, &av_fpad, FALSE); + if (!enc) + return FALSE; + enc = AMF_EncodeNamedNumber(enc, pend, &av_capabilities, 15.0); + if (!enc) + return FALSE; + enc = AMF_EncodeNamedNumber(enc, pend, &av_audioCodecs, r->m_fAudioCodecs); + if (!enc) + return FALSE; + enc = AMF_EncodeNamedNumber(enc, pend, &av_videoCodecs, r->m_fVideoCodecs); + if (!enc) + return FALSE; + enc = AMF_EncodeNamedNumber(enc, pend, &av_videoFunction, 1.0); + if (!enc) + return FALSE; + if (r->Link.pageUrl.av_len) { + enc = AMF_EncodeNamedString(enc, pend, &av_pageUrl, &r->Link.pageUrl); + if (!enc) + return FALSE; + } + } + if (r->m_fEncoding != 0.0 || r->m_bSendEncoding) { /* AMF0, AMF3 not fully supported yet */ + enc = AMF_EncodeNamedNumber(enc, pend, &av_objectEncoding, r->m_fEncoding); + if (!enc) + return FALSE; + } + if (enc + 3 >= pend) + return FALSE; + *enc++ = 0; + *enc++ = 0; /* end of object - 0x00 0x00 0x09 */ + *enc++ = AMF_OBJECT_END; + + /* add auth string */ + if (r->Link.auth.av_len) { + enc = AMF_EncodeBoolean(enc, pend, r->Link.lFlags & RTMP_LF_AUTH); + if (!enc) + return FALSE; + enc = 
AMF_EncodeString(enc, pend, &r->Link.auth); + if (!enc) + return FALSE; + } + if (r->Link.extras.o_num) { + int i; + for (i = 0; i < r->Link.extras.o_num; i++) { + enc = AMFProp_Encode(&r->Link.extras.o_props[i], enc, pend); + if (!enc) + return FALSE; + } + } + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +#if 0 /* unused */ +SAVC(bgHasStream); + +static int +SendBGHasStream(PILI_RTMP *r, double dId, AVal *playpath) +{ + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_bgHasStream); + enc = AMF_EncodeNumber(enc, pend, dId); + *enc++ = AMF_NULL; + + enc = AMF_EncodeString(enc, pend, playpath); + if (enc == NULL) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE); +} +#endif + +SAVC(createStream); + +int PILI_RTMP_SendCreateStream(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_createStream); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; /* NULL */ + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +SAVC(FCSubscribe); + +static int + SendFCSubscribe(PILI_RTMP *r, 
AVal *subscribepath, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[512], *pend = pbuf + sizeof(pbuf); + char *enc; + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + RTMP_Log(RTMP_LOGDEBUG, "FCSubscribe: %s", subscribepath->av_val); + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_FCSubscribe); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeString(enc, pend, subscribepath); + + if (!enc) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +SAVC(releaseStream); + +static int + SendReleaseStream(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_releaseStream); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeString(enc, pend, &r->Link.playpath); + if (!enc) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(FCPublish); + +static int + SendFCPublish(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + 
packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_FCPublish); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeString(enc, pend, &r->Link.playpath); + if (!enc) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(FCUnpublish); + +static int + SendFCUnpublish(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_FCUnpublish); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeString(enc, pend, &r->Link.playpath); + if (!enc) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(publish); +SAVC(live); +SAVC(record); + +static int + SendPublish(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x04; /* source channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = r->m_stream_id; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_publish); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeString(enc, pend, &r->Link.playpath); + if (!enc) + return FALSE; + + /* 
FIXME: should we choose live based on Link.lFlags & RTMP_LF_LIVE? */ + enc = AMF_EncodeString(enc, pend, &av_live); + if (!enc) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +SAVC(deleteStream); + +static int + SendDeleteStream(PILI_RTMP *r, double dStreamId, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_deleteStream); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeNumber(enc, pend, dStreamId); + + packet.m_nBodySize = enc - packet.m_body; + + /* no response expected */ + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(pause); + +int PILI_RTMP_SendPause(PILI_RTMP *r, int DoPause, int iTime, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x08; /* video channel */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* invoke */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_pause); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeBoolean(enc, pend, DoPause); + enc = AMF_EncodeNumber(enc, pend, (double)iTime); + + packet.m_nBodySize = enc - packet.m_body; + + RTMP_Log(RTMP_LOGDEBUG, "%s, %d, pauseTime=%d", __FUNCTION__, DoPause, iTime); + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +int 
PILI_RTMP_Pause(PILI_RTMP *r, int DoPause, RTMPError *error) { + if (DoPause) + r->m_pauseStamp = r->m_channelTimestamp[r->m_mediaChannel]; + return PILI_RTMP_SendPause(r, DoPause, r->m_pauseStamp, error); +} + +SAVC(seek); + +int PILI_RTMP_SendSeek(PILI_RTMP *r, int iTime, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x08; /* video channel */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* invoke */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_seek); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeNumber(enc, pend, (double)iTime); + + packet.m_nBodySize = enc - packet.m_body; + + r->m_read.flags |= RTMP_READ_SEEKING; + r->m_read.nResumeTS = 0; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +int PILI_RTMP_SendServerBW(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + + packet.m_nChannel = 0x02; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x05; /* Server BW */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + packet.m_nBodySize = 4; + + AMF_EncodeInt32(packet.m_body, pend, r->m_nServerBW); + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +int PILI_RTMP_SendClientBW(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + + packet.m_nChannel = 0x02; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x06; /* Client BW */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + 
packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + packet.m_nBodySize = 5; + + AMF_EncodeInt32(packet.m_body, pend, r->m_nClientBW); + packet.m_body[4] = r->m_nClientBW2; + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +static int + SendBytesReceived(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + + packet.m_nChannel = 0x02; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x03; /* bytes in */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + packet.m_nBodySize = 4; + + AMF_EncodeInt32(packet.m_body, pend, r->m_nBytesIn); /* hard coded for now */ + r->m_nBytesInSent = r->m_nBytesIn; + + /*RTMP_Log(RTMP_LOGDEBUG, "Send bytes report. 0x%x (%d bytes)", (unsigned int)m_nBytesIn, m_nBytesIn); */ + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(_checkbw); + +static int + SendCheckBW(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; /* RTMP_GetTime(); */ + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av__checkbw); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + + packet.m_nBodySize = enc - packet.m_body; + + /* triggers _onbwcheck and eventually results in _onbwdone */ + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(_result); + +static int + SendCheckBWResult(PILI_RTMP *r, double txn, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* 
control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0x16 * r->m_nBWCheckCounter; /* temp inc value. till we figure it out. */ + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av__result); + enc = AMF_EncodeNumber(enc, pend, txn); + *enc++ = AMF_NULL; + enc = AMF_EncodeNumber(enc, pend, (double)r->m_nBWCheckCounter++); + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(ping); +SAVC(pong); + +static int + SendPong(PILI_RTMP *r, double txn, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0x16 * r->m_nBWCheckCounter; /* temp inc value. till we figure it out. 
*/ + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_pong); + enc = AMF_EncodeNumber(enc, pend, txn); + *enc++ = AMF_NULL; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(play); + +static int + SendPlay(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x08; /* we make 8 our stream channel */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = r->m_stream_id; /*0x01000000; */ + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_play); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + + RTMP_Log(RTMP_LOGDEBUG, "%s, seekTime=%d, stopTime=%d, sending play: %s", + __FUNCTION__, r->Link.seekTime, r->Link.stopTime, + r->Link.playpath.av_val); + enc = AMF_EncodeString(enc, pend, &r->Link.playpath); + if (!enc) + return FALSE; + + /* Optional parameters start and len. 
+ * + * start: -2, -1, 0, positive number + * -2: looks for a live stream, then a recorded stream, + * if not found any open a live stream + * -1: plays a live stream + * >=0: plays a recorded streams from 'start' milliseconds + */ + if (r->Link.lFlags & RTMP_LF_LIVE) + enc = AMF_EncodeNumber(enc, pend, -1000.0); + else { + if (r->Link.seekTime > 0.0) + enc = AMF_EncodeNumber(enc, pend, r->Link.seekTime); /* resume from here */ + else + enc = AMF_EncodeNumber(enc, pend, 0.0); /*-2000.0);*/ /* recorded as default, -2000.0 is not reliable since that freezes the player if the stream is not found */ + } + if (!enc) + return FALSE; + + /* len: -1, 0, positive number + * -1: plays live or recorded stream to the end (default) + * 0: plays a frame 'start' ms away from the beginning + * >0: plays a live or recoded stream for 'len' milliseconds + */ + /*enc += EncodeNumber(enc, -1.0); */ /* len */ + if (r->Link.stopTime) { + enc = AMF_EncodeNumber(enc, pend, r->Link.stopTime - r->Link.seekTime); + if (!enc) + return FALSE; + } + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +SAVC(set_playlist); +SAVC(0); + +static int + SendPlaylist(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x08; /* we make 8 our stream channel */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = r->m_stream_id; /*0x01000000; */ + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_set_playlist); + enc = AMF_EncodeNumber(enc, pend, 0); + *enc++ = AMF_NULL; + *enc++ = AMF_ECMA_ARRAY; + *enc++ = 0; + *enc++ = 0; + *enc++ = 0; + *enc++ = AMF_OBJECT; + enc = AMF_EncodeNamedString(enc, pend, &av_0, &r->Link.playpath); + if (!enc) + return FALSE; + if (enc + 3 >= pend) + 
return FALSE; + *enc++ = 0; + *enc++ = 0; + *enc++ = AMF_OBJECT_END; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +static int + SendSecureTokenResponse(PILI_RTMP *r, AVal *resp, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_secureTokenResponse); + enc = AMF_EncodeNumber(enc, pend, 0.0); + *enc++ = AMF_NULL; + enc = AMF_EncodeString(enc, pend, resp); + if (!enc) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +/* +from http://jira.red5.org/confluence/display/docs/Ping: + +Ping is the most mysterious message in PILI_RTMP and till now we haven't fully interpreted it yet. In summary, Ping message is used as a special command that are exchanged between client and server. This page aims to document all known Ping messages. Expect the list to grow. + +The type of Ping packet is 0x4 and contains two mandatory parameters and two optional parameters. The first parameter is the type of Ping and in short integer. The second parameter is the target of the ping. As Ping is always sent in Channel 2 (control channel) and the target object in PILI_RTMP header is always 0 which means the Connection object, it's necessary to put an extra parameter to indicate the exact target object the Ping is sent to. The second parameter takes this responsibility. The value has the same meaning as the target object field in PILI_RTMP header. (The second value could also be used as other purposes, like RTT Ping/Pong. It is used as the timestamp.) 
The third and fourth parameters are optional and could be looked upon as the parameter of the Ping packet. Below is an unexhausted list of Ping messages. + + * type 0: Clear the stream. No third and fourth parameters. The second parameter could be 0. After the connection is established, a Ping 0,0 will be sent from server to client. The message will also be sent to client on the start of Play and in response of a Seek or Pause/Resume request. This Ping tells client to re-calibrate the clock with the timestamp of the next packet server sends. + * type 1: Tell the stream to clear the playing buffer. + * type 3: Buffer time of the client. The third parameter is the buffer time in millisecond. + * type 4: Reset a stream. Used together with type 0 in the case of VOD. Often sent before type 0. + * type 6: Ping the client from server. The second parameter is the current time. + * type 7: Pong reply from client. The second parameter is the time the server sent with his ping request. + * type 26: SWFVerification request + * type 27: SWFVerification response +*/ +int PILI_RTMP_SendCtrl(PILI_RTMP *r, short nType, unsigned int nObject, unsigned int nTime, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + int nSize; + char *buf; + + RTMP_Log(RTMP_LOGDEBUG, "sending ctrl. 
type: 0x%04x", (unsigned short)nType); + + packet.m_nChannel = 0x02; /* control channel (ping) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x04; /* ctrl */ + packet.m_nTimeStamp = 0; /* RTMP_GetTime(); */ + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + switch (nType) { + case 0x03: + nSize = 10; + break; /* buffer time */ + case 0x1A: + nSize = 3; + break; /* SWF verify request */ + case 0x1B: + nSize = 44; + break; /* SWF verify response */ + default: + nSize = 6; + break; + } + + packet.m_nBodySize = nSize; + + buf = packet.m_body; + buf = AMF_EncodeInt16(buf, pend, nType); + + if (nType == 0x1B) { +#ifdef CRYPTO + memcpy(buf, r->Link.SWFVerificationResponse, 42); + RTMP_Log(RTMP_LOGDEBUG, "Sending SWFVerification response: "); + RTMP_LogHex(RTMP_LOGDEBUG, (uint8_t *)packet.m_body, packet.m_nBodySize); +#endif + } else if (nType == 0x1A) { + *buf = nObject & 0xff; + } else { + if (nSize > 2) + buf = AMF_EncodeInt32(buf, pend, nObject); + + if (nSize > 6) + buf = AMF_EncodeInt32(buf, pend, nTime); + } + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +static void + AV_erase(PILI_RTMP_METHOD *vals, int *num, int i, int freeit) { + if (freeit) + free(vals[i].name.av_val); + (*num)--; + for (; i < *num; i++) { + vals[i] = vals[i + 1]; + } + vals[i].name.av_val = NULL; + vals[i].name.av_len = 0; + vals[i].num = 0; +} + +void PILI_RTMP_DropRequest(PILI_RTMP *r, int i, int freeit) { + AV_erase(r->m_methodCalls, &r->m_numCalls, i, freeit); +} + +static void + AV_queue(PILI_RTMP_METHOD **vals, int *num, AVal *av, int txn) { + char *tmp; + if (!(*num & 0x0f)) + *vals = realloc(*vals, (*num + 16) * sizeof(PILI_RTMP_METHOD)); + tmp = malloc(av->av_len + 1); + memcpy(tmp, av->av_val, av->av_len); + tmp[av->av_len] = '\0'; + (*vals)[*num].num = txn; + (*vals)[*num].name.av_len = av->av_len; + (*vals)[(*num)++].name.av_val = tmp; +} + +static void + 
AV_clear(PILI_RTMP_METHOD *vals, int num) { + int i; + for (i = 0; i < num; i++) + free(vals[i].name.av_val); + free(vals); +} + +SAVC(onBWDone); +SAVC(onFCSubscribe); +SAVC(onFCUnsubscribe); +SAVC(_onbwcheck); +SAVC(_onbwdone); +SAVC(_error); +SAVC(close); +SAVC(code); +SAVC(level); +SAVC(onStatus); +SAVC(playlist_ready); +static const AVal av_NetStream_Failed = AVC("NetStream.Failed"); +static const AVal av_NetStream_Play_Failed = AVC("NetStream.Play.Failed"); +static const AVal av_NetStream_Play_StreamNotFound = + AVC("NetStream.Play.StreamNotFound"); +static const AVal av_NetConnection_Connect_InvalidApp = + AVC("NetConnection.Connect.InvalidApp"); +static const AVal av_NetStream_Play_Start = AVC("NetStream.Play.Start"); +static const AVal av_NetStream_Play_Complete = AVC("NetStream.Play.Complete"); +static const AVal av_NetStream_Play_Stop = AVC("NetStream.Play.Stop"); +static const AVal av_NetStream_Seek_Notify = AVC("NetStream.Seek.Notify"); +static const AVal av_NetStream_Pause_Notify = AVC("NetStream.Pause.Notify"); +static const AVal av_NetStream_Play_UnpublishNotify = + AVC("NetStream.Play.UnpublishNotify"); +static const AVal av_NetStream_Publish_Start = AVC("NetStream.Publish.Start"); + +/* Returns 0 for OK/Failed/error, 1 for 'Stop or Complete' */ +static int + HandleInvoke(PILI_RTMP *r, const char *body, unsigned int nBodySize) { + AMFObject obj; + AVal method; + int txn; + int ret = 0, nRes; + if (body[0] != 0x02) /* make sure it is a string method name we start with */ + { + RTMP_Log(RTMP_LOGWARNING, "%s, Sanity failed. 
no string method in invoke packet", + __FUNCTION__); + return 0; + } + + nRes = AMF_Decode(&obj, body, nBodySize, FALSE); + if (nRes < 0) { + RTMP_Log(RTMP_LOGERROR, "%s, error decoding invoke packet", __FUNCTION__); + return 0; + } + + AMF_Dump(&obj); + AMFProp_GetString(AMF_GetProp(&obj, NULL, 0), &method); + txn = (int)AMFProp_GetNumber(AMF_GetProp(&obj, NULL, 1)); + RTMP_Log(RTMP_LOGDEBUG, "%s, server invoking <%s>", __FUNCTION__, method.av_val); + + RTMPError error = {0}; + + if (AVMATCH(&method, &av__result)) { + AVal methodInvoked = {0}; + int i; + + for (i = 0; i < r->m_numCalls; i++) { + if (r->m_methodCalls[i].num == txn) { + methodInvoked = r->m_methodCalls[i].name; + AV_erase(r->m_methodCalls, &r->m_numCalls, i, FALSE); + break; + } + } + if (!methodInvoked.av_val) { + RTMP_Log(RTMP_LOGDEBUG, "%s, received result id %d without matching request", + __FUNCTION__, txn); + goto leave; + } + + RTMP_Log(RTMP_LOGDEBUG, "%s, received result for method call <%s>", __FUNCTION__, + methodInvoked.av_val); + + if (AVMATCH(&methodInvoked, &av_connect)) { + if (r->Link.token.av_len) { + AMFObjectProperty p; + if (PILI_RTMP_FindFirstMatchingProperty(&obj, &av_secureToken, &p)) { + DecodeTEA(&r->Link.token, &p.p_vu.p_aval); + SendSecureTokenResponse(r, &p.p_vu.p_aval, &error); + } + } + if (r->Link.protocol & RTMP_FEATURE_WRITE) { + SendReleaseStream(r, &error); + SendFCPublish(r, &error); + } else { + PILI_RTMP_SendServerBW(r, &error); + PILI_RTMP_SendCtrl(r, 3, 0, 300, &error); + } + PILI_RTMP_SendCreateStream(r, &error); + + if (!(r->Link.protocol & RTMP_FEATURE_WRITE)) { + /* Send the FCSubscribe if live stream or if subscribepath is set */ + if (r->Link.subscribepath.av_len) + SendFCSubscribe(r, &r->Link.subscribepath, &error); + else if (r->Link.lFlags & RTMP_LF_LIVE) + SendFCSubscribe(r, &r->Link.playpath, &error); + } + } else if (AVMATCH(&methodInvoked, &av_createStream)) { + r->m_stream_id = (int)AMFProp_GetNumber(AMF_GetProp(&obj, NULL, 3)); + + if 
(r->Link.protocol & RTMP_FEATURE_WRITE) { + SendPublish(r, &error); + } else { + if (r->Link.lFlags & RTMP_LF_PLST) + SendPlaylist(r, &error); + SendPlay(r, &error); + PILI_RTMP_SendCtrl(r, 3, r->m_stream_id, r->m_nBufferMS, &error); + } + } else if (AVMATCH(&methodInvoked, &av_play) || + AVMATCH(&methodInvoked, &av_publish)) { + r->m_bPlaying = TRUE; + } + free(methodInvoked.av_val); + } else if (AVMATCH(&method, &av_onBWDone)) { + if (!r->m_nBWCheckCounter) + SendCheckBW(r, &error); + } else if (AVMATCH(&method, &av_onFCSubscribe)) { + /* SendOnFCSubscribe(); */ + } else if (AVMATCH(&method, &av_onFCUnsubscribe)) { + PILI_RTMP_Close(r, NULL); + ret = 1; + } else if (AVMATCH(&method, &av_ping)) { + SendPong(r, txn, &error); + } else if (AVMATCH(&method, &av__onbwcheck)) { + SendCheckBWResult(r, txn, &error); + } else if (AVMATCH(&method, &av__onbwdone)) { + int i; + for (i = 0; i < r->m_numCalls; i++) + if (AVMATCH(&r->m_methodCalls[i].name, &av__checkbw)) { + AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE); + break; + } + } else if (AVMATCH(&method, &av__error)) { + RTMP_Log(RTMP_LOGERROR, "PILI_RTMP server sent error"); + } else if (AVMATCH(&method, &av_close)) { + RTMP_Log(RTMP_LOGERROR, "PILI_RTMP server requested close"); + RTMPError error = {0}; + char *msg = "PILI_RTMP server requested close."; + RTMPError_Alloc(&error, strlen(msg)); + error.code = RTMPErrorServerRequestedClose; + strcpy(error.message, msg); + + PILI_RTMP_Close(r, &error); + + RTMPError_Free(&error); + } else if (AVMATCH(&method, &av_onStatus)) { + AMFObject obj2; + AVal code, level; + AMFProp_GetObject(AMF_GetProp(&obj, NULL, 3), &obj2); + AMFProp_GetString(AMF_GetProp(&obj2, &av_code, -1), &code); + AMFProp_GetString(AMF_GetProp(&obj2, &av_level, -1), &level); + + RTMP_Log(RTMP_LOGDEBUG, "%s, onStatus: %s", __FUNCTION__, code.av_val); + if (AVMATCH(&code, &av_NetStream_Failed) || AVMATCH(&code, &av_NetStream_Play_Failed) || AVMATCH(&code, &av_NetStream_Play_StreamNotFound) || 
AVMATCH(&code, &av_NetConnection_Connect_InvalidApp)) { + r->m_stream_id = -1; + + int err_code; + char msg[100]; + memset(msg, 0, 100); + + if (AVMATCH(&code, &av_NetStream_Failed)) { + err_code = RTMPErrorNetStreamFailed; + strcpy(msg, "NetStream failed."); + } else if (AVMATCH(&code, &av_NetStream_Play_Failed)) { + err_code = RTMPErrorNetStreamPlayFailed; + strcpy(msg, "NetStream play failed."); + } else if (AVMATCH(&code, &av_NetStream_Play_StreamNotFound)) { + err_code = RTMPErrorNetStreamPlayStreamNotFound; + strcpy(msg, "NetStream play stream not found."); + } else if (AVMATCH(&code, &av_NetConnection_Connect_InvalidApp)) { + err_code = RTMPErrorNetConnectionConnectInvalidApp; + strcpy(msg, "NetConnection connect invalip app."); + } else { + err_code = RTMPErrorUnknow; + strcpy(msg, "Unknow error."); + } + + RTMPError_Alloc(&error, strlen(msg)); + error.code = err_code; + strcpy(error.message, msg); + + PILI_RTMP_Close(r, &error); + + RTMPError_Free(&error); + + RTMP_Log(RTMP_LOGERROR, "Closing connection: %s", code.av_val); + } + + else if (AVMATCH(&code, &av_NetStream_Play_Start)) { + int i; + r->m_bPlaying = TRUE; + for (i = 0; i < r->m_numCalls; i++) { + if (AVMATCH(&r->m_methodCalls[i].name, &av_play)) { + AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE); + break; + } + } + } + + else if (AVMATCH(&code, &av_NetStream_Publish_Start)) { + int i; + r->m_bPlaying = TRUE; + for (i = 0; i < r->m_numCalls; i++) { + if (AVMATCH(&r->m_methodCalls[i].name, &av_publish)) { + AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE); + break; + } + } + } + + /* Return 1 if this is a Play.Complete or Play.Stop */ + else if (AVMATCH(&code, &av_NetStream_Play_Complete) || AVMATCH(&code, &av_NetStream_Play_Stop) || AVMATCH(&code, &av_NetStream_Play_UnpublishNotify)) { + PILI_RTMP_Close(r, NULL); + ret = 1; + } + + else if (AVMATCH(&code, &av_NetStream_Seek_Notify)) { + r->m_read.flags &= ~RTMP_READ_SEEKING; + } + + else if (AVMATCH(&code, &av_NetStream_Pause_Notify)) { + 
if (r->m_pausing == 1 || r->m_pausing == 2) { + PILI_RTMP_SendPause(r, FALSE, r->m_pauseStamp, &error); + r->m_pausing = 3; + } + } + } else if (AVMATCH(&method, &av_playlist_ready)) { + int i; + for (i = 0; i < r->m_numCalls; i++) { + if (AVMATCH(&r->m_methodCalls[i].name, &av_set_playlist)) { + AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE); + break; + } + } + } else { + } +leave: + AMF_Reset(&obj); + return ret; +} + +int PILI_RTMP_FindFirstMatchingProperty(AMFObject *obj, const AVal *name, + AMFObjectProperty *p) { + int n; + /* this is a small object search to locate the "duration" property */ + for (n = 0; n < obj->o_num; n++) { + AMFObjectProperty *prop = AMF_GetProp(obj, NULL, n); + + if (AVMATCH(&prop->p_name, name)) { + *p = *prop; + return TRUE; + } + + if (prop->p_type == AMF_OBJECT) { + if (PILI_RTMP_FindFirstMatchingProperty(&prop->p_vu.p_object, name, p)) + return TRUE; + } + } + return FALSE; +} + +/* Like above, but only check if name is a prefix of property */ +int PILI_RTMP_FindPrefixProperty(AMFObject *obj, const AVal *name, + AMFObjectProperty *p) { + int n; + for (n = 0; n < obj->o_num; n++) { + AMFObjectProperty *prop = AMF_GetProp(obj, NULL, n); + + if (prop->p_name.av_len > name->av_len && + !memcmp(prop->p_name.av_val, name->av_val, name->av_len)) { + *p = *prop; + return TRUE; + } + + if (prop->p_type == AMF_OBJECT) { + if (PILI_RTMP_FindPrefixProperty(&prop->p_vu.p_object, name, p)) + return TRUE; + } + } + return FALSE; +} + +static int + DumpMetaData(AMFObject *obj) { + AMFObjectProperty *prop; + int n; + for (n = 0; n < obj->o_num; n++) { + prop = AMF_GetProp(obj, NULL, n); + if (prop->p_type != AMF_OBJECT) { + char str[256] = ""; + switch (prop->p_type) { + case AMF_NUMBER: + snprintf(str, 255, "%.2f", prop->p_vu.p_number); + break; + case AMF_BOOLEAN: + snprintf(str, 255, "%s", + prop->p_vu.p_number != 0. ? 
"TRUE" : "FALSE"); + break; + case AMF_STRING: + snprintf(str, 255, "%.*s", prop->p_vu.p_aval.av_len, + prop->p_vu.p_aval.av_val); + break; + case AMF_DATE: + snprintf(str, 255, "timestamp:%.2f", prop->p_vu.p_number); + break; + default: + snprintf(str, 255, "INVALID TYPE 0x%02x", + (unsigned char)prop->p_type); + } + if (prop->p_name.av_len) { + /* chomp */ + if (strlen(str) >= 1 && str[strlen(str) - 1] == '\n') + str[strlen(str) - 1] = '\0'; + RTMP_Log(RTMP_LOGINFO, " %-22.*s%s", prop->p_name.av_len, + prop->p_name.av_val, str); + } + } else { + if (prop->p_name.av_len) + RTMP_Log(RTMP_LOGINFO, "%.*s:", prop->p_name.av_len, prop->p_name.av_val); + DumpMetaData(&prop->p_vu.p_object); + } + } + return FALSE; +} + +SAVC(onMetaData); +SAVC(duration); +SAVC(video); +SAVC(audio); + +static int + HandleMetadata(PILI_RTMP *r, char *body, unsigned int len) { + /* allright we get some info here, so parse it and print it */ + /* also keep duration or filesize to make a nice progress bar */ + + AMFObject obj; + AVal metastring; + int ret = FALSE; + + int nRes = AMF_Decode(&obj, body, len, FALSE); + if (nRes < 0) { + RTMP_Log(RTMP_LOGERROR, "%s, error decoding meta data packet", __FUNCTION__); + return FALSE; + } + + AMF_Dump(&obj); + AMFProp_GetString(AMF_GetProp(&obj, NULL, 0), &metastring); + + if (AVMATCH(&metastring, &av_onMetaData)) { + AMFObjectProperty prop; + /* Show metadata */ + RTMP_Log(RTMP_LOGINFO, "Metadata:"); + DumpMetaData(&obj); + if (PILI_RTMP_FindFirstMatchingProperty(&obj, &av_duration, &prop)) { + r->m_fDuration = prop.p_vu.p_number; + /*RTMP_Log(RTMP_LOGDEBUG, "Set duration: %.2f", m_fDuration); */ + } + /* Search for audio or video tags */ + if (PILI_RTMP_FindPrefixProperty(&obj, &av_video, &prop)) + r->m_read.dataType |= 1; + if (PILI_RTMP_FindPrefixProperty(&obj, &av_audio, &prop)) + r->m_read.dataType |= 4; + ret = TRUE; + } + AMF_Reset(&obj); + return ret; +} + +static void + HandleChangeChunkSize(PILI_RTMP *r, const PILI_RTMPPacket *packet) { + 
if (packet->m_nBodySize >= 4) { + r->m_inChunkSize = AMF_DecodeInt32(packet->m_body); + RTMP_Log(RTMP_LOGDEBUG, "%s, received: chunk size change to %d", __FUNCTION__, + r->m_inChunkSize); + } +} + +static void + HandleAudio(PILI_RTMP *r, const PILI_RTMPPacket *packet) { +} + +static void + HandleVideo(PILI_RTMP *r, const PILI_RTMPPacket *packet) { +} + +static void + HandleCtrl(PILI_RTMP *r, const PILI_RTMPPacket *packet) { + short nType = -1; + unsigned int tmp; + if (packet->m_body && packet->m_nBodySize >= 2) + nType = AMF_DecodeInt16(packet->m_body); + RTMP_Log(RTMP_LOGDEBUG, "%s, received ctrl. type: %d, len: %d", __FUNCTION__, nType, + packet->m_nBodySize); + /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ + + if (packet->m_nBodySize >= 6) { + switch (nType) { + case 0: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream Begin %d", __FUNCTION__, tmp); + break; + + case 1: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream EOF %d", __FUNCTION__, tmp); + if (r->m_pausing == 1) + r->m_pausing = 2; + break; + + case 2: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream Dry %d", __FUNCTION__, tmp); + break; + + case 4: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream IsRecorded %d", __FUNCTION__, tmp); + break; + + case 6: /* server ping. reply with pong. */ + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Ping %d", __FUNCTION__, tmp); + PILI_RTMP_SendCtrl(r, 0x07, tmp, 0, NULL); + break; + + /* FMS 3.5 servers send the following two controls to let the client + * know when the server has sent a complete buffer. I.e., when the + * server has sent an amount of data equal to m_nBufferMS in duration. + * The server meters its output so that data arrives at the client + * in realtime and no faster. 
+ * + * The rtmpdump program tries to set m_nBufferMS as large as + * possible, to force the server to send data as fast as possible. + * In practice, the server appears to cap this at about 1 hour's + * worth of data. After the server has sent a complete buffer, and + * sends this BufferEmpty message, it will wait until the play + * duration of that buffer has passed before sending a new buffer. + * The BufferReady message will be sent when the new buffer starts. + * (There is no BufferReady message for the very first buffer; + * presumably the Stream Begin message is sufficient for that + * purpose.) + * + * If the network speed is much faster than the data bitrate, then + * there may be long delays between the end of one buffer and the + * start of the next. + * + * Since usually the network allows data to be sent at + * faster than realtime, and rtmpdump wants to download the data + * as fast as possible, we use this RTMP_LF_BUFX hack: when we + * get the BufferEmpty message, we send a Pause followed by an + * Unpause. This causes the server to send the next buffer immediately + * instead of waiting for the full duration to elapse. (That's + * also the purpose of the ToggleStream function, which rtmpdump + * calls if we get a read timeout.) + * + * Media player apps don't need this hack since they are just + * going to play the data in realtime anyway. It also doesn't work + * for live streams since they obviously can only be sent in + * realtime. And it's all moot if the network speed is actually + * slower than the media bitrate. 
+ */ + case 31: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream BufferEmpty %d", __FUNCTION__, tmp); + if (!(r->Link.lFlags & RTMP_LF_BUFX)) + break; + if (!r->m_pausing) { + r->m_pauseStamp = r->m_channelTimestamp[r->m_mediaChannel]; + PILI_RTMP_SendPause(r, TRUE, r->m_pauseStamp, NULL); + r->m_pausing = 1; + } else if (r->m_pausing == 2) { + PILI_RTMP_SendPause(r, FALSE, r->m_pauseStamp, NULL); + r->m_pausing = 3; + } + break; + + case 32: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream BufferReady %d", __FUNCTION__, tmp); + break; + + default: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream xx %d", __FUNCTION__, tmp); + break; + } + } + + if (nType == 0x1A) { + RTMP_Log(RTMP_LOGDEBUG, "%s, SWFVerification ping received: ", __FUNCTION__); +#ifdef CRYPTO + /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ + + /* respond with HMAC SHA256 of decompressed SWF, key is the 30byte player key, also the last 30 bytes of the server handshake are applied */ + if (r->Link.SWFSize) { + PILI_RTMP_SendCtrl(r, 0x1B, 0, 0); + } else { + RTMP_Log(RTMP_LOGERROR, + "%s: Ignoring SWFVerification request, use --swfVfy!", + __FUNCTION__); + } +#else + RTMP_Log(RTMP_LOGERROR, + "%s: Ignoring SWFVerification request, no CRYPTO support!", + __FUNCTION__); +#endif + } +} + +static void + HandleServerBW(PILI_RTMP *r, const PILI_RTMPPacket *packet) { + r->m_nServerBW = AMF_DecodeInt32(packet->m_body); + RTMP_Log(RTMP_LOGDEBUG, "%s: server BW = %d", __FUNCTION__, r->m_nServerBW); +} + +static void + HandleClientBW(PILI_RTMP *r, const PILI_RTMPPacket *packet) { + r->m_nClientBW = AMF_DecodeInt32(packet->m_body); + if (packet->m_nBodySize > 4) + r->m_nClientBW2 = packet->m_body[4]; + else + r->m_nClientBW2 = -1; + RTMP_Log(RTMP_LOGDEBUG, "%s: client BW = %d %d", __FUNCTION__, r->m_nClientBW, + r->m_nClientBW2); +} + +static int + DecodeInt32LE(const char *data) { + unsigned char *c = 
(unsigned char *)data; + unsigned int val; + + val = (c[3] << 24) | (c[2] << 16) | (c[1] << 8) | c[0]; + return val; +} + +static int + EncodeInt32LE(char *output, int nVal) { + output[0] = nVal; + nVal >>= 8; + output[1] = nVal; + nVal >>= 8; + output[2] = nVal; + nVal >>= 8; + output[3] = nVal; + return 4; +} + +int PILI_RTMP_ReadPacket(PILI_RTMP *r, PILI_RTMPPacket *packet) { + uint8_t hbuf[RTMP_MAX_HEADER_SIZE] = {0}; + char *header = (char *)hbuf; + int nSize, hSize, nToRead, nChunk; + int didAlloc = FALSE; + + RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d", __FUNCTION__, r->m_sb.sb_socket); + + if (ReadN(r, (char *)hbuf, 1) == 0) { + RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet header", __FUNCTION__); + return FALSE; + } + + packet->m_headerType = (hbuf[0] & 0xc0) >> 6; + packet->m_nChannel = (hbuf[0] & 0x3f); + header++; + if (packet->m_nChannel == 0) { + if (ReadN(r, (char *)&hbuf[1], 1) != 1) { + RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet header 2nd byte", + __FUNCTION__); + return FALSE; + } + packet->m_nChannel = hbuf[1]; + packet->m_nChannel += 64; + header++; + } else if (packet->m_nChannel == 1) { + int tmp; + if (ReadN(r, (char *)&hbuf[1], 2) != 2) { + RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet header 3nd byte", + __FUNCTION__); + return FALSE; + } + tmp = (hbuf[2] << 8) + hbuf[1]; + packet->m_nChannel = tmp + 64; + RTMP_Log(RTMP_LOGDEBUG, "%s, m_nChannel: %0x", __FUNCTION__, packet->m_nChannel); + header += 2; + } + + nSize = packetSize[packet->m_headerType]; + + if (nSize == RTMP_LARGE_HEADER_SIZE) /* if we get a full header the timestamp is absolute */ + packet->m_hasAbsTimestamp = TRUE; + + else if (nSize < RTMP_LARGE_HEADER_SIZE) { /* using values from the last message of this channel */ + if (r->m_vecChannelsIn[packet->m_nChannel]) + memcpy(packet, r->m_vecChannelsIn[packet->m_nChannel], + sizeof(PILI_RTMPPacket)); + } + + nSize--; + + if (nSize > 0 && ReadN(r, header, nSize) != nSize) { + 
RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet header. type: %x", + __FUNCTION__, (unsigned int)hbuf[0]); + return FALSE; + } + + hSize = nSize + (header - (char *)hbuf); + + if (nSize >= 3) { + packet->m_nTimeStamp = AMF_DecodeInt24(header); + + /*RTMP_Log(RTMP_LOGDEBUG, "%s, reading PILI_RTMP packet chunk on channel %x, headersz %i, timestamp %i, abs timestamp %i", __FUNCTION__, packet.m_nChannel, nSize, packet.m_nTimeStamp, packet.m_hasAbsTimestamp); */ + + if (nSize >= 6) { + packet->m_nBodySize = AMF_DecodeInt24(header + 3); + packet->m_nBytesRead = 0; + PILI_RTMPPacket_Free(packet); + + if (nSize > 6) { + packet->m_packetType = header[6]; + + if (nSize == 11) + packet->m_nInfoField2 = DecodeInt32LE(header + 7); + } + } + if (packet->m_nTimeStamp == 0xffffff) { + if (ReadN(r, header + nSize, 4) != 4) { + RTMP_Log(RTMP_LOGERROR, "%s, failed to read extended timestamp", + __FUNCTION__); + return FALSE; + } + packet->m_nTimeStamp = AMF_DecodeInt32(header + nSize); + hSize += 4; + } + } + + RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)hbuf, hSize); + + if (packet->m_nBodySize > 0 && packet->m_body == NULL) { + if (!PILI_RTMPPacket_Alloc(packet, packet->m_nBodySize)) { + RTMP_Log(RTMP_LOGDEBUG, "%s, failed to allocate packet", __FUNCTION__); + return FALSE; + } + didAlloc = TRUE; + packet->m_headerType = (hbuf[0] & 0xc0) >> 6; + } + + nToRead = packet->m_nBodySize - packet->m_nBytesRead; + nChunk = r->m_inChunkSize; + if (nToRead < nChunk) + nChunk = nToRead; + + /* Does the caller want the raw chunk? */ + if (packet->m_chunk) { + packet->m_chunk->c_headerSize = hSize; + memcpy(packet->m_chunk->c_header, hbuf, hSize); + packet->m_chunk->c_chunk = packet->m_body + packet->m_nBytesRead; + packet->m_chunk->c_chunkSize = nChunk; + } + + if (ReadN(r, packet->m_body + packet->m_nBytesRead, nChunk) != nChunk) { + RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet body. 
len: %lu", + __FUNCTION__, packet->m_nBodySize); + return FALSE; + } + + RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)packet->m_body + packet->m_nBytesRead, nChunk); + + packet->m_nBytesRead += nChunk; + + /* keep the packet as ref for other packets on this channel */ + if (!r->m_vecChannelsIn[packet->m_nChannel]) + r->m_vecChannelsIn[packet->m_nChannel] = malloc(sizeof(PILI_RTMPPacket)); + memcpy(r->m_vecChannelsIn[packet->m_nChannel], packet, sizeof(PILI_RTMPPacket)); + + if (RTMPPacket_IsReady(packet)) { + /* make packet's timestamp absolute */ + if (!packet->m_hasAbsTimestamp) + packet->m_nTimeStamp += r->m_channelTimestamp[packet->m_nChannel]; /* timestamps seem to be always relative!! */ + + r->m_channelTimestamp[packet->m_nChannel] = packet->m_nTimeStamp; + + /* reset the data from the stored packet. we keep the header since we may use it later if a new packet for this channel */ + /* arrives and requests to re-use some info (small packet header) */ + r->m_vecChannelsIn[packet->m_nChannel]->m_body = NULL; + r->m_vecChannelsIn[packet->m_nChannel]->m_nBytesRead = 0; + r->m_vecChannelsIn[packet->m_nChannel]->m_hasAbsTimestamp = FALSE; /* can only be false if we reuse header */ + } else { + packet->m_body = NULL; /* so it won't be erased on free */ + } + + return TRUE; +} + +#ifndef CRYPTO +static int + HandShake(PILI_RTMP *r, int FP9HandShake, RTMPError *error) { + int i; + uint32_t uptime, suptime; + int bMatch; + char type; + char clientbuf[RTMP_SIG_SIZE + 1], *clientsig = clientbuf + 1; + char serversig[RTMP_SIG_SIZE]; + + clientbuf[0] = 0x03; /* not encrypted */ + + uptime = htonl(PILI_RTMP_GetTime()); + memcpy(clientsig, &uptime, 4); + + memset(&clientsig[4], 0, 4); + +#ifdef _DEBUG + for (i = 8; i < RTMP_SIG_SIZE; i++) + clientsig[i] = 0xff; +#else + for (i = 8; i < RTMP_SIG_SIZE; i++) + clientsig[i] = (char)(rand() % 256); +#endif + + if (!WriteN(r, clientbuf, RTMP_SIG_SIZE + 1, error)) + return FALSE; + + if (ReadN(r, &type, 1) != 1) /* 0x03 or 0x06 */ 
+ return FALSE; + + RTMP_Log(RTMP_LOGDEBUG, "%s: Type Answer : %02X", __FUNCTION__, type); + + if (type != clientbuf[0]) + RTMP_Log(RTMP_LOGWARNING, "%s: Type mismatch: client sent %d, server answered %d", + __FUNCTION__, clientbuf[0], type); + + if (ReadN(r, serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) + return FALSE; + + /* decode server response */ + + memcpy(&suptime, serversig, 4); + suptime = ntohl(suptime); + + RTMP_Log(RTMP_LOGDEBUG, "%s: Server Uptime : %d", __FUNCTION__, suptime); + RTMP_Log(RTMP_LOGDEBUG, "%s: FMS Version : %d.%d.%d.%d", __FUNCTION__, + serversig[4], serversig[5], serversig[6], serversig[7]); + + /* 2nd part of handshake */ + if (!WriteN(r, serversig, RTMP_SIG_SIZE, error)) + return FALSE; + + if (ReadN(r, serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) + return FALSE; + + bMatch = (memcmp(serversig, clientsig, RTMP_SIG_SIZE) == 0); + if (!bMatch) { + RTMP_Log(RTMP_LOGWARNING, "%s, client signature does not match!", __FUNCTION__); + } + return TRUE; +} + +static int + SHandShake(PILI_RTMP *r, RTMPError *error) { + int i; + char serverbuf[RTMP_SIG_SIZE + 1], *serversig = serverbuf + 1; + char clientsig[RTMP_SIG_SIZE]; + uint32_t uptime; + int bMatch; + + if (ReadN(r, serverbuf, 1) != 1) /* 0x03 or 0x06 */ + return FALSE; + + RTMP_Log(RTMP_LOGDEBUG, "%s: Type Request : %02X", __FUNCTION__, serverbuf[0]); + + if (serverbuf[0] != 3) { + RTMP_Log(RTMP_LOGERROR, "%s: Type unknown: client sent %02X", + __FUNCTION__, serverbuf[0]); + return FALSE; + } + + uptime = htonl(PILI_RTMP_GetTime()); + memcpy(serversig, &uptime, 4); + + memset(&serversig[4], 0, 4); +#ifdef _DEBUG + for (i = 8; i < RTMP_SIG_SIZE; i++) + serversig[i] = 0xff; +#else + for (i = 8; i < RTMP_SIG_SIZE; i++) + serversig[i] = (char)(rand() % 256); +#endif + + if (!WriteN(r, serverbuf, RTMP_SIG_SIZE + 1, error)) + return FALSE; + + if (ReadN(r, clientsig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) + return FALSE; + + /* decode client response */ + + memcpy(&uptime, clientsig, 4); + uptime = 
ntohl(uptime); + + RTMP_Log(RTMP_LOGDEBUG, "%s: Client Uptime : %d", __FUNCTION__, uptime); + RTMP_Log(RTMP_LOGDEBUG, "%s: Player Version: %d.%d.%d.%d", __FUNCTION__, + clientsig[4], clientsig[5], clientsig[6], clientsig[7]); + + /* 2nd part of handshake */ + if (!WriteN(r, clientsig, RTMP_SIG_SIZE, error)) + return FALSE; + + if (ReadN(r, clientsig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) + return FALSE; + + bMatch = (memcmp(serversig, clientsig, RTMP_SIG_SIZE) == 0); + if (!bMatch) { + RTMP_Log(RTMP_LOGWARNING, "%s, client signature does not match!", __FUNCTION__); + } + return TRUE; +} +#endif + +int PILI_RTMP_SendChunk(PILI_RTMP *r, PILI_RTMPChunk *chunk, RTMPError *error) { + int wrote; + char hbuf[RTMP_MAX_HEADER_SIZE]; + + RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d, size=%d", __FUNCTION__, r->m_sb.sb_socket, + chunk->c_chunkSize); + RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)chunk->c_header, chunk->c_headerSize); + if (chunk->c_chunkSize) { + char *ptr = chunk->c_chunk - chunk->c_headerSize; + RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)chunk->c_chunk, chunk->c_chunkSize); + /* save header bytes we're about to overwrite */ + memcpy(hbuf, ptr, chunk->c_headerSize); + memcpy(ptr, chunk->c_header, chunk->c_headerSize); + wrote = WriteN(r, ptr, chunk->c_headerSize + chunk->c_chunkSize, error); + memcpy(ptr, hbuf, chunk->c_headerSize); + } else + wrote = WriteN(r, chunk->c_header, chunk->c_headerSize, error); + return wrote; +} + +int PILI_RTMP_SendPacket(PILI_RTMP *r, PILI_RTMPPacket *packet, int queue, RTMPError *error) { + const PILI_RTMPPacket *prevPacket = r->m_vecChannelsOut[packet->m_nChannel]; + uint32_t last = 0; + int nSize; + int hSize, cSize; + char *header, *hptr, *hend, hbuf[RTMP_MAX_HEADER_SIZE], c; + uint32_t t; + char *buffer, *tbuf = NULL, *toff = NULL; + int nChunkSize; + int tlen; + + if (prevPacket && packet->m_headerType != RTMP_PACKET_SIZE_LARGE) { + /* compress a bit by using the prev packet's attributes */ + if (prevPacket->m_nBodySize == 
packet->m_nBodySize && prevPacket->m_packetType == packet->m_packetType && packet->m_headerType == RTMP_PACKET_SIZE_MEDIUM) + packet->m_headerType = RTMP_PACKET_SIZE_SMALL; + + if (prevPacket->m_nTimeStamp == packet->m_nTimeStamp && packet->m_headerType == RTMP_PACKET_SIZE_SMALL) + packet->m_headerType = RTMP_PACKET_SIZE_MINIMUM; + last = prevPacket->m_nTimeStamp; + } + + if (packet->m_headerType > 3) /* sanity */ + { + if (error) { + char *msg = "Sanity failed."; + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorSanityFailed; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "sanity failed!! trying to send header of type: 0x%02x.", + (unsigned char)packet->m_headerType); + + return FALSE; + } + + nSize = packetSize[packet->m_headerType]; + hSize = nSize; + cSize = 0; + t = packet->m_nTimeStamp - last; + + if (packet->m_body) { + header = packet->m_body - nSize; + hend = packet->m_body; + } else { + header = hbuf + 6; + hend = hbuf + sizeof(hbuf); + } + + if (packet->m_nChannel > 319) + cSize = 2; + else if (packet->m_nChannel > 63) + cSize = 1; + if (cSize) { + header -= cSize; + hSize += cSize; + } + + if (nSize > 1 && t >= 0xffffff) { + header -= 4; + hSize += 4; + } + + hptr = header; + c = packet->m_headerType << 6; + switch (cSize) { + case 0: + c |= packet->m_nChannel; + break; + case 1: + break; + case 2: + c |= 1; + break; + } + *hptr++ = c; + if (cSize) { + int tmp = packet->m_nChannel - 64; + *hptr++ = tmp & 0xff; + if (cSize == 2) + *hptr++ = tmp >> 8; + } + + if (nSize > 1) { + hptr = AMF_EncodeInt24(hptr, hend, t > 0xffffff ? 
0xffffff : t); + } + + if (nSize > 4) { + hptr = AMF_EncodeInt24(hptr, hend, packet->m_nBodySize); + *hptr++ = packet->m_packetType; + } + + if (nSize > 8) + hptr += EncodeInt32LE(hptr, packet->m_nInfoField2); + + if (nSize > 1 && t >= 0xffffff) + hptr = AMF_EncodeInt32(hptr, hend, t); + + nSize = packet->m_nBodySize; + buffer = packet->m_body; + nChunkSize = r->m_outChunkSize; + + RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d, size=%d", __FUNCTION__, r->m_sb.sb_socket, + nSize); + /* send all chunks in one HTTP request */ + if (r->Link.protocol & RTMP_FEATURE_HTTP) { + int chunks = (nSize + nChunkSize - 1) / nChunkSize; + if (chunks > 1) { + tlen = chunks * (cSize + 1) + nSize + hSize; + tbuf = malloc(tlen); + if (!tbuf) + return FALSE; + toff = tbuf; + } + } + while (nSize + hSize) { + int wrote; + + if (nSize < nChunkSize) + nChunkSize = nSize; + + RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)header, hSize); + RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)buffer, nChunkSize); + if (tbuf) { + memcpy(toff, header, nChunkSize + hSize); + toff += nChunkSize + hSize; + } else { + wrote = WriteN(r, header, nChunkSize + hSize, error); + if (!wrote) + return FALSE; + } + nSize -= nChunkSize; + buffer += nChunkSize; + hSize = 0; + + if (nSize > 0) { + header = buffer - 1; + hSize = 1; + if (cSize) { + header -= cSize; + hSize += cSize; + } + *header = (0xc0 | c); + if (cSize) { + int tmp = packet->m_nChannel - 64; + header[1] = tmp & 0xff; + if (cSize == 2) + header[2] = tmp >> 8; + } + } + } + if (tbuf) { + int wrote = WriteN(r, tbuf, toff - tbuf, error); + free(tbuf); + tbuf = NULL; + if (!wrote) + return FALSE; + } + + /* we invoked a remote method */ + if (packet->m_packetType == 0x14) { + AVal method; + char *ptr; + ptr = packet->m_body + 1; + AMF_DecodeString(ptr, &method); + RTMP_Log(RTMP_LOGDEBUG, "Invoking %s", method.av_val); + /* keep it in call queue till result arrives */ + if (queue) { + int txn; + ptr += 3 + method.av_len; + txn = (int)AMF_DecodeNumber(ptr); + 
AV_queue(&r->m_methodCalls, &r->m_numCalls, &method, txn); + } + } + + if (!r->m_vecChannelsOut[packet->m_nChannel]) + r->m_vecChannelsOut[packet->m_nChannel] = malloc(sizeof(PILI_RTMPPacket)); + memcpy(r->m_vecChannelsOut[packet->m_nChannel], packet, sizeof(PILI_RTMPPacket)); + return TRUE; +} + +int PILI_RTMP_Serve(PILI_RTMP *r, RTMPError *error) { + return SHandShake(r, error); +} + +void PILI_RTMP_Close(PILI_RTMP *r, RTMPError *error) { + if (r->m_is_closing) { + return; + } + r->m_is_closing = 1; + int i; + if (PILI_RTMP_IsConnected(r)) { + if (r->m_stream_id > 0) { + if ((r->Link.protocol & RTMP_FEATURE_WRITE)) + SendFCUnpublish(r, NULL); + i = r->m_stream_id; + r->m_stream_id = 0; + SendDeleteStream(r, i, NULL); + } + if (r->m_clientID.av_val) { + HTTP_Post(r, RTMPT_CLOSE, "", 1); + free(r->m_clientID.av_val); + r->m_clientID.av_val = NULL; + r->m_clientID.av_len = 0; + } + PILI_RTMPSockBuf_Close(&r->m_sb); + + if (error && r->m_errorCallback) { + r->m_errorCallback(error, r->m_userData); + } + } + + r->m_stream_id = -1; + r->m_sb.sb_socket = -1; + r->m_nBWCheckCounter = 0; + r->m_nBytesIn = 0; + r->m_nBytesInSent = 0; + + if (r->m_read.flags & RTMP_READ_HEADER) { + free(r->m_read.buf); + r->m_read.buf = NULL; + } + r->m_read.dataType = 0; + r->m_read.flags = 0; + r->m_read.status = 0; + r->m_read.nResumeTS = 0; + r->m_read.nIgnoredFrameCounter = 0; + r->m_read.nIgnoredFlvFrameCounter = 0; + + r->m_write.m_nBytesRead = 0; + PILI_RTMPPacket_Free(&r->m_write); + + for (i = 0; i < RTMP_CHANNELS; i++) { + if (r->m_vecChannelsIn[i]) { + PILI_RTMPPacket_Free(r->m_vecChannelsIn[i]); + free(r->m_vecChannelsIn[i]); + r->m_vecChannelsIn[i] = NULL; + } + if (r->m_vecChannelsOut[i]) { + free(r->m_vecChannelsOut[i]); + r->m_vecChannelsOut[i] = NULL; + } + } + AV_clear(r->m_methodCalls, r->m_numCalls); + r->m_methodCalls = NULL; + r->m_numCalls = 0; + r->m_numInvokes = 0; + + r->m_bPlaying = FALSE; + r->m_sb.sb_size = 0; + + r->m_msgCounter = 0; + r->m_resplen = 0; + 
r->m_unackd = 0; + + free(r->Link.playpath0.av_val); + r->Link.playpath0.av_val = NULL; + + if (r->Link.lFlags & RTMP_LF_FTCU) { + free(r->Link.tcUrl.av_val); + r->Link.tcUrl.av_val = NULL; + r->Link.tcUrl.av_len = 0; + r->Link.lFlags ^= RTMP_LF_FTCU; + } + +#ifdef CRYPTO + if (r->Link.dh) { + MDH_free(r->Link.dh); + r->Link.dh = NULL; + } + if (r->Link.rc4keyIn) { + RC4_free(r->Link.rc4keyIn); + r->Link.rc4keyIn = NULL; + } + if (r->Link.rc4keyOut) { + RC4_free(r->Link.rc4keyOut); + r->Link.rc4keyOut = NULL; + } +#endif +} + +int PILI_RTMPSockBuf_Fill(PILI_RTMPSockBuf *sb, int timeout) { + int nBytes; + + if (!sb->sb_size) + sb->sb_start = sb->sb_buf; + +#ifdef RTMP_FEATURE_NONBLOCK + SET_RCVTIMEO(tv, timeout); + fd_set rfds; +#endif + while (1) { +#ifdef RTMP_FEATURE_NONBLOCK + FD_ZERO(&rfds); + FD_SET(sb->sb_socket, &rfds); + int ret = select(sb->sb_socket + 1, &rfds, NULL, NULL, &tv); + if (ret < 0) { + int sockerr = GetSockError(); + RTMP_Log(RTMP_LOGDEBUG, "%s, recv select error. GetSockError(): %d (%s)", + __FUNCTION__, sockerr, strerror(sockerr)); + if (sockerr == EINTR && !PILI_RTMP_ctrlC) + continue; + + sb->sb_timedout = TRUE; + nBytes = 0; + break; + } else if (ret == 0) { + RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP recv error select timeout %d", __FUNCTION__, timeout); + sb->sb_timedout = TRUE; + nBytes = 0; + break; + } else if (!FD_ISSET(sb->sb_socket, &rfds)) { + sb->sb_timedout = TRUE; + nBytes = 0; + break; + } +#endif + + nBytes = sizeof(sb->sb_buf) - sb->sb_size - (sb->sb_start - sb->sb_buf); +#if defined(CRYPTO) && !defined(NO_SSL) + if (sb->sb_ssl) { + nBytes = TLS_read(sb->sb_ssl, sb->sb_start + sb->sb_size, nBytes); + } else +#endif + { + nBytes = recv(sb->sb_socket, sb->sb_start + sb->sb_size, nBytes, 0); + } + if (nBytes != -1) { + sb->sb_size += nBytes; + } else { + int sockerr = GetSockError(); + RTMP_Log(RTMP_LOGDEBUG, "%s, recv returned %d. 
GetSockError(): %d (%s)", + __FUNCTION__, nBytes, sockerr, strerror(sockerr)); + if (sockerr == EINTR && !PILI_RTMP_ctrlC) + continue; + + if (sockerr == EWOULDBLOCK || sockerr == EAGAIN) { +#ifdef RTMP_FEATURE_NONBLOCK + continue; +#else + sb->sb_timedout = TRUE; + nBytes = 0; +#endif + } + } + break; + } + + return nBytes; +} + +int PILI_RTMPSockBuf_Send(PILI_RTMPSockBuf *sb, const char *buf, int len) { + int rc; + +#ifdef _DEBUG + fwrite(buf, 1, len, netstackdump); +#endif + +#if defined(CRYPTO) && !defined(NO_SSL) + if (sb->sb_ssl) { + rc = TLS_write(sb->sb_ssl, buf, len); + } else +#endif + { + rc = send(sb->sb_socket, buf, len, 0); + } + return rc; +} + +int PILI_RTMPSockBuf_Close(PILI_RTMPSockBuf *sb) { +#if defined(CRYPTO) && !defined(NO_SSL) + if (sb->sb_ssl) { + TLS_shutdown(sb->sb_ssl); + TLS_close(sb->sb_ssl); + sb->sb_ssl = NULL; + } +#endif + return closesocket(sb->sb_socket); +} + +#define HEX2BIN(a) (((a)&0x40) ? ((a)&0xf) + 9 : ((a)&0xf)) + +static void + DecodeTEA(AVal *key, AVal *text) { + uint32_t *v, k[4] = {0}, u; + uint32_t z, y, sum = 0, e, DELTA = 0x9e3779b9; + int32_t p, q; + int i, n; + unsigned char *ptr, *out; + + /* prep key: pack 1st 16 chars into 4 LittleEndian ints */ + ptr = (unsigned char *)key->av_val; + u = 0; + n = 0; + v = k; + p = key->av_len > 16 ? 
16 : key->av_len; + for (i = 0; i < p; i++) { + u |= ptr[i] << (n * 8); + if (n == 3) { + *v++ = u; + u = 0; + n = 0; + } else { + n++; + } + } + /* any trailing chars */ + if (u) + *v = u; + + /* prep text: hex2bin, multiples of 4 */ + n = (text->av_len + 7) / 8; + out = malloc(n * 8); + ptr = (unsigned char *)text->av_val; + v = (uint32_t *)out; + for (i = 0; i < n; i++) { + u = (HEX2BIN(ptr[0]) << 4) + HEX2BIN(ptr[1]); + u |= ((HEX2BIN(ptr[2]) << 4) + HEX2BIN(ptr[3])) << 8; + u |= ((HEX2BIN(ptr[4]) << 4) + HEX2BIN(ptr[5])) << 16; + u |= ((HEX2BIN(ptr[6]) << 4) + HEX2BIN(ptr[7])) << 24; + *v++ = u; + ptr += 8; + } + v = (uint32_t *)out; + +/* http://www.movable-type.co.uk/scripts/tea-block.html */ +#define MX (((z >> 5) ^ (y << 2)) + ((y >> 3) ^ (z << 4))) ^ ((sum ^ y) + (k[(p & 3) ^ e] ^ z)); + z = v[n - 1]; + y = v[0]; + q = 6 + 52 / n; + sum = q * DELTA; + while (sum != 0) { + e = sum >> 2 & 3; + for (p = n - 1; p > 0; p--) + z = v[p - 1], y = v[p] -= MX; + z = v[n - 1]; + y = v[0] -= MX; + sum -= DELTA; + } + + text->av_len /= 2; + memcpy(text->av_val, out, text->av_len); + free(out); +} + +static int + HTTP_Post(PILI_RTMP *r, RTMPTCmd cmd, const char *buf, int len) { + char hbuf[512]; + int hlen = snprintf(hbuf, sizeof(hbuf), "POST /%s%s/%d HTTP/1.1\r\n" + "Host: %.*s:%d\r\n" + "Accept: */*\r\n" + "User-Agent: Shockwave Flash\n" + "Connection: Keep-Alive\n" + "Cache-Control: no-cache\r\n" + "Content-type: application/x-fcs\r\n" + "Content-length: %d\r\n\r\n", + RTMPT_cmds[cmd], + r->m_clientID.av_val ? 
r->m_clientID.av_val : "", + r->m_msgCounter, r->Link.hostname.av_len, r->Link.hostname.av_val, + r->Link.port, len); + PILI_RTMPSockBuf_Send(&r->m_sb, hbuf, hlen); + hlen = PILI_RTMPSockBuf_Send(&r->m_sb, buf, len); + r->m_msgCounter++; + r->m_unackd++; + return hlen; +} + +static int + HTTP_read(PILI_RTMP *r, int fill) { + char *ptr; + int hlen; + + if (fill) + PILI_RTMPSockBuf_Fill(&r->m_sb, r->Link.timeout); + if (r->m_sb.sb_size < 144) + return -1; + if (strncmp(r->m_sb.sb_start, "HTTP/1.1 200 ", 13)) + return -1; + ptr = strstr(r->m_sb.sb_start, "Content-Length:"); + if (!ptr) + return -1; + hlen = atoi(ptr + 16); + ptr = strstr(ptr, "\r\n\r\n"); + if (!ptr) + return -1; + ptr += 4; + r->m_sb.sb_size -= ptr - r->m_sb.sb_start; + r->m_sb.sb_start = ptr; + r->m_unackd--; + + if (!r->m_clientID.av_val) { + r->m_clientID.av_len = hlen; + r->m_clientID.av_val = malloc(hlen + 1); + if (!r->m_clientID.av_val) + return -1; + r->m_clientID.av_val[0] = '/'; + memcpy(r->m_clientID.av_val + 1, ptr, hlen - 1); + r->m_clientID.av_val[hlen] = 0; + r->m_sb.sb_size = 0; + } else { + r->m_polling = *ptr++; + r->m_resplen = hlen - 1; + r->m_sb.sb_start++; + r->m_sb.sb_size--; + } + return 0; +} + +#define MAX_IGNORED_FRAMES 50 + +/* Read from the stream until we get a media packet. 
+ * Returns -3 if Play.Close/Stop, -2 if fatal error, -1 if no more media + * packets, 0 if ignorable error, >0 if there is a media packet + */ +static int + Read_1_Packet(PILI_RTMP *r, char *buf, unsigned int buflen) { + uint32_t prevTagSize = 0; + int rtnGetNextMediaPacket = 0, ret = RTMP_READ_EOF; + PILI_RTMPPacket packet = {0}; + int recopy = FALSE; + unsigned int size; + char *ptr, *pend; + uint32_t nTimeStamp = 0; + unsigned int len; + + rtnGetNextMediaPacket = PILI_RTMP_GetNextMediaPacket(r, &packet); + while (rtnGetNextMediaPacket) { + char *packetBody = packet.m_body; + unsigned int nPacketLen = packet.m_nBodySize; + + /* Return -3 if this was completed nicely with invoke message + * Play.Stop or Play.Complete + */ + if (rtnGetNextMediaPacket == 2) { + RTMP_Log(RTMP_LOGDEBUG, + "Got Play.Complete or Play.Stop from server. " + "Assuming stream is complete"); + ret = RTMP_READ_COMPLETE; + break; + } + + r->m_read.dataType |= (((packet.m_packetType == 0x08) << 2) | + (packet.m_packetType == 0x09)); + + if (packet.m_packetType == 0x09 && nPacketLen <= 5) { + RTMP_Log(RTMP_LOGDEBUG, "ignoring too small video packet: size: %d", + nPacketLen); + ret = RTMP_READ_IGNORE; + break; + } + if (packet.m_packetType == 0x08 && nPacketLen <= 1) { + RTMP_Log(RTMP_LOGDEBUG, "ignoring too small audio packet: size: %d", + nPacketLen); + ret = RTMP_READ_IGNORE; + break; + } + + if (r->m_read.flags & RTMP_READ_SEEKING) { + ret = RTMP_READ_IGNORE; + break; + } +#ifdef _DEBUG + RTMP_Log(RTMP_LOGDEBUG, "type: %02X, size: %d, TS: %d ms, abs TS: %d", + packet.m_packetType, nPacketLen, packet.m_nTimeStamp, + packet.m_hasAbsTimestamp); + if (packet.m_packetType == 0x09) + RTMP_Log(RTMP_LOGDEBUG, "frametype: %02X", (*packetBody & 0xf0)); +#endif + + if (r->m_read.flags & RTMP_READ_RESUME) { + /* check the header if we get one */ + if (packet.m_nTimeStamp == 0) { + if (r->m_read.nMetaHeaderSize > 0 && packet.m_packetType == 0x12) { + AMFObject metaObj; + int nRes = + AMF_Decode(&metaObj, 
packetBody, nPacketLen, FALSE); + if (nRes >= 0) { + AVal metastring; + AMFProp_GetString(AMF_GetProp(&metaObj, NULL, 0), + &metastring); + + if (AVMATCH(&metastring, &av_onMetaData)) { + /* compare */ + if ((r->m_read.nMetaHeaderSize != nPacketLen) || + (memcmp(r->m_read.metaHeader, packetBody, + r->m_read.nMetaHeaderSize) != 0)) { + ret = RTMP_READ_ERROR; + } + } + AMF_Reset(&metaObj); + if (ret == RTMP_READ_ERROR) + break; + } + } + + /* check first keyframe to make sure we got the right position + * in the stream! (the first non ignored frame) + */ + if (r->m_read.nInitialFrameSize > 0) { + /* video or audio data */ + if (packet.m_packetType == r->m_read.initialFrameType && r->m_read.nInitialFrameSize == nPacketLen) { + /* we don't compare the sizes since the packet can + * contain several FLV packets, just make sure the + * first frame is our keyframe (which we are going + * to rewrite) + */ + if (memcmp(r->m_read.initialFrame, packetBody, + r->m_read.nInitialFrameSize) == 0) { + RTMP_Log(RTMP_LOGDEBUG, "Checked keyframe successfully!"); + r->m_read.flags |= RTMP_READ_GOTKF; + /* ignore it! (what about audio data after it? it is + * handled by ignoring all 0ms frames, see below) + */ + ret = RTMP_READ_IGNORE; + break; + } + } + + /* hande FLV streams, even though the server resends the + * keyframe as an extra video packet it is also included + * in the first FLV stream chunk and we have to compare + * it and filter it out !! 
+ */ + if (packet.m_packetType == 0x16) { + /* basically we have to find the keyframe with the + * correct TS being nResumeTS + */ + unsigned int pos = 0; + uint32_t ts = 0; + + while (pos + 11 < nPacketLen) { + /* size without header (11) and prevTagSize (4) */ + uint32_t dataSize = + AMF_DecodeInt24(packetBody + pos + 1); + ts = AMF_DecodeInt24(packetBody + pos + 4); + ts |= (packetBody[pos + 7] << 24); + +#ifdef _DEBUG + RTMP_Log(RTMP_LOGDEBUG, + "keyframe search: FLV Packet: type %02X, dataSize: %d, timeStamp: %d ms", + packetBody[pos], dataSize, ts); +#endif + /* ok, is it a keyframe?: + * well doesn't work for audio! + */ + if (packetBody[pos /*6928, test 0 */] == + r->m_read.initialFrameType + /* && (packetBody[11]&0xf0) == 0x10 */) { + if (ts == r->m_read.nResumeTS) { + RTMP_Log(RTMP_LOGDEBUG, + "Found keyframe with resume-keyframe timestamp!"); + if (r->m_read.nInitialFrameSize != dataSize || memcmp(r->m_read.initialFrame, packetBody + pos + 11, r->m_read.nInitialFrameSize) != 0) { + RTMP_Log(RTMP_LOGERROR, + "FLV Stream: Keyframe doesn't match!"); + ret = RTMP_READ_ERROR; + break; + } + r->m_read.flags |= RTMP_READ_GOTFLVK; + + /* skip this packet? 
+ * check whether skippable: + */ + if (pos + 11 + dataSize + 4 > nPacketLen) { + RTMP_Log(RTMP_LOGWARNING, + "Non skipable packet since it doesn't end with chunk, stream corrupt!"); + ret = RTMP_READ_ERROR; + break; + } + packetBody += (pos + 11 + dataSize + 4); + nPacketLen -= (pos + 11 + dataSize + 4); + + goto stopKeyframeSearch; + + } else if (r->m_read.nResumeTS < ts) { + /* the timestamp ts will only increase with + * further packets, wait for seek + */ + goto stopKeyframeSearch; + } + } + pos += (11 + dataSize + 4); + } + if (ts < r->m_read.nResumeTS) { + RTMP_Log(RTMP_LOGERROR, + "First packet does not contain keyframe, all " + "timestamps are smaller than the keyframe " + "timestamp; probably the resume seek failed?"); + } + stopKeyframeSearch:; + if (!(r->m_read.flags & RTMP_READ_GOTFLVK)) { + RTMP_Log(RTMP_LOGERROR, + "Couldn't find the seeked keyframe in this chunk!"); + ret = RTMP_READ_IGNORE; + break; + } + } + } + } + + if (packet.m_nTimeStamp > 0 && (r->m_read.flags & (RTMP_READ_GOTKF | RTMP_READ_GOTFLVK))) { + /* another problem is that the server can actually change from + * 09/08 video/audio packets to an FLV stream or vice versa and + * our keyframe check will prevent us from going along with the + * new stream if we resumed. + * + * in this case set the 'found keyframe' variables to true. + * We assume that if we found one keyframe somewhere and were + * already beyond TS > 0 we have written data to the output + * which means we can accept all forthcoming data including the + * change between 08/09 <-> FLV packets + */ + r->m_read.flags |= (RTMP_READ_GOTKF | RTMP_READ_GOTFLVK); + } + + /* skip till we find our keyframe + * (seeking might put us somewhere before it) + */ + if (!(r->m_read.flags & RTMP_READ_GOTKF) && + packet.m_packetType != 0x16) { + RTMP_Log(RTMP_LOGWARNING, + "Stream does not start with requested frame, ignoring data... 
"); + r->m_read.nIgnoredFrameCounter++; + if (r->m_read.nIgnoredFrameCounter > MAX_IGNORED_FRAMES) + ret = RTMP_READ_ERROR; /* fatal error, couldn't continue stream */ + else + ret = RTMP_READ_IGNORE; + break; + } + /* ok, do the same for FLV streams */ + if (!(r->m_read.flags & RTMP_READ_GOTFLVK) && + packet.m_packetType == 0x16) { + RTMP_Log(RTMP_LOGWARNING, + "Stream does not start with requested FLV frame, ignoring data... "); + r->m_read.nIgnoredFlvFrameCounter++; + if (r->m_read.nIgnoredFlvFrameCounter > MAX_IGNORED_FRAMES) + ret = RTMP_READ_ERROR; + else + ret = RTMP_READ_IGNORE; + break; + } + + /* we have to ignore the 0ms frames since these are the first + * keyframes; we've got these so don't mess around with multiple + * copies sent by the server to us! (if the keyframe is found at a + * later position there is only one copy and it will be ignored by + * the preceding if clause) + */ + if (!(r->m_read.flags & RTMP_READ_NO_IGNORE) && + packet.m_packetType != 0x16) { /* exclude type 0x16 (FLV) since it can + * contain several FLV packets */ + if (packet.m_nTimeStamp == 0) { + ret = RTMP_READ_IGNORE; + break; + } else { + /* stop ignoring packets */ + r->m_read.flags |= RTMP_READ_NO_IGNORE; + } + } + } + + /* calculate packet size and allocate slop buffer if necessary */ + size = nPacketLen + + ((packet.m_packetType == 0x08 || packet.m_packetType == 0x09 || packet.m_packetType == 0x12) ? 11 : 0) + + (packet.m_packetType != 0x16 ? 
4 : 0); + + if (size + 4 > buflen) { + /* the extra 4 is for the case of an FLV stream without a last + * prevTagSize (we need extra 4 bytes to append it) */ + r->m_read.buf = malloc(size + 4); + if (r->m_read.buf == 0) { + RTMP_Log(RTMP_LOGERROR, "Couldn't allocate memory!"); + ret = RTMP_READ_ERROR; /* fatal error */ + break; + } + recopy = TRUE; + ptr = r->m_read.buf; + } else { + ptr = buf; + } + pend = ptr + size + 4; + + /* use to return timestamp of last processed packet */ + + /* audio (0x08), video (0x09) or metadata (0x12) packets : + * construct 11 byte header then add PILI_RTMP packet's data */ + if (packet.m_packetType == 0x08 || packet.m_packetType == 0x09 || packet.m_packetType == 0x12) { + nTimeStamp = r->m_read.nResumeTS + packet.m_nTimeStamp; + prevTagSize = 11 + nPacketLen; + + *ptr = packet.m_packetType; + ptr++; + ptr = AMF_EncodeInt24(ptr, pend, nPacketLen); + +#if 0 + if(packet.m_packetType == 0x09) { /* video */ + + /* H264 fix: */ + if((packetBody[0] & 0x0f) == 7) { /* CodecId = H264 */ + uint8_t packetType = *(packetBody+1); + + uint32_t ts = AMF_DecodeInt24(packetBody+2); /* composition time */ + int32_t cts = (ts+0xff800000)^0xff800000; + RTMP_Log(RTMP_LOGDEBUG, "cts : %d\n", cts); + + nTimeStamp -= cts; + /* get rid of the composition time */ + CRTMP::EncodeInt24(packetBody+2, 0); + } + RTMP_Log(RTMP_LOGDEBUG, "VIDEO: nTimeStamp: 0x%08X (%d)\n", nTimeStamp, nTimeStamp); + } +#endif + + ptr = AMF_EncodeInt24(ptr, pend, nTimeStamp); + *ptr = (char)((nTimeStamp & 0xFF000000) >> 24); + ptr++; + + /* stream id */ + ptr = AMF_EncodeInt24(ptr, pend, 0); + } + + memcpy(ptr, packetBody, nPacketLen); + len = nPacketLen; + + /* correct tagSize and obtain timestamp if we have an FLV stream */ + if (packet.m_packetType == 0x16) { + unsigned int pos = 0; + int delta; + + /* grab first timestamp and see if it needs fixing */ + nTimeStamp = AMF_DecodeInt24(packetBody + 4); + nTimeStamp |= (packetBody[7] << 24); + delta = packet.m_nTimeStamp - 
nTimeStamp; + + while (pos + 11 < nPacketLen) { + /* size without header (11) and without prevTagSize (4) */ + uint32_t dataSize = AMF_DecodeInt24(packetBody + pos + 1); + nTimeStamp = AMF_DecodeInt24(packetBody + pos + 4); + nTimeStamp |= (packetBody[pos + 7] << 24); + + if (delta) { + nTimeStamp += delta; + AMF_EncodeInt24(ptr + pos + 4, pend, nTimeStamp); + ptr[pos + 7] = nTimeStamp >> 24; + } + + /* set data type */ + r->m_read.dataType |= (((*(packetBody + pos) == 0x08) << 2) | + (*(packetBody + pos) == 0x09)); + + if (pos + 11 + dataSize + 4 > nPacketLen) { + if (pos + 11 + dataSize > nPacketLen) { + RTMP_Log(RTMP_LOGERROR, + "Wrong data size (%lu), stream corrupted, aborting!", + dataSize); + ret = RTMP_READ_ERROR; + break; + } + RTMP_Log(RTMP_LOGWARNING, "No tagSize found, appending!"); + + /* we have to append a last tagSize! */ + prevTagSize = dataSize + 11; + AMF_EncodeInt32(ptr + pos + 11 + dataSize, pend, + prevTagSize); + size += 4; + len += 4; + } else { + prevTagSize = + AMF_DecodeInt32(packetBody + pos + 11 + dataSize); + +#ifdef _DEBUG + RTMP_Log(RTMP_LOGDEBUG, + "FLV Packet: type %02X, dataSize: %lu, tagSize: %lu, timeStamp: %lu ms", + (unsigned char)packetBody[pos], dataSize, prevTagSize, + nTimeStamp); +#endif + + if (prevTagSize != (dataSize + 11)) { +#ifdef _DEBUG + RTMP_Log(RTMP_LOGWARNING, + "Tag and data size are not consitent, writing tag size according to dataSize+11: %d", + dataSize + 11); +#endif + + prevTagSize = dataSize + 11; + AMF_EncodeInt32(ptr + pos + 11 + dataSize, pend, + prevTagSize); + } + } + + pos += prevTagSize + 4; /*(11+dataSize+4); */ + } + } + ptr += len; + + if (packet.m_packetType != 0x16) { + /* FLV tag packets contain their own prevTagSize */ + AMF_EncodeInt32(ptr, pend, prevTagSize); + } + + /* In non-live this nTimeStamp can contain an absolute TS. 
+ * Update ext timestamp with this absolute offset in non-live mode + * otherwise report the relative one + */ + /* RTMP_Log(RTMP_LOGDEBUG, "type: %02X, size: %d, pktTS: %dms, TS: %dms, bLiveStream: %d", packet.m_packetType, nPacketLen, packet.m_nTimeStamp, nTimeStamp, r->Link.lFlags & RTMP_LF_LIVE); */ + r->m_read.timestamp = (r->Link.lFlags & RTMP_LF_LIVE) ? packet.m_nTimeStamp : nTimeStamp; + + ret = size; + break; + } + + if (rtnGetNextMediaPacket) + PILI_RTMPPacket_Free(&packet); + + if (recopy) { + len = ret > buflen ? buflen : ret; + memcpy(buf, r->m_read.buf, len); + r->m_read.bufpos = r->m_read.buf + len; + r->m_read.buflen = ret - len; + } + return ret; +} + +static const char flvHeader[] = {'F', 'L', 'V', 0x01, + 0x00, /* 0x04 == audio, 0x01 == video */ + 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0x00}; + +#define HEADERBUF (128 * 1024) +int PILI_RTMP_Read(PILI_RTMP *r, char *buf, int size) { + int nRead = 0, total = 0; + +/* can't continue */ +fail: + switch (r->m_read.status) { + case RTMP_READ_EOF: + case RTMP_READ_COMPLETE: + return 0; + case RTMP_READ_ERROR: /* corrupted stream, resume failed */ + SetSockError(EINVAL); + return -1; + default: + break; + } + + if ((r->m_read.flags & RTMP_READ_SEEKING) && r->m_read.buf) { + /* drop whatever's here */ + free(r->m_read.buf); + r->m_read.buf = NULL; + r->m_read.bufpos = NULL; + r->m_read.buflen = 0; + } + + /* If there's leftover data buffered, use it up */ + if (r->m_read.buf) { + nRead = r->m_read.buflen; + if (nRead > size) + nRead = size; + memcpy(buf, r->m_read.bufpos, nRead); + r->m_read.buflen -= nRead; + if (!r->m_read.buflen) { + free(r->m_read.buf); + r->m_read.buf = NULL; + r->m_read.bufpos = NULL; + } else { + r->m_read.bufpos += nRead; + } + buf += nRead; + total += nRead; + size -= nRead; + } + + while (size > 0 && (nRead = Read_1_Packet(r, buf, size)) >= 0) { + if (!nRead) continue; + buf += nRead; + total += nRead; + size -= nRead; + break; + } + if (nRead < 0) + r->m_read.status = 
nRead; + + if (size < 0) + total += size; + return total; +} + +static const AVal av_setDataFrame = AVC("@setDataFrame"); + +int PILI_RTMP_Write(PILI_RTMP *r, const char *buf, int size, RTMPError *error) { + PILI_RTMPPacket *pkt = &r->m_write; + char *pend, *enc; + int s2 = size, ret, num; + + pkt->m_nChannel = 0x04; /* source channel */ + pkt->m_nInfoField2 = r->m_stream_id; + + while (s2) { + if (!pkt->m_nBytesRead) { + if (size < 11) { + /* FLV pkt too small */ + return 0; + } + + if (buf[0] == 'F' && buf[1] == 'L' && buf[2] == 'V') { + buf += 13; + s2 -= 13; + } + + pkt->m_packetType = *buf++; + pkt->m_nBodySize = AMF_DecodeInt24(buf); + buf += 3; + pkt->m_nTimeStamp = AMF_DecodeInt24(buf); + buf += 3; + pkt->m_nTimeStamp |= *buf++ << 24; + buf += 3; + s2 -= 11; + + if (((pkt->m_packetType == 0x08 || pkt->m_packetType == 0x09) && + !pkt->m_nTimeStamp) || + pkt->m_packetType == 0x12) { + pkt->m_headerType = RTMP_PACKET_SIZE_LARGE; + if (pkt->m_packetType == 0x12) + pkt->m_nBodySize += 16; + } else { + pkt->m_headerType = RTMP_PACKET_SIZE_MEDIUM; + } + + if (!PILI_RTMPPacket_Alloc(pkt, pkt->m_nBodySize)) { + RTMP_Log(RTMP_LOGDEBUG, "%s, failed to allocate packet", __FUNCTION__); + return FALSE; + } + enc = pkt->m_body; + pend = enc + pkt->m_nBodySize; + if (pkt->m_packetType == 0x12) { + enc = AMF_EncodeString(enc, pend, &av_setDataFrame); + pkt->m_nBytesRead = enc - pkt->m_body; + } + } else { + enc = pkt->m_body + pkt->m_nBytesRead; + } + num = pkt->m_nBodySize - pkt->m_nBytesRead; + if (num > s2) + num = s2; + memcpy(enc, buf, num); + pkt->m_nBytesRead += num; + s2 -= num; + buf += num; + if (pkt->m_nBytesRead == pkt->m_nBodySize) { + ret = PILI_RTMP_SendPacket(r, pkt, FALSE, error); + PILI_RTMPPacket_Free(pkt); + pkt->m_nBytesRead = 0; + if (!ret) + return -1; + buf += 4; + s2 -= 4; + if (s2 < 0) + break; + } + } + return size + s2; +} + +int PILI_RTMP_Version() { + return MAJOR * 100 * 100 + MINOR * 100 + PATCH; +} + +const char * PILI_RTMP_GetReqId(){ + 
return reqid; +} \ No newline at end of file diff --git a/LFLiveKit/publish/pili-librtmp/rtmp.h b/LFLiveKit/publish/pili-librtmp/rtmp.h new file mode 100755 index 00000000..d8438cf0 --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/rtmp.h @@ -0,0 +1,365 @@ +#ifndef __RTMP_H__ +#define __RTMP_H__ +/* + * Copyright (C) 2005-2008 Team XBMC + * http://www.xbmc.org + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * http://www.gnu.org/copyleft/lgpl.html + */ + +#define NO_CRYPTO + +#if !defined(NO_CRYPTO) && !defined(CRYPTO) +#define CRYPTO +#endif + +#include +#include +#include + +#include "amf.h" +#include "error.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define RTMP_LIB_VERSION 0x020300 /* 2.3 */ + +#define RTMP_FEATURE_HTTP 0x01 +#define RTMP_FEATURE_ENC 0x02 +#define RTMP_FEATURE_SSL 0x04 +#define RTMP_FEATURE_MFP 0x08 /* not yet supported */ +#define RTMP_FEATURE_WRITE 0x10 /* publish, not play */ +#define RTMP_FEATURE_HTTP2 0x20 /* server-side rtmpt */ +#define RTMP_FEATURE_NONBLOCK 0x40 /* non block socket */ + +#define RTMP_PROTOCOL_UNDEFINED -1 +#define RTMP_PROTOCOL_RTMP 0 +#define RTMP_PROTOCOL_RTMPE RTMP_FEATURE_ENC +#define RTMP_PROTOCOL_RTMPT RTMP_FEATURE_HTTP +#define RTMP_PROTOCOL_RTMPS RTMP_FEATURE_SSL +#define RTMP_PROTOCOL_RTMPTE (RTMP_FEATURE_HTTP | RTMP_FEATURE_ENC) +#define RTMP_PROTOCOL_RTMPTS (RTMP_FEATURE_HTTP | RTMP_FEATURE_SSL) +#define RTMP_PROTOCOL_RTMFP RTMP_FEATURE_MFP + +#define RTMP_DEFAULT_CHUNKSIZE 128 + +/* needs to fit largest number of bytes recv() may return */ +#define RTMP_BUFFER_CACHE_SIZE (16 * 1024) + +#define RTMP_CHANNELS 65600 + +extern const char PILI_RTMPProtocolStringsLower[][7]; +extern const AVal PILI_RTMP_DefaultFlashVer; +extern int PILI_RTMP_ctrlC; + +uint32_t PILI_RTMP_GetTime(void); + +#define RTMP_PACKET_TYPE_AUDIO 0x08 +#define RTMP_PACKET_TYPE_VIDEO 0x09 +#define RTMP_PACKET_TYPE_INFO 0x12 + +#define RTMP_MAX_HEADER_SIZE 18 + +#define RTMP_PACKET_SIZE_LARGE 0 +#define RTMP_PACKET_SIZE_MEDIUM 1 +#define RTMP_PACKET_SIZE_SMALL 2 +#define RTMP_PACKET_SIZE_MINIMUM 3 + +typedef struct PILI_RTMPChunk { + int c_headerSize; + int c_chunkSize; + char *c_chunk; + char c_header[RTMP_MAX_HEADER_SIZE]; +} PILI_RTMPChunk; + +typedef struct PILI_RTMPPacket { + uint8_t m_headerType; + uint8_t m_packetType; + uint8_t m_hasAbsTimestamp; /* timestamp absolute or relative? 
*/ + int m_nChannel; + uint32_t m_nTimeStamp; /* timestamp */ + int32_t m_nInfoField2; /* last 4 bytes in a long header */ + uint32_t m_nBodySize; + uint32_t m_nBytesRead; + PILI_RTMPChunk *m_chunk; + char *m_body; +} PILI_RTMPPacket; + +typedef struct PILI_RTMPSockBuf { + int sb_socket; + int sb_size; /* number of unprocessed bytes in buffer */ + char *sb_start; /* pointer into sb_pBuffer of next byte to process */ + char sb_buf[RTMP_BUFFER_CACHE_SIZE]; /* data read from socket */ + int sb_timedout; + void *sb_ssl; +} PILI_RTMPSockBuf; + +void PILI_RTMPPacket_Reset(PILI_RTMPPacket *p); +void PILI_RTMPPacket_Dump(PILI_RTMPPacket *p); +int PILI_RTMPPacket_Alloc(PILI_RTMPPacket *p, int nSize); +void PILI_RTMPPacket_Free(PILI_RTMPPacket *p); + +#define RTMPPacket_IsReady(a) ((a)->m_nBytesRead == (a)->m_nBodySize) + +typedef struct PILI_RTMP_LNK { + AVal hostname; + AVal domain; + AVal sockshost; + + AVal playpath0; /* parsed from URL */ + AVal playpath; /* passed in explicitly */ + AVal tcUrl; + AVal swfUrl; + AVal pageUrl; + AVal app; + AVal auth; + AVal flashVer; + AVal subscribepath; + AVal token; + AMFObject extras; + int edepth; + + int seekTime; + int stopTime; + +#define RTMP_LF_AUTH 0x0001 /* using auth param */ +#define RTMP_LF_LIVE 0x0002 /* stream is live */ +#define RTMP_LF_SWFV 0x0004 /* do SWF verification */ +#define RTMP_LF_PLST 0x0008 /* send playlist before play */ +#define RTMP_LF_BUFX 0x0010 /* toggle stream on BufferEmpty msg */ +#define RTMP_LF_FTCU 0x0020 /* free tcUrl on close */ + int lFlags; + + int swfAge; + + int protocol; + int timeout; /* connection timeout in seconds */ + int send_timeout; /* send data timeout */ + + unsigned short socksport; + unsigned short port; + +#ifdef CRYPTO +#define RTMP_SWF_HASHLEN 32 + void *dh; /* for encryption */ + void *rc4keyIn; + void *rc4keyOut; + + uint32_t SWFSize; + uint8_t SWFHash[RTMP_SWF_HASHLEN]; + char SWFVerificationResponse[RTMP_SWF_HASHLEN + 10]; +#endif +} PILI_RTMP_LNK; + +/* state for 
read() wrapper */ +typedef struct PILI_RTMP_READ { + char *buf; + char *bufpos; + unsigned int buflen; + uint32_t timestamp; + uint8_t dataType; + uint8_t flags; +#define RTMP_READ_HEADER 0x01 +#define RTMP_READ_RESUME 0x02 +#define RTMP_READ_NO_IGNORE 0x04 +#define RTMP_READ_GOTKF 0x08 +#define RTMP_READ_GOTFLVK 0x10 +#define RTMP_READ_SEEKING 0x20 + int8_t status; +#define RTMP_READ_COMPLETE -3 +#define RTMP_READ_ERROR -2 +#define RTMP_READ_EOF -1 +#define RTMP_READ_IGNORE 0 + + /* if bResume == TRUE */ + uint8_t initialFrameType; + uint32_t nResumeTS; + char *metaHeader; + char *initialFrame; + uint32_t nMetaHeaderSize; + uint32_t nInitialFrameSize; + uint32_t nIgnoredFrameCounter; + uint32_t nIgnoredFlvFrameCounter; +} PILI_RTMP_READ; + +typedef struct PILI_RTMP_METHOD { + AVal name; + int num; +} PILI_RTMP_METHOD; + +typedef void (*PILI_RTMPErrorCallback)(RTMPError *error, void *userData); + +typedef struct PILI_CONNECTION_TIME { + uint32_t connect_time; + uint32_t handshake_time; +} PILI_CONNECTION_TIME; + +typedef void (*PILI_RTMP_ConnectionTimeCallback)( + PILI_CONNECTION_TIME *conn_time, void *userData); + +typedef struct PILI_RTMP { + int m_inChunkSize; + int m_outChunkSize; + int m_nBWCheckCounter; + int m_nBytesIn; + int m_nBytesInSent; + int m_nBufferMS; + int m_stream_id; /* returned in _result from createStream */ + int m_mediaChannel; + uint32_t m_mediaStamp; + uint32_t m_pauseStamp; + int m_pausing; + int m_nServerBW; + int m_nClientBW; + uint8_t m_nClientBW2; + uint8_t m_bPlaying; + uint8_t m_bSendEncoding; + uint8_t m_bSendCounter; + + int m_numInvokes; + int m_numCalls; + PILI_RTMP_METHOD *m_methodCalls; /* remote method calls queue */ + + PILI_RTMPPacket *m_vecChannelsIn[RTMP_CHANNELS]; + PILI_RTMPPacket *m_vecChannelsOut[RTMP_CHANNELS]; + int m_channelTimestamp[RTMP_CHANNELS]; /* abs timestamp of last packet */ + + double m_fAudioCodecs; /* audioCodecs for the connect packet */ + double m_fVideoCodecs; /* videoCodecs for the connect packet */ 
+ double m_fEncoding; /* AMF0 or AMF3 */ + + double m_fDuration; /* duration of stream in seconds */ + + int m_msgCounter; /* RTMPT stuff */ + int m_polling; + int m_resplen; + int m_unackd; + AVal m_clientID; + + PILI_RTMP_READ m_read; + PILI_RTMPPacket m_write; + PILI_RTMPSockBuf m_sb; + PILI_RTMP_LNK Link; + + PILI_RTMPErrorCallback m_errorCallback; + PILI_RTMP_ConnectionTimeCallback m_connCallback; + RTMPError *m_error; + void *m_userData; + int m_is_closing; + int m_tcp_nodelay; + uint32_t ip; +} PILI_RTMP; + +int PILI_RTMP_ParseURL(const char *url, int *protocol, AVal *host, + unsigned int *port, AVal *playpath, AVal *app); + +int PILI_RTMP_ParseURL2(const char *url, int *protocol, AVal *host, + unsigned int *port, AVal *playpath, AVal *app, AVal *domain); + +void PILI_RTMP_ParsePlaypath(AVal *in, AVal *out); +void PILI_RTMP_SetBufferMS(PILI_RTMP *r, int size); +void PILI_RTMP_UpdateBufferMS(PILI_RTMP *r, RTMPError *error); + +int PILI_RTMP_SetOpt(PILI_RTMP *r, const AVal *opt, AVal *arg, + RTMPError *error); +int PILI_RTMP_SetupURL(PILI_RTMP *r, const char *url, RTMPError *error); +void PILI_RTMP_SetupStream(PILI_RTMP *r, int protocol, AVal *hostname, + unsigned int port, AVal *sockshost, AVal *playpath, + AVal *tcUrl, AVal *swfUrl, AVal *pageUrl, AVal *app, + AVal *auth, AVal *swfSHA256Hash, uint32_t swfSize, + AVal *flashVer, AVal *subscribepath, int dStart, + int dStop, int bLiveStream, long int timeout); + +int PILI_RTMP_Connect(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error); +struct sockaddr; +int PILI_RTMP_Connect0(PILI_RTMP *r, struct addrinfo *ai, unsigned short port, + RTMPError *error); +int PILI_RTMP_Connect1(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error); +int PILI_RTMP_Serve(PILI_RTMP *r, RTMPError *error); + +int PILI_RTMP_ReadPacket(PILI_RTMP *r, PILI_RTMPPacket *packet); +int PILI_RTMP_SendPacket(PILI_RTMP *r, PILI_RTMPPacket *packet, int queue, + RTMPError *error); +int PILI_RTMP_SendChunk(PILI_RTMP *r, PILI_RTMPChunk *chunk, 
RTMPError *error); +int PILI_RTMP_IsConnected(PILI_RTMP *r); +int PILI_RTMP_Socket(PILI_RTMP *r); +int PILI_RTMP_IsTimedout(PILI_RTMP *r); +double PILI_RTMP_GetDuration(PILI_RTMP *r); +int PILI_RTMP_ToggleStream(PILI_RTMP *r, RTMPError *error); + +int PILI_RTMP_ConnectStream(PILI_RTMP *r, int seekTime, RTMPError *error); +int PILI_RTMP_ReconnectStream(PILI_RTMP *r, int seekTime, RTMPError *error); +void PILI_RTMP_DeleteStream(PILI_RTMP *r, RTMPError *error); +int PILI_RTMP_GetNextMediaPacket(PILI_RTMP *r, PILI_RTMPPacket *packet); +int PILI_RTMP_ClientPacket(PILI_RTMP *r, PILI_RTMPPacket *packet); + +void PILI_RTMP_Init(PILI_RTMP *r); +void PILI_RTMP_Close(PILI_RTMP *r, RTMPError *error); +PILI_RTMP *PILI_RTMP_Alloc(void); +void PILI_RTMP_Free(PILI_RTMP *r); +void PILI_RTMP_EnableWrite(PILI_RTMP *r); + +int PILI_RTMP_LibVersion(void); +void PILI_RTMP_UserInterrupt(void); /* user typed Ctrl-C */ + +int PILI_RTMP_SendCtrl(PILI_RTMP *r, short nType, unsigned int nObject, + unsigned int nTime, RTMPError *error); + +/* caller probably doesn't know current timestamp, should + * just use RTMP_Pause instead + */ +int PILI_RTMP_SendPause(PILI_RTMP *r, int DoPause, int dTime, RTMPError *error); +int PILI_RTMP_Pause(PILI_RTMP *r, int DoPause, RTMPError *error); + +int PILI_RTMP_FindFirstMatchingProperty(AMFObject *obj, const AVal *name, + AMFObjectProperty *p); + +int PILI_RTMPSockBuf_Fill(PILI_RTMPSockBuf *sb, int timeout); +int PILI_RTMPSockBuf_Send(PILI_RTMPSockBuf *sb, const char *buf, int len); +int PILI_RTMPSockBuf_Close(PILI_RTMPSockBuf *sb); + +int PILI_RTMP_SendCreateStream(PILI_RTMP *r, RTMPError *error); +int PILI_RTMP_SendSeek(PILI_RTMP *r, int dTime, RTMPError *error); +int PILI_RTMP_SendServerBW(PILI_RTMP *r, RTMPError *error); +int PILI_RTMP_SendClientBW(PILI_RTMP *r, RTMPError *error); +void PILI_RTMP_DropRequest(PILI_RTMP *r, int i, int freeit); +int PILI_RTMP_Read(PILI_RTMP *r, char *buf, int size); +int PILI_RTMP_Write(PILI_RTMP *r, const char *buf, int 
size, RTMPError *error); + +#define MAJOR 1 +#define MINOR 0 +#define PATCH 4 + +int PILI_RTMP_Version(); + +const char * PILI_RTMP_GetReqId(); + +/* hashswf.c */ +int PILI_RTMP_HashSWF(const char *url, unsigned int *size, unsigned char *hash, + int age); + +#ifdef __cplusplus +}; +#endif + +#endif diff --git a/LFLiveKit/publish/pili-librtmp/rtmp_sys.h b/LFLiveKit/publish/pili-librtmp/rtmp_sys.h new file mode 100755 index 00000000..880457c3 --- /dev/null +++ b/LFLiveKit/publish/pili-librtmp/rtmp_sys.h @@ -0,0 +1,123 @@ +#ifndef __RTMP_SYS_H__ +#define __RTMP_SYS_H__ +/* + * Copyright (C) 2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * http://www.gnu.org/copyleft/lgpl.html + */ + +#ifdef _WIN32 + +#ifdef _XBOX +#include +#include +#define snprintf _snprintf +#define strcasecmp stricmp +#define strncasecmp strnicmp +#define vsnprintf _vsnprintf + +#else /* !_XBOX */ +#include +#include +#endif + +#define GetSockError() WSAGetLastError() +#define SetSockError(e) WSASetLastError(e) +#define setsockopt(a, b, c, d, e) (setsockopt)(a, b, c, (const char *)d, (int)e) +#define EWOULDBLOCK \ + WSAETIMEDOUT /* we don't use nonblocking, but we do use timeouts */ +#define sleep(n) Sleep(n * 1000) +#define msleep(n) Sleep(n) +#define SET_RCVTIMEO(tv, s) int tv = s * 1000 +#else /* !_WIN32 */ +#include +#include +#include +#include +#include +#include +#include +#include +#define GetSockError() errno +#define SetSockError(e) errno = e +#undef closesocket +#define closesocket(s) close(s) +#define msleep(n) usleep(n * 1000) +#define SET_RCVTIMEO(tv, s) struct timeval tv = {s, 0} +#endif + +#include "rtmp.h" + +#ifdef USE_POLARSSL +#include +#include +#include +typedef struct tls_ctx { + havege_state hs; + ssl_session ssn; +} tls_ctx; +#define TLS_CTX tls_ctx * +#define TLS_client(ctx, s) \ + s = malloc(sizeof(ssl_context)); \ + ssl_init(s); \ + ssl_set_endpoint(s, SSL_IS_CLIENT); \ + ssl_set_authmode(s, SSL_VERIFY_NONE); \ + ssl_set_rng(s, havege_rand, &ctx->hs); \ + ssl_set_ciphers(s, ssl_default_ciphers); \ + ssl_set_session(s, 1, 600, &ctx->ssn) +#define TLS_setfd(s, fd) ssl_set_bio(s, net_recv, &fd, net_send, &fd) +#define TLS_connect(s) ssl_handshake(s) +#define TLS_read(s, b, l) ssl_read(s, (unsigned char *)b, l) +#define TLS_write(s, b, l) ssl_write(s, (unsigned char *)b, l) +#define TLS_shutdown(s) ssl_close_notify(s) +#define TLS_close(s) \ + ssl_free(s); \ + free(s) + +#elif defined(USE_GNUTLS) +#include +typedef struct tls_ctx { + gnutls_certificate_credentials_t cred; + gnutls_priority_t prios; +} tls_ctx; +#define TLS_CTX tls_ctx * +#define TLS_client(ctx, s) \ + gnutls_init((gnutls_session_t 
*)(&s), GNUTLS_CLIENT); \ + gnutls_priority_set(s, ctx->prios); \ + gnutls_credentials_set(s, GNUTLS_CRD_CERTIFICATE, ctx->cred) +#define TLS_setfd(s, fd) \ + gnutls_transport_set_ptr(s, (gnutls_transport_ptr_t)(long)fd) +#define TLS_connect(s) gnutls_handshake(s) +#define TLS_read(s, b, l) gnutls_record_recv(s, b, l) +#define TLS_write(s, b, l) gnutls_record_send(s, b, l) +#define TLS_shutdown(s) gnutls_bye(s, GNUTLS_SHUT_RDWR) +#define TLS_close(s) gnutls_deinit(s) + +#else /* USE_OPENSSL */ +#define TLS_CTX SSL_CTX * +#define TLS_client(ctx, s) s = SSL_new(ctx) +#define TLS_setfd(s, fd) SSL_set_fd(s, fd) +#define TLS_connect(s) SSL_connect(s) +#define TLS_read(s, b, l) SSL_read(s, b, l) +#define TLS_write(s, b, l) SSL_write(s, b, l) +#define TLS_shutdown(s) SSL_shutdown(s) +#define TLS_close(s) SSL_free(s) + +#endif +#endif diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate index 87651a317d6b2e7a3df32fa928f76a215910e956..f93834be58afeb078c2307723e64a3ae081c4ae3 100644 GIT binary patch delta 29 jcmaDA`YLn-Gasv?^PayOH*@f@@q(CJXKfaizRnK-wJi&3 delta 29 jcmaDA`YLn-GaswN!EBAz%^ZAeyg+8w&7RG|(%1O`se=mE diff --git a/Podfile b/Podfile index fd799652..8be53e88 100755 --- a/Podfile +++ b/Podfile @@ -2,7 +2,6 @@ source 'https://github.com/CocoaPods/Specs.git' platform :ios,'7.0' target 'LFLiveKit' do - pod 'pili-librtmp', '~> 1.0.3.1' pod 'LMGPUImage', '~> 0.1.9' end From b72b44fece13e426ffcea47e759ad9cacc6524ce Mon Sep 17 00:00:00 2001 From: chenliming Date: Fri, 29 Jul 2016 17:48:54 +0800 Subject: [PATCH 07/39] update podspec --- LFLiveKit.podspec | 2 +- LFLiveKit.xcodeproj/project.pbxproj | 4 ++-- .../UserInterfaceState.xcuserstate | Bin 13633 -> 13213 bytes 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/LFLiveKit.podspec b/LFLiveKit.podspec index 1415f6fe..169486c8 100644 --- a/LFLiveKit.podspec +++ 
b/LFLiveKit.podspec @@ -10,7 +10,7 @@ Pod::Spec.new do |s| s.platform = :ios, "7.0" s.ios.deployment_target = "7.0" s.source = { :git => "https://github.com/LaiFengiOS/LFLiveKit.git", :tag => "#{s.version}" } - s.source_files = "LFLiveKit/**/*.{h,m,mm,cpp}" + s.source_files = "LFLiveKit/**/*.{h,m,mm,cpp,c}" s.public_header_files = "LFLiveKit/**/*.h" s.frameworks = "VideoToolbox", "AudioToolbox","AVFoundation","Foundation","UIKit" diff --git a/LFLiveKit.xcodeproj/project.pbxproj b/LFLiveKit.xcodeproj/project.pbxproj index ce8ef189..9b5e41bf 100644 --- a/LFLiveKit.xcodeproj/project.pbxproj +++ b/LFLiveKit.xcodeproj/project.pbxproj @@ -721,7 +721,7 @@ ENABLE_BITCODE = NO; INFOPLIST_FILE = LFLiveKit/Info.plist; INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; - IPHONEOS_DEPLOYMENT_TARGET = 8.0; + IPHONEOS_DEPLOYMENT_TARGET = 7.0; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; LIBRARY_SEARCH_PATHS = ( "$(inherited)", @@ -744,7 +744,7 @@ ENABLE_BITCODE = NO; INFOPLIST_FILE = LFLiveKit/Info.plist; INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; - IPHONEOS_DEPLOYMENT_TARGET = 8.0; + IPHONEOS_DEPLOYMENT_TARGET = 7.0; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; LIBRARY_SEARCH_PATHS = ( "$(inherited)", diff --git a/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index b8b1809ad3b584a4c3ba316040f5c736785d2e1f..9b97746439e05a430762504f5e804d9b0c158917 100644 GIT binary patch delta 7599 zcmZ`-349Yp`<{1ZlO|1?b8|LVbG1pi5vg2lxeINBIG1X!1e#g`J)Wm7(WRIjTTGREfr*v1l9`j|4OqEk(=FYp5E{K{aR%dJAnto6r`t z4edamqR-G5=pgzAeT%+Br_s;o4EhC~M|aR&bPxT7{zmuF13&QA(TP|jD>M99$tXS z5Q1qi1LnY7cmP;YTGY_#1o(pTWQ2v-lkTFFqf_zv2t{4}1&X#(&~F_yHkAOL(FqCSoIY5>FCHGx8K^ zMV=vTNLSL0bSG(~2kA-DNe0Ow1IR!!h!l|FWCSTDC8V58AR#i9%pmi~%Op%*AIZTd_qvRMlNluZ|B! 
ze~??`Hu;m>;RwfZB2LVyI5nr?w49Z*adytd#dEE>B<>lm4cC@S<~nknxXxS`E{*HO zWrwfe4&)fk2{$1xk|{KI%T}@m+Pp&Fp!=zr9;J@<@i57(mMX@1mde`*AF@c_i!6xH|mJSP)m6wi^ z9z{|Vjbvyb3Lx)VGzbkwxhM|}LHVcv4MoGKg<7eN+Np!aQ73g#_gXX@jX-Xc!H&_W z2o)m__0ZM~TO)RiqYLOl%3u#D${$}^8SGOY2;`~s3{hAav%t;JMxb^M6jY8F5vav? z3{5~2(Ikf91vD9j5Dn|hJsmA6qF(y+cW4Tl${xLlrlINV@e61snuTVgIbmwfw6&zI zR6yfZETrb4`OItqT8LgoVYCP>MoYq=%aS&wKH7pN&?ezQy4D@hE2u>kT2B2`aTL9d zR#+ah6|6OQzo+>`V`eC9Ee;TWhRiiy9sX?1I8tp}&=c!ze&7#@V_PuOrszLkF z0pzVhU(%#1^c8)E=7#sgG)+N=*m$h@Fl|$Xj?%U?w@*fXVM)Cc9Y_B`ni}*yI)Q$m z?Pz=2p$7fP_&rIJ8J%O8Pq$Fy_d#bPV9(K%$RgBv->>L8@~%M_&_#3!T}D^XRdkJ} z(vGwf?M%DSuC&`4bOYT)zoFmRaSPq1-Dw&vqUG$Uph1T2yyBe@1}W041_B(2KnzhJ zp*?6%noculChfHvqCp07hyevCX>a;0&7(tTJ{?VS@>GqVi$p}r-Yg^nLxXj})4Mz{ zrm(cKqCsP)qWp@AJXONK*oRLl4Plpx4-Z$xdK3S}Ag@7NTFLOzaIC6zSWv~c%u^Zu z@vsiXLyEuzv5a{z)2u46&}=s4T`F@^2e_&s4xF?P9Y6=y+j<}YX{x~s@w6}PR}DV! z)Bf~1#{b9SxNx+(aj4cypgFV{l$qVRaA>fwv?RZLd^I#jN%fsP1+5!&k_6Atfpief z>Fr9Ho`531x?Va!a&BgJMrAO+peT@9UYe^Hm4#`#MX)-rvW;Q z4yB`>2s6Jcb!X4P3)b~})Nx;U4tcAgAM~fg>4<8`fdOHHp)FFC8WMCAGj))AD(C2A!t-zGk0WOOGex8;*5*P?Z1csJ2)Dvy! 
zLEZ1@lNwT|7Imt_MT9z>Z*7xy6eydufF{FK^mhNu>?Q^I6@h{5TplRO4>Cn(LQqMY z)v-MtUh12f%`~VQrbkwQX*!546#!Q-#s zYdV|Gp*fF{yHX~W)w*^46pp~L$nZz${3?{e(B|f;)SXI8DoTq2T?>nXf%3@A>y1yq z4|Q%*-|!QhdK9ps)AZ$fm;NOJ|16xNVY=io{9oZx1ALd^3SC4OGklEtIwf$W6b@;C z?k4(40&)1R-vq;Tj*BibKBS-TSq>^>bSCemg|S51p~zukvb)u9Nj5= zUTO`Gkf_7fXs4-upmw+e(p2I0^zAB~OxH7?dMDZx!kuszMkMY`H&o%SbR*4uTrbqW z*#l=V)xkY+I^9IyiCpwz7n^Br?|<6%!TsxO`!SPuBep%+#U`5DzpS#LsIX#WeW!zQ z9)puvhi3?VkLJ`r8H!mI_YhI!<=pxQg?RL%85Gg?AHPwG%j(=VOrs5qc7GPG#35D+ z;xUZSadRLPeuo~Q2kGI* zPQC@d|D@~UZTJKF75$p6^I?%Td{3sz!aMP&4NP|7-SiuJh~_jbwp}T+Df9KmuD%a{ z+2Fkctm`AST@RLb>xK{G6RhyTNAOX641bHi!^iRW^jrEJJx;%;C+H9KKWmU1{|Enw zf5Ip6DSVm<=8poX1kef)CqQQbx-f#rSUyR_7x6WwxcCyjjIZFU^e1|fo}#B$;p_MY zzKMUMKhsO}F9F2NKHti!rMvjAh_l?IXR7eu^cUtVj}r~_u1zXKA96}KB4eQj|Eshj za*z=P#1IuLtKrSr{>w@$7Ha>auz~VO-p{UGOrmEQiWunmC(=AIlh{a_rx)n2 zk>vg`p=;09Lvj$;lL?);Swg>9o6uRmt?R9Pq!F_sev(Kp)2r2_F=;}t(?1(bmP9ri zmGnxU%EQWZo3(f8u;GP8fhXu7NwpT&@>E@!MM~@PU~yTK`gkO5NeUyLv?J|F2a-&0 z(3|u(`ui%9N;)DB=}iBiw^$(DX1r@1%YB(7`(Lg5kiN{iKfOcmGV6QH`X`6SJ(%RN zMIyQM?bq**caDcJCclw7#0eEs>o;ofORlF&gm$PP+KMdriyPPyA@J_ z){-EpWaY#L5+LKlAH~`AB8nvRz5r}5JCQ`TVzbB#WODdMTw*h>Hc|;C0xLOc_eQ<* zgCncS6qNL2L!H%&WLh}g8B3;Bp2@q99<_I8dh<{-OzW|c(m(D@j zMPy0DcCi4_Rb;6EvbqVRyc(InYXZobbC5UcYCx1hsz^1dsoPmTTUj`Cbb4v&=-&Aw z0-2S?1%YzfNC1TZF#;%A=Y2F0?-^8iHZ!FT=Nj@>c!MjBtR?Hhhh52`^<*QHN3uZx zbrsnpfTk|4GxLiB*}?L{k`ezk^OjeI~pB-_bHWQPE}06GEm0vH4^ z3SbhzEI{mA@&ow~Vd3u)prZht7(x3yMOowwxZWhckhA0*`7b#yfLDOV0yGz( zl>luXsT{dTu15O2L@tvn0>lfDAb_uiTqD=X4FUWDBnnVl)*CUd#?s4fX z`p*c^mHy4v^07p7S#{CV?V)Ui9?BLg{xY+B2F3@5rIrsJ$+idOhq-=S|GwG5{9u3r zk{}auvTCdqYJ-wl-Z{ZOs9nW(2qGd9MWjSQR7AtRrL`qlB#(U?D`Q{Bf@BOC$G(wG zBD2X{GM_AD-^dn|rR+P|t7IKH$SJuFTxbY4iCfBT;dXGJaJ#rY+*jOj?lku+cZItp zA|k!WC^CyIBAdt|a*Es{uP8y(R@7dUEJ_u15_J)E6QzlIiZVpKL<2=7qDi9lqEAFu z#UNIRed1(sia0|&L|h;qCLS&xDIO&*5|@a}#O2~?;+MoT#k0k8#q&erh2pSyv3RNY z74a(ZPVoWp8S#}U66K9*6O|E_8&wfCH|njZby4f1Hb%V@wLj`e)QPBb5{ZPD=p{yp zSz?jcBo0X{Nq0$q$tcNm$qdOX$sEZ%$pXo4$pOhh$sx%R$tlT2$#uz1$?uX|QY4j1 
z6Qqr$>C(Q^0%?gfG)_7}I!QWNx<$G}dPI6$dPRCGIxgB9{cQB8=%VP7=(6bY=!wyj zqiOV%=-JU*qqj$&jlLq&%8arOvYxU`S#Mdktgo!UEF_yFn=e}^TO?Z|TPAy5wobNL zwnes8woUf2>{Hop*=MrPWjE!~a;H2|-bCI^-a?)%&y@EL$+P5r>$w$kJ z<)!lHg535hA|Es>K{#|`b{ig;rQ5v~Mr!i{G8jGf-CPkB` z>8Z)k^wK=58K4=Y$<++e6ljKNiZ#-o+67Je(gjo;57 z;ZN~r__O?Z{sMoA|DC_X-{b%0AL!&dwN9(k>C8Hd&Zcwde7dJ~ZFTK+$-2(EuDb5J z9=dGZ5Zwq}p>DLUSQpfd(T&qh&`r|4q1&Q6synT_q6=Nq-O&A}`$KnIcSkSQOZ3rt zxn7}H={0&@pQvxIFVfG}FV}ypKcGLTKcqjZ|5kroe?fmse@A~$f8T%%*dRBU3^s$q z;55V=e1=3rV?z@|lA(*ChaugNX~;J8HS{;+81fAzhUX0xhB1b5h6#q346_V#4D$^O zLx!+nrJ=^~rr|BaI>UOyhlcHjj|@8v#|*cPoY7!38DouBqum&1bQwLyc%#qQ&e*}2 zV(e(_Z0u_6ZtP)9H)a}p8%G;M#;~#4xZQZrc+vR4q&L}34wKWAXi73Ao4S}XO}$Ld zng*B#ng*MOn?{%lP35KvQ>7_1#Wc!v?Uw@r6U_sz(R&74_djx{Hk8=IS&o133DCz;!r+nIZrCz)rO zmzkHFUo)>TuQXSiSD9Cvx0<(^KQ!+!e`4Nk-fRBC{H6J4^Zi&^tTPnbH+Dqql-T*P z%VL+uz81S8c4h3^*tcUh#BPdxH}<{Qs}_SL)iT&J%2I48wUk?emNAy8mT8ukEHf=j zEH#!lEo&@mEpJ;kSTlEvJ>q2YTy4bqY`lap|-w zo5|+2HMKRjwY0UewYH_$I@&thy4t$ide{QCIkx4t-L?a^W40e{7i>3ezuRuv{?d-|+RC_0TZ+n)#PsrZSKHI*`{;GY2y~@7IzS_Rq ze!zave%OB0{;U0-L*$S+q8&0vj6>zHI+{B=JGwg399fQB$52P9W1M55W0_;KW1r)g z%A348po^bx?Jmoy&Jm>t?dC7Uz zdBcTWB3G14>XN&ZE{#j)GPq1Gm#c}Zn=98<;hN!E>H5&M*A4C{x5907d)$rOP2DZr zt=#S0$?lHsF79sb3GT)2jqYvk-R`|1_ZRL1?t|{b?xXJ0?(^=8?kn!=?%VFW?!P^3 zXYNV#|&k4^d&t=bVo?D(fp1-`r zEA~pga<9T`@S44DZ*y;wx2?B>H`UwS+tZureb$@p9poMAE%F8vP9*%85IUFeTf$u* z_!PcapUoHNbNk|beqUo>8(({0vah4Bi?5q6&DYac=3DIh#CO4W(|6ZT{9?b-ulL9L z+xa{A)BIWfzW)CH9Djko&_CK=;xF@;`$K-gKh;0oztR7xf3N>QT|tO(-I;r??u=Mn JQ~UFO{eM!*>r4Ou delta 8139 zcmaKQ2V9fa_xBx7<`XhVLKb8XB#MY2V6}<@E~E&8TZ|9^MNo#T*4(GoT5FwgRjtZ! 
zYhBe^>#nPHuUhM9omFeCtJB*4Z-O}9{`w}LBsce-bMHClp7TBDIUg1789Y4P2u8tJ*xU!>U@O=fz7D&?9GDAxz@D%d><#tRoHQcciJtT)c_h*==NruOPARF`MP(y1JR^z=J(XqUedt7Y&%kugKyOK&yovuL z*7@i0M)~^D`P719N|9Xd^~ixJPp(W)OD`Vb?NVGBJu1Jfz&otGY;fRgFuLf zK|c~v4f2SQMCQpADY@OVhm{mp<`#QLcB=FatvD4V%?E>OKmqU)6LAn%!v;lQ2nekK z#b7Wo6HAR4lz^ecN^Df6`%;T9)7Ex^76e|WKpcn%m7uC`Ms`YZer0i4si%B&4V_7B z*lQ#h^YUJFtFd$|C$ZbwmM(c|69TyY8QEQ{Dm?>ByvgO|p3${{fEFOJ{*3Y99jbJY zAUQzs;Y{^j%D1@8lAEtm-21CvM;ag)ZiU^4gsOd(B38`3slj0nF^t>Jg7sHpN* z_!q0g{NvP?UJdg4!AHQk63ha#!5lCb%mW{S`CtK9NTNwo5<{AiSkj!tk$BQ#C2)Yn zUA8mMV@Cn$wgtV;J z>{GCnTJM?S+1|=p@G1BVB<5sf(?9jM^!YRLz~$})gO(6i{rsMn=Jy51{;_IXa)T;| zz;WPQMiu!A90o_gQE-f^kWAW<_9TU*E(0g1$fxM%3^+^DsIVRACzE7RkxRl%nmoBG z;C7>ic}fes1wo(bUheh!=ZD$-?y&a$T^hBo;iSvpdk|U;z9Sv0!4=Yp*z@Ev%AD&d zp(akQ2=FzK-T*%Yobx8>Tn%oKE+n$ob5Cv{xeI;<&KmF&xJS}S*TCQb9b}Nm%>Reh zNA2+|_@kcqcS`d{fcPgmNGFj!hE)wLDXu7L*!3R>8)SrtbR+g>3y=o|&m{`*Mm8+4 zFyuLhlw`l)P{2^&T(X2@KPOc~O+&~*Ey*E~!3%;0FsK%W!w6_3xuh5A`#i*;1=?Q@ z3+RAO(u4G*5q3~#%Ytqg{mODv7(;rKKE(bi6xiBMCczkL5IG(uzQUJ8x9u0)ww1nv z54M4wsB^=%Fd4Rk?O_T`g=w$@>_`R>4;e`ENdfVaL8OorQRjwTfE}cQRQLwWgxz2k z^^RhSUpo5!j2tH?$Txvtk_Qkh#XGR7u+STXr#yMsbGYhCktr{`l6sJ*yr7?dCx5KH zcZp|oSykmrE&=<)0n~Ufj|{Gc9x{abUk|ovTj+&_f$2e{q#72Hp(L_#glwkLy(w8AHy3^c zLYKmM@IyEsE`STkSn?JbM|^}Vg^S=~xCBF;$>Y?4gDeA{gfxXtN~`x5NhSWvGGXska81AxtI6Bda4i}C z{{eKkfdV?*2se>;$ix@kwgrCnid$@hpOXpXU1}tStVu%@!`<+Um#IJr+)ttUy#Q3d z8~KWxeF;wo{OBuq7#@K~;W2m|o`5IeDe^vNk#k7TKq71YFT-T#mzR~4WR?||R(A1@&Xa57 zQ&XBHC8Z{}PfSWpjZaESYnB?{qFu8jx|A3f6Vog$u|=~K`Wv6pEG8zlMf(=b(_Snk z{%gr6cmK!9Da94}C1n-ff(*}4Zv{O(Hah0TP6_|o>G}Eb|Gkv-uO**c+q^|eOk!MO zYI15yLQ+yv`?$E+9n76ZQD>3)ox@ zegQQ4?TeMDbC!CL75rvd%wUn9vR-1+-{M)=(Tltb>>i|fhzq5iad zp!Tal{Q?CPtsBsQfC3(})vpgVcSK$^7&z z2I`_z^cJww^r0hP&;%0cKO8#JQ7_d55WD0#G^6(rt=+28B(lF6q*7{HCaYALatb!V z!WK1OVNr~ zmRF)x@O6xwS9Bg%dm&mq4lr2`X;ow0hrMiag(BbDy0iGl1DEXS4YpAeZPQ13ZeRl_W&!AK2 zG&(~s{Ves4bLg8qd1!fAS!Mmb1u}1jf0!W2{5=8WJh}EY`W9WF#23*ebeS@oMOWyv z-c``k|D_{lFu6s(BNu`pa=HF4?*u&TM{@B+h};W=2)XoXL@0g# 
z)W1&uI|^PE<##HK`wE~g4tkO+RM@M5I0-oO)4);ZsPNLD!()h1p4=6DURo}e2`~nU zFaAp<^+Y_(&y(ANL?V_IU_pSW{(l^+=zv97K??&c#w?cL5G=(qEGIX}P4WY|MQ)Qj zmk$2X>O5$UX8i zxnGAPaU&ds-Q)pz!a)fK!zjaCMN9uZ!w3hC#c|Y|aC7po8po5zv=Ds};=r2u{Nto)sa#kUwb=Qjf^E3$-3jC%?Xg$oLJM89-$6JNYdDxX-Y* zWAA67hjVbx=U5v;!4~(TVEac9Yy+rzJv@l4u{aM8q;z-y_K?5G-!(WN7jO`8Q244g z#X)VJ=E)s?PGr<)mK6*tF7dvsZ)uR?AODGnZZX60h=4WAaRsi#RUD*c8{#15AXA4& z;!(hX$8eCxK|V>~pn#gRvvGdRJ9yH6GQE!{Q>G~#6md`-U}7m#h{@sd<&5};koL3uT<=AeS^vCL#OFAbP?83#j&^epvN8D5Fx zcokkv-FO3DfYqo8+O@8O^E zef)rfHV)c3=-{A}gDwt6a*%1!8i_fxz2@`G0B%)u_y5FAYB zVAo}g9Svm)nIc+&45daP2RN9)!Ax3+90-`kO0#4#?J4Q+rvqK!`>%@=yw@U+lW5J{20F>QMKa&QSAjo-uX;}7su+RdDa zXW=<`DPE3O;?;O9UPlpO6YXg3!?$QdvL|gW&S5q%N13mg^UMY25_6Mz!2HQWJONM6 zi{QoZVtH}A7Q94WOI|D9>%6wScDx*34_+@`A6`FR9?!$e=XrUBykg$lyxF{J-bo+t zI$z0m^5gmK`KkOgeqVkuzl2}PAI`7fSMewDXY%LrKjbgqf5czKU&r6T-^Aa_-^Sn0 z-^oA1|BnAX|2F?2{~rM(Pzb^VT7h0*5?BN_fkO~2ND!n6x(adyC4ve85lj?J5=<6M z5zG?I5zG_J7c3NP7n~4068eBpDO3wJLY>ecj1ZcH7NJe(5GD#+3R?+Z7q%6)6Q&5$ zgdK&Qh3UdV;RxaT!bQTh!u`VY!e2yU5i7Ea+@i*!Xi}Yl@JC5bp@$3Y4ChPOF zv)DOoExU|e!LDZ4vg_Eb>@Ic>`vtq7J;%Ye*hkTsQckrm6TWYc5| zWy@tNWvgXtW$R=cWZPvsWxHj2W&30YWQSyrQj8{xh zOjJx#Ojb4JRd!HzSLP^ll|7YS=5n2B?ZvlT_1H>s1F-7gV=Yw^eskchyj>Qrpxm)LH88>RfeC zb#HZF^;Gp-^?db0^&<6Z^%nJZ^-lF}^)PAeAGJSef7bq`gF39^ z>BKsTPO6jZbULf9sV+{}LYJs(t!tx8*0tAl)#d8)bp<-Fu25I08>t(u8><^fTauG> zQ+3mIGj+3c)w*T66}nZrb-E3@O}fpxUAjZMv$}6}7j>6)*K{{@KlpUFbx-t+UagPP zr|9$a9(}&vt1r|O{UrT#{X+dN{bBu4{c-(C{b~Iz{T=;X{XPAC{X_i|{oe*)Kn9sX zVNe>>29qJu5N}8@BpF^abT{N0dK!8g`Wn23BEw)qiJ{7{*idIUW;kbf8jivv!{ftK z!qdV#hIbD4rH5yPXNG5md&2X>z2QaSL&8hLhlf{&j|iUyijmV4eL@bEd7je%hHfoG|W4O^|v>0v17-Ot4&e*~@#5l@0 z-Z;TH(KyLC**L{G&p6+>(74FB#8_jjGp;qRH*WM9KQZn%9yESwJZwB_{MvZUc*}Ul zc-Q#A_{jK+@i&viWHCjXnw#QH38q%2Hl}u_6jL`-wkgNd!{jr4V47;0ZklPjV7g^` zV0vu&&Ge_~shMvUnI&eaSz%V1qs=+yeDh@UEb~X^rRI(1&&@l{d(8XH2hAtUr_EoR zeczZbm@k=cn(v$cvONd2oQCid%tHo|{S{hmUTZULlEyFF9mXVgxmf4m?mTF6# zWtru3%Q4G0mJ60kmdlnamg|-WR-RRB)mtO1PHU_+!P?%MZuMGAtP`vYtShXmtsh%A 
zSU<6Dv3_CQZ#`)J%4W2=ZPB)7K3kkE!IospvGuokYz4MKwvo2!wwboswt2P%wmREd z+Y#Go+e5q1uC{CK2D{O2vD@s;?6287+I!ji+VkuK?OuD4eTco(KHUDceS&?WeUkkH z`!xGZ`)vDM`-k>g`^WZ!_RIE14xz*BNOGh)ILAcCRL4BWQpXyHZ=GYKW3ywsW0zyE zW545|<4>pB8SPAPwsWRBJ36~KGo0O=+0FsZLg!%TQ0Fk`Naq;mTTbG9+qv3#-oUsv}>$uyz5=pB-aP7sjj)M z`K~(G2G>^C=RVg?*B;j)*J0N&*GboD*G1Pg*In1+s9sThqr6cSQDdUGsHss4q83F} zN7Y3wk6IPAHfmecj;LKxd!zP89gO-i>TuM3x7yvtUFecpZ5ecSz``zQB(_ru0aV^L#SeK|?{b@c=0z53sV@5YM%1I!*#MF0Q* From 677680fa3f0367c8c62f51fcb6646fb1036b1e5b Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 10:59:57 +0800 Subject: [PATCH 08/39] modify readme --- LFLiveKit.xcodeproj/project.pbxproj | 68 - .../UserInterfaceState.xcuserstate | Bin 13213 -> 13597 bytes LFLiveKit/publish/pili-librtmp/amf.c | 1037 ---- LFLiveKit/publish/pili-librtmp/amf.h | 180 - LFLiveKit/publish/pili-librtmp/bytes.h | 91 - LFLiveKit/publish/pili-librtmp/dh.h | 345 -- LFLiveKit/publish/pili-librtmp/dhgroups.h | 198 - LFLiveKit/publish/pili-librtmp/error.c | 26 - LFLiveKit/publish/pili-librtmp/error.h | 46 - LFLiveKit/publish/pili-librtmp/handshake.h | 1034 ---- LFLiveKit/publish/pili-librtmp/hashswf.c | 626 --- LFLiveKit/publish/pili-librtmp/http.h | 49 - LFLiveKit/publish/pili-librtmp/log.c | 209 - LFLiveKit/publish/pili-librtmp/log.h | 68 - LFLiveKit/publish/pili-librtmp/parseurl.c | 312 -- LFLiveKit/publish/pili-librtmp/rtmp.c | 4331 ----------------- LFLiveKit/publish/pili-librtmp/rtmp.h | 365 -- LFLiveKit/publish/pili-librtmp/rtmp_sys.h | 123 - .../UserInterfaceState.xcuserstate | Bin 10986 -> 10986 bytes Podfile | 1 + README.md | 16 +- 21 files changed, 16 insertions(+), 9109 deletions(-) delete mode 100755 LFLiveKit/publish/pili-librtmp/amf.c delete mode 100755 LFLiveKit/publish/pili-librtmp/amf.h delete mode 100755 LFLiveKit/publish/pili-librtmp/bytes.h delete mode 100755 LFLiveKit/publish/pili-librtmp/dh.h delete mode 100755 LFLiveKit/publish/pili-librtmp/dhgroups.h delete mode 100755 LFLiveKit/publish/pili-librtmp/error.c 
delete mode 100755 LFLiveKit/publish/pili-librtmp/error.h delete mode 100755 LFLiveKit/publish/pili-librtmp/handshake.h delete mode 100755 LFLiveKit/publish/pili-librtmp/hashswf.c delete mode 100755 LFLiveKit/publish/pili-librtmp/http.h delete mode 100755 LFLiveKit/publish/pili-librtmp/log.c delete mode 100755 LFLiveKit/publish/pili-librtmp/log.h delete mode 100755 LFLiveKit/publish/pili-librtmp/parseurl.c delete mode 100755 LFLiveKit/publish/pili-librtmp/rtmp.c delete mode 100755 LFLiveKit/publish/pili-librtmp/rtmp.h delete mode 100755 LFLiveKit/publish/pili-librtmp/rtmp_sys.h diff --git a/LFLiveKit.xcodeproj/project.pbxproj b/LFLiveKit.xcodeproj/project.pbxproj index 9b5e41bf..60276cb1 100644 --- a/LFLiveKit.xcodeproj/project.pbxproj +++ b/LFLiveKit.xcodeproj/project.pbxproj @@ -46,22 +46,6 @@ 84001FFD1D0017680026C63F /* AudioToolbox.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 84001FFC1D0017680026C63F /* AudioToolbox.framework */; }; 84001FFF1D00176C0026C63F /* VideoToolbox.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 84001FFE1D00176C0026C63F /* VideoToolbox.framework */; }; 840020011D0017850026C63F /* libz.tbd in Frameworks */ = {isa = PBXBuildFile; fileRef = 840020001D0017850026C63F /* libz.tbd */; }; - 849005B01D4B5C8400D2A3D5 /* amf.c in Sources */ = {isa = PBXBuildFile; fileRef = 849005A01D4B5C8400D2A3D5 /* amf.c */; }; - 849005B11D4B5C8400D2A3D5 /* amf.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005A11D4B5C8400D2A3D5 /* amf.h */; }; - 849005B21D4B5C8400D2A3D5 /* bytes.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005A21D4B5C8400D2A3D5 /* bytes.h */; }; - 849005B31D4B5C8400D2A3D5 /* dh.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005A31D4B5C8400D2A3D5 /* dh.h */; }; - 849005B41D4B5C8400D2A3D5 /* dhgroups.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005A41D4B5C8400D2A3D5 /* dhgroups.h */; }; - 849005B51D4B5C8400D2A3D5 /* error.c in Sources */ = {isa = PBXBuildFile; fileRef = 849005A51D4B5C8400D2A3D5 
/* error.c */; }; - 849005B61D4B5C8400D2A3D5 /* error.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005A61D4B5C8400D2A3D5 /* error.h */; }; - 849005B71D4B5C8400D2A3D5 /* handshake.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005A71D4B5C8400D2A3D5 /* handshake.h */; }; - 849005B81D4B5C8400D2A3D5 /* hashswf.c in Sources */ = {isa = PBXBuildFile; fileRef = 849005A81D4B5C8400D2A3D5 /* hashswf.c */; }; - 849005B91D4B5C8400D2A3D5 /* http.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005A91D4B5C8400D2A3D5 /* http.h */; }; - 849005BA1D4B5C8400D2A3D5 /* log.c in Sources */ = {isa = PBXBuildFile; fileRef = 849005AA1D4B5C8400D2A3D5 /* log.c */; }; - 849005BB1D4B5C8400D2A3D5 /* log.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005AB1D4B5C8400D2A3D5 /* log.h */; }; - 849005BC1D4B5C8400D2A3D5 /* parseurl.c in Sources */ = {isa = PBXBuildFile; fileRef = 849005AC1D4B5C8400D2A3D5 /* parseurl.c */; }; - 849005BD1D4B5C8400D2A3D5 /* rtmp.c in Sources */ = {isa = PBXBuildFile; fileRef = 849005AD1D4B5C8400D2A3D5 /* rtmp.c */; }; - 849005BE1D4B5C8400D2A3D5 /* rtmp.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005AE1D4B5C8400D2A3D5 /* rtmp.h */; }; - 849005BF1D4B5C8400D2A3D5 /* rtmp_sys.h in Headers */ = {isa = PBXBuildFile; fileRef = 849005AF1D4B5C8400D2A3D5 /* rtmp_sys.h */; }; AD7F89B4621A7EFEBEA72D49 /* libPods-LFLiveKit.a in Frameworks */ = {isa = PBXBuildFile; fileRef = B8CB02D2A92EA1F5A262F154 /* libPods-LFLiveKit.a */; }; B289F1DB1D3DE77F00D9C7A5 /* LFStreamingBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = B289F1D41D3DE77F00D9C7A5 /* LFStreamingBuffer.h */; }; B289F1DC1D3DE77F00D9C7A5 /* LFStreamingBuffer.m in Sources */ = {isa = PBXBuildFile; fileRef = B289F1D51D3DE77F00D9C7A5 /* LFStreamingBuffer.m */; }; @@ -136,22 +120,6 @@ 84001FFC1D0017680026C63F /* AudioToolbox.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AudioToolbox.framework; path = System/Library/Frameworks/AudioToolbox.framework; 
sourceTree = SDKROOT; }; 84001FFE1D00176C0026C63F /* VideoToolbox.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = VideoToolbox.framework; path = System/Library/Frameworks/VideoToolbox.framework; sourceTree = SDKROOT; }; 840020001D0017850026C63F /* libz.tbd */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.text-based-dylib-definition"; name = libz.tbd; path = usr/lib/libz.tbd; sourceTree = SDKROOT; }; - 849005A01D4B5C8400D2A3D5 /* amf.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = amf.c; sourceTree = ""; }; - 849005A11D4B5C8400D2A3D5 /* amf.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = amf.h; sourceTree = ""; }; - 849005A21D4B5C8400D2A3D5 /* bytes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bytes.h; sourceTree = ""; }; - 849005A31D4B5C8400D2A3D5 /* dh.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dh.h; sourceTree = ""; }; - 849005A41D4B5C8400D2A3D5 /* dhgroups.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dhgroups.h; sourceTree = ""; }; - 849005A51D4B5C8400D2A3D5 /* error.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = error.c; sourceTree = ""; }; - 849005A61D4B5C8400D2A3D5 /* error.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = ""; }; - 849005A71D4B5C8400D2A3D5 /* handshake.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = handshake.h; sourceTree = ""; }; - 849005A81D4B5C8400D2A3D5 /* hashswf.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = hashswf.c; sourceTree = ""; }; - 849005A91D4B5C8400D2A3D5 /* http.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = http.h; sourceTree = ""; }; - 849005AA1D4B5C8400D2A3D5 /* log.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = log.c; sourceTree = ""; }; - 849005AB1D4B5C8400D2A3D5 /* log.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; - 849005AC1D4B5C8400D2A3D5 /* parseurl.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = parseurl.c; sourceTree = ""; }; - 849005AD1D4B5C8400D2A3D5 /* rtmp.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = rtmp.c; sourceTree = ""; }; - 849005AE1D4B5C8400D2A3D5 /* rtmp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rtmp.h; sourceTree = ""; }; - 849005AF1D4B5C8400D2A3D5 /* rtmp_sys.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rtmp_sys.h; sourceTree = ""; }; A17586B27CD6843997425CCF /* Pods-LFLiveKit.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-LFLiveKit.debug.xcconfig"; path = "Pods/Target Support Files/Pods-LFLiveKit/Pods-LFLiveKit.debug.xcconfig"; sourceTree = ""; }; B289F1D41D3DE77F00D9C7A5 /* LFStreamingBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LFStreamingBuffer.h; path = LFLiveKit/publish/LFStreamingBuffer.h; sourceTree = SOURCE_ROOT; }; B289F1D51D3DE77F00D9C7A5 /* LFStreamingBuffer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = LFStreamingBuffer.m; path = LFLiveKit/publish/LFStreamingBuffer.m; sourceTree = SOURCE_ROOT; }; @@ -330,7 +298,6 @@ 84001FC91D0016380026C63F /* publish */ = { isa = PBXGroup; children = ( - 8490059F1D4B5C8400D2A3D5 /* pili-librtmp */, B289F1D41D3DE77F00D9C7A5 /* LFStreamingBuffer.h */, B289F1D51D3DE77F00D9C7A5 /* LFStreamingBuffer.m */, 
B289F1D61D3DE77F00D9C7A5 /* LFStreamRtmpSocket.h */, @@ -343,30 +310,6 @@ path = upload; sourceTree = ""; }; - 8490059F1D4B5C8400D2A3D5 /* pili-librtmp */ = { - isa = PBXGroup; - children = ( - 849005A01D4B5C8400D2A3D5 /* amf.c */, - 849005A11D4B5C8400D2A3D5 /* amf.h */, - 849005A21D4B5C8400D2A3D5 /* bytes.h */, - 849005A31D4B5C8400D2A3D5 /* dh.h */, - 849005A41D4B5C8400D2A3D5 /* dhgroups.h */, - 849005A51D4B5C8400D2A3D5 /* error.c */, - 849005A61D4B5C8400D2A3D5 /* error.h */, - 849005A71D4B5C8400D2A3D5 /* handshake.h */, - 849005A81D4B5C8400D2A3D5 /* hashswf.c */, - 849005A91D4B5C8400D2A3D5 /* http.h */, - 849005AA1D4B5C8400D2A3D5 /* log.c */, - 849005AB1D4B5C8400D2A3D5 /* log.h */, - 849005AC1D4B5C8400D2A3D5 /* parseurl.c */, - 849005AD1D4B5C8400D2A3D5 /* rtmp.c */, - 849005AE1D4B5C8400D2A3D5 /* rtmp.h */, - 849005AF1D4B5C8400D2A3D5 /* rtmp_sys.h */, - ); - name = "pili-librtmp"; - path = "LFLiveKit/publish/pili-librtmp"; - sourceTree = SOURCE_ROOT; - }; B2CD14611D45F18B008082E8 /* H264 */ = { isa = PBXGroup; children = ( @@ -399,7 +342,6 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( - 849005B21D4B5C8400D2A3D5 /* bytes.h in Headers */, 84001FDB1D0016380026C63F /* LFLiveAudioConfiguration.h in Headers */, B289F1DD1D3DE77F00D9C7A5 /* LFStreamRtmpSocket.h in Headers */, 84001FDD1D0016380026C63F /* LFLiveVideoConfiguration.h in Headers */, @@ -407,8 +349,6 @@ 849005B61D4B5C8400D2A3D5 /* error.h in Headers */, 84001FE31D0016380026C63F /* LFLiveSession.h in Headers */, B289F1DB1D3DE77F00D9C7A5 /* LFStreamingBuffer.h in Headers */, - 849005B71D4B5C8400D2A3D5 /* handshake.h in Headers */, - 849005BB1D4B5C8400D2A3D5 /* log.h in Headers */, 84001FEB1D0016380026C63F /* LFLiveStreamInfo.h in Headers */, 84001FE91D0016380026C63F /* LFLiveDebug.h in Headers */, 84001FE71D0016380026C63F /* LFFrame.h in Headers */, @@ -418,9 +358,7 @@ 84001FDF1D0016380026C63F /* LFGPUImageBeautyFilter.h in Headers */, 84001FD31D0016380026C63F /* LFVideoCapture.h in 
Headers */, 84001FD11D0016380026C63F /* LFAudioCapture.h in Headers */, - 849005B91D4B5C8400D2A3D5 /* http.h in Headers */, 84001FE11D0016380026C63F /* LFGPUImageEmptyFilter.h in Headers */, - 849005B31D4B5C8400D2A3D5 /* dh.h in Headers */, 84001FDA1D0016380026C63F /* LFVideoEncoding.h in Headers */, 84001FE51D0016380026C63F /* LFAudioFrame.h in Headers */, 84001FED1D0016380026C63F /* LFVideoFrame.h in Headers */, @@ -429,7 +367,6 @@ 849005BF1D4B5C8400D2A3D5 /* rtmp_sys.h in Headers */, 849005BE1D4B5C8400D2A3D5 /* rtmp.h in Headers */, 84001FD81D0016380026C63F /* LFHardwareVideoEncoder.h in Headers */, - 849005B41D4B5C8400D2A3D5 /* dhgroups.h in Headers */, B289F1DF1D3DE77F00D9C7A5 /* LFStreamSocket.h in Headers */, 84001FD51D0016380026C63F /* LFAudioEncoding.h in Headers */, B2CD146D1D45F18B008082E8 /* LFAVEncoder.h in Headers */, @@ -579,23 +516,18 @@ 84001FE81D0016380026C63F /* LFFrame.m in Sources */, B2CD14721D45F18B008082E8 /* LFNALUnit.cpp in Sources */, B289F1DC1D3DE77F00D9C7A5 /* LFStreamingBuffer.m in Sources */, - 849005B51D4B5C8400D2A3D5 /* error.c in Sources */, B289F1E11D3DE77F00D9C7A5 /* NSMutableArray+LFAdd.m in Sources */, B2CD14771D45F18B008082E8 /* LFH264VideoEncoder.mm in Sources */, - 849005BC1D4B5C8400D2A3D5 /* parseurl.c in Sources */, 84001FDE1D0016380026C63F /* LFLiveVideoConfiguration.m in Sources */, 84001FD21D0016380026C63F /* LFAudioCapture.m in Sources */, B2CD14751D45F18B008082E8 /* LFVideoEncoder.m in Sources */, B2CD146F1D45F18B008082E8 /* LICENSE.markdown in Sources */, B289F1DE1D3DE77F00D9C7A5 /* LFStreamRtmpSocket.m in Sources */, - 849005BA1D4B5C8400D2A3D5 /* log.c in Sources */, 84001FD91D0016380026C63F /* LFHardwareVideoEncoder.m in Sources */, 84001FEC1D0016380026C63F /* LFLiveStreamInfo.m in Sources */, - 849005B01D4B5C8400D2A3D5 /* amf.c in Sources */, 84001FEA1D0016380026C63F /* LFLiveDebug.m in Sources */, 84001FEE1D0016380026C63F /* LFVideoFrame.m in Sources */, 84001FD71D0016380026C63F /* LFHardwareAudioEncoder.m in 
Sources */, - 849005BD1D4B5C8400D2A3D5 /* rtmp.c in Sources */, 84001FE01D0016380026C63F /* LFGPUImageBeautyFilter.m in Sources */, B2CD146E1D45F18B008082E8 /* LFAVEncoder.mm in Sources */, ); diff --git a/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index 9b97746439e05a430762504f5e804d9b0c158917..dd95fbe7be4f2bddedc05c5b185d17780923ecad 100644 GIT binary patch delta 7756 zcmZu#2Ygf2_rK$1H@g|lXht(20SYaJmerO$+EQp4WtGr|Qt7fyDbw7ifGk-LnIc_G zL6(4s2sl7RL6#zdfCwU}2m&G?{BIh{`2EfM3)l*_f$d-)*bfeXkHIJ4Q}7u$ z2)+O(z)5fhTmTorPv8o;2CjqOz@OkI_#4~<4A@8G-m zZ~T}MF=9r-L@-iD#>kl{#=saE6XRmsOcSOl6VJ3}5||{WBa_Z#FnyU!rXMqa8O-D` zLztmVE>pmrK%MXblEOt&v797(`w9%9Fh0IzGi&RFB(9!$TRMZxBRh$K>?hqxHrJPT zMl`snaMZ*Wfl>KfGCv{F2?~G%q|<9GC<4WxggA+d(s!rVAo4OPCzD8au0oMDrnIW7 ze^g~bfxkSp-0$P+`HP)HihL7FD+Af(e!uVp5CJiefJtBqaMggx#9aekAx-F_gCs4J zUj24OW3_P(xahRwRzZlh!1Gv{?)4Ks{Iu*3fGm zSWnuLM3PFn(W@WnPi31FF+a_aS6bY{S5{W!ZxQg14>TX;tMKPH&!0f;)VR=lU^h_J zgB@Te*ahAvNu(WVPde0t55OMa1bazG(uvwFnF`uFdQ$Ib;U zAe~DP_7|m-E+mt5Cf&LD!o=REKwKU87JLUzldhyE=@YWX58y1FxrSQo{E%Mx{*pl9 z=t6(_fL0a(D z_E(jbkF8KJjqiUCEke>lJaf{zrxlL#_bCjV0JrG+e}UWJ4!AoszcjD1mq|->ep8PlleO%IezZ89`Eu_!brUV`D2T%44Y? 
zL*Is&4=eu+<^C`> zm7ahINFWAxArIWkTx)usSVt7mfBe;!ny;>4-@$pcCKp72V zC?^9sgUZ%J4fViP4wj{3xq*Ll_P7xWIuYip!bb=|N_sL{*4NN5kA%IX) zYi3c8Hb~Yh;4iKSGwud^*23ZNbACoev41_*mQ;&qc5gA$Ol>y(VB7bUmxo<)p90}Th z#KuMQU_NkFSCj0zh!GimtDqMA52Fitjl2>@mawl@QD}kHum;wWAR&bPA7}|9Nh1(E2NX|+;HCy_c=bhW z$b11D!Z7fHz;GuOc`OU0#_Zp z0)K&5$$YYaEUbgq;C1*L;YdAM9g0IU7u4wZy(%gy{T19Qjh1^wW6Nobpj+@EaIJ@b z!Q1c-ybJ$^_uxP9K72qHkvGX=vV<%p%gA!Fg1ogJIN>As7y;l!2w{5X(ez(Q-X_)b zw}#ZxdmT;hOudnttyOZx`pr2iq^>5b8jOiFNJ}f#lbp)(2kPjE4kS{TqYn*+Ls3u2 zsba_kdsdUO@VMo7hBc@K zibXA{ym7!u-XUAbb}H!(`pxDdwN>15ZL?r|)Dfs^PzSQH26ZBv8uMqMuZY?#wIbA% zG%nT|bpx(i)CF}V?~={6s5|OTw$Lm4SqaO|EX}W=vD(nYpmfytDep|OjifZb$U@mq zFr?`d(m{v<)e|v>koTUE4o4%xq}*Peu74wgQHX+27&?)k?0OP@XbkLGhYHb4Xe@c3 z>>(dL4M9|bUVc8VP&uj~yU7O>Di1|D3dW;JFT9?CZCW`$!FvsIYd5R3mj-MT7s6MWoS8CK}*gTER1HMjfJr+?9ak1>dr3> z`@O5t23oPu8nhOzL+i|y!bnQLMA?oO;wr}uw2K-N z?IcHQ(EH>VMfGQu8SSMol10AZUJ)g9Xq0z9ItW~=(E;=^`UHK7J|oA;338I0BHyk? zhtTKf3-l#AOui$h$tChL`6HBt&AFRWH8)W@jyoWY24vjV8HD zej!)L?;)djVp8|^8QnYnt(H#~#2E8wieLuw$gkvDEf!!QxlVqg24@v<43=XB_oKqf ztHVmH;?63P`{PKgd7+$QJvLJ>;wWqY>#>RaNp6w;{W;^Z%Q7oP^r}C+<{_DhezQ&oX^@IW+B5u z9t-*Pcr-4cObb~kV4;vWStz0|?j1daQ(3I;3S3Ep8wXe@slnq|D1FgYlR`#iX-7NI z(oy{iCL!7&3uQH!Wobv7&8@N6tY1=?jdh#Y;J#HPW^mugFzs zDMY1}7Ci0VOT+t!I{X%1$t|`fc~;??kZ{#3)YRZwmUh<9!vkI$Rz#bt=obnp&T5NO zZ@?QvQ{G{rz6Nh%VN_V3E?YzTY-6E;CKuijPA-s+cj5Q(?yw~NDhu<*rj?eC&GZ%c zGb)Qm`O8Un7MfUSWTE-l8XnGQm*c(oBd)pKhWFw9Tt9nq@MHWbtsnRk7FufXXDqaa zu{(oqy|M!3g(U^gYE+|xzr-hiY8^g|zrtVRBlsx(1|P%6S!icr3=17Bq;7Sw(9Oao zENr?CpQJl{C;kqf#^2*JfWG%oD5f-C7B-`(+MIJzJy+v@@O}IMKg9pCur&+YvoM*3U0B%j zDdaJLVJJ%mGKis}n83m|ENolH@EATrqcD+$Ni1v^LQ)sk!WqrQIA3eRD459qnxkQ~ zjE;pJSlE$;ov0W`oKbEwWBV^!I}^iDWm8y~%EHc+R_2OM@h~m^i#C>N$xubRvalNq z>4qmJ`OgNB1$n^gM8JBSX#*roTPAUPk*gF1do;u-(~fCB{AqjC)fe#9(V-3?aUk8@ z)2Ao?VmdLj{a8cwPhnCS%yePsGSmvaSlF8yf`xrpn6`%DK_kG2W4cn2<@~R0Bn9YK6PApPs$8r z28D(3*D?b^VyQv_vOy`x2R<+XSm`|gtQq30o}d+I3EI)Ki6q6fVP&-VH>W%1@<4I8 znPY~*o;5&9(39ql8A%;aK|V;ap2^1nit`lhS(r_MgS!10cB4lPIDCa}Z8Ea@_$T=D 
zQ_J(l&_jdrV@xqqG9WA93-}2HLJ$S)z)RO}2a?h6=r4M3C!~jU23k67^uVqO_Rs^n z7I*+Xl$$^g;AY`DcrKnF#0&8viWIByT6zxm4&Fr1;9j^ng6EB6=nKytph?m0~${Wth z<@tDdJU_31SIDd6&EPHO9prt_7xImK55EgPji1gB4&fK`OZhMJEBKZCD*goiB>ohB zkiU?>h`*S>l)s$+7XNL2HNTc$&tJpe%|Fh+!2er-1bRVxL0`cT!BD{%!B|1DpiEFM z2ngO1tQTw)yerr$*e=*1_&{(-a76Hp;JDzV;0M7u!Fj<&!6l(w=oTgmdkXsq(}jJ7 z!-B%G!eU{m@MU3zuu@nhoFJSboF$wioGY9!Tqs;5Tr6BFTrJ!x{7QI1_)ugJxkOz= znW7xgP|yLqopghAqjX15x=XrSx<|TK zx=;GK^tkk#^o|V4Br>T?E>p_XGLtM?W|i4x4w*~VRpygTm93Pmk*$@jm+h48mhF*! zB-=0hSoW>#wCu9%s_dHVci9cuO*v04ldI(#xlSG>cgel-=JHs1Yk3=aqP(5Fr+lb9 zUtTPqC=be6dGIy)4EZei9Qj=NQhAMhgM7Dqzx-?Y5&2R1G5L@3i}FkIEAp%IYw|}5 zfg(a7Qz#TFg+<{|xD-tk%@whVR*HB$+r^r+I6$Oe3idPk!Vx8h+ z#W}@!#YM#>#bw1Kr9dfD8k8NCy_9{F>B_#!enI7ESL1u28O1ZcuJizN_4$+^gKDJfJ+RJgq#VJgfXs`M2^PCP)f#nB zr;bt^)G_Lo>I8LLb&|T1Iz`=C-Bq2g9-{WC^VEKIfx1LprY=_p)Z^5P)f?1btIw!^ zSKm8$CaNz-I#25AOshG?c}W@(ma zmT6XKR%%vhYBY74Et+kb_cS{-?`uBLe5mVw6YHw@r zYVYZII=)Vz6X~qF=DL=;I9-CStu9H|S=UR~N0+YatLvv5q07_xbp^U2U5TztSFW3; zo2y%)tJc-(>UHaN@8~w^HtY844(d+mPV3I-&gw4eF6l1oe$n068}&`}ZS_g|_WF+c zWPPf>i@uvaTR$+UAFLm$AEEc@^YsP#LOs#1)Njxq(BF>YN7rK2U;l4|K_F4{EPFl`cE?6#E zu2_Dx+_K!U+_T)b{A+n^HCR)veXUhiVx47OWUaB*Ti04QST|WWTR*gZWZiH5#Cp%h zvk7eyo6M%Psco^gL|c1XCtHebfNhMe+*WC;vW>S*vb|!PZChpAW_vGa+hyBt`^t9A zcFuOi_Snw1TkHw;&h~Ehp7uWW411=1xINeIv*+92w|{2;+~g56x?aV5DrxRPC|t_;@z*I?IB z*9e#2HO4j8RpKghEpr`m{pEV-#%`(G;I_FPZgJ<&bcJ)qr1)O*PLh4--cr1!k{qW6;bviGX@y7zbQeeXl>qh_F) Zs+q2tIow(zdMp?EFs0$&#(Oi%{{g*<7yp%zL66ZBKmecsC8$6R8qk6c^k4xixF8N1L1SnFO`!#}fws^NlAt4W zf^LuseINt+24MgUgdE6)LEwi%D1mYq17l$vOoGV}glRAn=E6L99ag{_uo70m2G|Ij zU^8rit*{+-!TYcm4#OvK1ipk5Xf~XLv+xak2bbU%xCOW24m^N|@CXAIVkwqk9oAz9 zc48NH<9Hl=5x2w%xD{@V+u&r}0e8bGxHs;DGw{oJ03M8o;bL5d%W(kD#&hsoJP*&u z3-Cg`2)~9ycqv|iSK&3d3a`bR@jG}o{t)lMALGOL6MP(hiBI55_$PcBU%@})U+}N^ zD*hAS!}sxD_yK-G2vG%znrMiDScsK0AhDz=d4a@}R-`rQLb{S}B!zS*JxD4^BiW=s zd6^6#gUL`*Kt_^cQbxv;Ael;LlKJE{5+bjY6=XH3CU23oWE0s=-XS~45%MWHNa-4igPLPx26gf@KkRQke()A+wkz6IelRwCxc?c-KQzfRwzN2!sXrg1bUR46#F&6TTCyQCJ5_V*|ZG#;B@GQ?k6Rx&)4 
zDvWPB6cr#BN@Jf9s0fWjZtA8j+2A<#8A}(^MU+j_zbJ29$*4fzGQU6cfpDoSS1!*O zRx+lOfAFY+0)JU@nLqCg8jmKRiAanlp~)zSXecD_VQWDV_0Si;MpMvK*7YiyhNiRb zNoW?Djpm@ap~v!cOA8t=2iic+Tz@`Vz{D1!Md&pYLW|K7v^3Nzs!wDS>ZQ$TENvWG z9F>s#I%-~lR!|?6o<^(CYBu6chQFeJ$`F5Xpm1oRzbrGQc!+;2ZO9hUDErycPoWo3 zJgP=-g<6ZPq!O(SC5Y|us^F8_ztd;~Q?(IoLYvX#mxq+(j~dDJW%fzU4it{`mk07j zmeQuQ8AZAB<{_|HLT_6lLl;fr$XdN`qwS%KPA${>PAE@eA@8D{p(&C^@LuR`Nu#9t zCU~D2VHTr-$!IQpf!QdYqEje}&AvChv>>yYv{vZ>bTC)mAf;31&`F{-q(0k*bhd|; z=nzVH-W$qSs-nhs!2=CwChb=VUhvT@`VyPFwagyUni>Ub8~~a@^8x7@oeJ{dKyun5OPe4c?K@r*sp7y;!Jo6c`BhFWYM9tpc1m7 zKOII3nO!yJ<{ad~;7|{p3g>}9OYoG!u>0}zDl=9WcjnZH(axuP-qf=Q1$O(C*V?3f-RNGMVHP6Bq-hp?+ zj=qCVtAL$!`cp^$AnfS7;X^uu&VKIb``|!5A3X?%=uA3`Ir_^oWAIZrRKeX>3(tb|VvxR8=e9qE(O-ruG(?v^NB=8auZQn9xIq`wB@7><*xE2~CKV2@7JIP| zH^gzYn!ZKX(sgt_-LMun#!YZjf66KEpVOzDu*~I`Xmq8KUsmoVt!eJmT4iBKqF*5hb{^)`4^7>IoB?#>s6>C&k$& zG>>l;XG?go#iU80{+fnGqi_&;HsI0Bz+>?^JRVQL6Y(TGneJw0{*dmWd+9#<5#3J@ zY+%Ob@f19jJ*VO6?0Y6N_d$Ay9%j!^=n?k)X*j0zVGl1WD=DiDs>0%e&c)&MF(lMW zy*;=XFIhuBuAwo!3@>Ln>S=b#@CT|`%j+nCQ5*YF!yovKr{av^@M>_bp#y8%EB@79 zUGrXNkac)NT_E6%^s~A^*aFTfycNf7!*A1L^f*2FJQ{Z3_x|nUco%-3eont&OjD+F z8TR7+^?DEBgY-*!g7t3GS(_ZepVjL<#>Sosk7cDUvqNT}u&6Ll$oz|yy18;+Xurvq zUWfD~KF5j~dyS0{4M?te~*8_7w| z=Rw7Torg|5bY}iusNe6qhHo)$#@F$0_y)d7zo$RY3-n?&zK!qTyZCqdBfU=l=0V8B z`x>?fAL75m-t>rGs=)uypO`m2&pyzrCe!3UMM*d!W`PL*S208+ASP0XCURB`U1r75 z6;=%W%!(oA3A+EQ>M1rEgG9>`7SYjP{*~{Ek;H`aJ-teQ4QF|0^4=aSv#T!Gfw2+i zzcV{=vCMugoZ0`sLW6io92-S^q#^x{-mD~zNMowMP5-L5ZW2CojG{L}kIg>Cvs_FP zYFclF7Fc?F+mIw?Wzv?kBkf5dy+iNP-{~LKB$;$TZqkYVN$;^BxX+CI#5$=9tlX%0 zi3OgmS&L?qvL>`*Z%rrFEhL>}{8v?dNhVX3MIX?IOw}W%YK+b88c1^4evusdPX!r7 zpVY_guR-`Ybja3DHH-`o3l{Q#D#!>PfGJSf9k!CNtxI{pRQgr;tU}7s1`;5nSh=x< z_{q3XrroL)P$Z&{d0=PH2_$?Xn@uK>$)P#+hS^+Az!FM$R+7}5k$U9?hE5nae|D z{RJ)xFOUaOuDtit4^b6l84u#xMVsCTFM1^p66Rs#&Dx3* zrI89!Nvc8$r!hK?2PqHHJjm*+wvlyYedvJGDySkG$i~n|&P3H_vX${H*}{XOf^6eK z$p}E_y7;WG8u#>>}@z56EuvA=$%&ngkVB{fYJ=w=j&2Dml 
z93+Rx$H>LAg5OA|@L=Y_!h@9u8~dHTV}>gN8hk|tYE}CODZq7ps9$sLw6B}5P)SUjm$r?B#7sJDgJhbE?f#x_9Z`TwUC?DC}7}-v7 zHYDckoMXrN1|>Mys>a(m7v~=MEQxf=3*=R?CJ##J$4)=&*Hb6qV!0-a#>jQf$2BB3 zxJDd<&z9eYhqlZDJhbDX{aSJhmU7Lx7OX~I%B(@>@sP+vGAotmg(=3td^(vOLq-<{ z#`Ou^@F_#*eZgQ`2C5y`K3ubh&-*o;HEq}CK|CbYB240vYr*&{xg?ZOBA21Qs00l` zd1xH6vF`w~rHeDVu{(y=?55#G`L+J3X9Ww%(R>>evXMw7AZHfti8L*|hMWD&c~T0)kw8?86U zMsl2!aqYQ5+(d2}w}acmeZ(E$4soAzXSs{quiOppUQj>;T7h0*6qp1Sflc5LxC9`%78DC63N{Nq65JGmP%iWe6NO2_G~poOVBrwq zP~kA)aAA?KSXe476HXJ(5Y7_L5zZ4X5H1pigiD0Wgs%&$g?oj^gqMUjB1nWMqIEo zXtrpsXufEn=%DDB=(y;F=#=P!=$h!Z=&tAw(Y;6%85tQH*(fqKGBa{;WO3x!$nlXA zBPU1hh};u-D)MaPjmUdZ_9#zO@2KIysG_LisM4sis0mS%qiEEWs5wzPqjpDKj=CXM ziS^?4;vV93aW8R(I8&S@4vOcB7l;>$7mJsQmy1`4H;T85cZhe2cZv6j_lpmTKNf!? zzAK56I3x`vjU`Pb%_WJFbV)BsA4y+HKgmmy{*nQb5t5OT63Hu)a>*#k7)fxPWP)U- zWU*wML+T`gTBt&+YaT_^oe`myw=^qBMu>6g;)q(4f3l3tNsll~^XDZMRy zEK|thW$k5MWqoDYvZ1mOvXQcqpe!I8EgLHvFPk8HRW?tyQdT9~EITMWD*HipUG|&o zhU}K?FWDp6Ke8usx!fo>$t`l5+#%1B50a0Rm&pV2(eerMN%Ekam(P?hkXOsM%6G}n z%72hQR;U$rg+t*~xD~AxZ58bmNs11NPKq=|FGU|kUqzlGU*T5-3ly&?#w%thW+~<< z<|&pbPAYy-{H(aE_(O3|@s|>m5lV?tqtq*nN|UmMGD(@D?4e9k_Eh#(_E!#2<|qd# z2P=mtM=GZ&7b;gM*D2ptzN36s`JVE9wYQAcrYL%)|Rjpd9+N9c|+NRpB+O0aII-&YXbyjsw z^@Hl7>XPcR>ZaJIA8>aObU>Qr^QI!ir3oueM4 z9;_}=2h^k0W7U(?K{cP(EzKRx@0vd~_caf+Laj&}rIl!#XapOVy?8GIW``EM2xPPgksaMOUsHtsAQwubZKpt(&V`pj)I1>DK6~bZ_a_ z>o)2(>psx!)_thkt2?8+uMcv1o!+32(VO*FyW2{A;V$A5yN@I zPlnruKMnT{4-Aiu$cT-cQE7}Z#u^(Ln;4rJUo<8dTN~RNdm1MiXBn3pR~T0sR~y$D zD~;90wZ@&sUB(ZLdyF3$4;l{}KQ(@4{L%P0MjYda$&4w8nG&-gW_iqtn3XZBW7foM zh}jggC1zVp@ST`#?J?~$?Kd4XeQf%~^r`7H(`C~GGd2s%5#~s<(QGnX%r>*b+|Zn0Zf8z3C!4#N zyP3P2Q_XqiVsp89lzEJKqIt5Jnx~i-m=~EtLGu#xUh`+>&&|ipCoBew$I`^o%+kUV zZ)s^svUIR?vUIU@vvjxkEpsg^EC(&eEN3j=S*}{{TK=%yv;1XwXeCyGHNqNc6c^?WQ($iZP7Nl&1`FC>tyRyDd_J5KBrI3t{qPKi_I zR5~?IozviSIvYE?I&+-m&Y8|N&JUc2UEqpvNnLuE+ZE?(;%e@SceQmTx;nTzySlo@ zyOy}Nx^}q^x(>TObscjZcb#;dc3pJ+;=1O#;kxa*?|SI^#~oy+b$3H|wtJd;zI%!L zP4{N^4)=TR58Qj)huojIkGem1A9tU1pL1Vu|K|SPeb4>C{kMmBgq}!`#3S|SJVuYp 
z)6A3LY2#_{N%nN}^zfv6dV4ZF13dYjB2OUpTKDn1_pLO59Fa8Gr>&z -#include -#include - -#include "amf.h" -#include "bytes.h" -#include "log.h" -#include "rtmp_sys.h" - -static const AMFObjectProperty AMFProp_Invalid = {{0, 0}, AMF_INVALID}; -static const AVal AV_empty = {0, 0}; - -/* Data is Big-Endian */ -unsigned short - AMF_DecodeInt16(const char *data) { - unsigned char *c = (unsigned char *)data; - unsigned short val; - val = (c[0] << 8) | c[1]; - return val; -} - -unsigned int - AMF_DecodeInt24(const char *data) { - unsigned char *c = (unsigned char *)data; - unsigned int val; - val = (c[0] << 16) | (c[1] << 8) | c[2]; - return val; -} - -unsigned int - AMF_DecodeInt32(const char *data) { - unsigned char *c = (unsigned char *)data; - unsigned int val; - val = (c[0] << 24) | (c[1] << 16) | (c[2] << 8) | c[3]; - return val; -} - -void AMF_DecodeString(const char *data, AVal *bv) { - bv->av_len = AMF_DecodeInt16(data); - bv->av_val = (bv->av_len > 0) ? (char *)data + 2 : NULL; -} - -void AMF_DecodeLongString(const char *data, AVal *bv) { - bv->av_len = AMF_DecodeInt32(data); - bv->av_val = (bv->av_len > 0) ? 
(char *)data + 4 : NULL; -} - -double - AMF_DecodeNumber(const char *data) { - double dVal; -#if __FLOAT_WORD_ORDER == __BYTE_ORDER -#if __BYTE_ORDER == __BIG_ENDIAN - memcpy(&dVal, data, 8); -#elif __BYTE_ORDER == __LITTLE_ENDIAN - unsigned char *ci, *co; - ci = (unsigned char *)data; - co = (unsigned char *)&dVal; - co[0] = ci[7]; - co[1] = ci[6]; - co[2] = ci[5]; - co[3] = ci[4]; - co[4] = ci[3]; - co[5] = ci[2]; - co[6] = ci[1]; - co[7] = ci[0]; -#endif -#else -#if __BYTE_ORDER == __LITTLE_ENDIAN /* __FLOAT_WORD_ORER == __BIG_ENDIAN */ - unsigned char *ci, *co; - ci = (unsigned char *)data; - co = (unsigned char *)&dVal; - co[0] = ci[3]; - co[1] = ci[2]; - co[2] = ci[1]; - co[3] = ci[0]; - co[4] = ci[7]; - co[5] = ci[6]; - co[6] = ci[5]; - co[7] = ci[4]; -#else /* __BYTE_ORDER == __BIG_ENDIAN && __FLOAT_WORD_ORER == __LITTLE_ENDIAN */ - unsigned char *ci, *co; - ci = (unsigned char *)data; - co = (unsigned char *)&dVal; - co[0] = ci[4]; - co[1] = ci[5]; - co[2] = ci[6]; - co[3] = ci[7]; - co[4] = ci[0]; - co[5] = ci[1]; - co[6] = ci[2]; - co[7] = ci[3]; -#endif -#endif - return dVal; -} - -int AMF_DecodeBoolean(const char *data) { - return *data != 0; -} - -char * - AMF_EncodeInt16(char *output, char *outend, short nVal) { - if (output + 2 > outend) - return NULL; - - output[1] = nVal & 0xff; - output[0] = nVal >> 8; - return output + 2; -} - -char * - AMF_EncodeInt24(char *output, char *outend, int nVal) { - if (output + 3 > outend) - return NULL; - - output[2] = nVal & 0xff; - output[1] = nVal >> 8; - output[0] = nVal >> 16; - return output + 3; -} - -char * - AMF_EncodeInt32(char *output, char *outend, int nVal) { - if (output + 4 > outend) - return NULL; - - output[3] = nVal & 0xff; - output[2] = nVal >> 8; - output[1] = nVal >> 16; - output[0] = nVal >> 24; - return output + 4; -} - -char * - AMF_EncodeString(char *output, char *outend, const AVal *bv) { - if ((bv->av_len < 65536 && output + 1 + 2 + bv->av_len > outend) || - output + 1 + 4 + bv->av_len > 
outend) - return NULL; - - if (bv->av_len < 65536) { - *output++ = AMF_STRING; - - output = AMF_EncodeInt16(output, outend, bv->av_len); - } else { - *output++ = AMF_LONG_STRING; - - output = AMF_EncodeInt32(output, outend, bv->av_len); - } - memcpy(output, bv->av_val, bv->av_len); - output += bv->av_len; - - return output; -} - -char * - AMF_EncodeNumber(char *output, char *outend, double dVal) { - if (output + 1 + 8 > outend) - return NULL; - - *output++ = AMF_NUMBER; /* type: Number */ - -#if __FLOAT_WORD_ORDER == __BYTE_ORDER -#if __BYTE_ORDER == __BIG_ENDIAN - memcpy(output, &dVal, 8); -#elif __BYTE_ORDER == __LITTLE_ENDIAN - { - unsigned char *ci, *co; - ci = (unsigned char *)&dVal; - co = (unsigned char *)output; - co[0] = ci[7]; - co[1] = ci[6]; - co[2] = ci[5]; - co[3] = ci[4]; - co[4] = ci[3]; - co[5] = ci[2]; - co[6] = ci[1]; - co[7] = ci[0]; - } -#endif -#else -#if __BYTE_ORDER == __LITTLE_ENDIAN /* __FLOAT_WORD_ORER == __BIG_ENDIAN */ - { - unsigned char *ci, *co; - ci = (unsigned char *)&dVal; - co = (unsigned char *)output; - co[0] = ci[3]; - co[1] = ci[2]; - co[2] = ci[1]; - co[3] = ci[0]; - co[4] = ci[7]; - co[5] = ci[6]; - co[6] = ci[5]; - co[7] = ci[4]; - } -#else /* __BYTE_ORDER == __BIG_ENDIAN && __FLOAT_WORD_ORER == __LITTLE_ENDIAN */ - { - unsigned char *ci, *co; - ci = (unsigned char *)&dVal; - co = (unsigned char *)output; - co[0] = ci[4]; - co[1] = ci[5]; - co[2] = ci[6]; - co[3] = ci[7]; - co[4] = ci[0]; - co[5] = ci[1]; - co[6] = ci[2]; - co[7] = ci[3]; - } -#endif -#endif - - return output + 8; -} - -char * - AMF_EncodeBoolean(char *output, char *outend, int bVal) { - if (output + 2 > outend) - return NULL; - - *output++ = AMF_BOOLEAN; - - *output++ = bVal ? 
0x01 : 0x00; - - return output; -} - -char * - AMF_EncodeNamedString(char *output, char *outend, const AVal *strName, const AVal *strValue) { - if (output + 2 + strName->av_len > outend) - return NULL; - output = AMF_EncodeInt16(output, outend, strName->av_len); - - memcpy(output, strName->av_val, strName->av_len); - output += strName->av_len; - - return AMF_EncodeString(output, outend, strValue); -} - -char * - AMF_EncodeNamedNumber(char *output, char *outend, const AVal *strName, double dVal) { - if (output + 2 + strName->av_len > outend) - return NULL; - output = AMF_EncodeInt16(output, outend, strName->av_len); - - memcpy(output, strName->av_val, strName->av_len); - output += strName->av_len; - - return AMF_EncodeNumber(output, outend, dVal); -} - -char * - AMF_EncodeNamedBoolean(char *output, char *outend, const AVal *strName, int bVal) { - if (output + 2 + strName->av_len > outend) - return NULL; - output = AMF_EncodeInt16(output, outend, strName->av_len); - - memcpy(output, strName->av_val, strName->av_len); - output += strName->av_len; - - return AMF_EncodeBoolean(output, outend, bVal); -} - -void AMFProp_GetName(AMFObjectProperty *prop, AVal *name) { - *name = prop->p_name; -} - -void AMFProp_SetName(AMFObjectProperty *prop, AVal *name) { - prop->p_name = *name; -} - -AMFDataType - AMFProp_GetType(AMFObjectProperty *prop) { - return prop->p_type; -} - -double - AMFProp_GetNumber(AMFObjectProperty *prop) { - return prop->p_vu.p_number; -} - -int AMFProp_GetBoolean(AMFObjectProperty *prop) { - return prop->p_vu.p_number != 0; -} - -void AMFProp_GetString(AMFObjectProperty *prop, AVal *str) { - *str = prop->p_vu.p_aval; -} - -void AMFProp_GetObject(AMFObjectProperty *prop, AMFObject *obj) { - *obj = prop->p_vu.p_object; -} - -int AMFProp_IsValid(AMFObjectProperty *prop) { - return prop->p_type != AMF_INVALID; -} - -char * - AMFProp_Encode(AMFObjectProperty *prop, char *pBuffer, char *pBufEnd) { - if (prop->p_type == AMF_INVALID) - return NULL; - - if 
(prop->p_type != AMF_NULL && pBuffer + prop->p_name.av_len + 2 + 1 >= pBufEnd) - return NULL; - - if (prop->p_type != AMF_NULL && prop->p_name.av_len) { - *pBuffer++ = prop->p_name.av_len >> 8; - *pBuffer++ = prop->p_name.av_len & 0xff; - memcpy(pBuffer, prop->p_name.av_val, prop->p_name.av_len); - pBuffer += prop->p_name.av_len; - } - - switch (prop->p_type) { - case AMF_NUMBER: - pBuffer = AMF_EncodeNumber(pBuffer, pBufEnd, prop->p_vu.p_number); - break; - - case AMF_BOOLEAN: - pBuffer = AMF_EncodeBoolean(pBuffer, pBufEnd, prop->p_vu.p_number != 0); - break; - - case AMF_STRING: - pBuffer = AMF_EncodeString(pBuffer, pBufEnd, &prop->p_vu.p_aval); - break; - - case AMF_NULL: - if (pBuffer + 1 >= pBufEnd) - return NULL; - *pBuffer++ = AMF_NULL; - break; - - case AMF_OBJECT: - pBuffer = AMF_Encode(&prop->p_vu.p_object, pBuffer, pBufEnd); - break; - - default: - RTMP_Log(RTMP_LOGERROR, "%s, invalid type. %d", __FUNCTION__, prop->p_type); - pBuffer = NULL; - }; - - return pBuffer; -} - -#define AMF3_INTEGER_MAX 268435455 -#define AMF3_INTEGER_MIN -268435456 - -int AMF3ReadInteger(const char *data, int32_t *valp) { - int i = 0; - int32_t val = 0; - - while (i <= 2) { /* handle first 3 bytes */ - if (data[i] & 0x80) { /* byte used */ - val <<= 7; /* shift up */ - val |= (data[i] & 0x7f); /* add bits */ - i++; - } else { - break; - } - } - - if (i > 2) { /* use 4th byte, all 8bits */ - val <<= 8; - val |= data[3]; - - /* range check */ - if (val > AMF3_INTEGER_MAX) - val -= (1 << 29); - } else { /* use 7bits of last unparsed byte (0xxxxxxx) */ - val <<= 7; - val |= data[i]; - } - - *valp = val; - - return i > 2 ? 
4 : i + 1; -} - -int AMF3ReadString(const char *data, AVal *str) { - int32_t ref = 0; - int len; - assert(str != 0); - - len = AMF3ReadInteger(data, &ref); - data += len; - - if ((ref & 0x1) == 0) { /* reference: 0xxx */ - uint32_t refIndex = (ref >> 1); - RTMP_Log(RTMP_LOGDEBUG, - "%s, string reference, index: %d, not supported, ignoring!", - __FUNCTION__, refIndex); - return len; - } else { - uint32_t nSize = (ref >> 1); - - str->av_val = (char *)data; - str->av_len = nSize; - - return len + nSize; - } - return len; -} - -int AMF3Prop_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize, - int bDecodeName) { - int nOriginalSize = nSize; - AMF3DataType type; - - prop->p_name.av_len = 0; - prop->p_name.av_val = NULL; - - if (nSize == 0 || !pBuffer) { - RTMP_Log(RTMP_LOGDEBUG, "empty buffer/no buffer pointer!"); - return -1; - } - - /* decode name */ - if (bDecodeName) { - AVal name; - int nRes = AMF3ReadString(pBuffer, &name); - - if (name.av_len <= 0) - return nRes; - - prop->p_name = name; - pBuffer += nRes; - nSize -= nRes; - } - - /* decode */ - type = *pBuffer++; - nSize--; - - switch (type) { - case AMF3_UNDEFINED: - case AMF3_NULL: - prop->p_type = AMF_NULL; - break; - case AMF3_FALSE: - prop->p_type = AMF_BOOLEAN; - prop->p_vu.p_number = 0.0; - break; - case AMF3_TRUE: - prop->p_type = AMF_BOOLEAN; - prop->p_vu.p_number = 1.0; - break; - case AMF3_INTEGER: { - int32_t res = 0; - int len = AMF3ReadInteger(pBuffer, &res); - prop->p_vu.p_number = (double)res; - prop->p_type = AMF_NUMBER; - nSize -= len; - break; - } - case AMF3_DOUBLE: - if (nSize < 8) - return -1; - prop->p_vu.p_number = AMF_DecodeNumber(pBuffer); - prop->p_type = AMF_NUMBER; - nSize -= 8; - break; - case AMF3_STRING: - case AMF3_XML_DOC: - case AMF3_XML: { - int len = AMF3ReadString(pBuffer, &prop->p_vu.p_aval); - prop->p_type = AMF_STRING; - nSize -= len; - break; - } - case AMF3_DATE: { - int32_t res = 0; - int len = AMF3ReadInteger(pBuffer, &res); - - nSize -= len; - pBuffer += 
len; - - if ((res & 0x1) == 0) { /* reference */ - uint32_t nIndex = (res >> 1); - RTMP_Log(RTMP_LOGDEBUG, "AMF3_DATE reference: %d, not supported!", nIndex); - } else { - if (nSize < 8) - return -1; - - prop->p_vu.p_number = AMF_DecodeNumber(pBuffer); - nSize -= 8; - prop->p_type = AMF_NUMBER; - } - break; - } - case AMF3_OBJECT: { - int nRes = AMF3_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE); - if (nRes == -1) - return -1; - nSize -= nRes; - prop->p_type = AMF_OBJECT; - break; - } - case AMF3_ARRAY: - case AMF3_BYTE_ARRAY: - default: - RTMP_Log(RTMP_LOGDEBUG, "%s - AMF3 unknown/unsupported datatype 0x%02x, @0x%08X", - __FUNCTION__, (unsigned char)(*pBuffer), pBuffer); - return -1; - } - - return nOriginalSize - nSize; -} - -int AMFProp_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize, - int bDecodeName) { - int nOriginalSize = nSize; - int nRes; - - prop->p_name.av_len = 0; - prop->p_name.av_val = NULL; - - if (nSize == 0 || !pBuffer) { - RTMP_Log(RTMP_LOGDEBUG, "%s: Empty buffer/no buffer pointer!", __FUNCTION__); - return -1; - } - - if (bDecodeName && nSize < 4) { /* at least name (length + at least 1 byte) and 1 byte of data */ - RTMP_Log(RTMP_LOGDEBUG, - "%s: Not enough data for decoding with name, less than 4 bytes!", - __FUNCTION__); - return -1; - } - - if (bDecodeName) { - unsigned short nNameSize = AMF_DecodeInt16(pBuffer); - if (nNameSize > nSize - 2) { - RTMP_Log(RTMP_LOGDEBUG, - "%s: Name size out of range: namesize (%d) > len (%d) - 2", - __FUNCTION__, nNameSize, nSize); - return -1; - } - - AMF_DecodeString(pBuffer, &prop->p_name); - nSize -= 2 + nNameSize; - pBuffer += 2 + nNameSize; - } - - if (nSize == 0) { - return -1; - } - - nSize--; - - prop->p_type = *pBuffer++; - switch (prop->p_type) { - case AMF_NUMBER: - if (nSize < 8) - return -1; - prop->p_vu.p_number = AMF_DecodeNumber(pBuffer); - nSize -= 8; - break; - case AMF_BOOLEAN: - if (nSize < 1) - return -1; - prop->p_vu.p_number = (double)AMF_DecodeBoolean(pBuffer); - 
nSize--; - break; - case AMF_STRING: { - unsigned short nStringSize = AMF_DecodeInt16(pBuffer); - - if (nSize < (long)nStringSize + 2) - return -1; - AMF_DecodeString(pBuffer, &prop->p_vu.p_aval); - nSize -= (2 + nStringSize); - break; - } - case AMF_OBJECT: { - int nRes = AMF_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE); - if (nRes == -1) - return -1; - nSize -= nRes; - break; - } - case AMF_MOVIECLIP: { - RTMP_Log(RTMP_LOGERROR, "AMF_MOVIECLIP reserved!"); - return -1; - break; - } - case AMF_NULL: - case AMF_UNDEFINED: - case AMF_UNSUPPORTED: - prop->p_type = AMF_NULL; - break; - case AMF_REFERENCE: { - RTMP_Log(RTMP_LOGERROR, "AMF_REFERENCE not supported!"); - return -1; - break; - } - case AMF_ECMA_ARRAY: { - nSize -= 4; - - /* next comes the rest, mixed array has a final 0x000009 mark and names, so its an object */ - nRes = AMF_Decode(&prop->p_vu.p_object, pBuffer + 4, nSize, TRUE); - if (nRes == -1) - return -1; - nSize -= nRes; - prop->p_type = AMF_OBJECT; - break; - } - case AMF_OBJECT_END: { - return -1; - break; - } - case AMF_STRICT_ARRAY: { - unsigned int nArrayLen = AMF_DecodeInt32(pBuffer); - nSize -= 4; - - nRes = AMF_DecodeArray(&prop->p_vu.p_object, pBuffer + 4, nSize, - nArrayLen, FALSE); - if (nRes == -1) - return -1; - nSize -= nRes; - prop->p_type = AMF_OBJECT; - break; - } - case AMF_DATE: { - RTMP_Log(RTMP_LOGDEBUG, "AMF_DATE"); - - if (nSize < 10) - return -1; - - prop->p_vu.p_number = AMF_DecodeNumber(pBuffer); - prop->p_UTCoffset = AMF_DecodeInt16(pBuffer + 8); - - nSize -= 10; - break; - } - case AMF_LONG_STRING: { - unsigned int nStringSize = AMF_DecodeInt32(pBuffer); - if (nSize < (long)nStringSize + 4) - return -1; - AMF_DecodeLongString(pBuffer, &prop->p_vu.p_aval); - nSize -= (4 + nStringSize); - prop->p_type = AMF_STRING; - break; - } - case AMF_RECORDSET: { - RTMP_Log(RTMP_LOGERROR, "AMF_RECORDSET reserved!"); - return -1; - break; - } - case AMF_XML_DOC: { - RTMP_Log(RTMP_LOGERROR, "AMF_XML_DOC not supported!"); - return 
-1; - break; - } - case AMF_TYPED_OBJECT: { - RTMP_Log(RTMP_LOGERROR, "AMF_TYPED_OBJECT not supported!"); - return -1; - break; - } - case AMF_AVMPLUS: { - int nRes = AMF3_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE); - if (nRes == -1) - return -1; - nSize -= nRes; - prop->p_type = AMF_OBJECT; - break; - } - default: - RTMP_Log(RTMP_LOGDEBUG, "%s - unknown datatype 0x%02x, @0x%08X", __FUNCTION__, - prop->p_type, pBuffer - 1); - return -1; - } - - return nOriginalSize - nSize; -} - -void AMFProp_Dump(AMFObjectProperty *prop) { - char strRes[256]; - char str[256]; - AVal name; - - if (prop->p_type == AMF_INVALID) { - RTMP_Log(RTMP_LOGDEBUG, "Property: INVALID"); - return; - } - - if (prop->p_type == AMF_NULL) { - RTMP_Log(RTMP_LOGDEBUG, "Property: NULL"); - return; - } - - if (prop->p_name.av_len) { - name = prop->p_name; - } else { - name.av_val = "no-name."; - name.av_len = sizeof("no-name.") - 1; - } - if (name.av_len > 18) - name.av_len = 18; - - snprintf(strRes, 255, "Name: %18.*s, ", name.av_len, name.av_val); - - if (prop->p_type == AMF_OBJECT) { - RTMP_Log(RTMP_LOGDEBUG, "Property: <%sOBJECT>", strRes); - AMF_Dump(&prop->p_vu.p_object); - return; - } - - switch (prop->p_type) { - case AMF_NUMBER: - snprintf(str, 255, "NUMBER:\t%.2f", prop->p_vu.p_number); - break; - case AMF_BOOLEAN: - snprintf(str, 255, "BOOLEAN:\t%s", - prop->p_vu.p_number != 0.0 ? 
"TRUE" : "FALSE"); - break; - case AMF_STRING: - snprintf(str, 255, "STRING:\t%.*s", prop->p_vu.p_aval.av_len, - prop->p_vu.p_aval.av_val); - break; - case AMF_DATE: - snprintf(str, 255, "DATE:\ttimestamp: %.2f, UTC offset: %d", - prop->p_vu.p_number, prop->p_UTCoffset); - break; - default: - snprintf(str, 255, "INVALID TYPE 0x%02x", (unsigned char)prop->p_type); - } - - RTMP_Log(RTMP_LOGDEBUG, "Property: <%s%s>", strRes, str); -} - -void AMFProp_Reset(AMFObjectProperty *prop) { - if (prop->p_type == AMF_OBJECT) - AMF_Reset(&prop->p_vu.p_object); - else { - prop->p_vu.p_aval.av_len = 0; - prop->p_vu.p_aval.av_val = NULL; - } - prop->p_type = AMF_INVALID; -} - -/* AMFObject */ - -char * - AMF_Encode(AMFObject *obj, char *pBuffer, char *pBufEnd) { - int i; - - if (pBuffer + 4 >= pBufEnd) - return NULL; - - *pBuffer++ = AMF_OBJECT; - - for (i = 0; i < obj->o_num; i++) { - char *res = AMFProp_Encode(&obj->o_props[i], pBuffer, pBufEnd); - if (res == NULL) { - RTMP_Log(RTMP_LOGERROR, "AMF_Encode - failed to encode property in index %d", - i); - break; - } else { - pBuffer = res; - } - } - - if (pBuffer + 3 >= pBufEnd) - return NULL; /* no room for the end marker */ - - pBuffer = AMF_EncodeInt24(pBuffer, pBufEnd, AMF_OBJECT_END); - - return pBuffer; -} - -int AMF_DecodeArray(AMFObject *obj, const char *pBuffer, int nSize, - int nArrayLen, int bDecodeName) { - int nOriginalSize = nSize; - int bError = FALSE; - - obj->o_num = 0; - obj->o_props = NULL; - while (nArrayLen > 0) { - AMFObjectProperty prop; - int nRes; - nArrayLen--; - - nRes = AMFProp_Decode(&prop, pBuffer, nSize, bDecodeName); - if (nRes == -1) - bError = TRUE; - else { - nSize -= nRes; - pBuffer += nRes; - AMF_AddProp(obj, &prop); - } - } - if (bError) - return -1; - - return nOriginalSize - nSize; -} - -int AMF3_Decode(AMFObject *obj, const char *pBuffer, int nSize, int bAMFData) { - int nOriginalSize = nSize; - int32_t ref; - int len; - - obj->o_num = 0; - obj->o_props = NULL; - if (bAMFData) { - if 
(*pBuffer != AMF3_OBJECT) - RTMP_Log(RTMP_LOGERROR, - "AMF3 Object encapsulated in AMF stream does not start with AMF3_OBJECT!"); - pBuffer++; - nSize--; - } - - ref = 0; - len = AMF3ReadInteger(pBuffer, &ref); - pBuffer += len; - nSize -= len; - - if ((ref & 1) == 0) { /* object reference, 0xxx */ - uint32_t objectIndex = (ref >> 1); - - RTMP_Log(RTMP_LOGDEBUG, "Object reference, index: %d", objectIndex); - } else /* object instance */ - { - int32_t classRef = (ref >> 1); - - AMF3ClassDef cd = {{0, 0}}; - AMFObjectProperty prop; - - if ((classRef & 0x1) == 0) { /* class reference */ - uint32_t classIndex = (classRef >> 1); - RTMP_Log(RTMP_LOGDEBUG, "Class reference: %d", classIndex); - } else { - int32_t classExtRef = (classRef >> 1); - int i; - - cd.cd_externalizable = (classExtRef & 0x1) == 1; - cd.cd_dynamic = ((classExtRef >> 1) & 0x1) == 1; - - cd.cd_num = classExtRef >> 2; - - /* class name */ - - len = AMF3ReadString(pBuffer, &cd.cd_name); - nSize -= len; - pBuffer += len; - - /*std::string str = className; */ - - RTMP_Log(RTMP_LOGDEBUG, - "Class name: %s, externalizable: %d, dynamic: %d, classMembers: %d", - cd.cd_name.av_val, cd.cd_externalizable, cd.cd_dynamic, - cd.cd_num); - - for (i = 0; i < cd.cd_num; i++) { - AVal memberName; - len = AMF3ReadString(pBuffer, &memberName); - RTMP_Log(RTMP_LOGDEBUG, "Member: %s", memberName.av_val); - AMF3CD_AddProp(&cd, &memberName); - nSize -= len; - pBuffer += len; - } - } - - /* add as referencable object */ - - if (cd.cd_externalizable) { - int nRes; - AVal name = AVC("DEFAULT_ATTRIBUTE"); - - RTMP_Log(RTMP_LOGDEBUG, "Externalizable, TODO check"); - - nRes = AMF3Prop_Decode(&prop, pBuffer, nSize, FALSE); - if (nRes == -1) - RTMP_Log(RTMP_LOGDEBUG, "%s, failed to decode AMF3 property!", - __FUNCTION__); - else { - nSize -= nRes; - pBuffer += nRes; - } - - AMFProp_SetName(&prop, &name); - AMF_AddProp(obj, &prop); - } else { - int nRes, i; - for (i = 0; i < cd.cd_num; i++) /* non-dynamic */ - { - nRes = 
AMF3Prop_Decode(&prop, pBuffer, nSize, FALSE); - if (nRes == -1) - RTMP_Log(RTMP_LOGDEBUG, "%s, failed to decode AMF3 property!", - __FUNCTION__); - - AMFProp_SetName(&prop, AMF3CD_GetProp(&cd, i)); - AMF_AddProp(obj, &prop); - - pBuffer += nRes; - nSize -= nRes; - } - if (cd.cd_dynamic) { - int len = 0; - - do { - nRes = AMF3Prop_Decode(&prop, pBuffer, nSize, TRUE); - AMF_AddProp(obj, &prop); - - pBuffer += nRes; - nSize -= nRes; - - len = prop.p_name.av_len; - } while (len > 0); - } - } - RTMP_Log(RTMP_LOGDEBUG, "class object!"); - } - return nOriginalSize - nSize; -} - -int AMF_Decode(AMFObject *obj, const char *pBuffer, int nSize, int bDecodeName) { - int nOriginalSize = nSize; - int bError = FALSE; /* if there is an error while decoding - try to at least find the end mark AMF_OBJECT_END */ - - obj->o_num = 0; - obj->o_props = NULL; - while (nSize > 0) { - AMFObjectProperty prop; - int nRes; - - if (nSize >= 3 && AMF_DecodeInt24(pBuffer) == AMF_OBJECT_END) { - nSize -= 3; - bError = FALSE; - break; - } - - if (bError) { - RTMP_Log(RTMP_LOGERROR, - "DECODING ERROR, IGNORING BYTES UNTIL NEXT KNOWN PATTERN!"); - nSize--; - pBuffer++; - continue; - } - - nRes = AMFProp_Decode(&prop, pBuffer, nSize, bDecodeName); - if (nRes == -1) - bError = TRUE; - else { - nSize -= nRes; - pBuffer += nRes; - AMF_AddProp(obj, &prop); - } - } - - if (bError) - return -1; - - return nOriginalSize - nSize; -} - -void AMF_AddProp(AMFObject *obj, const AMFObjectProperty *prop) { - if (!(obj->o_num & 0x0f)) - obj->o_props = - realloc(obj->o_props, (obj->o_num + 16) * sizeof(AMFObjectProperty)); - obj->o_props[obj->o_num++] = *prop; -} - -int AMF_CountProp(AMFObject *obj) { - return obj->o_num; -} - -AMFObjectProperty * - AMF_GetProp(AMFObject *obj, const AVal *name, int nIndex) { - if (nIndex >= 0) { - if (nIndex <= obj->o_num) - return &obj->o_props[nIndex]; - } else { - int n; - for (n = 0; n < obj->o_num; n++) { - if (AVMATCH(&obj->o_props[n].p_name, name)) - return &obj->o_props[n]; 
- } - } - - return (AMFObjectProperty *)&AMFProp_Invalid; -} - -void AMF_Dump(AMFObject *obj) { - int n; - RTMP_Log(RTMP_LOGDEBUG, "(object begin)"); - for (n = 0; n < obj->o_num; n++) { - AMFProp_Dump(&obj->o_props[n]); - } - RTMP_Log(RTMP_LOGDEBUG, "(object end)"); -} - -void AMF_Reset(AMFObject *obj) { - int n; - for (n = 0; n < obj->o_num; n++) { - AMFProp_Reset(&obj->o_props[n]); - } - free(obj->o_props); - obj->o_props = NULL; - obj->o_num = 0; -} - -/* AMF3ClassDefinition */ - -void AMF3CD_AddProp(AMF3ClassDef *cd, AVal *prop) { - if (!(cd->cd_num & 0x0f)) - cd->cd_props = realloc(cd->cd_props, (cd->cd_num + 16) * sizeof(AVal)); - cd->cd_props[cd->cd_num++] = *prop; -} - -AVal * - AMF3CD_GetProp(AMF3ClassDef *cd, int nIndex) { - if (nIndex >= cd->cd_num) - return (AVal *)&AV_empty; - return &cd->cd_props[nIndex]; -} diff --git a/LFLiveKit/publish/pili-librtmp/amf.h b/LFLiveKit/publish/pili-librtmp/amf.h deleted file mode 100755 index 77f93e84..00000000 --- a/LFLiveKit/publish/pili-librtmp/amf.h +++ /dev/null @@ -1,180 +0,0 @@ -#ifndef __AMF_H__ -#define __AMF_H__ -/* - * Copyright (C) 2005-2008 Team XBMC - * http://www.xbmc.org - * Copyright (C) 2008-2009 Andrej Stepanchuk - * Copyright (C) 2009-2010 Howard Chu - * - * This file is part of librtmp. - * - * librtmp is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1, - * or (at your option) any later version. - * - * librtmp is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with librtmp see the file COPYING. 
If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * http://www.gnu.org/copyleft/lgpl.html - */ - -#include - -#ifndef TRUE -#define TRUE 1 -#define FALSE 0 -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -typedef enum { - AMF_NUMBER = 0, - AMF_BOOLEAN, - AMF_STRING, - AMF_OBJECT, - AMF_MOVIECLIP, /* reserved, not used */ - AMF_NULL, - AMF_UNDEFINED, - AMF_REFERENCE, - AMF_ECMA_ARRAY, - AMF_OBJECT_END, - AMF_STRICT_ARRAY, - AMF_DATE, - AMF_LONG_STRING, - AMF_UNSUPPORTED, - AMF_RECORDSET, /* reserved, not used */ - AMF_XML_DOC, - AMF_TYPED_OBJECT, - AMF_AVMPLUS, /* switch to AMF3 */ - AMF_INVALID = 0xff -} AMFDataType; - -typedef enum { - AMF3_UNDEFINED = 0, - AMF3_NULL, - AMF3_FALSE, - AMF3_TRUE, - AMF3_INTEGER, - AMF3_DOUBLE, - AMF3_STRING, - AMF3_XML_DOC, - AMF3_DATE, - AMF3_ARRAY, - AMF3_OBJECT, - AMF3_XML, - AMF3_BYTE_ARRAY -} AMF3DataType; - -typedef struct AVal { - char *av_val; - int av_len; -} AVal; -#define AVC(str) \ - { str, sizeof(str) - 1 } -#define AVMATCH(a1, a2) \ - ((a1)->av_len == (a2)->av_len && \ - !memcmp((a1)->av_val, (a2)->av_val, (a1)->av_len)) - -struct AMFObjectProperty; - -typedef struct AMFObject { - int o_num; - struct AMFObjectProperty *o_props; -} AMFObject; - -typedef struct AMFObjectProperty { - AVal p_name; - AMFDataType p_type; - union { - double p_number; - AVal p_aval; - AMFObject p_object; - } p_vu; - int16_t p_UTCoffset; -} AMFObjectProperty; - -char *AMF_EncodeString(char *output, char *outend, const AVal *str); -char *AMF_EncodeNumber(char *output, char *outend, double dVal); -char *AMF_EncodeInt16(char *output, char *outend, short nVal); -char *AMF_EncodeInt24(char *output, char *outend, int nVal); -char *AMF_EncodeInt32(char *output, char *outend, int nVal); -char *AMF_EncodeBoolean(char *output, char *outend, int bVal); - -/* Shortcuts for AMFProp_Encode */ -char *AMF_EncodeNamedString(char *output, char *outend, const AVal *name, - const AVal 
*value); -char *AMF_EncodeNamedNumber(char *output, char *outend, const AVal *name, - double dVal); -char *AMF_EncodeNamedBoolean(char *output, char *outend, const AVal *name, - int bVal); - -unsigned short AMF_DecodeInt16(const char *data); -unsigned int AMF_DecodeInt24(const char *data); -unsigned int AMF_DecodeInt32(const char *data); -void AMF_DecodeString(const char *data, AVal *str); -void AMF_DecodeLongString(const char *data, AVal *str); -int AMF_DecodeBoolean(const char *data); -double AMF_DecodeNumber(const char *data); - -char *AMF_Encode(AMFObject *obj, char *pBuffer, char *pBufEnd); -int AMF_Decode(AMFObject *obj, const char *pBuffer, int nSize, int bDecodeName); -int AMF_DecodeArray(AMFObject *obj, const char *pBuffer, int nSize, - int nArrayLen, int bDecodeName); -int AMF3_Decode(AMFObject *obj, const char *pBuffer, int nSize, - int bDecodeName); -void AMF_Dump(AMFObject *obj); -void AMF_Reset(AMFObject *obj); - -void AMF_AddProp(AMFObject *obj, const AMFObjectProperty *prop); -int AMF_CountProp(AMFObject *obj); -AMFObjectProperty *AMF_GetProp(AMFObject *obj, const AVal *name, int nIndex); - -AMFDataType AMFProp_GetType(AMFObjectProperty *prop); -void AMFProp_SetNumber(AMFObjectProperty *prop, double dval); -void AMFProp_SetBoolean(AMFObjectProperty *prop, int bflag); -void AMFProp_SetString(AMFObjectProperty *prop, AVal *str); -void AMFProp_SetObject(AMFObjectProperty *prop, AMFObject *obj); - -void AMFProp_GetName(AMFObjectProperty *prop, AVal *name); -void AMFProp_SetName(AMFObjectProperty *prop, AVal *name); -double AMFProp_GetNumber(AMFObjectProperty *prop); -int AMFProp_GetBoolean(AMFObjectProperty *prop); -void AMFProp_GetString(AMFObjectProperty *prop, AVal *str); -void AMFProp_GetObject(AMFObjectProperty *prop, AMFObject *obj); - -int AMFProp_IsValid(AMFObjectProperty *prop); - -char *AMFProp_Encode(AMFObjectProperty *prop, char *pBuffer, char *pBufEnd); -int AMF3Prop_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize, - int 
bDecodeName); -int AMFProp_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize, - int bDecodeName); - -void AMFProp_Dump(AMFObjectProperty *prop); -void AMFProp_Reset(AMFObjectProperty *prop); - -typedef struct AMF3ClassDef { - AVal cd_name; - char cd_externalizable; - char cd_dynamic; - int cd_num; - AVal *cd_props; -} AMF3ClassDef; - -void AMF3CD_AddProp(AMF3ClassDef *cd, AVal *prop); -AVal *AMF3CD_GetProp(AMF3ClassDef *cd, int idx); - -#ifdef __cplusplus -} -#endif - -#endif /* __AMF_H__ */ diff --git a/LFLiveKit/publish/pili-librtmp/bytes.h b/LFLiveKit/publish/pili-librtmp/bytes.h deleted file mode 100755 index 87221cf1..00000000 --- a/LFLiveKit/publish/pili-librtmp/bytes.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (C) 2005-2008 Team XBMC - * http://www.xbmc.org - * Copyright (C) 2008-2009 Andrej Stepanchuk - * Copyright (C) 2009-2010 Howard Chu - * - * This file is part of librtmp. - * - * librtmp is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1, - * or (at your option) any later version. - * - * librtmp is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with librtmp see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * http://www.gnu.org/copyleft/lgpl.html - */ - -#ifndef __BYTES_H__ -#define __BYTES_H__ - -#include - -#ifdef _WIN32 -/* Windows is little endian only */ -#define __LITTLE_ENDIAN 1234 -#define __BIG_ENDIAN 4321 -#define __BYTE_ORDER __LITTLE_ENDIAN -#define __FLOAT_WORD_ORDER __BYTE_ORDER - -typedef unsigned char uint8_t; - -#else /* !_WIN32 */ - -#include - -#if defined(BYTE_ORDER) && !defined(__BYTE_ORDER) -#define __BYTE_ORDER BYTE_ORDER -#endif - -#if defined(BIG_ENDIAN) && !defined(__BIG_ENDIAN) -#define __BIG_ENDIAN BIG_ENDIAN -#endif - -#if defined(LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN) -#define __LITTLE_ENDIAN LITTLE_ENDIAN -#endif - -#endif /* !_WIN32 */ - -/* define default endianness */ -#ifndef __LITTLE_ENDIAN -#define __LITTLE_ENDIAN 1234 -#endif - -#ifndef __BIG_ENDIAN -#define __BIG_ENDIAN 4321 -#endif - -#ifndef __BYTE_ORDER -#warning "Byte order not defined on your system, assuming little endian!" -#define __BYTE_ORDER __LITTLE_ENDIAN -#endif - -/* ok, we assume to have the same float word order and byte order if float word - * order is not defined */ -#ifndef __FLOAT_WORD_ORDER -#warning "Float word order not defined, assuming the same as byte order!" -#define __FLOAT_WORD_ORDER __BYTE_ORDER -#endif - -#if !defined(__BYTE_ORDER) || !defined(__FLOAT_WORD_ORDER) -#error "Undefined byte or float word order!" -#endif - -#if __FLOAT_WORD_ORDER != __BIG_ENDIAN && __FLOAT_WORD_ORDER != __LITTLE_ENDIAN -#error "Unknown/unsupported float word order!" -#endif - -#if __BYTE_ORDER != __BIG_ENDIAN && __BYTE_ORDER != __LITTLE_ENDIAN -#error "Unknown/unsupported byte order!" 
-#endif - -#endif diff --git a/LFLiveKit/publish/pili-librtmp/dh.h b/LFLiveKit/publish/pili-librtmp/dh.h deleted file mode 100755 index d7aeb5a5..00000000 --- a/LFLiveKit/publish/pili-librtmp/dh.h +++ /dev/null @@ -1,345 +0,0 @@ -/* RTMPDump - Diffie-Hellmann Key Exchange - * Copyright (C) 2009 Andrej Stepanchuk - * Copyright (C) 2009-2010 Howard Chu - * - * This file is part of librtmp. - * - * librtmp is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1, - * or (at your option) any later version. - * - * librtmp is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with librtmp see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * http://www.gnu.org/copyleft/lgpl.html - */ - -#include -#include -#include -#include -#include - -#ifdef USE_POLARSSL -#include -typedef mpi *MP_t; -#define MP_new(m) \ - m = malloc(sizeof(mpi)); \ - mpi_init(m, NULL) -#define MP_set_w(mpi, w) mpi_lset(mpi, w) -#define MP_cmp(u, v) mpi_cmp_mpi(u, v) -#define MP_set(u, v) mpi_copy(u, v) -#define MP_sub_w(mpi, w) mpi_sub_int(mpi, mpi, w) -#define MP_cmp_1(mpi) mpi_cmp_int(mpi, 1) -#define MP_modexp(r, y, q, p) mpi_exp_mod(r, y, q, p, NULL) -#define MP_free(mpi) \ - mpi_free(mpi, NULL); \ - free(mpi) -#define MP_gethex(u, hex, res) \ - MP_new(u); \ - res = mpi_read_string(u, 16, hex) == 0 -#define MP_bytes(u) mpi_size(u) -#define MP_setbin(u, buf, len) mpi_write_binary(u, buf, len) -#define MP_getbin(u, buf, len) \ - MP_new(u); \ - mpi_read_binary(u, buf, len) - -typedef struct MDH { - MP_t p; - MP_t g; - MP_t pub_key; - MP_t priv_key; - long length; - dhm_context ctx; -} MDH; - -#define MDH_new() calloc(1, sizeof(MDH)) -#define MDH_free(vp) \ - { \ - MDH *dh = vp; \ - dhm_free(&dh->ctx); \ - MP_free(dh->p); \ - MP_free(dh->g); \ - MP_free(dh->pub_key); \ - MP_free(dh->priv_key); \ - free(dh); \ - } - -static int MDH_generate_key(MDH *dh) { - unsigned char out[2]; - MP_set(&dh->ctx.P, dh->p); - MP_set(&dh->ctx.G, dh->g); - dh->ctx.len = 128; - dhm_make_public(&dh->ctx, 1024, out, 1, havege_rand, &RTMP_TLS_ctx->hs); - MP_new(dh->pub_key); - MP_new(dh->priv_key); - MP_set(dh->pub_key, &dh->ctx.GX); - MP_set(dh->priv_key, &dh->ctx.X); - return 1; -} - -static int MDH_compute_key(uint8_t *secret, size_t len, MP_t pub, MDH *dh) { - int n = len; - MP_set(&dh->ctx.GY, pub); - dhm_calc_secret(&dh->ctx, secret, &n); - return 0; -} - -#elif defined(USE_GNUTLS) -#include -typedef gcry_mpi_t MP_t; -#define MP_new(m) m = gcry_mpi_new(1) -#define MP_set_w(mpi, w) gcry_mpi_set_ui(mpi, w) -#define MP_cmp(u, v) gcry_mpi_cmp(u, v) -#define MP_set(u, v) gcry_mpi_set(u, v) -#define MP_sub_w(mpi, w) gcry_mpi_sub_ui(mpi, mpi, w) 
-#define MP_cmp_1(mpi) gcry_mpi_cmp_ui(mpi, 1) -#define MP_modexp(r, y, q, p) gcry_mpi_powm(r, y, q, p) -#define MP_free(mpi) gcry_mpi_release(mpi) -#define MP_gethex(u, hex, res) \ - res = (gcry_mpi_scan(&u, GCRYMPI_FMT_HEX, hex, 0, 0) == 0) -#define MP_bytes(u) (gcry_mpi_get_nbits(u) + 7) / 8 -#define MP_setbin(u, buf, len) \ - gcry_mpi_print(GCRYMPI_FMT_USG, buf, len, NULL, u) -#define MP_getbin(u, buf, len) \ - gcry_mpi_scan(&u, GCRYMPI_FMT_USG, buf, len, NULL) - -typedef struct MDH { - MP_t p; - MP_t g; - MP_t pub_key; - MP_t priv_key; - long length; -} MDH; - -#define MDH_new() calloc(1, sizeof(MDH)) -#define MDH_free(dh) \ - do { \ - MP_free(((MDH *)(dh))->p); \ - MP_free(((MDH *)(dh))->g); \ - MP_free(((MDH *)(dh))->pub_key); \ - MP_free(((MDH *)(dh))->priv_key); \ - free(dh); \ - } while (0) - -extern MP_t gnutls_calc_dh_secret(MP_t *priv, MP_t g, MP_t p); -extern MP_t gnutls_calc_dh_key(MP_t y, MP_t x, MP_t p); - -#define MDH_generate_key(dh) \ - (dh->pub_key = gnutls_calc_dh_secret(&dh->priv_key, dh->g, dh->p)) -static int MDH_compute_key(uint8_t *secret, size_t len, MP_t pub, MDH *dh) { - MP_t sec = gnutls_calc_dh_key(pub, dh->priv_key, dh->p); - if (sec) { - MP_setbin(sec, secret, len); - MP_free(sec); - return 0; - } else - return -1; -} - -#else /* USE_OPENSSL */ -#include -#include - -typedef BIGNUM *MP_t; -#define MP_new(m) m = BN_new() -#define MP_set_w(mpi, w) BN_set_word(mpi, w) -#define MP_cmp(u, v) BN_cmp(u, v) -#define MP_set(u, v) BN_copy(u, v) -#define MP_sub_w(mpi, w) BN_sub_word(mpi, w) -#define MP_cmp_1(mpi) BN_cmp(mpi, BN_value_one()) -#define MP_modexp(r, y, q, p) \ - do { \ - BN_CTX *ctx = BN_CTX_new(); \ - BN_mod_exp(r, y, q, p, ctx); \ - BN_CTX_free(ctx); \ - } while (0) -#define MP_free(mpi) BN_free(mpi) -#define MP_gethex(u, hex, res) res = BN_hex2bn(&u, hex) -#define MP_bytes(u) BN_num_bytes(u) -#define MP_setbin(u, buf, len) BN_bn2bin(u, buf) -#define MP_getbin(u, buf, len) u = BN_bin2bn(buf, len, 0) - -#define MDH DH -#define 
MDH_new() DH_new() -#define MDH_free(dh) DH_free(dh) -#define MDH_generate_key(dh) DH_generate_key(dh) -#define MDH_compute_key(secret, seclen, pub, dh) DH_compute_key(secret, pub, dh) - -#endif - -#include "dhgroups.h" -#include "log.h" - -/* RFC 2631, Section 2.1.5, http://www.ietf.org/rfc/rfc2631.txt */ -static int isValidPublicKey(MP_t y, MP_t p, MP_t q) { - int ret = TRUE; - MP_t bn; - assert(y); - - MP_new(bn); - assert(bn); - - /* y must lie in [2,p-1] */ - MP_set_w(bn, 1); - if (MP_cmp(y, bn) < 0) { - RTMP_Log(RTMP_LOGERROR, "DH public key must be at least 2"); - ret = FALSE; - goto failed; - } - - /* bn = p-2 */ - MP_set(bn, p); - MP_sub_w(bn, 1); - if (MP_cmp(y, bn) > 0) { - RTMP_Log(RTMP_LOGERROR, "DH public key must be at most p-2"); - ret = FALSE; - goto failed; - } - - /* Verify with Sophie-Germain prime - * - * This is a nice test to make sure the public key position is calculated - * correctly. This test will fail in about 50% of the cases if applied to - * random data. - */ - if (q) { - /* y must fulfill y^q mod p = 1 */ - MP_modexp(bn, y, q, p); - - if (MP_cmp_1(bn) != 0) { - RTMP_Log(RTMP_LOGWARNING, "DH public key does not fulfill y^q mod p = 1"); - } - } - -failed: - MP_free(bn); - return ret; -} - -static MDH *DHInit(int nKeyBits) { - size_t res; - MDH *dh = MDH_new(); - - if (!dh) - goto failed; - - MP_new(dh->g); - - if (!dh->g) - goto failed; - - MP_gethex(dh->p, P1024, res); /* prime P1024, see dhgroups.h */ - if (!res) { - goto failed; - } - - MP_set_w(dh->g, 2); /* base 2 */ - - dh->length = nKeyBits; - return dh; - -failed: - if (dh) - MDH_free(dh); - - return 0; -} - -static int DHGenerateKey(MDH *dh) { - size_t res = 0; - if (!dh) - return 0; - - while (!res) { - MP_t q1 = NULL; - - if (!MDH_generate_key(dh)) - return 0; - - MP_gethex(q1, Q1024, res); - assert(res); - - res = isValidPublicKey(dh->pub_key, dh->p, q1); - if (!res) { - MP_free(dh->pub_key); - MP_free(dh->priv_key); - dh->pub_key = dh->priv_key = 0; - } - - MP_free(q1); - 
} - return 1; -} - -/* fill pubkey with the public key in BIG ENDIAN order - * 00 00 00 00 00 x1 x2 x3 ..... - */ - -static int DHGetPublicKey(MDH *dh, uint8_t *pubkey, size_t nPubkeyLen) { - int len; - if (!dh || !dh->pub_key) - return 0; - - len = MP_bytes(dh->pub_key); - if (len <= 0 || len > (int)nPubkeyLen) - return 0; - - memset(pubkey, 0, nPubkeyLen); - MP_setbin(dh->pub_key, pubkey + (nPubkeyLen - len), len); - return 1; -} - -#if 0 /* unused */ -static int -DHGetPrivateKey(MDH *dh, uint8_t *privkey, size_t nPrivkeyLen) -{ - if (!dh || !dh->priv_key) - return 0; - - int len = MP_bytes(dh->priv_key); - if (len <= 0 || len > (int) nPrivkeyLen) - return 0; - - memset(privkey, 0, nPrivkeyLen); - MP_setbin(dh->priv_key, privkey + (nPrivkeyLen - len), len); - return 1; -} -#endif - -/* computes the shared secret key from the private MDH value and the - * other party's public key (pubkey) - */ -static int DHComputeSharedSecretKey(MDH *dh, uint8_t *pubkey, size_t nPubkeyLen, - uint8_t *secret) { - MP_t q1 = NULL, pubkeyBn = NULL; - size_t len; - int res; - - if (!dh || !secret || nPubkeyLen >= INT_MAX) - return -1; - - MP_getbin(pubkeyBn, pubkey, nPubkeyLen); - if (!pubkeyBn) - return -1; - - MP_gethex(q1, Q1024, len); - assert(len); - - if (isValidPublicKey(pubkeyBn, dh->p, q1)) - res = MDH_compute_key(secret, nPubkeyLen, pubkeyBn, dh); - else - res = -1; - - MP_free(q1); - MP_free(pubkeyBn); - - return res; -} diff --git a/LFLiveKit/publish/pili-librtmp/dhgroups.h b/LFLiveKit/publish/pili-librtmp/dhgroups.h deleted file mode 100755 index f3d0293f..00000000 --- a/LFLiveKit/publish/pili-librtmp/dhgroups.h +++ /dev/null @@ -1,198 +0,0 @@ -/* librtmp - Diffie-Hellmann Key Exchange - * Copyright (C) 2009 Andrej Stepanchuk - * - * This file is part of librtmp. 
- * - * librtmp is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1, - * or (at your option) any later version. - * - * librtmp is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with librtmp see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * http://www.gnu.org/copyleft/lgpl.html - */ - -/* from RFC 3526, see http://www.ietf.org/rfc/rfc3526.txt */ - -/* 2^768 - 2 ^704 - 1 + 2^64 * { [2^638 pi] + 149686 } */ -#define P768 \ - "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ - "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ - "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ - "E485B576625E7EC6F44C42E9A63A3620FFFFFFFFFFFFFFFF" - -/* 2^1024 - 2^960 - 1 + 2^64 * { [2^894 pi] + 129093 } */ -#define P1024 \ - "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ - "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ - "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ - "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ - "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381" \ - "FFFFFFFFFFFFFFFF" - -/* Group morder largest prime factor: */ -#define Q1024 \ - "7FFFFFFFFFFFFFFFE487ED5110B4611A62633145C06E0E68" \ - "948127044533E63A0105DF531D89CD9128A5043CC71A026E" \ - "F7CA8CD9E69D218D98158536F92F8A1BA7F09AB6B6A8E122" \ - "F242DABB312F3F637A262174D31BF6B585FFAE5B7A035BF6" \ - "F71C35FDAD44CFD2D74F9208BE258FF324943328F67329C0" \ - "FFFFFFFFFFFFFFFF" - -/* 2^1536 - 2^1472 - 1 + 2^64 * { [2^1406 pi] + 741804 } */ -#define P1536 \ - 
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ - "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ - "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ - "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ - "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ - "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ - "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ - "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF" - -/* 2^2048 - 2^1984 - 1 + 2^64 * { [2^1918 pi] + 124476 } */ -#define P2048 \ - "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ - "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ - "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ - "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ - "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ - "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ - "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ - "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ - "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ - "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ - "15728E5A8AACAA68FFFFFFFFFFFFFFFF" - -/* 2^3072 - 2^3008 - 1 + 2^64 * { [2^2942 pi] + 1690314 } */ -#define P3072 \ - "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ - "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ - "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ - "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ - "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ - "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ - "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ - "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ - "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ - "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ - "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" \ - "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" \ - "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" \ - "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" \ - 
"BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" \ - "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF" - -/* 2^4096 - 2^4032 - 1 + 2^64 * { [2^3966 pi] + 240904 } */ -#define P4096 \ - "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ - "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ - "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ - "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ - "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ - "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ - "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ - "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ - "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ - "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ - "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" \ - "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" \ - "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" \ - "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" \ - "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" \ - "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" \ - "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" \ - "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" \ - "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" \ - "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" \ - "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" \ - "FFFFFFFFFFFFFFFF" - -/* 2^6144 - 2^6080 - 1 + 2^64 * { [2^6014 pi] + 929484 } */ -#define P6144 \ - "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ - "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ - "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ - "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ - "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ - "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ - "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ - "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ - "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ - "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ - 
"15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" \ - "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" \ - "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" \ - "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" \ - "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" \ - "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" \ - "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" \ - "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" \ - "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" \ - "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" \ - "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" \ - "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" \ - "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" \ - "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" \ - "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" \ - "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" \ - "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" \ - "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" \ - "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" \ - "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" \ - "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" \ - "12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF" - -/* 2^8192 - 2^8128 - 1 + 2^64 * { [2^8062 pi] + 4743158 } */ -#define P8192 \ - "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ - "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ - "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ - "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ - "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ - "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ - "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ - "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ - "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ - "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ - "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" \ - "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" \ - "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" 
\ - "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" \ - "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" \ - "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" \ - "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" \ - "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" \ - "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" \ - "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" \ - "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" \ - "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" \ - "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" \ - "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" \ - "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" \ - "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" \ - "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" \ - "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" \ - "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" \ - "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" \ - "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" \ - "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" \ - "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" \ - "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" \ - "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" \ - "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" \ - "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" \ - "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" \ - "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" \ - "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" \ - "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" \ - "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" \ - "60C980DD98EDD3DFFFFFFFFFFFFFFFFF" diff --git a/LFLiveKit/publish/pili-librtmp/error.c b/LFLiveKit/publish/pili-librtmp/error.c deleted file mode 100755 index b47913df..00000000 --- a/LFLiveKit/publish/pili-librtmp/error.c +++ /dev/null @@ -1,26 +0,0 @@ -#include "error.h" -#include -#include - -void RTMPError_Alloc(RTMPError *error, size_t msg_size) { - RTMPError_Free(error); - - error->code = 0; 
- error->message = (char *)malloc(msg_size + 1); - memset(error->message, 0, msg_size); -} - -void RTMPError_Free(RTMPError *error) { - if (error) { - if (error->message) { - free(error->message); - error->message = NULL; - } - } -} - -void RTMPError_Message(RTMPError *error, int code, const char *message) { - RTMPError_Alloc(error, strlen(message)); - error->code = code; - strcpy(error->message, message); -} diff --git a/LFLiveKit/publish/pili-librtmp/error.h b/LFLiveKit/publish/pili-librtmp/error.h deleted file mode 100755 index 6a8dcbe4..00000000 --- a/LFLiveKit/publish/pili-librtmp/error.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef __ERROR_H__ -#define __ERROR_H__ - -#include - -typedef struct RTMPError { - int code; - char *message; -} RTMPError; - -void RTMPError_Alloc(RTMPError *error, size_t msg_size); -void RTMPError_Free(RTMPError *error); -void RTMPError_Message(RTMPError *error, int code, const char *message); - -// error defines -enum { - RTMPErrorUnknow = -1, // "Unknow error" - RTMPErrorUnknowOption = -999, // "Unknown option %s" - RTMPErrorAccessDNSFailed = -1000, // "Failed to access the DNS. (addr: %s)" - RTMPErrorFailedToConnectSocket = - -1001, // "Failed to connect socket. %d (%s)" - RTMPErrorSocksNegotiationFailed = -1002, // "Socks negotiation failed" - RTMPErrorFailedToCreateSocket = - -1003, // "Failed to create socket. %d (%s)" - RTMPErrorHandshakeFailed = -1004, // "Handshake failed" - RTMPErrorRTMPConnectFailed = -1005, // "RTMP connect failed" - RTMPErrorSendFailed = -1006, // "Send error %d (%s), (%d bytes)" - RTMPErrorServerRequestedClose = -1007, // "RTMP server requested close" - RTMPErrorNetStreamFailed = -1008, // "NetStream failed" - RTMPErrorNetStreamPlayFailed = -1009, // "NetStream play failed" - RTMPErrorNetStreamPlayStreamNotFound = - -1010, // "NetStream play stream not found" - RTMPErrorNetConnectionConnectInvalidApp = - -1011, // "NetConnection connect invalip app" - RTMPErrorSanityFailed = - -1012, // "Sanity failed. 
Trying to send header of type: 0x%02X" - RTMPErrorSocketClosedByPeer = -1013, // "RTMP socket closed by peer" - RTMPErrorRTMPConnectStreamFailed = -1014, // "RTMP connect stream failed" - RTMPErrorSocketTimeout = -1015, // "RTMP socket timeout" - - // SSL errors - RTMPErrorTLSConnectFailed = -1200, // "TLS_Connect failed" - RTMPErrorNoSSLOrTLSSupport = -1201, // "No SSL/TLS support" -}; - -#endif diff --git a/LFLiveKit/publish/pili-librtmp/handshake.h b/LFLiveKit/publish/pili-librtmp/handshake.h deleted file mode 100755 index f791cf74..00000000 --- a/LFLiveKit/publish/pili-librtmp/handshake.h +++ /dev/null @@ -1,1034 +0,0 @@ -/* - * Copyright (C) 2008-2009 Andrej Stepanchuk - * Copyright (C) 2009-2010 Howard Chu - * Copyright (C) 2010 - * 2a665470ced7adb7156fcef47f8199a6371c117b8a79e399a2771e0b36384090 - * - * This file is part of librtmp. - * - * librtmp is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1, - * or (at your option) any later version. - * - * librtmp is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with librtmp see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * http://www.gnu.org/copyleft/lgpl.html - */ - -/* This file is #included in rtmp.c, it is not meant to be compiled alone */ - -#ifdef USE_POLARSSL -#include -#include -#ifndef SHA256_DIGEST_LENGTH -#define SHA256_DIGEST_LENGTH 32 -#endif -#define HMAC_CTX sha2_context -#define HMAC_setup(ctx, key, len) \ - sha2_hmac_starts(&ctx, (unsigned char *)key, len, 0) -#define HMAC_crunch(ctx, buf, len) sha2_hmac_update(&ctx, buf, len) -#define HMAC_finish(ctx, dig, dlen) \ - dlen = SHA256_DIGEST_LENGTH; \ - sha2_hmac_finish(&ctx, dig) - -typedef arc4_context *RC4_handle; -#define RC4_alloc(h) *h = malloc(sizeof(arc4_context)) -#define RC4_setkey(h, l, k) arc4_setup(h, k, l) -#define RC4_encrypt(h, l, d) \ - arc4_crypt(h, l, (unsigned char *)d, (unsigned char *)d) -#define RC4_encrypt2(h, l, s, d) \ - arc4_crypt(h, l, (unsigned char *)s, (unsigned char *)d) -#define RC4_free(h) free(h) - -#elif defined(USE_GNUTLS) -#include -#ifndef SHA256_DIGEST_LENGTH -#define SHA256_DIGEST_LENGTH 32 -#endif -#define HMAC_CTX gcry_md_hd_t -#define HMAC_setup(ctx, key, len) \ - gcry_md_open(&ctx, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC); \ - gcry_md_setkey(ctx, key, len) -#define HMAC_crunch(ctx, buf, len) gcry_md_write(ctx, buf, len) -#define HMAC_finish(ctx, dig, dlen) \ - dlen = SHA256_DIGEST_LENGTH; \ - memcpy(dig, gcry_md_read(ctx, 0), dlen); \ - gcry_md_close(ctx) - -typedef gcry_cipher_hd_t RC4_handle; -#define RC4_alloc(h) \ - gcry_cipher_open(h, GCRY_CIPHER_ARCFOUR, GCRY_CIPHER_MODE_STREAM, 0) -#define RC4_setkey(h, l, k) gcry_cipher_setkey(h, k, l) -#define RC4_encrypt(h, l, d) gcry_cipher_encrypt(h, (void *)d, l, NULL, 0) -#define RC4_encrypt2(h, l, s, d) \ - gcry_cipher_encrypt(h, (void *)d, l, (void *)s, l) -#define RC4_free(h) gcry_cipher_close(h) - -#else /* USE_OPENSSL */ -#include -#include -#include -#if OPENSSL_VERSION_NUMBER < 0x0090800 || !defined(SHA256_DIGEST_LENGTH) -#error Your OpenSSL is too old, need 0.9.8 or newer with SHA256 -#endif -#define HMAC_setup(ctx, key, 
len) \ - HMAC_CTX_init(&ctx); \ - HMAC_Init_ex(&ctx, key, len, EVP_sha256(), 0) -#define HMAC_crunch(ctx, buf, len) HMAC_Update(&ctx, buf, len) -#define HMAC_finish(ctx, dig, dlen) \ - HMAC_Final(&ctx, dig, &dlen); \ - HMAC_CTX_cleanup(&ctx) - -typedef RC4_KEY *RC4_handle; -#define RC4_alloc(h) *h = malloc(sizeof(RC4_KEY)) -#define RC4_setkey(h, l, k) RC4_set_key(h, l, k) -#define RC4_encrypt(h, l, d) RC4(h, l, (uint8_t *)d, (uint8_t *)d) -#define RC4_encrypt2(h, l, s, d) RC4(h, l, (uint8_t *)s, (uint8_t *)d) -#define RC4_free(h) free(h) -#endif - -#define FP10 - -#include "dh.h" - -static const uint8_t GenuineFMSKey[] = { - 0x47, 0x65, 0x6e, 0x75, 0x69, 0x6e, 0x65, 0x20, 0x41, 0x64, 0x6f, - 0x62, 0x65, 0x20, 0x46, 0x6c, 0x61, 0x73, 0x68, 0x20, 0x4d, 0x65, - 0x64, 0x69, 0x61, 0x20, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, - 0x30, 0x30, 0x31, /* Genuine Adobe Flash Media Server 001 */ - - 0xf0, 0xee, 0xc2, 0x4a, 0x80, 0x68, 0xbe, 0xe8, 0x2e, 0x00, 0xd0, - 0xd1, 0x02, 0x9e, 0x7e, 0x57, 0x6e, 0xec, 0x5d, 0x2d, 0x29, 0x80, - 0x6f, 0xab, 0x93, 0xb8, 0xe6, 0x36, 0xcf, 0xeb, 0x31, 0xae}; /* 68 */ - -static const uint8_t GenuineFPKey[] = { - 0x47, 0x65, 0x6E, 0x75, 0x69, 0x6E, 0x65, 0x20, 0x41, 0x64, 0x6F, - 0x62, 0x65, 0x20, 0x46, 0x6C, 0x61, 0x73, 0x68, 0x20, 0x50, 0x6C, - 0x61, 0x79, 0x65, 0x72, 0x20, 0x30, 0x30, 0x31, /* Genuine Adobe Flash - Player 001 */ - 0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, - 0xD1, 0x02, 0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, - 0x6F, 0xAB, 0x93, 0xB8, 0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE}; /* 62 */ - -static void InitRC4Encryption(uint8_t *secretKey, uint8_t *pubKeyIn, - uint8_t *pubKeyOut, RC4_handle *rc4keyIn, - RC4_handle *rc4keyOut) { - uint8_t digest[SHA256_DIGEST_LENGTH]; - unsigned int digestLen = 0; - HMAC_CTX ctx; - - RC4_alloc(rc4keyIn); - RC4_alloc(rc4keyOut); - - HMAC_setup(ctx, secretKey, 128); - HMAC_crunch(ctx, pubKeyIn, 128); - HMAC_finish(ctx, digest, digestLen); - - 
RTMP_Log(RTMP_LOGDEBUG, "RC4 Out Key: "); - RTMP_LogHex(RTMP_LOGDEBUG, digest, 16); - - RC4_setkey(*rc4keyOut, 16, digest); - - HMAC_setup(ctx, secretKey, 128); - HMAC_crunch(ctx, pubKeyOut, 128); - HMAC_finish(ctx, digest, digestLen); - - RTMP_Log(RTMP_LOGDEBUG, "RC4 In Key: "); - RTMP_LogHex(RTMP_LOGDEBUG, digest, 16); - - RC4_setkey(*rc4keyIn, 16, digest); -} - -typedef unsigned int(getoff)(uint8_t *buf, unsigned int len); - -static unsigned int GetDHOffset2(uint8_t *handshake, unsigned int len) { - unsigned int offset = 0; - uint8_t *ptr = handshake + 768; - unsigned int res; - - assert(RTMP_SIG_SIZE <= len); - - offset += (*ptr); - ptr++; - offset += (*ptr); - ptr++; - offset += (*ptr); - ptr++; - offset += (*ptr); - - res = (offset % 632) + 8; - - if (res + 128 > 767) { - RTMP_Log(RTMP_LOGERROR, - "%s: Couldn't calculate correct DH offset (got %d), exiting!", - __FUNCTION__, res); - exit(1); - } - return res; -} - -static unsigned int GetDigestOffset2(uint8_t *handshake, unsigned int len) { - unsigned int offset = 0; - uint8_t *ptr = handshake + 772; - unsigned int res; - - offset += (*ptr); - ptr++; - offset += (*ptr); - ptr++; - offset += (*ptr); - ptr++; - offset += (*ptr); - - res = (offset % 728) + 776; - - if (res + 32 > 1535) { - RTMP_Log(RTMP_LOGERROR, - "%s: Couldn't calculate correct digest offset (got %d), exiting", - __FUNCTION__, res); - exit(1); - } - return res; -} - -static unsigned int GetDHOffset1(uint8_t *handshake, unsigned int len) { - unsigned int offset = 0; - uint8_t *ptr = handshake + 1532; - unsigned int res; - - assert(RTMP_SIG_SIZE <= len); - - offset += (*ptr); - ptr++; - offset += (*ptr); - ptr++; - offset += (*ptr); - ptr++; - offset += (*ptr); - - res = (offset % 632) + 772; - - if (res + 128 > 1531) { - RTMP_Log(RTMP_LOGERROR, - "%s: Couldn't calculate DH offset (got %d), exiting!", - __FUNCTION__, res); - exit(1); - } - - return res; -} - -static unsigned int GetDigestOffset1(uint8_t *handshake, unsigned int len) { - unsigned 
int offset = 0; - uint8_t *ptr = handshake + 8; - unsigned int res; - - assert(12 <= len); - - offset += (*ptr); - ptr++; - offset += (*ptr); - ptr++; - offset += (*ptr); - ptr++; - offset += (*ptr); - - res = (offset % 728) + 12; - - if (res + 32 > 771) { - RTMP_Log(RTMP_LOGERROR, - "%s: Couldn't calculate digest offset (got %d), exiting!", - __FUNCTION__, res); - exit(1); - } - - return res; -} - -static getoff *digoff[] = {GetDigestOffset1, GetDigestOffset2}; -static getoff *dhoff[] = {GetDHOffset1, GetDHOffset2}; - -static void HMACsha256(const uint8_t *message, size_t messageLen, - const uint8_t *key, size_t keylen, uint8_t *digest) { - unsigned int digestLen; - HMAC_CTX ctx; - - HMAC_setup(ctx, key, keylen); - HMAC_crunch(ctx, message, messageLen); - HMAC_finish(ctx, digest, digestLen); - - assert(digestLen == 32); -} - -static void CalculateDigest(unsigned int digestPos, uint8_t *handshakeMessage, - const uint8_t *key, size_t keyLen, - uint8_t *digest) { - const int messageLen = RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH; - uint8_t message[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH]; - - memcpy(message, handshakeMessage, digestPos); - memcpy(message + digestPos, - &handshakeMessage[digestPos + SHA256_DIGEST_LENGTH], - messageLen - digestPos); - - HMACsha256(message, messageLen, key, keyLen, digest); -} - -static int VerifyDigest(unsigned int digestPos, uint8_t *handshakeMessage, - const uint8_t *key, size_t keyLen) { - uint8_t calcDigest[SHA256_DIGEST_LENGTH]; - - CalculateDigest(digestPos, handshakeMessage, key, keyLen, calcDigest); - - return memcmp(&handshakeMessage[digestPos], calcDigest, - SHA256_DIGEST_LENGTH) == 0; -} - -/* handshake - * - * Type = [1 bytes] plain: 0x03, encrypted: 0x06, 0x08, 0x09 - * -------------------------------------------------------------------- [1536 - * bytes] - * Uptime = [4 bytes] big endian unsigned number, uptime - * Version = [4 bytes] each byte represents a version number, e.g. - * 9.0.124.0 - * ... 
- * - */ - -static const uint32_t rtmpe8_keys[16][4] = { - {0xbff034b2, 0x11d9081f, 0xccdfb795, 0x748de732}, - {0x086a5eb6, 0x1743090e, 0x6ef05ab8, 0xfe5a39e2}, - {0x7b10956f, 0x76ce0521, 0x2388a73a, 0x440149a1}, - {0xa943f317, 0xebf11bb2, 0xa691a5ee, 0x17f36339}, - {0x7a30e00a, 0xb529e22c, 0xa087aea5, 0xc0cb79ac}, - {0xbdce0c23, 0x2febdeff, 0x1cfaae16, 0x1123239d}, - {0x55dd3f7b, 0x77e7e62e, 0x9bb8c499, 0xc9481ee4}, - {0x407bb6b4, 0x71e89136, 0xa7aebf55, 0xca33b839}, - {0xfcf6bdc3, 0xb63c3697, 0x7ce4f825, 0x04d959b2}, - {0x28e091fd, 0x41954c4c, 0x7fb7db00, 0xe3a066f8}, - {0x57845b76, 0x4f251b03, 0x46d45bcd, 0xa2c30d29}, - {0x0acceef8, 0xda55b546, 0x03473452, 0x5863713b}, - {0xb82075dc, 0xa75f1fee, 0xd84268e8, 0xa72a44cc}, - {0x07cf6e9e, 0xa16d7b25, 0x9fa7ae6c, 0xd92f5629}, - {0xfeb1eae4, 0x8c8c3ce1, 0x4e0064a7, 0x6a387c2a}, - {0x893a9427, 0xcc3013a2, 0xf106385b, 0xa829f927}}; - -/* RTMPE type 8 uses XTEA on the regular signature - * http://en.wikipedia.org/wiki/XTEA - */ -static void rtmpe8_sig(uint8_t *in, uint8_t *out, int keyid) { - unsigned int i, num_rounds = 32; - uint32_t v0, v1, sum = 0, delta = 0x9E3779B9; - uint32_t const *k; - - v0 = in[0] | (in[1] << 8) | (in[2] << 16) | (in[3] << 24); - v1 = in[4] | (in[5] << 8) | (in[6] << 16) | (in[7] << 24); - k = rtmpe8_keys[keyid]; - - for (i = 0; i < num_rounds; i++) { - v0 += (((v1 << 4) ^ (v1 >> 5)) + v1) ^ (sum + k[sum & 3]); - sum += delta; - v1 += (((v0 << 4) ^ (v0 >> 5)) + v0) ^ (sum + k[(sum >> 11) & 3]); - } - - out[0] = v0; - v0 >>= 8; - out[1] = v0; - v0 >>= 8; - out[2] = v0; - v0 >>= 8; - out[3] = v0; - - out[4] = v1; - v1 >>= 8; - out[5] = v1; - v1 >>= 8; - out[6] = v1; - v1 >>= 8; - out[7] = v1; -} - -static int HandShake(RTMP *r, int FP9HandShake) { - int i, offalg = 0; - int dhposClient = 0; - int digestPosClient = 0; - int encrypted = r->Link.protocol & RTMP_FEATURE_ENC; - - RC4_handle keyIn = 0; - RC4_handle keyOut = 0; - - int32_t *ip; - uint32_t uptime; - - uint8_t clientbuf[RTMP_SIG_SIZE + 
4], *clientsig = clientbuf + 4; - uint8_t serversig[RTMP_SIG_SIZE], client2[RTMP_SIG_SIZE], *reply; - uint8_t type; - getoff *getdh = NULL, *getdig = NULL; - - if (encrypted || r->Link.SWFSize) - FP9HandShake = TRUE; - else - FP9HandShake = FALSE; - - r->Link.rc4keyIn = r->Link.rc4keyOut = 0; - - if (encrypted) { - clientsig[-1] = 0x06; /* 0x08 is RTMPE as well */ - offalg = 1; - } else - clientsig[-1] = 0x03; - - uptime = htonl(RTMP_GetTime()); - memcpy(clientsig, &uptime, 4); - - if (FP9HandShake) { - /* set version to at least 9.0.115.0 */ - if (encrypted) { - clientsig[4] = 128; - clientsig[6] = 3; - } else { - clientsig[4] = 10; - clientsig[6] = 45; - } - clientsig[5] = 0; - clientsig[7] = 2; - - RTMP_Log(RTMP_LOGDEBUG, "%s: Client type: %02X", __FUNCTION__, - clientsig[-1]); - getdig = digoff[offalg]; - getdh = dhoff[offalg]; - } else { - memset(&clientsig[4], 0, 4); - } - -/* generate random data */ -#ifdef _DEBUG - memset(clientsig + 8, 0, RTMP_SIG_SIZE - 8); -#else - ip = (int32_t *)(clientsig + 8); - for (i = 2; i < RTMP_SIG_SIZE / 4; i++) - *ip++ = rand(); -#endif - - /* set handshake digest */ - if (FP9HandShake) { - if (encrypted) { - /* generate Diffie-Hellmann parameters */ - r->Link.dh = DHInit(1024); - if (!r->Link.dh) { - RTMP_Log(RTMP_LOGERROR, "%s: Couldn't initialize Diffie-Hellmann!", - __FUNCTION__); - return FALSE; - } - - dhposClient = getdh(clientsig, RTMP_SIG_SIZE); - RTMP_Log(RTMP_LOGDEBUG, "%s: DH pubkey position: %d", __FUNCTION__, - dhposClient); - - if (!DHGenerateKey(r->Link.dh)) { - RTMP_Log(RTMP_LOGERROR, - "%s: Couldn't generate Diffie-Hellmann public key!", - __FUNCTION__); - return FALSE; - } - - if (!DHGetPublicKey(r->Link.dh, &clientsig[dhposClient], 128)) { - RTMP_Log(RTMP_LOGERROR, "%s: Couldn't write public key!", __FUNCTION__); - return FALSE; - } - } - - digestPosClient = - getdig(clientsig, RTMP_SIG_SIZE); /* reuse this value in verification */ - RTMP_Log(RTMP_LOGDEBUG, "%s: Client digest offset: %d", __FUNCTION__, - 
digestPosClient); - - CalculateDigest(digestPosClient, clientsig, GenuineFPKey, 30, - &clientsig[digestPosClient]); - - RTMP_Log(RTMP_LOGDEBUG, "%s: Initial client digest: ", __FUNCTION__); - RTMP_LogHex(RTMP_LOGDEBUG, clientsig + digestPosClient, - SHA256_DIGEST_LENGTH); - } - -#ifdef _DEBUG - RTMP_Log(RTMP_LOGDEBUG, "Clientsig: "); - RTMP_LogHex(RTMP_LOGDEBUG, clientsig, RTMP_SIG_SIZE); -#endif - - if (!WriteN(r, (char *)clientsig - 1, RTMP_SIG_SIZE + 1)) - return FALSE; - - if (ReadN(r, (char *)&type, 1) != 1) /* 0x03 or 0x06 */ - return FALSE; - - RTMP_Log(RTMP_LOGDEBUG, "%s: Type Answer : %02X", __FUNCTION__, type); - - if (type != clientsig[-1]) - RTMP_Log(RTMP_LOGWARNING, - "%s: Type mismatch: client sent %d, server answered %d", - __FUNCTION__, clientsig[-1], type); - - if (ReadN(r, (char *)serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) - return FALSE; - - /* decode server response */ - memcpy(&uptime, serversig, 4); - uptime = ntohl(uptime); - - RTMP_Log(RTMP_LOGDEBUG, "%s: Server Uptime : %d", __FUNCTION__, uptime); - RTMP_Log(RTMP_LOGDEBUG, "%s: FMS Version : %d.%d.%d.%d", __FUNCTION__, - serversig[4], serversig[5], serversig[6], serversig[7]); - - if (FP9HandShake && type == 3 && !serversig[4]) - FP9HandShake = FALSE; - -#ifdef _DEBUG - RTMP_Log(RTMP_LOGDEBUG, "Server signature:"); - RTMP_LogHex(RTMP_LOGDEBUG, serversig, RTMP_SIG_SIZE); -#endif - - if (FP9HandShake) { - uint8_t digestResp[SHA256_DIGEST_LENGTH]; - uint8_t *signatureResp = NULL; - - /* we have to use this signature now to find the correct algorithms for - * getting the digest and DH positions */ - int digestPosServer = getdig(serversig, RTMP_SIG_SIZE); - - if (!VerifyDigest(digestPosServer, serversig, GenuineFMSKey, 36)) { - RTMP_Log(RTMP_LOGWARNING, "Trying different position for server digest!"); - offalg ^= 1; - getdig = digoff[offalg]; - getdh = dhoff[offalg]; - digestPosServer = getdig(serversig, RTMP_SIG_SIZE); - - if (!VerifyDigest(digestPosServer, serversig, GenuineFMSKey, 36)) { - 
RTMP_Log( - RTMP_LOGERROR, - "Couldn't verify the server digest"); /* continuing anyway will - probably fail */ - return FALSE; - } - } - - /* generate SWFVerification token (SHA256 HMAC hash of decompressed SWF, key - * are the last 32 bytes of the server handshake) */ - if (r->Link.SWFSize) { - const char swfVerify[] = {0x01, 0x01}; - char *vend = r->Link.SWFVerificationResponse + - sizeof(r->Link.SWFVerificationResponse); - - memcpy(r->Link.SWFVerificationResponse, swfVerify, 2); - AMF_EncodeInt32(&r->Link.SWFVerificationResponse[2], vend, - r->Link.SWFSize); - AMF_EncodeInt32(&r->Link.SWFVerificationResponse[6], vend, - r->Link.SWFSize); - HMACsha256(r->Link.SWFHash, SHA256_DIGEST_LENGTH, - &serversig[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH], - SHA256_DIGEST_LENGTH, - (uint8_t *)&r->Link.SWFVerificationResponse[10]); - } - - /* do Diffie-Hellmann Key exchange for encrypted RTMP */ - if (encrypted) { - /* compute secret key */ - uint8_t secretKey[128] = {0}; - int len, dhposServer; - - dhposServer = getdh(serversig, RTMP_SIG_SIZE); - RTMP_Log(RTMP_LOGDEBUG, "%s: Server DH public key offset: %d", - __FUNCTION__, dhposServer); - len = DHComputeSharedSecretKey(r->Link.dh, &serversig[dhposServer], 128, - secretKey); - if (len < 0) { - RTMP_Log(RTMP_LOGDEBUG, "%s: Wrong secret key position!", __FUNCTION__); - return FALSE; - } - - RTMP_Log(RTMP_LOGDEBUG, "%s: Secret key: ", __FUNCTION__); - RTMP_LogHex(RTMP_LOGDEBUG, secretKey, 128); - - InitRC4Encryption(secretKey, (uint8_t *)&serversig[dhposServer], - (uint8_t *)&clientsig[dhposClient], &keyIn, &keyOut); - } - - reply = client2; -#ifdef _DEBUG - memset(reply, 0xff, RTMP_SIG_SIZE); -#else - ip = (int32_t *)reply; - for (i = 0; i < RTMP_SIG_SIZE / 4; i++) - *ip++ = rand(); -#endif - /* calculate response now */ - signatureResp = reply + RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH; - - HMACsha256(&serversig[digestPosServer], SHA256_DIGEST_LENGTH, GenuineFPKey, - sizeof(GenuineFPKey), digestResp); - HMACsha256(reply, 
RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH, digestResp, - SHA256_DIGEST_LENGTH, signatureResp); - - /* some info output */ - RTMP_Log(RTMP_LOGDEBUG, - "%s: Calculated digest key from secure key and server digest: ", - __FUNCTION__); - RTMP_LogHex(RTMP_LOGDEBUG, digestResp, SHA256_DIGEST_LENGTH); - -#ifdef FP10 - if (type == 8) { - uint8_t *dptr = digestResp; - uint8_t *sig = signatureResp; - /* encrypt signatureResp */ - for (i = 0; i < SHA256_DIGEST_LENGTH; i += 8) - rtmpe8_sig(sig + i, sig + i, dptr[i] % 15); - } -#if 0 - else if (type == 9)) - { - uint8_t *dptr = digestResp; - uint8_t *sig = signatureResp; - /* encrypt signatureResp */ - for (i=0; iLink.rc4keyIn = keyIn; - r->Link.rc4keyOut = keyOut; - - /* update the keystreams */ - if (r->Link.rc4keyIn) { - RC4_encrypt(r->Link.rc4keyIn, RTMP_SIG_SIZE, (uint8_t *)buff); - } - - if (r->Link.rc4keyOut) { - RC4_encrypt(r->Link.rc4keyOut, RTMP_SIG_SIZE, (uint8_t *)buff); - } - } - } else { - if (memcmp(serversig, clientsig, RTMP_SIG_SIZE) != 0) { - RTMP_Log(RTMP_LOGWARNING, "%s: client signature does not match!", - __FUNCTION__); - } - } - - RTMP_Log(RTMP_LOGDEBUG, "%s: Handshaking finished....", __FUNCTION__); - return TRUE; -} - -static int SHandShake(RTMP *r) { - int i, offalg = 0; - int dhposServer = 0; - int digestPosServer = 0; - RC4_handle keyIn = 0; - RC4_handle keyOut = 0; - int FP9HandShake = FALSE; - int encrypted; - int32_t *ip; - - uint8_t clientsig[RTMP_SIG_SIZE]; - uint8_t serverbuf[RTMP_SIG_SIZE + 4], *serversig = serverbuf + 4; - uint8_t type; - uint32_t uptime; - getoff *getdh = NULL, *getdig = NULL; - - if (ReadN(r, (char *)&type, 1) != 1) /* 0x03 or 0x06 */ - return FALSE; - - if (ReadN(r, (char *)clientsig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) - return FALSE; - - RTMP_Log(RTMP_LOGDEBUG, "%s: Type Requested : %02X", __FUNCTION__, type); - RTMP_LogHex(RTMP_LOGDEBUG2, clientsig, RTMP_SIG_SIZE); - - if (type == 3) { - encrypted = FALSE; - } else if (type == 6 || type == 8) { - offalg = 1; - encrypted = TRUE; 
- FP9HandShake = TRUE; - r->Link.protocol |= RTMP_FEATURE_ENC; - /* use FP10 if client is capable */ - if (clientsig[4] == 128) - type = 8; - } else { - RTMP_Log(RTMP_LOGERROR, "%s: Unknown version %02x", __FUNCTION__, type); - return FALSE; - } - - if (!FP9HandShake && clientsig[4]) - FP9HandShake = TRUE; - - serversig[-1] = type; - - r->Link.rc4keyIn = r->Link.rc4keyOut = 0; - - uptime = htonl(RTMP_GetTime()); - memcpy(serversig, &uptime, 4); - - if (FP9HandShake) { - /* Server version */ - serversig[4] = 3; - serversig[5] = 5; - serversig[6] = 1; - serversig[7] = 1; - - getdig = digoff[offalg]; - getdh = dhoff[offalg]; - } else { - memset(&serversig[4], 0, 4); - } - -/* generate random data */ -#ifdef _DEBUG - memset(serversig + 8, 0, RTMP_SIG_SIZE - 8); -#else - ip = (int32_t *)(serversig + 8); - for (i = 2; i < RTMP_SIG_SIZE / 4; i++) - *ip++ = rand(); -#endif - - /* set handshake digest */ - if (FP9HandShake) { - if (encrypted) { - /* generate Diffie-Hellmann parameters */ - r->Link.dh = DHInit(1024); - if (!r->Link.dh) { - RTMP_Log(RTMP_LOGERROR, "%s: Couldn't initialize Diffie-Hellmann!", - __FUNCTION__); - return FALSE; - } - - dhposServer = getdh(serversig, RTMP_SIG_SIZE); - RTMP_Log(RTMP_LOGDEBUG, "%s: DH pubkey position: %d", __FUNCTION__, - dhposServer); - - if (!DHGenerateKey(r->Link.dh)) { - RTMP_Log(RTMP_LOGERROR, - "%s: Couldn't generate Diffie-Hellmann public key!", - __FUNCTION__); - return FALSE; - } - - if (!DHGetPublicKey(r->Link.dh, (uint8_t *)&serversig[dhposServer], - 128)) { - RTMP_Log(RTMP_LOGERROR, "%s: Couldn't write public key!", __FUNCTION__); - return FALSE; - } - } - - digestPosServer = - getdig(serversig, RTMP_SIG_SIZE); /* reuse this value in verification */ - RTMP_Log(RTMP_LOGDEBUG, "%s: Server digest offset: %d", __FUNCTION__, - digestPosServer); - - CalculateDigest(digestPosServer, serversig, GenuineFMSKey, 36, - &serversig[digestPosServer]); - - RTMP_Log(RTMP_LOGDEBUG, "%s: Initial server digest: ", __FUNCTION__); - 
RTMP_LogHex(RTMP_LOGDEBUG, serversig + digestPosServer, - SHA256_DIGEST_LENGTH); - } - - RTMP_Log(RTMP_LOGDEBUG2, "Serversig: "); - RTMP_LogHex(RTMP_LOGDEBUG2, serversig, RTMP_SIG_SIZE); - - if (!WriteN(r, (char *)serversig - 1, RTMP_SIG_SIZE + 1)) - return FALSE; - - /* decode client response */ - memcpy(&uptime, clientsig, 4); - uptime = ntohl(uptime); - - RTMP_Log(RTMP_LOGDEBUG, "%s: Client Uptime : %d", __FUNCTION__, uptime); - RTMP_Log(RTMP_LOGDEBUG, "%s: Player Version: %d.%d.%d.%d", __FUNCTION__, - clientsig[4], clientsig[5], clientsig[6], clientsig[7]); - - if (FP9HandShake) { - uint8_t digestResp[SHA256_DIGEST_LENGTH]; - uint8_t *signatureResp = NULL; - - /* we have to use this signature now to find the correct algorithms for - * getting the digest and DH positions */ - int digestPosClient = getdig(clientsig, RTMP_SIG_SIZE); - - if (!VerifyDigest(digestPosClient, clientsig, GenuineFPKey, 30)) { - RTMP_Log(RTMP_LOGWARNING, "Trying different position for client digest!"); - offalg ^= 1; - getdig = digoff[offalg]; - getdh = dhoff[offalg]; - - digestPosClient = getdig(clientsig, RTMP_SIG_SIZE); - - if (!VerifyDigest(digestPosClient, clientsig, GenuineFPKey, 30)) { - RTMP_Log( - RTMP_LOGERROR, - "Couldn't verify the client digest"); /* continuing anyway will - probably fail */ - return FALSE; - } - } - - /* generate SWFVerification token (SHA256 HMAC hash of decompressed SWF, key - * are the last 32 bytes of the server handshake) */ - if (r->Link.SWFSize) { - const char swfVerify[] = {0x01, 0x01}; - char *vend = r->Link.SWFVerificationResponse + - sizeof(r->Link.SWFVerificationResponse); - - memcpy(r->Link.SWFVerificationResponse, swfVerify, 2); - AMF_EncodeInt32(&r->Link.SWFVerificationResponse[2], vend, - r->Link.SWFSize); - AMF_EncodeInt32(&r->Link.SWFVerificationResponse[6], vend, - r->Link.SWFSize); - HMACsha256(r->Link.SWFHash, SHA256_DIGEST_LENGTH, - &serversig[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH], - SHA256_DIGEST_LENGTH, - (uint8_t 
*)&r->Link.SWFVerificationResponse[10]); - } - - /* do Diffie-Hellmann Key exchange for encrypted RTMP */ - if (encrypted) { - int dhposClient, len; - /* compute secret key */ - uint8_t secretKey[128] = {0}; - - dhposClient = getdh(clientsig, RTMP_SIG_SIZE); - RTMP_Log(RTMP_LOGDEBUG, "%s: Client DH public key offset: %d", - __FUNCTION__, dhposClient); - len = DHComputeSharedSecretKey( - r->Link.dh, (uint8_t *)&clientsig[dhposClient], 128, secretKey); - if (len < 0) { - RTMP_Log(RTMP_LOGDEBUG, "%s: Wrong secret key position!", __FUNCTION__); - return FALSE; - } - - RTMP_Log(RTMP_LOGDEBUG, "%s: Secret key: ", __FUNCTION__); - RTMP_LogHex(RTMP_LOGDEBUG, secretKey, 128); - - InitRC4Encryption(secretKey, (uint8_t *)&clientsig[dhposClient], - (uint8_t *)&serversig[dhposServer], &keyIn, &keyOut); - } - - /* calculate response now */ - signatureResp = clientsig + RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH; - - HMACsha256(&clientsig[digestPosClient], SHA256_DIGEST_LENGTH, GenuineFMSKey, - sizeof(GenuineFMSKey), digestResp); - HMACsha256(clientsig, RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH, digestResp, - SHA256_DIGEST_LENGTH, signatureResp); -#ifdef FP10 - if (type == 8) { - uint8_t *dptr = digestResp; - uint8_t *sig = signatureResp; - /* encrypt signatureResp */ - for (i = 0; i < SHA256_DIGEST_LENGTH; i += 8) - rtmpe8_sig(sig + i, sig + i, dptr[i] % 15); - } -#if 0 - else if (type == 9)) - { - uint8_t *dptr = digestResp; - uint8_t *sig = signatureResp; - /* encrypt signatureResp */ - for (i=0; iLink.rc4keyIn = keyIn; - r->Link.rc4keyOut = keyOut; - - /* update the keystreams */ - if (r->Link.rc4keyIn) { - RC4_encrypt(r->Link.rc4keyIn, RTMP_SIG_SIZE, (uint8_t *)buff); - } - - if (r->Link.rc4keyOut) { - RC4_encrypt(r->Link.rc4keyOut, RTMP_SIG_SIZE, (uint8_t *)buff); - } - } - } else { - if (memcmp(serversig, clientsig, RTMP_SIG_SIZE) != 0) { - RTMP_Log(RTMP_LOGWARNING, "%s: client signature does not match!", - __FUNCTION__); - } - } - - RTMP_Log(RTMP_LOGDEBUG, "%s: Handshaking 
finished....", __FUNCTION__); - return TRUE; -} diff --git a/LFLiveKit/publish/pili-librtmp/hashswf.c b/LFLiveKit/publish/pili-librtmp/hashswf.c deleted file mode 100755 index 28ceeb4a..00000000 --- a/LFLiveKit/publish/pili-librtmp/hashswf.c +++ /dev/null @@ -1,626 +0,0 @@ -/* - * Copyright (C) 2009-2010 Howard Chu - * - * This file is part of librtmp. - * - * librtmp is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1, - * or (at your option) any later version. - * - * librtmp is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with librtmp see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * http://www.gnu.org/copyleft/lgpl.html - */ - -#include -#include -#include -#include -#include - -#include "http.h" -#include "log.h" -#include "rtmp_sys.h" - -#ifdef CRYPTO -#ifdef USE_POLARSSL -#include -#ifndef SHA256_DIGEST_LENGTH -#define SHA256_DIGEST_LENGTH 32 -#endif -#define HMAC_CTX sha2_context -#define HMAC_setup(ctx, key, len) sha2_hmac_starts(&ctx, (unsigned char *)key, len, 0) -#define HMAC_crunch(ctx, buf, len) sha2_hmac_update(&ctx, buf, len) -#define HMAC_finish(ctx, dig, dlen) \ - dlen = SHA256_DIGEST_LENGTH; \ - sha2_hmac_finish(&ctx, dig) -#define HMAC_close(ctx) -#elif defined(USE_GNUTLS) -#include -#include -#ifndef SHA256_DIGEST_LENGTH -#define SHA256_DIGEST_LENGTH 32 -#endif -#define HMAC_CTX gcry_md_hd_t -#define HMAC_setup(ctx, key, len) \ - gcry_md_open(&ctx, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC); \ - gcry_md_setkey(ctx, key, len) -#define HMAC_crunch(ctx, buf, len) gcry_md_write(ctx, buf, len) -#define HMAC_finish(ctx, dig, dlen) \ - dlen = SHA256_DIGEST_LENGTH; \ - memcpy(dig, gcry_md_read(ctx, 0), dlen) -#define HMAC_close(ctx) gcry_md_close(ctx) -#else /* USE_OPENSSL */ -#include -#include -#include -#include -#define HMAC_setup(ctx, key, len) \ - HMAC_CTX_init(&ctx); \ - HMAC_Init_ex(&ctx, (unsigned char *)key, len, EVP_sha256(), 0) -#define HMAC_crunch(ctx, buf, len) HMAC_Update(&ctx, (unsigned char *)buf, len) -#define HMAC_finish(ctx, dig, dlen) HMAC_Final(&ctx, (unsigned char *)dig, &dlen); -#define HMAC_close(ctx) HMAC_CTX_cleanup(&ctx) -#endif - -extern void RTMP_TLS_Init(); -extern TLS_CTX RTMP_TLS_ctx; - -#endif /* CRYPTO */ - -#include - -#define AGENT "Mozilla/5.0" - -HTTPResult - HTTP_get(struct HTTP_ctx *http, const char *url, HTTP_read_callback *cb) { - char *host, *path; - char *p1, *p2; - char hbuf[256]; - int port = 80; -#ifdef CRYPTO - int ssl = 0; -#endif - int hlen, flen = 0; - int rc, i; - int len_known; - HTTPResult ret = HTTPRES_OK; - // struct sockaddr_in sa; - PILI_RTMPSockBuf sb = {0}; - - http->status = 
-1; - - // memset(&sa, 0, sizeof(struct sockaddr_in)); - // sa.sin_family = AF_INET; - - /* we only handle http here */ - if (strncasecmp(url, "http", 4)) - return HTTPRES_BAD_REQUEST; - - if (url[4] == 's') { -#ifdef CRYPTO - ssl = 1; - port = 443; - if (!RTMP_TLS_ctx) - RTMP_TLS_Init(); -#else - return HTTPRES_BAD_REQUEST; -#endif - } - - p1 = strchr(url + 4, ':'); - if (!p1 || strncmp(p1, "://", 3)) - return HTTPRES_BAD_REQUEST; - - host = p1 + 3; - path = strchr(host, '/'); - hlen = path - host; - strncpy(hbuf, host, hlen); - hbuf[hlen] = '\0'; - host = hbuf; - p1 = strrchr(host, ':'); - if (p1) { - *p1++ = '\0'; - port = atoi(p1); - } - - // sa.sin_addr.s_addr = inet_addr(host); - // if (sa.sin_addr.s_addr == INADDR_NONE) - // { - // struct hostent *hp = gethostbyname(host); - // if (!hp || !hp->h_addr) - // return HTTPRES_LOST_CONNECTION; - // sa.sin_addr = *(struct in_addr *)hp->h_addr; - // } - // sa.sin_port = htons(port); - struct addrinfo hints = {0}, *ai, *cur_ai; - char portstr[10]; - hints.ai_family = AF_UNSPEC; - hints.ai_socktype = SOCK_STREAM; - snprintf(portstr, sizeof(portstr), "%d", port); - ret = getaddrinfo(host, portstr, &hints, &ai); - if (ret != 0) { - return HTTPRES_LOST_CONNECTION; - } - - cur_ai = ai; - - sb.sb_socket = socket(cur_ai->ai_family, - cur_ai->ai_socktype, - cur_ai->ai_protocol); - // sb.sb_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); - if (sb.sb_socket == -1) { - freeaddrinfo(ai); - return HTTPRES_LOST_CONNECTION; - } - i = sprintf(sb.sb_buf, - "GET %s HTTP/1.0\r\nUser-Agent: %s\r\nHost: %s\r\nReferrer: %.*s\r\n", - path, AGENT, host, (int)(path - url + 1), url); - if (http->date[0]) - i += sprintf(sb.sb_buf + i, "If-Modified-Since: %s\r\n", http->date); - i += sprintf(sb.sb_buf + i, "\r\n"); - - if (cur_ai->ai_family == AF_INET6) { - struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)cur_ai->ai_addr; - in6->sin6_port = htons(port); - } - - if (connect(sb.sb_socket, cur_ai->ai_addr, cur_ai->ai_addrlen) < 0) { - ret = 
HTTPRES_LOST_CONNECTION; - goto leave; - } -#ifdef CRYPTO - if (ssl) { -#ifdef NO_SSL - RTMP_Log(RTMP_LOGERROR, "%s, No SSL/TLS support", __FUNCTION__); - ret = HTTPRES_BAD_REQUEST; - goto leave; -#else - TLS_client(RTMP_TLS_ctx, sb.sb_ssl); - TLS_setfd(sb.sb_ssl, sb.sb_socket); - if ((i = TLS_connect(sb.sb_ssl)) < 0) { - RTMP_Log(RTMP_LOGERROR, "%s, TLS_Connect failed", __FUNCTION__); - ret = HTTPRES_LOST_CONNECTION; - goto leave; - } -#endif - } -#endif - PILI_RTMPSockBuf_Send(&sb, sb.sb_buf, i); - -/* set timeout */ -#define HTTP_TIMEOUT 5 - { - SET_RCVTIMEO(tv, HTTP_TIMEOUT); - if (setsockopt(sb.sb_socket, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(tv))) { - RTMP_Log(RTMP_LOGERROR, "%s, Setting socket timeout to %ds failed!", - __FUNCTION__, HTTP_TIMEOUT); - } - } - - sb.sb_size = 0; - sb.sb_timedout = FALSE; - if (PILI_RTMPSockBuf_Fill(&sb, HTTP_TIMEOUT) < 1) { - ret = HTTPRES_LOST_CONNECTION; - goto leave; - } - if (strncmp(sb.sb_buf, "HTTP/1", 6)) { - ret = HTTPRES_BAD_REQUEST; - goto leave; - } - - p1 = strchr(sb.sb_buf, ' '); - rc = atoi(p1 + 1); - http->status = rc; - - if (rc >= 300) { - if (rc == 304) { - ret = HTTPRES_OK_NOT_MODIFIED; - goto leave; - } else if (rc == 404) - ret = HTTPRES_NOT_FOUND; - else if (rc >= 500) - ret = HTTPRES_SERVER_ERROR; - else if (rc >= 400) - ret = HTTPRES_BAD_REQUEST; - else - ret = HTTPRES_REDIRECTED; - } - - p1 = memchr(sb.sb_buf, '\n', sb.sb_size); - if (!p1) { - ret = HTTPRES_BAD_REQUEST; - goto leave; - } - sb.sb_start = p1 + 1; - sb.sb_size -= sb.sb_start - sb.sb_buf; - - while ((p2 = memchr(sb.sb_start, '\r', sb.sb_size))) { - if (*sb.sb_start == '\r') { - sb.sb_start += 2; - sb.sb_size -= 2; - break; - } else if (!strncasecmp(sb.sb_start, "Content-Length: ", sizeof("Content-Length: ") - 1)) { - flen = atoi(sb.sb_start + sizeof("Content-Length: ") - 1); - } else if (!strncasecmp(sb.sb_start, "Last-Modified: ", sizeof("Last-Modified: ") - 1)) { - *p2 = '\0'; - strcpy(http->date, sb.sb_start + 
sizeof("Last-Modified: ") - 1); - } - p2 += 2; - sb.sb_size -= p2 - sb.sb_start; - sb.sb_start = p2; - if (sb.sb_size < 1) { - if (PILI_RTMPSockBuf_Fill(&sb, HTTP_TIMEOUT) < 1) { - ret = HTTPRES_LOST_CONNECTION; - goto leave; - } - } - } - - len_known = flen > 0; - while ((!len_known || flen > 0) && - (sb.sb_size > 0 || PILI_RTMPSockBuf_Fill(&sb, HTTP_TIMEOUT) > 0)) { - cb(sb.sb_start, 1, sb.sb_size, http->data); - if (len_known) - flen -= sb.sb_size; - http->size += sb.sb_size; - sb.sb_size = 0; - } - - if (flen > 0) - ret = HTTPRES_LOST_CONNECTION; - -leave: - PILI_RTMPSockBuf_Close(&sb); - freeaddrinfo(ai); - return ret; -} - -#ifdef CRYPTO - -#define CHUNK 16384 - -struct info { - z_stream *zs; - HMAC_CTX ctx; - int first; - int zlib; - int size; -}; - -static size_t - swfcrunch(void *ptr, size_t size, size_t nmemb, void *stream) { - struct info *i = stream; - char *p = ptr; - size_t len = size * nmemb; - - if (i->first) { - i->first = 0; - /* compressed? */ - if (!strncmp(p, "CWS", 3)) { - *p = 'F'; - i->zlib = 1; - } - HMAC_crunch(i->ctx, (unsigned char *)p, 8); - p += 8; - len -= 8; - i->size = 8; - } - - if (i->zlib) { - unsigned char out[CHUNK]; - i->zs->next_in = (unsigned char *)p; - i->zs->avail_in = len; - do { - i->zs->avail_out = CHUNK; - i->zs->next_out = out; - inflate(i->zs, Z_NO_FLUSH); - len = CHUNK - i->zs->avail_out; - i->size += len; - HMAC_crunch(i->ctx, out, len); - } while (i->zs->avail_out == 0); - } else { - i->size += len; - HMAC_crunch(i->ctx, (unsigned char *)p, len); - } - return size * nmemb; -} - -static int tzoff; -static int tzchecked; - -#define JAN02_1980 318340800 - -static const char *monthtab[12] = {"Jan", "Feb", "Mar", - "Apr", "May", "Jun", - "Jul", "Aug", "Sep", - "Oct", "Nov", "Dec"}; -static const char *days[] = - {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}; - -/* Parse an HTTP datestamp into Unix time */ -static time_t - make_unix_time(char *s) { - struct tm time; - int i, ysub = 1900, fmt = 0; - char *month; - 
char *n; - time_t res; - - if (s[3] != ' ') { - fmt = 1; - if (s[3] != ',') - ysub = 0; - } - for (n = s; *n; ++n) - if (*n == '-' || *n == ':') - *n = ' '; - - time.tm_mon = 0; - n = strchr(s, ' '); - if (fmt) { - /* Day, DD-MMM-YYYY HH:MM:SS GMT */ - time.tm_mday = strtol(n + 1, &n, 0); - month = n + 1; - n = strchr(month, ' '); - time.tm_year = strtol(n + 1, &n, 0); - time.tm_hour = strtol(n + 1, &n, 0); - time.tm_min = strtol(n + 1, &n, 0); - time.tm_sec = strtol(n + 1, NULL, 0); - } else { - /* Unix ctime() format. Does not conform to HTTP spec. */ - /* Day MMM DD HH:MM:SS YYYY */ - month = n + 1; - n = strchr(month, ' '); - while (isspace(*n)) - n++; - time.tm_mday = strtol(n, &n, 0); - time.tm_hour = strtol(n + 1, &n, 0); - time.tm_min = strtol(n + 1, &n, 0); - time.tm_sec = strtol(n + 1, &n, 0); - time.tm_year = strtol(n + 1, NULL, 0); - } - if (time.tm_year > 100) - time.tm_year -= ysub; - - for (i = 0; i < 12; i++) - if (!strncasecmp(month, monthtab[i], 3)) { - time.tm_mon = i; - break; - } - time.tm_isdst = 0; /* daylight saving is never in effect in GMT */ - - /* this is normally the value of extern int timezone, but some - * braindead C libraries don't provide it. - */ - if (!tzchecked) { - struct tm *tc; - time_t then = JAN02_1980; - tc = localtime(&then); - tzoff = (12 - tc->tm_hour) * 3600 + tc->tm_min * 60 + tc->tm_sec; - tzchecked = 1; - } - res = mktime(&time); - /* Unfortunately, mktime() assumes the input is in local time, - * not GMT, so we have to correct it here. - */ - if (res != -1) - res += tzoff; - return res; -} - -/* Convert a Unix time to a network time string - * Weekday, DD-MMM-YYYY HH:MM:SS GMT - */ -void strtime(time_t *t, char *s) { - struct tm *tm; - - tm = gmtime((time_t *)t); - sprintf(s, "%s, %02d %s %d %02d:%02d:%02d GMT", - days[tm->tm_wday], tm->tm_mday, monthtab[tm->tm_mon], - tm->tm_year + 1900, tm->tm_hour, tm->tm_min, tm->tm_sec); -} - -#define HEX2BIN(a) (((a)&0x40) ? 
((a)&0xf) + 9 : ((a)&0xf)) - -int RTMP_HashSWF(const char *url, unsigned int *size, unsigned char *hash, - int age) { - FILE *f = NULL; - char *path, date[64], cctim[64]; - long pos = 0; - time_t ctim = -1, cnow; - int i, got = 0, ret = 0; - unsigned int hlen; - struct info in = {0}; - struct HTTP_ctx http = {0}; - HTTPResult httpres; - z_stream zs = {0}; - AVal home, hpre; - - date[0] = '\0'; -#ifdef _WIN32 -#ifdef _XBOX - hpre.av_val = "Q:"; - hpre.av_len = 2; - home.av_val = "\\UserData"; -#else - hpre.av_val = getenv("HOMEDRIVE"); - hpre.av_len = strlen(hpre.av_val); - home.av_val = getenv("HOMEPATH"); -#endif -#define DIRSEP "\\" - -#else /* !_WIN32 */ - hpre.av_val = ""; - hpre.av_len = 0; - home.av_val = getenv("HOME"); -#define DIRSEP "/" -#endif - if (!home.av_val) - home.av_val = "."; - home.av_len = strlen(home.av_val); - - /* SWF hash info is cached in a fixed-format file. - * url: - * ctim: HTTP datestamp of when we last checked it. - * date: HTTP datestamp of the SWF's last modification. - * size: SWF size in hex - * hash: SWF hash in hex - * - * These fields must be present in this order. All fields - * besides URL are fixed size. 
- */ - path = malloc(hpre.av_len + home.av_len + sizeof(DIRSEP ".swfinfo")); - sprintf(path, "%s%s" DIRSEP ".swfinfo", hpre.av_val, home.av_val); - - f = fopen(path, "r+"); - while (f) { - char buf[4096], *file, *p; - - file = strchr(url, '/'); - if (!file) - break; - file += 2; - file = strchr(file, '/'); - if (!file) - break; - file++; - hlen = file - url; - p = strrchr(file, '/'); - if (p) - file = p; - else - file--; - - while (fgets(buf, sizeof(buf), f)) { - char *r1; - - got = 0; - - if (strncmp(buf, "url: ", 5)) - continue; - if (strncmp(buf + 5, url, hlen)) - continue; - r1 = strrchr(buf, '/'); - i = strlen(r1); - r1[--i] = '\0'; - if (strncmp(r1, file, i)) - continue; - pos = ftell(f); - while (got < 4 && fgets(buf, sizeof(buf), f)) { - if (!strncmp(buf, "size: ", 6)) { - *size = strtol(buf + 6, NULL, 16); - got++; - } else if (!strncmp(buf, "hash: ", 6)) { - unsigned char *ptr = hash, *in = (unsigned char *)buf + 6; - int l = strlen((char *)in) - 1; - for (i = 0; i < l; i += 2) - *ptr++ = (HEX2BIN(in[i]) << 4) | HEX2BIN(in[i + 1]); - got++; - } else if (!strncmp(buf, "date: ", 6)) { - buf[strlen(buf) - 1] = '\0'; - strncpy(date, buf + 6, sizeof(date)); - got++; - } else if (!strncmp(buf, "ctim: ", 6)) { - buf[strlen(buf) - 1] = '\0'; - ctim = make_unix_time(buf + 6); - got++; - } else if (!strncmp(buf, "url: ", 5)) - break; - } - break; - } - break; - } - - cnow = time(NULL); - /* If we got a cache time, see if it's young enough to use directly */ - if (age && ctim > 0) { - ctim = cnow - ctim; - ctim /= 3600 * 24; /* seconds to days */ - if (ctim < age) /* ok, it's new enough */ - goto out; - } - - in.first = 1; - HMAC_setup(in.ctx, "Genuine Adobe Flash Player 001", 30); - inflateInit(&zs); - in.zs = &zs; - - http.date = date; - http.data = ∈ - - httpres = HTTP_get(&http, url, swfcrunch); - - inflateEnd(&zs); - - if (httpres != HTTPRES_OK && httpres != HTTPRES_OK_NOT_MODIFIED) { - ret = -1; - if (httpres == HTTPRES_LOST_CONNECTION) - 
RTMP_Log(RTMP_LOGERROR, "%s: connection lost while downloading swfurl %s", - __FUNCTION__, url); - else if (httpres == HTTPRES_NOT_FOUND) - RTMP_Log(RTMP_LOGERROR, "%s: swfurl %s not found", __FUNCTION__, url); - else - RTMP_Log(RTMP_LOGERROR, "%s: couldn't contact swfurl %s (HTTP error %d)", - __FUNCTION__, url, http.status); - } else { - if (got && pos) - fseek(f, pos, SEEK_SET); - else { - char *q; - if (!f) - f = fopen(path, "w"); - if (!f) { - int err = errno; - RTMP_Log(RTMP_LOGERROR, - "%s: couldn't open %s for writing, errno %d (%s)", - __FUNCTION__, path, err, strerror(err)); - ret = -1; - goto out; - } - fseek(f, 0, SEEK_END); - q = strchr(url, '?'); - if (q) - i = q - url; - else - i = strlen(url); - - fprintf(f, "url: %.*s\n", i, url); - } - strtime(&cnow, cctim); - fprintf(f, "ctim: %s\n", cctim); - - if (!in.first) { - HMAC_finish(in.ctx, hash, hlen); - *size = in.size; - - fprintf(f, "date: %s\n", date); - fprintf(f, "size: %08x\n", in.size); - fprintf(f, "hash: "); - for (i = 0; i < SHA256_DIGEST_LENGTH; i++) - fprintf(f, "%02x", hash[i]); - fprintf(f, "\n"); - } - } - HMAC_close(in.ctx); -out: - free(path); - if (f) - fclose(f); - return ret; -} -#else -int RTMP_HashSWF(const char *url, unsigned int *size, unsigned char *hash, - int age) { - return -1; -} -#endif diff --git a/LFLiveKit/publish/pili-librtmp/http.h b/LFLiveKit/publish/pili-librtmp/http.h deleted file mode 100755 index 1eb7a462..00000000 --- a/LFLiveKit/publish/pili-librtmp/http.h +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef __RTMP_HTTP_H__ -#define __RTMP_HTTP_H__ -/* - * Copyright (C) 2010 Howard Chu - * Copyright (C) 2010 Antti Ajanki - * - * This file is part of librtmp. - * - * librtmp is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1, - * or (at your option) any later version. 
- * - * librtmp is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with librtmp see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * http://www.gnu.org/copyleft/lgpl.html - */ - -typedef enum { - HTTPRES_OK, /* result OK */ - HTTPRES_OK_NOT_MODIFIED, /* not modified since last request */ - HTTPRES_NOT_FOUND, /* not found */ - HTTPRES_BAD_REQUEST, /* client error */ - HTTPRES_SERVER_ERROR, /* server reported an error */ - HTTPRES_REDIRECTED, /* resource has been moved */ - HTTPRES_LOST_CONNECTION /* connection lost while waiting for data */ -} HTTPResult; - -struct HTTP_ctx { - char *date; - int size; - int status; - void *data; -}; - -typedef size_t(HTTP_read_callback)(void *ptr, size_t size, size_t nmemb, - void *stream); - -HTTPResult HTTP_get(struct HTTP_ctx *http, const char *url, - HTTP_read_callback *cb); - -#endif diff --git a/LFLiveKit/publish/pili-librtmp/log.c b/LFLiveKit/publish/pili-librtmp/log.c deleted file mode 100755 index d3934366..00000000 --- a/LFLiveKit/publish/pili-librtmp/log.c +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright (C) 2008-2009 Andrej Stepanchuk - * Copyright (C) 2009-2010 Howard Chu - * - * This file is part of librtmp. - * - * librtmp is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1, - * or (at your option) any later version. - * - * librtmp is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with librtmp see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * http://www.gnu.org/copyleft/lgpl.html - */ - -#include -#include -#include -#include -#include - -#include "log.h" -#include "rtmp_sys.h" - -#define MAX_PRINT_LEN 2048 - -RTMP_LogLevel RTMP_debuglevel = RTMP_LOGERROR; - -static int neednl; - -static FILE *fmsg; - -static RTMP_LogCallback rtmp_log_default, *cb = rtmp_log_default; - -static const char *levels[] = { - "CRIT", "ERROR", "WARNING", "INFO", - "DEBUG", "DEBUG2"}; - -static void rtmp_log_default(int level, const char *format, va_list vl) { - char str[MAX_PRINT_LEN] = ""; - - vsnprintf(str, MAX_PRINT_LEN - 1, format, vl); - - /* Filter out 'no-name' */ - if (RTMP_debuglevel < RTMP_LOGALL && strstr(str, "no-name") != NULL) - return; - - if (!fmsg) fmsg = stderr; - - if (level <= RTMP_debuglevel) { - if (neednl) { - putc('\n', fmsg); - neednl = 0; - } - fprintf(fmsg, "%s: %s\n", levels[level], str); -#ifdef _DEBUG - fflush(fmsg); -#endif - } -} - -void RTMP_LogSetOutput(FILE *file) { - fmsg = file; -} - -void RTMP_LogSetLevel(RTMP_LogLevel level) { - RTMP_debuglevel = level; -} - -void RTMP_LogSetCallback(RTMP_LogCallback *cbp) { - cb = cbp; -} - -RTMP_LogLevel RTMP_LogGetLevel() { - return RTMP_debuglevel; -} - -void RTMP_Log(int level, const char *format, ...) 
{ - va_list args; - va_start(args, format); - cb(level, format, args); - va_end(args); -} - -static const char hexdig[] = "0123456789abcdef"; - -void RTMP_LogHex(int level, const uint8_t *data, unsigned long len) { - unsigned long i; - char line[50], *ptr; - - if (level > RTMP_debuglevel) - return; - - ptr = line; - - for (i = 0; i < len; i++) { - *ptr++ = hexdig[0x0f & (data[i] >> 4)]; - *ptr++ = hexdig[0x0f & data[i]]; - if ((i & 0x0f) == 0x0f) { - *ptr = '\0'; - ptr = line; - RTMP_Log(level, "%s", line); - } else { - *ptr++ = ' '; - } - } - if (i & 0x0f) { - *ptr = '\0'; - RTMP_Log(level, "%s", line); - } -} - -void RTMP_LogHexString(int level, const uint8_t *data, unsigned long len) { -#define BP_OFFSET 9 -#define BP_GRAPH 60 -#define BP_LEN 80 - char line[BP_LEN]; - unsigned long i; - - if (!data || level > RTMP_debuglevel) - return; - - /* in case len is zero */ - line[0] = '\0'; - - for (i = 0; i < len; i++) { - int n = i % 16; - unsigned off; - - if (!n) { - if (i) RTMP_Log(level, "%s", line); - memset(line, ' ', sizeof(line) - 2); - line[sizeof(line) - 2] = '\0'; - - off = i % 0x0ffffU; - - line[2] = hexdig[0x0f & (off >> 12)]; - line[3] = hexdig[0x0f & (off >> 8)]; - line[4] = hexdig[0x0f & (off >> 4)]; - line[5] = hexdig[0x0f & off]; - line[6] = ':'; - } - - off = BP_OFFSET + n * 3 + ((n >= 8) ? 1 : 0); - line[off] = hexdig[0x0f & (data[i] >> 4)]; - line[off + 1] = hexdig[0x0f & data[i]]; - - off = BP_GRAPH + n + ((n >= 8) ? 1 : 0); - - if (isprint(data[i])) { - line[BP_GRAPH + n] = data[i]; - } else { - line[BP_GRAPH + n] = '.'; - } - } - - RTMP_Log(level, "%s", line); -} - -/* These should only be used by apps, never by the library itself */ -void RTMP_LogPrintf(const char *format, ...) 
{ - char str[MAX_PRINT_LEN] = ""; - int len; - va_list args; - va_start(args, format); - len = vsnprintf(str, MAX_PRINT_LEN - 1, format, args); - va_end(args); - - if (RTMP_debuglevel == RTMP_LOGCRIT) - return; - - if (!fmsg) fmsg = stderr; - - if (neednl) { - putc('\n', fmsg); - neednl = 0; - } - - if (len > MAX_PRINT_LEN - 1) - len = MAX_PRINT_LEN - 1; - fprintf(fmsg, "%s", str); - if (str[len - 1] == '\n') - fflush(fmsg); -} - -void RTMP_LogStatus(const char *format, ...) { - char str[MAX_PRINT_LEN] = ""; - va_list args; - va_start(args, format); - vsnprintf(str, MAX_PRINT_LEN - 1, format, args); - va_end(args); - - if (RTMP_debuglevel == RTMP_LOGCRIT) - return; - - if (!fmsg) fmsg = stderr; - - fprintf(fmsg, "%s", str); - fflush(fmsg); - neednl = 1; -} diff --git a/LFLiveKit/publish/pili-librtmp/log.h b/LFLiveKit/publish/pili-librtmp/log.h deleted file mode 100755 index f7daf375..00000000 --- a/LFLiveKit/publish/pili-librtmp/log.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (C) 2008-2009 Andrej Stepanchuk - * Copyright (C) 2009-2010 Howard Chu - * - * This file is part of librtmp. - * - * librtmp is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1, - * or (at your option) any later version. - * - * librtmp is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with librtmp see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * http://www.gnu.org/copyleft/lgpl.html - */ - -#ifndef __RTMP_LOG_H__ -#define __RTMP_LOG_H__ - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif -/* Enable this to get full debugging output */ -/* #define _DEBUG */ - -#ifdef _DEBUG -#undef NODEBUG -#endif - -typedef enum { - RTMP_LOGCRIT = 0, - RTMP_LOGERROR, - RTMP_LOGWARNING, - RTMP_LOGINFO, - RTMP_LOGDEBUG, - RTMP_LOGDEBUG2, - RTMP_LOGALL -} RTMP_LogLevel; - -extern RTMP_LogLevel RTMP_debuglevel; - -typedef void(RTMP_LogCallback)(int level, const char *fmt, va_list); -void RTMP_LogSetCallback(RTMP_LogCallback *cb); -void RTMP_LogSetOutput(FILE *file); -void RTMP_LogPrintf(const char *format, ...); -void RTMP_LogStatus(const char *format, ...); -void RTMP_Log(int level, const char *format, ...); -void RTMP_LogHex(int level, const uint8_t *data, unsigned long len); -void RTMP_LogHexString(int level, const uint8_t *data, unsigned long len); -void RTMP_LogSetLevel(RTMP_LogLevel lvl); -RTMP_LogLevel RTMP_LogGetLevel(void); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/LFLiveKit/publish/pili-librtmp/parseurl.c b/LFLiveKit/publish/pili-librtmp/parseurl.c deleted file mode 100755 index 0e50352b..00000000 --- a/LFLiveKit/publish/pili-librtmp/parseurl.c +++ /dev/null @@ -1,312 +0,0 @@ -/* - * Copyright (C) 2009 Andrej Stepanchuk - * Copyright (C) 2009-2010 Howard Chu - * - * This file is part of librtmp. - * - * librtmp is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1, - * or (at your option) any later version. - * - * librtmp is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public License - * along with librtmp see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * http://www.gnu.org/copyleft/lgpl.html - */ - -#include -#include - -#include -#include - -#include "log.h" -#include "rtmp_sys.h" - -int PILI_RTMP_ParseURL2(const char *url, int *protocol, AVal *host, unsigned int *port, - AVal *playpath, AVal *app, AVal *domainName) { - char *p, *end, *col, *ques, *slash; - - RTMP_Log(RTMP_LOGDEBUG, "Parsing..."); - - *protocol = RTMP_PROTOCOL_RTMP; - *port = 0; - playpath->av_len = 0; - playpath->av_val = NULL; - app->av_len = 0; - app->av_val = NULL; - - /* Old School Parsing */ - - /* look for usual :// pattern */ - p = strstr(url, "://"); - if (!p) { - RTMP_Log(RTMP_LOGERROR, "RTMP URL: No :// in url!"); - return FALSE; - } - { - int len = (int)(p - url); - - if (len == 4 && strncasecmp(url, "rtmp", 4) == 0) - *protocol = RTMP_PROTOCOL_RTMP; - else if (len == 5 && strncasecmp(url, "rtmpt", 5) == 0) - *protocol = RTMP_PROTOCOL_RTMPT; - else if (len == 5 && strncasecmp(url, "rtmps", 5) == 0) - *protocol = RTMP_PROTOCOL_RTMPS; - else if (len == 5 && strncasecmp(url, "rtmpe", 5) == 0) - *protocol = RTMP_PROTOCOL_RTMPE; - else if (len == 5 && strncasecmp(url, "rtmfp", 5) == 0) - *protocol = RTMP_PROTOCOL_RTMFP; - else if (len == 6 && strncasecmp(url, "rtmpte", 6) == 0) - *protocol = RTMP_PROTOCOL_RTMPTE; - else if (len == 6 && strncasecmp(url, "rtmpts", 6) == 0) - *protocol = RTMP_PROTOCOL_RTMPTS; - else { - RTMP_Log(RTMP_LOGWARNING, "Unknown protocol!\n"); - goto parsehost; - } - } - - RTMP_Log(RTMP_LOGDEBUG, "Parsed protocol: %d", *protocol); - -parsehost: - /* let's get the hostname */ - p += 3; - - /* check for sudden death */ - if (*p == 0) { - RTMP_Log(RTMP_LOGWARNING, "No hostname in URL!"); - return FALSE; - } - - end = p + strlen(p); - col = strchr(p, ':'); - ques = strchr(p, 
'?'); - slash = strchr(p, '/'); - - { - int hostlen; - if (slash) - hostlen = slash - p; - else - hostlen = end - p; - if (col && col - p < hostlen) - hostlen = col - p; - - if (hostlen < 256) { - host->av_val = p; - host->av_len = hostlen; - RTMP_Log(RTMP_LOGDEBUG, "Parsed host : %.*s", hostlen, host->av_val); - } else { - RTMP_Log(RTMP_LOGWARNING, "Hostname exceeds 255 characters!"); - } - - p += hostlen; - } - - /* get the port number if available */ - if (*p == ':') { - unsigned int p2; - p++; - p2 = atoi(p); - if (p2 > 65535) { - RTMP_Log(RTMP_LOGWARNING, "Invalid port number!"); - } else { - *port = p2; - } - } - - if (!slash) { - RTMP_Log(RTMP_LOGWARNING, "No application or playpath in URL!"); - return TRUE; - } - p = slash + 1; - - /** parse domain - + * rtmp://host:[port]/app/...?domain=a.com - + * use domain to replace host - + */ - - if (domainName != NULL && ques != NULL) { - char *domain = strstr(ques, "domain="); - if (domain) { - end = domain - 1; - domain += 7; //skip "domain=" - char *domain_end = strchr(domain, '&'); - int host_len = 0; - if (domain_end) { - host_len = domain_end - domain; - } else { - host_len = strlen(domain); - } - if (host_len < 256) { - domainName->av_val = domain; - domainName->av_len = host_len; - RTMP_Log(RTMP_LOGDEBUG, "Parsed host and domain : %.*s", host_len, host->av_val); - } - } - } - - { - /* parse application - * - * rtmp://host[:port]/app[/appinstance][/...] - * application = app[/appinstance] - */ - - char *slash2, *slash3 = NULL; - int applen, appnamelen; - - slash2 = strchr(p, '/'); - if (slash2) - slash3 = strchr(slash2 + 1, '/'); - - applen = end - p; /* ondemand, pass all parameters as app */ - appnamelen = applen; /* ondemand length */ - - if (ques && strstr(p, "slist=")) { /* whatever it is, the '?' 
and slist= means we need to use everything as app and parse plapath from slist= */ - appnamelen = ques - p; - } else if (strncmp(p, "ondemand/", 9) == 0) { - /* app = ondemand/foobar, only pass app=ondemand */ - applen = 8; - appnamelen = 8; - } else { /* app!=ondemand, so app is app[/appinstance] */ - if (slash3) - appnamelen = slash3 - p; - else if (slash2) - appnamelen = slash2 - p; - - applen = appnamelen; - } - - app->av_val = p; - app->av_len = applen; - RTMP_Log(RTMP_LOGDEBUG, "Parsed app : %.*s", applen, p); - - p += appnamelen; - } - - if (*p == '/') - p++; - - if (end - p) { - AVal av = {p, end - p}; - PILI_RTMP_ParsePlaypath(&av, playpath); - } - - return TRUE; -} - -/* - * Extracts playpath from RTMP URL. playpath is the file part of the - * URL, i.e. the part that comes after rtmp://host:port/app/ - * - * Returns the stream name in a format understood by FMS. The name is - * the playpath part of the URL with formatting depending on the stream - * type: - * - * mp4 streams: prepend "mp4:", remove extension - * mp3 streams: prepend "mp3:", remove extension - * flv streams: remove extension - */ -void PILI_RTMP_ParsePlaypath(AVal *in, AVal *out) { - int addMP4 = 0; - int addMP3 = 0; - int subExt = 0; - const char *playpath = in->av_val; - const char *temp, *q, *ext = NULL; - const char *ppstart = playpath; - char *streamname, *destptr, *p; - - int pplen = in->av_len; - - out->av_val = NULL; - out->av_len = 0; - - if ((*ppstart == '?') && - (temp = strstr(ppstart, "slist=")) != 0) { - ppstart = temp + 6; - pplen = strlen(ppstart); - - temp = strchr(ppstart, '&'); - if (temp) { - pplen = temp - ppstart; - } - } - - q = strchr(ppstart, '?'); - if (pplen >= 4) { - if (q) - ext = q - 4; - else - ext = &ppstart[pplen - 4]; - if ((strncmp(ext, ".f4v", 4) == 0) || - (strncmp(ext, ".mp4", 4) == 0)) { - addMP4 = 1; - subExt = 1; - /* Only remove .flv from rtmp URL, not slist params */ - } else if ((ppstart == playpath) && - (strncmp(ext, ".flv", 4) == 0)) { - 
subExt = 1; - } else if (strncmp(ext, ".mp3", 4) == 0) { - addMP3 = 1; - subExt = 1; - } - } - - streamname = (char *)malloc((pplen + 4 + 1) * sizeof(char)); - if (!streamname) - return; - - destptr = streamname; - if (addMP4) { - if (strncmp(ppstart, "mp4:", 4)) { - strcpy(destptr, "mp4:"); - destptr += 4; - } else { - subExt = 0; - } - } else if (addMP3) { - if (strncmp(ppstart, "mp3:", 4)) { - strcpy(destptr, "mp3:"); - destptr += 4; - } else { - subExt = 0; - } - } - - for (p = (char *)ppstart; pplen > 0;) { - /* skip extension */ - if (subExt && p == ext) { - p += 4; - pplen -= 4; - continue; - } - if (*p == '%') { - unsigned int c; - sscanf(p + 1, "%02x", &c); - *destptr++ = c; - pplen -= 3; - p += 3; - } else { - *destptr++ = *p++; - pplen--; - } - } - *destptr = '\0'; - - out->av_val = streamname; - out->av_len = destptr - streamname; -} - -int PILI_RTMP_ParseURL(const char *url, int *protocol, AVal *host, - unsigned int *port, AVal *playpath, AVal *app) { - return PILI_RTMP_ParseURL2(url, protocol, host, port, playpath, app, NULL); -} diff --git a/LFLiveKit/publish/pili-librtmp/rtmp.c b/LFLiveKit/publish/pili-librtmp/rtmp.c deleted file mode 100755 index c7e0df79..00000000 --- a/LFLiveKit/publish/pili-librtmp/rtmp.c +++ /dev/null @@ -1,4331 +0,0 @@ -/* - * Copyright (C) 2005-2008 Team XBMC - * http://www.xbmc.org - * Copyright (C) 2008-2009 Andrej Stepanchuk - * Copyright (C) 2009-2010 Howard Chu - * - * This file is part of librtmp. - * - * libPILI_RTMP is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1, - * or (at your option) any later version. - * - * libPILI_RTMP is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public License - * along with libPILI_RTMP see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * http://www.gnu.org/copyleft/lgpl.html - */ - -#include -#include -#include -#include -#include -#include - -#include "log.h" -#include "rtmp_sys.h" -#include "time.h" - -#ifdef CRYPTO -#ifdef USE_POLARSSL -#include -#elif defined(USE_GNUTLS) -#include -#else /* USE_OPENSSL */ -#include -#include -#endif -TLS_CTX RTMP_TLS_ctx; -#endif - -#define RTMP_SIG_SIZE 1536 -#define RTMP_LARGE_HEADER_SIZE 12 - -static const int packetSize[] = {12, 8, 4, 1}; - -int PILI_RTMP_ctrlC; -static char reqid[30]; - -const char PILI_RTMPProtocolStrings[][7] = { - "RTMP", - "RTMPT", - "RTMPE", - "RTMPTE", - "RTMPS", - "RTMPTS", - "", - "", - "RTMFP"}; - -const char PILI_RTMPProtocolStringsLower[][7] = { - "rtmp", - "rtmpt", - "rtmpe", - "rtmpte", - "rtmps", - "rtmpts", - "", - "", - "rtmfp"}; - -static const char *RTMPT_cmds[] = { - "open", - "send", - "idle", - "close"}; - -typedef enum { - RTMPT_OPEN = 0, - RTMPT_SEND, - RTMPT_IDLE, - RTMPT_CLOSE -} RTMPTCmd; - -static int DumpMetaData(AMFObject *obj); -static int HandShake(PILI_RTMP *r, int FP9HandShake, RTMPError *error); -static int SocksNegotiate(PILI_RTMP *r, RTMPError *error); - -static int SendConnectPacket(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error); -static int SendCheckBW(PILI_RTMP *r, RTMPError *error); -static int SendCheckBWResult(PILI_RTMP *r, double txn, RTMPError *error); -static int SendDeleteStream(PILI_RTMP *r, double dStreamId, RTMPError *error); -static int SendFCSubscribe(PILI_RTMP *r, AVal *subscribepath, RTMPError *error); -static int SendPlay(PILI_RTMP *r, RTMPError *error); -static int SendBytesReceived(PILI_RTMP *r, RTMPError *error); - -#if 0 /* unused */ -static int SendBGHasStream(PILI_RTMP *r, double dId, AVal *playpath); -#endif - -static int 
HandleInvoke(PILI_RTMP *r, const char *body, unsigned int nBodySize); -static int HandleMetadata(PILI_RTMP *r, char *body, unsigned int len); -static void HandleChangeChunkSize(PILI_RTMP *r, const PILI_RTMPPacket *packet); -static void HandleAudio(PILI_RTMP *r, const PILI_RTMPPacket *packet); -static void HandleVideo(PILI_RTMP *r, const PILI_RTMPPacket *packet); -static void HandleCtrl(PILI_RTMP *r, const PILI_RTMPPacket *packet); -static void HandleServerBW(PILI_RTMP *r, const PILI_RTMPPacket *packet); -static void HandleClientBW(PILI_RTMP *r, const PILI_RTMPPacket *packet); - -static int ReadN(PILI_RTMP *r, char *buffer, int n); -static int WriteN(PILI_RTMP *r, const char *buffer, int n, RTMPError *error); - -static void DecodeTEA(AVal *key, AVal *text); - -static int HTTP_Post(PILI_RTMP *r, RTMPTCmd cmd, const char *buf, int len); -static int HTTP_read(PILI_RTMP *r, int fill); - -#ifndef _WIN32 -static int clk_tck; -#endif - -#ifdef CRYPTO -#include "handshake.h" -#endif - -uint32_t - PILI_RTMP_GetTime() { -#ifdef _DEBUG - return 0; -#elif defined(_WIN32) - return timeGetTime(); -#else - struct tms t; - if (!clk_tck) clk_tck = sysconf(_SC_CLK_TCK); - return times(&t) * 1000 / clk_tck; -#endif -} - -void PILI_RTMP_UserInterrupt() { - PILI_RTMP_ctrlC = TRUE; -} - -void PILI_RTMPPacket_Reset(PILI_RTMPPacket *p) { - p->m_headerType = 0; - p->m_packetType = 0; - p->m_nChannel = 0; - p->m_nTimeStamp = 0; - p->m_nInfoField2 = 0; - p->m_hasAbsTimestamp = FALSE; - p->m_nBodySize = 0; - p->m_nBytesRead = 0; -} - -int PILI_RTMPPacket_Alloc(PILI_RTMPPacket *p, int nSize) { - char *ptr = calloc(1, nSize + RTMP_MAX_HEADER_SIZE); - if (!ptr) - return FALSE; - p->m_body = ptr + RTMP_MAX_HEADER_SIZE; - p->m_nBytesRead = 0; - return TRUE; -} - -void PILI_RTMPPacket_Free(PILI_RTMPPacket *p) { - if (p->m_body) { - free(p->m_body - RTMP_MAX_HEADER_SIZE); - p->m_body = NULL; - } -} - -void PILI_RTMPPacket_Dump(PILI_RTMPPacket *p) { - RTMP_Log(RTMP_LOGDEBUG, - "PILI_RTMP PACKET: 
packet type: 0x%02x. channel: 0x%02x. info 1: %d info 2: %d. Body size: %lu. body: 0x%02x", - p->m_packetType, p->m_nChannel, p->m_nTimeStamp, p->m_nInfoField2, - p->m_nBodySize, p->m_body ? (unsigned char)p->m_body[0] : 0); -} - -int PILI_RTMP_LibVersion() { - return RTMP_LIB_VERSION; -} - -void PILI_RTMP_TLS_Init() { -#ifdef CRYPTO -#ifdef USE_POLARSSL - /* Do this regardless of NO_SSL, we use havege for rtmpe too */ - RTMP_TLS_ctx = calloc(1, sizeof(struct tls_ctx)); - havege_init(&RTMP_TLS_ctx->hs); -#elif defined(USE_GNUTLS) && !defined(NO_SSL) - /* Technically we need to initialize libgcrypt ourselves if - * we're not going to call gnutls_global_init(). Ignoring this - * for now. - */ - gnutls_global_init(); - RTMP_TLS_ctx = malloc(sizeof(struct tls_ctx)); - gnutls_certificate_allocate_credentials(&RTMP_TLS_ctx->cred); - gnutls_priority_init(&RTMP_TLS_ctx->prios, "NORMAL", NULL); - gnutls_certificate_set_x509_trust_file(RTMP_TLS_ctx->cred, - "ca.pem", GNUTLS_X509_FMT_PEM); -#elif !defined(NO_SSL) /* USE_OPENSSL */ - /* libcrypto doesn't need anything special */ - SSL_load_error_strings(); - SSL_library_init(); - OpenSSL_add_all_digests(); - RTMP_TLS_ctx = SSL_CTX_new(SSLv23_method()); - SSL_CTX_set_options(RTMP_TLS_ctx, SSL_OP_ALL); - SSL_CTX_set_default_verify_paths(RTMP_TLS_ctx); -#endif -#endif -} - -PILI_RTMP * - PILI_RTMP_Alloc() { - return calloc(1, sizeof(PILI_RTMP)); -} - -void PILI_RTMP_Free(PILI_RTMP *r) { - r->m_errorCallback = NULL; - r->m_userData = NULL; - RTMPError_Free(r->m_error); - r->m_error = NULL; - - free(r); -} - -void PILI_RTMP_Init(PILI_RTMP *r) { -#ifdef CRYPTO - if (!RTMP_TLS_ctx) - RTMP_TLS_Init(); -#endif - - memset(r, 0, sizeof(PILI_RTMP)); - r->m_sb.sb_socket = -1; - r->m_inChunkSize = RTMP_DEFAULT_CHUNKSIZE; - r->m_outChunkSize = RTMP_DEFAULT_CHUNKSIZE; - r->m_nBufferMS = 30000; - r->m_nClientBW = 2500000; - r->m_nClientBW2 = 2; - r->m_nServerBW = 2500000; - r->m_fAudioCodecs = 3191.0; - r->m_fVideoCodecs = 252.0; - 
r->Link.timeout = 10; - r->Link.send_timeout = 10; - r->Link.swfAge = 30; - - r->m_errorCallback = NULL; - r->m_error = NULL; - r->m_userData = NULL; - r->m_is_closing = 0; - r->m_tcp_nodelay = 1; - - r->m_connCallback = NULL; - r->ip = 0; -} - -void PILI_RTMP_EnableWrite(PILI_RTMP *r) { - r->Link.protocol |= RTMP_FEATURE_WRITE; -} - -double - PILI_RTMP_GetDuration(PILI_RTMP *r) { - return r->m_fDuration; -} - -int PILI_RTMP_IsConnected(PILI_RTMP *r) { - return r->m_sb.sb_socket != -1; -} - -int PILI_RTMP_Socket(PILI_RTMP *r) { - return r->m_sb.sb_socket; -} - -int PILI_RTMP_IsTimedout(PILI_RTMP *r) { - return r->m_sb.sb_timedout; -} - -void PILI_RTMP_SetBufferMS(PILI_RTMP *r, int size) { - r->m_nBufferMS = size; -} - -void PILI_RTMP_UpdateBufferMS(PILI_RTMP *r, RTMPError *error) { - PILI_RTMP_SendCtrl(r, 3, r->m_stream_id, r->m_nBufferMS, error); -} - -#undef OSS -#ifdef _WIN32 -#define OSS "WIN" -#elif defined(__sun__) -#define OSS "SOL" -#elif defined(__APPLE__) -#define OSS "MAC" -#elif defined(__linux__) -#define OSS "LNX" -#else -#define OSS "GNU" -#endif -#define DEF_VERSTR OSS " 10,0,32,18" -static const char DEFAULT_FLASH_VER[] = DEF_VERSTR; -const AVal RTMP_DefaultFlashVer = - {(char *)DEFAULT_FLASH_VER, sizeof(DEFAULT_FLASH_VER) - 1}; - -void PILI_RTMP_SetupStream(PILI_RTMP *r, - int protocol, - AVal *host, - unsigned int port, - AVal *sockshost, - AVal *playpath, - AVal *tcUrl, - AVal *swfUrl, - AVal *pageUrl, - AVal *app, - AVal *auth, - AVal *swfSHA256Hash, - uint32_t swfSize, - AVal *flashVer, - AVal *subscribepath, - int dStart, - int dStop, int bLiveStream, long int timeout) { - RTMP_Log(RTMP_LOGDEBUG, "Protocol : %s", PILI_RTMPProtocolStrings[protocol & 7]); - RTMP_Log(RTMP_LOGDEBUG, "Hostname : %.*s", host->av_len, host->av_val); - RTMP_Log(RTMP_LOGDEBUG, "Port : %d", port); - RTMP_Log(RTMP_LOGDEBUG, "Playpath : %s", playpath->av_val); - - if (tcUrl && tcUrl->av_val) - RTMP_Log(RTMP_LOGDEBUG, "tcUrl : %s", tcUrl->av_val); - if (swfUrl && 
swfUrl->av_val) - RTMP_Log(RTMP_LOGDEBUG, "swfUrl : %s", swfUrl->av_val); - if (pageUrl && pageUrl->av_val) - RTMP_Log(RTMP_LOGDEBUG, "pageUrl : %s", pageUrl->av_val); - if (app && app->av_val) - RTMP_Log(RTMP_LOGDEBUG, "app : %.*s", app->av_len, app->av_val); - if (auth && auth->av_val) - RTMP_Log(RTMP_LOGDEBUG, "auth : %s", auth->av_val); - if (subscribepath && subscribepath->av_val) - RTMP_Log(RTMP_LOGDEBUG, "subscribepath : %s", subscribepath->av_val); - if (flashVer && flashVer->av_val) - RTMP_Log(RTMP_LOGDEBUG, "flashVer : %s", flashVer->av_val); - if (dStart > 0) - RTMP_Log(RTMP_LOGDEBUG, "StartTime : %d msec", dStart); - if (dStop > 0) - RTMP_Log(RTMP_LOGDEBUG, "StopTime : %d msec", dStop); - - RTMP_Log(RTMP_LOGDEBUG, "live : %s", bLiveStream ? "yes" : "no"); - RTMP_Log(RTMP_LOGDEBUG, "timeout : %d sec", timeout); - -#ifdef CRYPTO - if (swfSHA256Hash != NULL && swfSize > 0) { - memcpy(r->Link.SWFHash, swfSHA256Hash->av_val, sizeof(r->Link.SWFHash)); - r->Link.SWFSize = swfSize; - RTMP_Log(RTMP_LOGDEBUG, "SWFSHA256:"); - RTMP_LogHex(RTMP_LOGDEBUG, r->Link.SWFHash, sizeof(r->Link.SWFHash)); - RTMP_Log(RTMP_LOGDEBUG, "SWFSize : %lu", r->Link.SWFSize); - } else { - r->Link.SWFSize = 0; - } -#endif - - if (sockshost->av_len) { - const char *socksport = strchr(sockshost->av_val, ':'); - char *hostname = strdup(sockshost->av_val); - - if (socksport) - hostname[socksport - sockshost->av_val] = '\0'; - r->Link.sockshost.av_val = hostname; - r->Link.sockshost.av_len = strlen(hostname); - - r->Link.socksport = socksport ? 
atoi(socksport + 1) : 1080; - RTMP_Log(RTMP_LOGDEBUG, "Connecting via SOCKS proxy: %s:%d", r->Link.sockshost.av_val, - r->Link.socksport); - } else { - r->Link.sockshost.av_val = NULL; - r->Link.sockshost.av_len = 0; - r->Link.socksport = 0; - } - - if (tcUrl && tcUrl->av_len) - r->Link.tcUrl = *tcUrl; - if (swfUrl && swfUrl->av_len) - r->Link.swfUrl = *swfUrl; - if (pageUrl && pageUrl->av_len) - r->Link.pageUrl = *pageUrl; - if (app && app->av_len) - r->Link.app = *app; - if (auth && auth->av_len) { - r->Link.auth = *auth; - r->Link.lFlags |= RTMP_LF_AUTH; - } - if (flashVer && flashVer->av_len) - r->Link.flashVer = *flashVer; - else - r->Link.flashVer = RTMP_DefaultFlashVer; - if (subscribepath && subscribepath->av_len) - r->Link.subscribepath = *subscribepath; - r->Link.seekTime = dStart; - r->Link.stopTime = dStop; - if (bLiveStream) - r->Link.lFlags |= RTMP_LF_LIVE; - r->Link.timeout = timeout; - - r->Link.protocol = protocol; - r->Link.hostname = *host; - r->Link.port = port; - r->Link.playpath = *playpath; - - if (r->Link.port == 0) { - if (protocol & RTMP_FEATURE_SSL) - r->Link.port = 443; - else if (protocol & RTMP_FEATURE_HTTP) - r->Link.port = 80; - else - r->Link.port = 1935; - } -} - -enum { OPT_STR = 0, - OPT_INT, - OPT_BOOL, - OPT_CONN }; -static const char *optinfo[] = { - "string", "integer", "boolean", "AMF"}; - -#define OFF(x) offsetof(struct PILI_RTMP, x) - -static struct urlopt { - AVal name; - off_t off; - int otype; - int omisc; - char *use; -} options[] = { - {AVC("socks"), OFF(Link.sockshost), OPT_STR, 0, - "Use the specified SOCKS proxy"}, - {AVC("app"), OFF(Link.app), OPT_STR, 0, - "Name of target app on server"}, - {AVC("tcUrl"), OFF(Link.tcUrl), OPT_STR, 0, - "URL to played stream"}, - {AVC("pageUrl"), OFF(Link.pageUrl), OPT_STR, 0, - "URL of played media's web page"}, - {AVC("swfUrl"), OFF(Link.swfUrl), OPT_STR, 0, - "URL to player SWF file"}, - {AVC("flashver"), OFF(Link.flashVer), OPT_STR, 0, - "Flash version string (default " 
DEF_VERSTR ")"}, - {AVC("conn"), OFF(Link.extras), OPT_CONN, 0, - "Append arbitrary AMF data to Connect message"}, - {AVC("playpath"), OFF(Link.playpath), OPT_STR, 0, - "Path to target media on server"}, - {AVC("playlist"), OFF(Link.lFlags), OPT_BOOL, RTMP_LF_PLST, - "Set playlist before play command"}, - {AVC("live"), OFF(Link.lFlags), OPT_BOOL, RTMP_LF_LIVE, - "Stream is live, no seeking possible"}, - {AVC("subscribe"), OFF(Link.subscribepath), OPT_STR, 0, - "Stream to subscribe to"}, - {AVC("token"), OFF(Link.token), OPT_STR, 0, - "Key for SecureToken response"}, - {AVC("swfVfy"), OFF(Link.lFlags), OPT_BOOL, RTMP_LF_SWFV, - "Perform SWF Verification"}, - {AVC("swfAge"), OFF(Link.swfAge), OPT_INT, 0, - "Number of days to use cached SWF hash"}, - {AVC("start"), OFF(Link.seekTime), OPT_INT, 0, - "Stream start position in milliseconds"}, - {AVC("stop"), OFF(Link.stopTime), OPT_INT, 0, - "Stream stop position in milliseconds"}, - {AVC("buffer"), OFF(m_nBufferMS), OPT_INT, 0, - "Buffer time in milliseconds"}, - {AVC("timeout"), OFF(Link.timeout), OPT_INT, 0, - "Session timeout in seconds"}, - {{NULL, 0}, 0, 0}}; - -static const AVal truth[] = { - AVC("1"), - AVC("on"), - AVC("yes"), - AVC("true"), - {0, 0}}; - -static void RTMP_OptUsage() { - int i; - - RTMP_Log(RTMP_LOGERROR, "Valid PILI_RTMP options are:\n"); - for (i = 0; options[i].name.av_len; i++) { - RTMP_Log(RTMP_LOGERROR, "%10s %-7s %s\n", options[i].name.av_val, - optinfo[options[i].otype], options[i].use); - } -} - -static int - parseAMF(AMFObject *obj, AVal *av, int *depth) { - AMFObjectProperty prop = {{0, 0}}; - int i; - char *p, *arg = av->av_val; - - if (arg[1] == ':') { - p = (char *)arg + 2; - switch (arg[0]) { - case 'B': - prop.p_type = AMF_BOOLEAN; - prop.p_vu.p_number = atoi(p); - break; - case 'S': - prop.p_type = AMF_STRING; - prop.p_vu.p_aval.av_val = p; - prop.p_vu.p_aval.av_len = av->av_len - (p - arg); - break; - case 'N': - prop.p_type = AMF_NUMBER; - prop.p_vu.p_number = strtod(p, NULL); 
- break; - case 'Z': - prop.p_type = AMF_NULL; - break; - case 'O': - i = atoi(p); - if (i) { - prop.p_type = AMF_OBJECT; - } else { - (*depth)--; - return 0; - } - break; - default: - return -1; - } - } else if (arg[2] == ':' && arg[0] == 'N') { - p = strchr(arg + 3, ':'); - if (!p || !*depth) - return -1; - prop.p_name.av_val = (char *)arg + 3; - prop.p_name.av_len = p - (arg + 3); - - p++; - switch (arg[1]) { - case 'B': - prop.p_type = AMF_BOOLEAN; - prop.p_vu.p_number = atoi(p); - break; - case 'S': - prop.p_type = AMF_STRING; - prop.p_vu.p_aval.av_val = p; - prop.p_vu.p_aval.av_len = av->av_len - (p - arg); - break; - case 'N': - prop.p_type = AMF_NUMBER; - prop.p_vu.p_number = strtod(p, NULL); - break; - case 'O': - prop.p_type = AMF_OBJECT; - break; - default: - return -1; - } - } else - return -1; - - if (*depth) { - AMFObject *o2; - for (i = 0; i < *depth; i++) { - o2 = &obj->o_props[obj->o_num - 1].p_vu.p_object; - obj = o2; - } - } - AMF_AddProp(obj, &prop); - if (prop.p_type == AMF_OBJECT) - (*depth)++; - return 0; -} - -int RTMP_SetOpt(PILI_RTMP *r, const AVal *opt, AVal *arg, RTMPError *error) { - int i; - void *v; - - for (i = 0; options[i].name.av_len; i++) { - if (opt->av_len != options[i].name.av_len) continue; - if (strcasecmp(opt->av_val, options[i].name.av_val)) continue; - v = (char *)r + options[i].off; - switch (options[i].otype) { - case OPT_STR: { - AVal *aptr = v; - *aptr = *arg; - } break; - case OPT_INT: { - long l = strtol(arg->av_val, NULL, 0); - *(int *)v = l; - } break; - case OPT_BOOL: { - int j, fl; - fl = *(int *)v; - for (j = 0; truth[j].av_len; j++) { - if (arg->av_len != truth[j].av_len) continue; - if (strcasecmp(arg->av_val, truth[j].av_val)) continue; - fl |= options[i].omisc; - break; - } - *(int *)v = fl; - } break; - case OPT_CONN: - if (parseAMF(&r->Link.extras, arg, &r->Link.edepth)) - return FALSE; - break; - } - break; - } - if (!options[i].name.av_len) { - if (error) { - char msg[100]; - memset(msg, 0, 100); - 
strcat(msg, "Unknown option "); - strcat(msg, opt->av_val); - RTMPError_Alloc(error, strlen(msg)); - error->code = RTMPErrorUnknowOption; - strcpy(error->message, msg); - } - - RTMP_Log(RTMP_LOGERROR, "Unknown option %s", opt->av_val); - RTMP_OptUsage(); - return FALSE; - } - - return TRUE; -} - -int PILI_RTMP_SetupURL(PILI_RTMP *r, const char *url, RTMPError *error) { - AVal opt, arg; - char *p1, *p2, *ptr = strchr(url, ' '); - int ret, len; - unsigned int port = 0; - - if (ptr) - *ptr = '\0'; - - len = (int)strlen(url); - ret = PILI_RTMP_ParseURL2(url, &r->Link.protocol, &r->Link.hostname, - &port, &r->Link.playpath0, &r->Link.app, &r->Link.domain); - if (!ret) - return ret; - r->Link.port = port; - r->Link.playpath = r->Link.playpath0; - - while (ptr) { - *ptr++ = '\0'; - p1 = ptr; - p2 = strchr(p1, '='); - if (!p2) - break; - opt.av_val = p1; - opt.av_len = p2 - p1; - *p2++ = '\0'; - arg.av_val = p2; - ptr = strchr(p2, ' '); - if (ptr) { - *ptr = '\0'; - arg.av_len = ptr - p2; - /* skip repeated spaces */ - while (ptr[1] == ' ') - *ptr++ = '\0'; - } else { - arg.av_len = strlen(p2); - } - - /* unescape */ - port = arg.av_len; - for (p1 = p2; port > 0;) { - if (*p1 == '\\') { - unsigned int c; - if (port < 3) - return FALSE; - sscanf(p1 + 1, "%02x", &c); - *p2++ = c; - port -= 3; - p1 += 3; - } else { - *p2++ = *p1++; - port--; - } - } - arg.av_len = p2 - arg.av_val; - - ret = RTMP_SetOpt(r, &opt, &arg, error); - if (!ret) - return ret; - } - - if (!r->Link.tcUrl.av_len) { - r->Link.tcUrl.av_val = url; - if (r->Link.app.av_len) { - AVal *domain = &r->Link.domain; - if (domain->av_len == 0 && r->Link.app.av_val < url + len) { - /* if app is part of original url, just use it */ - r->Link.tcUrl.av_len = r->Link.app.av_len + (r->Link.app.av_val - url); - } else { - if (domain->av_len == 0) { - domain = &r->Link.hostname; - } - if (r->Link.port = 0) { - r->Link.port = 1935; - } - len = domain->av_len + r->Link.app.av_len + sizeof("rtmpte://:65535/"); - 
r->Link.tcUrl.av_val = malloc(len); - r->Link.tcUrl.av_len = snprintf(r->Link.tcUrl.av_val, len, - "%s://%.*s:%d/%.*s", - PILI_RTMPProtocolStringsLower[r->Link.protocol], - domain->av_len, domain->av_val, - r->Link.port, - r->Link.app.av_len, r->Link.app.av_val); - r->Link.lFlags |= RTMP_LF_FTCU; - } - } else { - r->Link.tcUrl.av_len = strlen(url); - } - } - -#ifdef CRYPTO - if ((r->Link.lFlags & RTMP_LF_SWFV) && r->Link.swfUrl.av_len) - RTMP_HashSWF(r->Link.swfUrl.av_val, &r->Link.SWFSize, - (unsigned char *)r->Link.SWFHash, r->Link.swfAge); -#endif - - if (r->Link.port == 0) { - if (r->Link.protocol & RTMP_FEATURE_SSL) - r->Link.port = 443; - else if (r->Link.protocol & RTMP_FEATURE_HTTP) - r->Link.port = 80; - else - r->Link.port = 1935; - } - return TRUE; -} - -static int add_addr_info(PILI_RTMP *r, struct addrinfo *hints, struct addrinfo **ai, AVal *host, int port, RTMPError *error) { - char *hostname; - int ret = TRUE; - if (host->av_val[host->av_len]) { - hostname = malloc(host->av_len + 1); - memcpy(hostname, host->av_val, host->av_len); - hostname[host->av_len] = '\0'; - } else { - hostname = host->av_val; - } - - struct addrinfo *cur_ai; - char portstr[10]; - snprintf(portstr, sizeof(portstr), "%d", port); - int addrret = getaddrinfo(hostname, portstr, hints, ai); - if (addrret != 0) { - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "Problem accessing the DNS. addr: "); - strcat(msg, hostname); - - RTMPError_Alloc(error, strlen(msg)); - error->code = RTMPErrorAccessDNSFailed; - strcpy(error->message, msg); - RTMP_Log(RTMP_LOGERROR, "Problem accessing the DNS. 
%d (addr: %s) (port: %s)", addrret, hostname, portstr); - ret = FALSE; - } - - if (hostname != host->av_val) { - free(hostname); - } - return ret; -} - -int PILI_RTMP_Connect0(PILI_RTMP *r, struct addrinfo *ai, unsigned short port, RTMPError *error) { - r->m_sb.sb_timedout = FALSE; - r->m_pausing = 0; - r->m_fDuration = 0.0; - - r->m_sb.sb_socket = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol); - if (ai->ai_family == AF_INET6) { - struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)ai->ai_addr; - in6->sin6_port = htons(port); - } - if (r->m_sb.sb_socket != -1) { -#ifdef RTMP_FEATURE_NONBLOCK - /* set socket non block */ - { - int flags = fcntl(r->m_sb.sb_socket, F_GETFL, 0); - if (fcntl(r->m_sb.sb_socket, F_SETFL, flags | O_NONBLOCK) < 0) { - RTMP_Log(RTMP_LOGERROR, "%s, set socket non block failed", __FUNCTION__); - PILI_RTMP_Close(r, NULL); - return FALSE; - } - } -#endif - if (connect(r->m_sb.sb_socket, ai->ai_addr, ai->ai_addrlen) < 0) { - int err = GetSockError(); -#ifdef RTMP_FEATURE_NONBLOCK - if ((err == EINTR && !PILI_RTMP_ctrlC) || - err == EINPROGRESS) { - SET_RCVTIMEO(tv, r->Link.timeout); - fd_set wfds; - while (1) { - FD_ZERO(&wfds); - FD_SET(r->m_sb.sb_socket, &wfds); - int ret = select(r->m_sb.sb_socket + 1, NULL, &wfds, NULL, &tv); - if (ret < 0) { - int sockerr = GetSockError(); - RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP connect select error %d, %s", __FUNCTION__, - sockerr, strerror(sockerr)); - if (sockerr == EINTR && !PILI_RTMP_ctrlC) - continue; - - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "PILI_RTMP connect select error. "); - strcat(msg, strerror(sockerr)); - RTMPError_Message(error, RTMPErrorFailedToConnectSocket, msg); - PILI_RTMP_Close(r, error); - RTMPError_Free(error); - return FALSE; - } else if (ret == 0) { - RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP connect error select timeout", __FUNCTION__); - RTMPError_Message(error, RTMPErrorSocketTimeout, "PILI_RTMP connect error. 
select timeout: "); - PILI_RTMP_Close(r, error); - RTMPError_Free(error); - return FALSE; - } else if (!FD_ISSET(r->m_sb.sb_socket, &wfds)) { - PILI_RTMP_Close(r, error); - RTMPError_Message(error, RTMPErrorFailedToConnectSocket, "PILI_RTMP connect error"); - RTMPError_Free(error); - return FALSE; - } else { - RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP connect success", __FUNCTION__); - break; - } - } - } else { -#endif - - if (error) { - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "Failed to connect socket. "); - strcat(msg, strerror(err)); - RTMPError_Alloc(error, strlen(msg)); - error->code = RTMPErrorFailedToConnectSocket; - strcpy(error->message, msg); - } - - RTMP_Log(RTMP_LOGERROR, "%s, failed to connect socket. %d (%s)", - __FUNCTION__, err, strerror(err)); - - PILI_RTMP_Close(r, NULL); - return FALSE; -#ifdef RTMP_FEATURE_NONBLOCK - } -#endif - } - - if (r->Link.socksport) { - RTMP_Log(RTMP_LOGDEBUG, "%s ... SOCKS negotiation", __FUNCTION__); - if (!SocksNegotiate(r, error)) { - if (error) { - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "Socks negotiation failed."); - RTMPError_Alloc(error, strlen(msg)); - error->code = RTMPErrorSocksNegotiationFailed; - strcpy(error->message, msg); - } - - RTMP_Log(RTMP_LOGERROR, "%s, SOCKS negotiation failed.", __FUNCTION__); - PILI_RTMP_Close(r, NULL); - return FALSE; - } - } - } else { - int err = GetSockError(); - - if (error) { - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "Failed to create socket. "); - strcat(msg, strerror(err)); - RTMPError_Alloc(error, strlen(msg)); - error->code = RTMPErrorFailedToCreateSocket; - strcpy(error->message, msg); - } - - RTMP_Log(RTMP_LOGERROR, "%s, failed to create socket. 
Error: %d (%s)", __FUNCTION__, err, strerror(err)); - - return FALSE; - } - -#if RTMP_FEATURE_NONBLOCK - -#else - /* set receive timeout */ - { - SET_RCVTIMEO(tv, r->Link.timeout); - if (setsockopt(r->m_sb.sb_socket, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(tv))) { - RTMP_Log(RTMP_LOGERROR, "%s, Setting socket recieve timeout to %ds failed!", - __FUNCTION__, r->Link.timeout); - } - } - - /* set send timeout*/ - { - struct timeval timeout; - timeout.tv_sec = r->Link.send_timeout; - timeout.tv_usec = 0; - - if (setsockopt(r->m_sb.sb_socket, SOL_SOCKET, SO_SNDTIMEO, (char *)&timeout, sizeof(timeout))) { - RTMP_Log(RTMP_LOGERROR, "%s, Setting socket send timeout to %ds failed!", - __FUNCTION__, r->Link.timeout); - } - } -#endif - - /* ignore sigpipe */ - int kOne = 1; -#ifdef __linux - setsockopt(r->m_sb.sb_socket, SOL_SOCKET, MSG_NOSIGNAL, &kOne, sizeof(kOne)); -#else - setsockopt(r->m_sb.sb_socket, SOL_SOCKET, SO_NOSIGPIPE, &kOne, sizeof(kOne)); -#endif - if (r->m_tcp_nodelay) { - int on = 1; - setsockopt(r->m_sb.sb_socket, IPPROTO_TCP, TCP_NODELAY, (char *)&on, sizeof(on)); - } - - return TRUE; -} - -int PILI_RTMP_Connect1(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error) { - if (r->Link.protocol & RTMP_FEATURE_SSL) { -#if defined(CRYPTO) && !defined(NO_SSL) - TLS_client(RTMP_TLS_ctx, r->m_sb.sb_ssl); - TLS_setfd(r->m_sb.sb_ssl, r->m_sb.sb_socket); - if (TLS_connect(r->m_sb.sb_ssl) < 0) { - if (error) { - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "TLS_Connect failed."); - RTMPError_Alloc(error, strlen(msg)); - error->code = RTMPErrorTLSConnectFailed; - strcpy(error->message, msg); - } - - RTMP_Log(RTMP_LOGERROR, "%s, TLS_Connect failed", __FUNCTION__); - RTMP_Close(r, NULL); - return FALSE; - } -#else - if (error) { - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "No SSL/TLS support."); - RTMPError_Alloc(error, strlen(msg)); - error->code = RTMPErrorNoSSLOrTLSSupport; - strcpy(error->message, msg); - } - - RTMP_Log(RTMP_LOGERROR, "%s, no 
SSL/TLS support", __FUNCTION__); - PILI_RTMP_Close(r, NULL); - return FALSE; - -#endif - } - if (r->Link.protocol & RTMP_FEATURE_HTTP) { - r->m_msgCounter = 1; - r->m_clientID.av_val = NULL; - r->m_clientID.av_len = 0; - HTTP_Post(r, RTMPT_OPEN, "", 1); - HTTP_read(r, 1); - r->m_msgCounter = 0; - } - RTMP_Log(RTMP_LOGDEBUG, "%s, ... connected, handshaking", __FUNCTION__); - if (!HandShake(r, TRUE, error)) { - if (error) { - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "Handshake failed."); - RTMPError_Alloc(error, strlen(msg)); - error->code = RTMPErrorHandshakeFailed; - strcpy(error->message, msg); - } - - RTMP_Log(RTMP_LOGERROR, "%s, handshake failed.", __FUNCTION__); - PILI_RTMP_Close(r, NULL); - return FALSE; - } - RTMP_Log(RTMP_LOGDEBUG, "%s, handshaked", __FUNCTION__); - - if (!SendConnectPacket(r, cp, error)) { - if (error) { - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "PILI_RTMP connect failed."); - RTMPError_Alloc(error, strlen(msg)); - error->code = RTMPErrorRTMPConnectFailed; - strcpy(error->message, msg); - } - RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP connect failed.", __FUNCTION__); - PILI_RTMP_Close(r, NULL); - return FALSE; - } - return TRUE; -} - -int PILI_RTMP_Connect(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error) { - //获取hub - char hub[5] = {0}; - if (r->Link.app.av_len>4) { - strncpy(hub, r->Link.app.av_val,4); - }else if(r->Link.app.av_len>0){ - strncpy(hub, r->Link.app.av_val,r->Link.app.av_len); - } - - if (strlen(hub)>0) { - time_t nowtime; - time ( &nowtime ); - char tempTime[20]={0}; - sprintf(tempTime,"%ld",nowtime); - reqid[0] = '\0'; - strncat(reqid, hub, strlen(hub)); - strncat(reqid, tempTime, strlen(tempTime)); - } - - struct PILI_CONNECTION_TIME conn_time; - if (!r->Link.hostname.av_len) - return FALSE; - - struct addrinfo hints = {0}, *ai, *cur_ai; - - hints.ai_family = AF_UNSPEC; - hints.ai_socktype = SOCK_STREAM; - - unsigned short port; - if (r->Link.socksport) { - port = r->Link.socksport; - /* Connect via 
SOCKS */ - if (!add_addr_info(r, &hints, &ai, &r->Link.sockshost, r->Link.socksport, error)) { - return FALSE; - } - } else { - port = r->Link.port; - /* Connect directly */ - if (!add_addr_info(r, &hints, &ai, &r->Link.hostname, r->Link.port, error)) { - return FALSE; - } - } - r->ip = 0; //useless for ipv6 - cur_ai = ai; - - int t1 = PILI_RTMP_GetTime(); - if (!PILI_RTMP_Connect0(r, cur_ai, port, error)) { - freeaddrinfo(ai); - return FALSE; - } - conn_time.connect_time = PILI_RTMP_GetTime() - t1; - r->m_bSendCounter = TRUE; - - int t2 = PILI_RTMP_GetTime(); - int ret = PILI_RTMP_Connect1(r, cp, error); - conn_time.handshake_time = PILI_RTMP_GetTime() - t2; - - if (r->m_connCallback != NULL) { - r->m_connCallback(&conn_time, r->m_userData); - } - freeaddrinfo(ai); - return ret; -} - -//useless -static int - SocksNegotiate(PILI_RTMP *r, RTMPError *error) { - // unsigned long addr; - // struct sockaddr_in service; - // memset(&service, 0, sizeof(struct sockaddr_in)); - // - // add_addr_info(r, &service, &r->Link.hostname, r->Link.port, error); - // addr = htonl(service.sin_addr.s_addr); - // - // { - // char packet[] = { - // 4, 1, /* SOCKS 4, connect */ - // (r->Link.port >> 8) & 0xFF, - // (r->Link.port) & 0xFF, - // (char)(addr >> 24) & 0xFF, (char)(addr >> 16) & 0xFF, - // (char)(addr >> 8) & 0xFF, (char)addr & 0xFF, - // 0 - // }; /* NULL terminate */ - // - // WriteN(r, packet, sizeof packet, error); - // - // if (ReadN(r, packet, 8) != 8) - // return FALSE; - // - // if (packet[0] == 0 && packet[1] == 90) - // { - // return TRUE; - // } - // else - // { - // RTMP_Log(RTMP_LOGERROR, "%s, SOCKS returned error code %d", packet[1]); - // return FALSE; - // } - // } - return 0; -} - -int PILI_RTMP_ConnectStream(PILI_RTMP *r, int seekTime, RTMPError *error) { - PILI_RTMPPacket packet = {0}; - - /* seekTime was already set by SetupStream / SetupURL. - * This is only needed by ReconnectStream. 
- */ - if (seekTime > 0) - r->Link.seekTime = seekTime; - - r->m_mediaChannel = 0; - - while (!r->m_bPlaying && PILI_RTMP_IsConnected(r) && PILI_RTMP_ReadPacket(r, &packet)) { - if (RTMPPacket_IsReady(&packet)) { - if (!packet.m_nBodySize) - continue; - if ((packet.m_packetType == RTMP_PACKET_TYPE_AUDIO) || - (packet.m_packetType == RTMP_PACKET_TYPE_VIDEO) || - (packet.m_packetType == RTMP_PACKET_TYPE_INFO)) { - RTMP_Log(RTMP_LOGWARNING, "Received FLV packet before play()! Ignoring."); - PILI_RTMPPacket_Free(&packet); - continue; - } - - PILI_RTMP_ClientPacket(r, &packet); - PILI_RTMPPacket_Free(&packet); - } - } - - if (!r->m_bPlaying && error) { - char *msg = "PILI_RTMP connect stream failed."; - RTMPError_Alloc(error, strlen(msg)); - error->code = RTMPErrorRTMPConnectStreamFailed; - strcpy(error->message, msg); - } - - return r->m_bPlaying; -} - -int PILI_RTMP_ReconnectStream(PILI_RTMP *r, int seekTime, RTMPError *error) { - PILI_RTMP_DeleteStream(r, error); - - PILI_RTMP_SendCreateStream(r, error); - - return PILI_RTMP_ConnectStream(r, seekTime, error); -} - -int PILI_RTMP_ToggleStream(PILI_RTMP *r, RTMPError *error) { - int res; - - if (!r->m_pausing) { - res = PILI_RTMP_SendPause(r, TRUE, r->m_pauseStamp, error); - if (!res) - return res; - - r->m_pausing = 1; - sleep(1); - } - res = PILI_RTMP_SendPause(r, FALSE, r->m_pauseStamp, error); - r->m_pausing = 3; - return res; -} - -void PILI_RTMP_DeleteStream(PILI_RTMP *r, RTMPError *error) { - if (r->m_stream_id < 0) - return; - - r->m_bPlaying = FALSE; - - SendDeleteStream(r, r->m_stream_id, error); - r->m_stream_id = -1; -} - -int PILI_RTMP_GetNextMediaPacket(PILI_RTMP *r, PILI_RTMPPacket *packet) { - int bHasMediaPacket = 0; - - while (!bHasMediaPacket && PILI_RTMP_IsConnected(r) && PILI_RTMP_ReadPacket(r, packet)) { - if (!RTMPPacket_IsReady(packet)) { - continue; - } - - bHasMediaPacket = PILI_RTMP_ClientPacket(r, packet); - - if (!bHasMediaPacket) { - PILI_RTMPPacket_Free(packet); - } else if (r->m_pausing 
== 3) { - if (packet->m_nTimeStamp <= r->m_mediaStamp) { - bHasMediaPacket = 0; -#ifdef _DEBUG - RTMP_Log(RTMP_LOGDEBUG, - "Skipped type: %02X, size: %d, TS: %d ms, abs TS: %d, pause: %d ms", - packet->m_packetType, packet->m_nBodySize, - packet->m_nTimeStamp, packet->m_hasAbsTimestamp, - r->m_mediaStamp); -#endif - continue; - } - r->m_pausing = 0; - } - } - - if (bHasMediaPacket) - r->m_bPlaying = TRUE; - else if (r->m_sb.sb_timedout && !r->m_pausing) - r->m_pauseStamp = r->m_channelTimestamp[r->m_mediaChannel]; - - return bHasMediaPacket; -} - -int PILI_RTMP_ClientPacket(PILI_RTMP *r, PILI_RTMPPacket *packet) { - int bHasMediaPacket = 0; - switch (packet->m_packetType) { - case 0x01: - /* chunk size */ - HandleChangeChunkSize(r, packet); - break; - - case 0x03: - /* bytes read report */ - RTMP_Log(RTMP_LOGDEBUG, "%s, received: bytes read report", __FUNCTION__); - break; - - case 0x04: - /* ctrl */ - HandleCtrl(r, packet); - break; - - case 0x05: - /* server bw */ - HandleServerBW(r, packet); - break; - - case 0x06: - /* client bw */ - HandleClientBW(r, packet); - break; - - case 0x08: - /* audio data */ - /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: audio %lu bytes", __FUNCTION__, packet.m_nBodySize); */ - HandleAudio(r, packet); - bHasMediaPacket = 1; - if (!r->m_mediaChannel) - r->m_mediaChannel = packet->m_nChannel; - if (!r->m_pausing) - r->m_mediaStamp = packet->m_nTimeStamp; - break; - - case 0x09: - /* video data */ - /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: video %lu bytes", __FUNCTION__, packet.m_nBodySize); */ - HandleVideo(r, packet); - bHasMediaPacket = 1; - if (!r->m_mediaChannel) - r->m_mediaChannel = packet->m_nChannel; - if (!r->m_pausing) - r->m_mediaStamp = packet->m_nTimeStamp; - break; - - case 0x0F: /* flex stream send */ - RTMP_Log(RTMP_LOGDEBUG, - "%s, flex stream send, size %lu bytes, not supported, ignoring", - __FUNCTION__, packet->m_nBodySize); - break; - - case 0x10: /* flex shared object */ - RTMP_Log(RTMP_LOGDEBUG, - "%s, flex shared 
object, size %lu bytes, not supported, ignoring", - __FUNCTION__, packet->m_nBodySize); - break; - - case 0x11: /* flex message */ - { - RTMP_Log(RTMP_LOGDEBUG, - "%s, flex message, size %lu bytes, not fully supported", - __FUNCTION__, packet->m_nBodySize); -/*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ - -/* some DEBUG code */ -#if 0 - RTMP_LIB_AMFObject obj; - int nRes = obj.Decode(packet.m_body+1, packet.m_nBodySize-1); - if(nRes < 0) { - RTMP_Log(RTMP_LOGERROR, "%s, error decoding AMF3 packet", __FUNCTION__); - /*return; */ - } - - obj.Dump(); -#endif - - if (HandleInvoke(r, packet->m_body + 1, packet->m_nBodySize - 1) == 1) - bHasMediaPacket = 2; - break; - } - case 0x12: - /* metadata (notify) */ - RTMP_Log(RTMP_LOGDEBUG, "%s, received: notify %lu bytes", __FUNCTION__, - packet->m_nBodySize); - if (HandleMetadata(r, packet->m_body, packet->m_nBodySize)) - bHasMediaPacket = 1; - break; - - case 0x13: - RTMP_Log(RTMP_LOGDEBUG, "%s, shared object, not supported, ignoring", - __FUNCTION__); - break; - - case 0x14: - /* invoke */ - RTMP_Log(RTMP_LOGDEBUG, "%s, received: invoke %lu bytes", __FUNCTION__, - packet->m_nBodySize); - /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ - - if (HandleInvoke(r, packet->m_body, packet->m_nBodySize) == 1) - bHasMediaPacket = 2; - break; - - case 0x16: { - /* go through FLV packets and handle metadata packets */ - unsigned int pos = 0; - uint32_t nTimeStamp = packet->m_nTimeStamp; - - while (pos + 11 < packet->m_nBodySize) { - uint32_t dataSize = AMF_DecodeInt24(packet->m_body + pos + 1); /* size without header (11) and prevTagSize (4) */ - - if (pos + 11 + dataSize + 4 > packet->m_nBodySize) { - RTMP_Log(RTMP_LOGWARNING, "Stream corrupt?!"); - break; - } - if (packet->m_body[pos] == 0x12) { - HandleMetadata(r, packet->m_body + pos + 11, dataSize); - } else if (packet->m_body[pos] == 8 || packet->m_body[pos] == 9) { - nTimeStamp = AMF_DecodeInt24(packet->m_body + pos + 4); - nTimeStamp |= (packet->m_body[pos + 7] << 
24); - } - pos += (11 + dataSize + 4); - } - if (!r->m_pausing) - r->m_mediaStamp = nTimeStamp; - - /* FLV tag(s) */ - /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: FLV tag(s) %lu bytes", __FUNCTION__, packet.m_nBodySize); */ - bHasMediaPacket = 1; - break; - } - default: - RTMP_Log(RTMP_LOGDEBUG, "%s, unknown packet type received: 0x%02x", __FUNCTION__, - packet->m_packetType); -#ifdef _DEBUG - RTMP_LogHex(RTMP_LOGDEBUG, packet->m_body, packet->m_nBodySize); -#endif - } - - return bHasMediaPacket; -} - -#ifdef _DEBUG -extern FILE *netstackdump; -extern FILE *netstackdump_read; -#endif - -static int - ReadN(PILI_RTMP *r, char *buffer, int n) { - int nOriginalSize = n; - int avail; - char *ptr; - - r->m_sb.sb_timedout = FALSE; - -#ifdef _DEBUG - memset(buffer, 0, n); -#endif - - ptr = buffer; - while (n > 0) { - int nBytes = 0, nRead; - if (r->Link.protocol & RTMP_FEATURE_HTTP) { - while (!r->m_resplen) { - if (r->m_sb.sb_size < 144) { - if (!r->m_unackd) - HTTP_Post(r, RTMPT_IDLE, "", 1); - if (PILI_RTMPSockBuf_Fill(&r->m_sb, r->Link.timeout) < 1) { - if (!r->m_sb.sb_timedout) { - PILI_RTMP_Close(r, NULL); - } else { - RTMPError error = {0}; - - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "PILI_RTMP socket timeout"); - RTMPError_Alloc(&error, strlen(msg)); - error.code = RTMPErrorSocketTimeout; - strcpy(error.message, msg); - - PILI_RTMP_Close(r, &error); - - RTMPError_Free(&error); - } - - return 0; - } - } - HTTP_read(r, 0); - } - if (r->m_resplen && !r->m_sb.sb_size) - PILI_RTMPSockBuf_Fill(&r->m_sb, r->Link.timeout); - avail = r->m_sb.sb_size; - if (avail > r->m_resplen) - avail = r->m_resplen; - } else { - avail = r->m_sb.sb_size; - if (avail == 0) { - if (PILI_RTMPSockBuf_Fill(&r->m_sb, r->Link.timeout) < 1) { - if (!r->m_sb.sb_timedout) { - PILI_RTMP_Close(r, NULL); - } else { - RTMPError error = {0}; - - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "PILI_RTMP socket timeout"); - RTMPError_Alloc(&error, strlen(msg)); - error.code = 
RTMPErrorSocketTimeout; - strcpy(error.message, msg); - - PILI_RTMP_Close(r, &error); - - RTMPError_Free(&error); - } - - return 0; - } - avail = r->m_sb.sb_size; - } - } - nRead = ((n < avail) ? n : avail); - if (nRead > 0) { - memcpy(ptr, r->m_sb.sb_start, nRead); - r->m_sb.sb_start += nRead; - r->m_sb.sb_size -= nRead; - nBytes = nRead; - r->m_nBytesIn += nRead; - if (r->m_bSendCounter && r->m_nBytesIn > r->m_nBytesInSent + r->m_nClientBW / 2) - SendBytesReceived(r, NULL); - } -/*RTMP_Log(RTMP_LOGDEBUG, "%s: %d bytes\n", __FUNCTION__, nBytes); */ -#ifdef _DEBUG - fwrite(ptr, 1, nBytes, netstackdump_read); -#endif - - if (nBytes == 0) { - RTMP_Log(RTMP_LOGDEBUG, "%s, PILI_RTMP socket closed by peer", __FUNCTION__); - /*goto again; */ - RTMPError error = {0}; - - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "PILI_RTMP socket closed by peer. "); - RTMPError_Alloc(&error, strlen(msg)); - error.code = RTMPErrorSocketClosedByPeer; - strcpy(error.message, msg); - - PILI_RTMP_Close(r, &error); - - RTMPError_Free(&error); - break; - } - - if (r->Link.protocol & RTMP_FEATURE_HTTP) - r->m_resplen -= nBytes; - -#ifdef CRYPTO - if (r->Link.rc4keyIn) { - RC4_encrypt(r->Link.rc4keyIn, nBytes, ptr); - } -#endif - - n -= nBytes; - ptr += nBytes; - } - - return nOriginalSize - n; -} - -static int - WriteN(PILI_RTMP *r, const char *buffer, int n, RTMPError *error) { - const char *ptr = buffer; -#ifdef CRYPTO - char *encrypted = 0; - char buf[RTMP_BUFFER_CACHE_SIZE]; - - if (r->Link.rc4keyOut) { - if (n > sizeof(buf)) - encrypted = (char *)malloc(n); - else - encrypted = (char *)buf; - ptr = encrypted; - RC4_encrypt2(r->Link.rc4keyOut, n, buffer, ptr); - } -#endif - -#ifdef RTMP_FEATURE_NONBLOCK - SET_RCVTIMEO(tv, r->Link.timeout); - fd_set wfds; -#endif - while (n > 0) { - -#ifdef RTMP_FEATURE_NONBLOCK - FD_ZERO(&wfds); - FD_SET(r->m_sb.sb_socket, &wfds); - int ret = select(r->m_sb.sb_socket + 1, NULL, &wfds, NULL, &tv); - if (ret < 0) { - int sockerr = GetSockError(); - 
RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP send select error %d, %s", __FUNCTION__, - sockerr, strerror(sockerr)); - if (sockerr == EINTR && !PILI_RTMP_ctrlC) - continue; - - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "PILI_RTMP send select error. "); - strcat(msg, strerror(sockerr)); - RTMPError_Message(error, RTMPErrorSendFailed, msg); - PILI_RTMP_Close(r, error); - RTMPError_Free(error); - n = 1; - break; - } else if (ret == 0) { - RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP send error select timeout", __FUNCTION__); - RTMPError_Message(error, RTMPErrorSocketTimeout, "PILI_RTMP send error. select timeout: "); - PILI_RTMP_Close(r, error); - RTMPError_Free(error); - n = 1; - break; - } else if (!FD_ISSET(r->m_sb.sb_socket, &wfds)) { - PILI_RTMP_Close(r, error); - RTMPError_Message(error, RTMPErrorSendFailed, "PILI_RTMP send error socket can not write"); - RTMPError_Free(error); - n = 1; - break; - } -#endif - int nBytes; - - if (r->Link.protocol & RTMP_FEATURE_HTTP) - nBytes = HTTP_Post(r, RTMPT_SEND, ptr, n); - else - nBytes = PILI_RTMPSockBuf_Send(&r->m_sb, ptr, n); - /*RTMP_Log(RTMP_LOGDEBUG, "%s: %d\n", __FUNCTION__, nBytes); */ - - if (nBytes < 0) { - int sockerr = GetSockError(); - RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP send error %d, %s, (%d bytes)", __FUNCTION__, - sockerr, strerror(sockerr), n); - - /* - Specify the receiving or sending timeouts until reporting an error. - The argument is a struct timeval. - If an input or output function blocks for this period of time, - and data has been sent or received, - the return value of that function will be the amount of data transferred; - if no data has been transferred and the timeout has been reached then -1 is returned - with errno set to EAGAIN or EWOULDBLOCK, or EINPROGRESS (for connect(2)) just as if the socket was specified to be nonblocking. - If the timeout is set to zero (the default) then the operation will never timeout. 
- Timeouts only have effect for system calls that perform socket I/O (e.g., read(2), recvmsg(2), send(2), sendmsg(2)); - timeouts have no effect for select(2), poll(2), epoll_wait(2), and so on. - */ - if ((sockerr == EINTR && !PILI_RTMP_ctrlC ) || sockerr == EAGAIN) - continue; - -#ifdef RTMP_FEATURE_NONBLOCK - if (sockerr == EWOULDBLOCK || sockerr == EAGAIN) { - continue; - } else if (error) { -#else - if (error) { -#endif - char msg[100]; - memset(msg, 0, 100); - strcat(msg, "PILI_RTMP send error. socket error: "); - strcat(msg, strerror(sockerr)); - RTMPError_Alloc(error, strlen(msg)); - error->code = RTMPErrorSendFailed; - strcpy(error->message, msg); - } - - PILI_RTMP_Close(r, error); - - RTMPError_Free(error); - - n = 1; - break; - } - - if (nBytes == 0) - break; - - n -= nBytes; - ptr += nBytes; - } - -#ifdef CRYPTO - if (encrypted && encrypted != buf) - free(encrypted); -#endif - - return n == 0; -} - -#define SAVC(x) static const AVal av_##x = AVC(#x) - -SAVC(app); -SAVC(connect); -SAVC(flashVer); -SAVC(swfUrl); -SAVC(pageUrl); -SAVC(tcUrl); -SAVC(fpad); -SAVC(capabilities); -SAVC(audioCodecs); -SAVC(videoCodecs); -SAVC(videoFunction); -SAVC(objectEncoding); -SAVC(secureToken); -SAVC(secureTokenResponse); -SAVC(type); -SAVC(nonprivate); -SAVC(xreqid); - -static int - SendConnectPacket(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[4096], *pend = pbuf + sizeof(pbuf); - char *enc; - - if (cp) - return PILI_RTMP_SendPacket(r, cp, TRUE, error); - - packet.m_nChannel = 0x03; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_LARGE; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_connect); - enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); - *enc++ = AMF_OBJECT; - - enc = AMF_EncodeNamedString(enc, 
pend, &av_app, &r->Link.app); - if (!enc) - return FALSE; - AVal requestId; - requestId.av_val = reqid; - requestId.av_len = (int)strlen(reqid); - - if (requestId.av_len){ - enc = AMF_EncodeNamedString(enc,pend,&av_xreqid,&requestId); - if (!enc) - return FALSE; - } - - if (r->Link.protocol & RTMP_FEATURE_WRITE) { - enc = AMF_EncodeNamedString(enc, pend, &av_type, &av_nonprivate); - if (!enc) - return FALSE; - } - if (r->Link.flashVer.av_len) { - enc = AMF_EncodeNamedString(enc, pend, &av_flashVer, &r->Link.flashVer); - if (!enc) - return FALSE; - } - if (r->Link.swfUrl.av_len) { - enc = AMF_EncodeNamedString(enc, pend, &av_swfUrl, &r->Link.swfUrl); - if (!enc) - return FALSE; - } - if (r->Link.tcUrl.av_len) { - enc = AMF_EncodeNamedString(enc, pend, &av_tcUrl, &r->Link.tcUrl); - if (!enc) - return FALSE; - } - if (!(r->Link.protocol & RTMP_FEATURE_WRITE)) { - enc = AMF_EncodeNamedBoolean(enc, pend, &av_fpad, FALSE); - if (!enc) - return FALSE; - enc = AMF_EncodeNamedNumber(enc, pend, &av_capabilities, 15.0); - if (!enc) - return FALSE; - enc = AMF_EncodeNamedNumber(enc, pend, &av_audioCodecs, r->m_fAudioCodecs); - if (!enc) - return FALSE; - enc = AMF_EncodeNamedNumber(enc, pend, &av_videoCodecs, r->m_fVideoCodecs); - if (!enc) - return FALSE; - enc = AMF_EncodeNamedNumber(enc, pend, &av_videoFunction, 1.0); - if (!enc) - return FALSE; - if (r->Link.pageUrl.av_len) { - enc = AMF_EncodeNamedString(enc, pend, &av_pageUrl, &r->Link.pageUrl); - if (!enc) - return FALSE; - } - } - if (r->m_fEncoding != 0.0 || r->m_bSendEncoding) { /* AMF0, AMF3 not fully supported yet */ - enc = AMF_EncodeNamedNumber(enc, pend, &av_objectEncoding, r->m_fEncoding); - if (!enc) - return FALSE; - } - if (enc + 3 >= pend) - return FALSE; - *enc++ = 0; - *enc++ = 0; /* end of object - 0x00 0x00 0x09 */ - *enc++ = AMF_OBJECT_END; - - /* add auth string */ - if (r->Link.auth.av_len) { - enc = AMF_EncodeBoolean(enc, pend, r->Link.lFlags & RTMP_LF_AUTH); - if (!enc) - return FALSE; - enc = 
AMF_EncodeString(enc, pend, &r->Link.auth); - if (!enc) - return FALSE; - } - if (r->Link.extras.o_num) { - int i; - for (i = 0; i < r->Link.extras.o_num; i++) { - enc = AMFProp_Encode(&r->Link.extras.o_props[i], enc, pend); - if (!enc) - return FALSE; - } - } - packet.m_nBodySize = enc - packet.m_body; - - return PILI_RTMP_SendPacket(r, &packet, TRUE, error); -} - -#if 0 /* unused */ -SAVC(bgHasStream); - -static int -SendBGHasStream(PILI_RTMP *r, double dId, AVal *playpath) -{ - PILI_RTMPPacket packet; - char pbuf[1024], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x03; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_bgHasStream); - enc = AMF_EncodeNumber(enc, pend, dId); - *enc++ = AMF_NULL; - - enc = AMF_EncodeString(enc, pend, playpath); - if (enc == NULL) - return FALSE; - - packet.m_nBodySize = enc - packet.m_body; - - return PILI_RTMP_SendPacket(r, &packet, TRUE); -} -#endif - -SAVC(createStream); - -int PILI_RTMP_SendCreateStream(PILI_RTMP *r, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[256], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x03; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_createStream); - enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); - *enc++ = AMF_NULL; /* NULL */ - - packet.m_nBodySize = enc - packet.m_body; - - return PILI_RTMP_SendPacket(r, &packet, TRUE, error); -} - -SAVC(FCSubscribe); - -static int - SendFCSubscribe(PILI_RTMP *r, 
AVal *subscribepath, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[512], *pend = pbuf + sizeof(pbuf); - char *enc; - packet.m_nChannel = 0x03; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - RTMP_Log(RTMP_LOGDEBUG, "FCSubscribe: %s", subscribepath->av_val); - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_FCSubscribe); - enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); - *enc++ = AMF_NULL; - enc = AMF_EncodeString(enc, pend, subscribepath); - - if (!enc) - return FALSE; - - packet.m_nBodySize = enc - packet.m_body; - - return PILI_RTMP_SendPacket(r, &packet, TRUE, error); -} - -SAVC(releaseStream); - -static int - SendReleaseStream(PILI_RTMP *r, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[1024], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x03; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_releaseStream); - enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); - *enc++ = AMF_NULL; - enc = AMF_EncodeString(enc, pend, &r->Link.playpath); - if (!enc) - return FALSE; - - packet.m_nBodySize = enc - packet.m_body; - - return PILI_RTMP_SendPacket(r, &packet, FALSE, error); -} - -SAVC(FCPublish); - -static int - SendFCPublish(PILI_RTMP *r, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[1024], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x03; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0; - 
packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_FCPublish); - enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); - *enc++ = AMF_NULL; - enc = AMF_EncodeString(enc, pend, &r->Link.playpath); - if (!enc) - return FALSE; - - packet.m_nBodySize = enc - packet.m_body; - - return PILI_RTMP_SendPacket(r, &packet, FALSE, error); -} - -SAVC(FCUnpublish); - -static int - SendFCUnpublish(PILI_RTMP *r, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[1024], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x03; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_FCUnpublish); - enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); - *enc++ = AMF_NULL; - enc = AMF_EncodeString(enc, pend, &r->Link.playpath); - if (!enc) - return FALSE; - - packet.m_nBodySize = enc - packet.m_body; - - return PILI_RTMP_SendPacket(r, &packet, FALSE, error); -} - -SAVC(publish); -SAVC(live); -SAVC(record); - -static int - SendPublish(PILI_RTMP *r, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[1024], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x04; /* source channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_LARGE; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = r->m_stream_id; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_publish); - enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); - *enc++ = AMF_NULL; - enc = AMF_EncodeString(enc, pend, &r->Link.playpath); - if (!enc) - return FALSE; - - /* 
FIXME: should we choose live based on Link.lFlags & RTMP_LF_LIVE? */ - enc = AMF_EncodeString(enc, pend, &av_live); - if (!enc) - return FALSE; - - packet.m_nBodySize = enc - packet.m_body; - - return PILI_RTMP_SendPacket(r, &packet, TRUE, error); -} - -SAVC(deleteStream); - -static int - SendDeleteStream(PILI_RTMP *r, double dStreamId, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[256], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x03; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_deleteStream); - enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); - *enc++ = AMF_NULL; - enc = AMF_EncodeNumber(enc, pend, dStreamId); - - packet.m_nBodySize = enc - packet.m_body; - - /* no response expected */ - return PILI_RTMP_SendPacket(r, &packet, FALSE, error); -} - -SAVC(pause); - -int PILI_RTMP_SendPause(PILI_RTMP *r, int DoPause, int iTime, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[256], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x08; /* video channel */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x14; /* invoke */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_pause); - enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); - *enc++ = AMF_NULL; - enc = AMF_EncodeBoolean(enc, pend, DoPause); - enc = AMF_EncodeNumber(enc, pend, (double)iTime); - - packet.m_nBodySize = enc - packet.m_body; - - RTMP_Log(RTMP_LOGDEBUG, "%s, %d, pauseTime=%d", __FUNCTION__, DoPause, iTime); - return PILI_RTMP_SendPacket(r, &packet, TRUE, error); -} - -int 
PILI_RTMP_Pause(PILI_RTMP *r, int DoPause, RTMPError *error) { - if (DoPause) - r->m_pauseStamp = r->m_channelTimestamp[r->m_mediaChannel]; - return PILI_RTMP_SendPause(r, DoPause, r->m_pauseStamp, error); -} - -SAVC(seek); - -int PILI_RTMP_SendSeek(PILI_RTMP *r, int iTime, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[256], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x08; /* video channel */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x14; /* invoke */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_seek); - enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); - *enc++ = AMF_NULL; - enc = AMF_EncodeNumber(enc, pend, (double)iTime); - - packet.m_nBodySize = enc - packet.m_body; - - r->m_read.flags |= RTMP_READ_SEEKING; - r->m_read.nResumeTS = 0; - - return PILI_RTMP_SendPacket(r, &packet, TRUE, error); -} - -int PILI_RTMP_SendServerBW(PILI_RTMP *r, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[256], *pend = pbuf + sizeof(pbuf); - - packet.m_nChannel = 0x02; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_LARGE; - packet.m_packetType = 0x05; /* Server BW */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - packet.m_nBodySize = 4; - - AMF_EncodeInt32(packet.m_body, pend, r->m_nServerBW); - return PILI_RTMP_SendPacket(r, &packet, FALSE, error); -} - -int PILI_RTMP_SendClientBW(PILI_RTMP *r, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[256], *pend = pbuf + sizeof(pbuf); - - packet.m_nChannel = 0x02; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_LARGE; - packet.m_packetType = 0x06; /* Client BW */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - 
packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - packet.m_nBodySize = 5; - - AMF_EncodeInt32(packet.m_body, pend, r->m_nClientBW); - packet.m_body[4] = r->m_nClientBW2; - return PILI_RTMP_SendPacket(r, &packet, FALSE, error); -} - -static int - SendBytesReceived(PILI_RTMP *r, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[256], *pend = pbuf + sizeof(pbuf); - - packet.m_nChannel = 0x02; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x03; /* bytes in */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - packet.m_nBodySize = 4; - - AMF_EncodeInt32(packet.m_body, pend, r->m_nBytesIn); /* hard coded for now */ - r->m_nBytesInSent = r->m_nBytesIn; - - /*RTMP_Log(RTMP_LOGDEBUG, "Send bytes report. 0x%x (%d bytes)", (unsigned int)m_nBytesIn, m_nBytesIn); */ - return PILI_RTMP_SendPacket(r, &packet, FALSE, error); -} - -SAVC(_checkbw); - -static int - SendCheckBW(PILI_RTMP *r, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[256], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x03; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_LARGE; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0; /* RTMP_GetTime(); */ - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av__checkbw); - enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); - *enc++ = AMF_NULL; - - packet.m_nBodySize = enc - packet.m_body; - - /* triggers _onbwcheck and eventually results in _onbwdone */ - return PILI_RTMP_SendPacket(r, &packet, FALSE, error); -} - -SAVC(_result); - -static int - SendCheckBWResult(PILI_RTMP *r, double txn, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[256], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x03; /* 
control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0x16 * r->m_nBWCheckCounter; /* temp inc value. till we figure it out. */ - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av__result); - enc = AMF_EncodeNumber(enc, pend, txn); - *enc++ = AMF_NULL; - enc = AMF_EncodeNumber(enc, pend, (double)r->m_nBWCheckCounter++); - - packet.m_nBodySize = enc - packet.m_body; - - return PILI_RTMP_SendPacket(r, &packet, FALSE, error); -} - -SAVC(ping); -SAVC(pong); - -static int - SendPong(PILI_RTMP *r, double txn, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[256], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x03; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0x16 * r->m_nBWCheckCounter; /* temp inc value. till we figure it out. 
*/ - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_pong); - enc = AMF_EncodeNumber(enc, pend, txn); - *enc++ = AMF_NULL; - - packet.m_nBodySize = enc - packet.m_body; - - return PILI_RTMP_SendPacket(r, &packet, FALSE, error); -} - -SAVC(play); - -static int - SendPlay(PILI_RTMP *r, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[1024], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x08; /* we make 8 our stream channel */ - packet.m_headerType = RTMP_PACKET_SIZE_LARGE; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = r->m_stream_id; /*0x01000000; */ - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_play); - enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); - *enc++ = AMF_NULL; - - RTMP_Log(RTMP_LOGDEBUG, "%s, seekTime=%d, stopTime=%d, sending play: %s", - __FUNCTION__, r->Link.seekTime, r->Link.stopTime, - r->Link.playpath.av_val); - enc = AMF_EncodeString(enc, pend, &r->Link.playpath); - if (!enc) - return FALSE; - - /* Optional parameters start and len. 
- * - * start: -2, -1, 0, positive number - * -2: looks for a live stream, then a recorded stream, - * if not found any open a live stream - * -1: plays a live stream - * >=0: plays a recorded streams from 'start' milliseconds - */ - if (r->Link.lFlags & RTMP_LF_LIVE) - enc = AMF_EncodeNumber(enc, pend, -1000.0); - else { - if (r->Link.seekTime > 0.0) - enc = AMF_EncodeNumber(enc, pend, r->Link.seekTime); /* resume from here */ - else - enc = AMF_EncodeNumber(enc, pend, 0.0); /*-2000.0);*/ /* recorded as default, -2000.0 is not reliable since that freezes the player if the stream is not found */ - } - if (!enc) - return FALSE; - - /* len: -1, 0, positive number - * -1: plays live or recorded stream to the end (default) - * 0: plays a frame 'start' ms away from the beginning - * >0: plays a live or recoded stream for 'len' milliseconds - */ - /*enc += EncodeNumber(enc, -1.0); */ /* len */ - if (r->Link.stopTime) { - enc = AMF_EncodeNumber(enc, pend, r->Link.stopTime - r->Link.seekTime); - if (!enc) - return FALSE; - } - - packet.m_nBodySize = enc - packet.m_body; - - return PILI_RTMP_SendPacket(r, &packet, TRUE, error); -} - -SAVC(set_playlist); -SAVC(0); - -static int - SendPlaylist(PILI_RTMP *r, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[1024], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x08; /* we make 8 our stream channel */ - packet.m_headerType = RTMP_PACKET_SIZE_LARGE; - packet.m_packetType = 0x14; /* INVOKE */ - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = r->m_stream_id; /*0x01000000; */ - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_set_playlist); - enc = AMF_EncodeNumber(enc, pend, 0); - *enc++ = AMF_NULL; - *enc++ = AMF_ECMA_ARRAY; - *enc++ = 0; - *enc++ = 0; - *enc++ = 0; - *enc++ = AMF_OBJECT; - enc = AMF_EncodeNamedString(enc, pend, &av_0, &r->Link.playpath); - if (!enc) - return FALSE; - if (enc + 3 >= pend) - 
return FALSE; - *enc++ = 0; - *enc++ = 0; - *enc++ = AMF_OBJECT_END; - - packet.m_nBodySize = enc - packet.m_body; - - return PILI_RTMP_SendPacket(r, &packet, TRUE, error); -} - -static int - SendSecureTokenResponse(PILI_RTMP *r, AVal *resp, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[1024], *pend = pbuf + sizeof(pbuf); - char *enc; - - packet.m_nChannel = 0x03; /* control channel (invoke) */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x14; - packet.m_nTimeStamp = 0; - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - enc = packet.m_body; - enc = AMF_EncodeString(enc, pend, &av_secureTokenResponse); - enc = AMF_EncodeNumber(enc, pend, 0.0); - *enc++ = AMF_NULL; - enc = AMF_EncodeString(enc, pend, resp); - if (!enc) - return FALSE; - - packet.m_nBodySize = enc - packet.m_body; - - return PILI_RTMP_SendPacket(r, &packet, FALSE, error); -} - -/* -from http://jira.red5.org/confluence/display/docs/Ping: - -Ping is the most mysterious message in PILI_RTMP and till now we haven't fully interpreted it yet. In summary, Ping message is used as a special command that are exchanged between client and server. This page aims to document all known Ping messages. Expect the list to grow. - -The type of Ping packet is 0x4 and contains two mandatory parameters and two optional parameters. The first parameter is the type of Ping and in short integer. The second parameter is the target of the ping. As Ping is always sent in Channel 2 (control channel) and the target object in PILI_RTMP header is always 0 which means the Connection object, it's necessary to put an extra parameter to indicate the exact target object the Ping is sent to. The second parameter takes this responsibility. The value has the same meaning as the target object field in PILI_RTMP header. (The second value could also be used as other purposes, like RTT Ping/Pong. It is used as the timestamp.) 
The third and fourth parameters are optional and could be looked upon as the parameter of the Ping packet. Below is an unexhausted list of Ping messages. - - * type 0: Clear the stream. No third and fourth parameters. The second parameter could be 0. After the connection is established, a Ping 0,0 will be sent from server to client. The message will also be sent to client on the start of Play and in response of a Seek or Pause/Resume request. This Ping tells client to re-calibrate the clock with the timestamp of the next packet server sends. - * type 1: Tell the stream to clear the playing buffer. - * type 3: Buffer time of the client. The third parameter is the buffer time in millisecond. - * type 4: Reset a stream. Used together with type 0 in the case of VOD. Often sent before type 0. - * type 6: Ping the client from server. The second parameter is the current time. - * type 7: Pong reply from client. The second parameter is the time the server sent with his ping request. - * type 26: SWFVerification request - * type 27: SWFVerification response -*/ -int PILI_RTMP_SendCtrl(PILI_RTMP *r, short nType, unsigned int nObject, unsigned int nTime, RTMPError *error) { - PILI_RTMPPacket packet; - char pbuf[256], *pend = pbuf + sizeof(pbuf); - int nSize; - char *buf; - - RTMP_Log(RTMP_LOGDEBUG, "sending ctrl. 
type: 0x%04x", (unsigned short)nType); - - packet.m_nChannel = 0x02; /* control channel (ping) */ - packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; - packet.m_packetType = 0x04; /* ctrl */ - packet.m_nTimeStamp = 0; /* RTMP_GetTime(); */ - packet.m_nInfoField2 = 0; - packet.m_hasAbsTimestamp = 0; - packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; - - switch (nType) { - case 0x03: - nSize = 10; - break; /* buffer time */ - case 0x1A: - nSize = 3; - break; /* SWF verify request */ - case 0x1B: - nSize = 44; - break; /* SWF verify response */ - default: - nSize = 6; - break; - } - - packet.m_nBodySize = nSize; - - buf = packet.m_body; - buf = AMF_EncodeInt16(buf, pend, nType); - - if (nType == 0x1B) { -#ifdef CRYPTO - memcpy(buf, r->Link.SWFVerificationResponse, 42); - RTMP_Log(RTMP_LOGDEBUG, "Sending SWFVerification response: "); - RTMP_LogHex(RTMP_LOGDEBUG, (uint8_t *)packet.m_body, packet.m_nBodySize); -#endif - } else if (nType == 0x1A) { - *buf = nObject & 0xff; - } else { - if (nSize > 2) - buf = AMF_EncodeInt32(buf, pend, nObject); - - if (nSize > 6) - buf = AMF_EncodeInt32(buf, pend, nTime); - } - - return PILI_RTMP_SendPacket(r, &packet, FALSE, error); -} - -static void - AV_erase(PILI_RTMP_METHOD *vals, int *num, int i, int freeit) { - if (freeit) - free(vals[i].name.av_val); - (*num)--; - for (; i < *num; i++) { - vals[i] = vals[i + 1]; - } - vals[i].name.av_val = NULL; - vals[i].name.av_len = 0; - vals[i].num = 0; -} - -void PILI_RTMP_DropRequest(PILI_RTMP *r, int i, int freeit) { - AV_erase(r->m_methodCalls, &r->m_numCalls, i, freeit); -} - -static void - AV_queue(PILI_RTMP_METHOD **vals, int *num, AVal *av, int txn) { - char *tmp; - if (!(*num & 0x0f)) - *vals = realloc(*vals, (*num + 16) * sizeof(PILI_RTMP_METHOD)); - tmp = malloc(av->av_len + 1); - memcpy(tmp, av->av_val, av->av_len); - tmp[av->av_len] = '\0'; - (*vals)[*num].num = txn; - (*vals)[*num].name.av_len = av->av_len; - (*vals)[(*num)++].name.av_val = tmp; -} - -static void - 
AV_clear(PILI_RTMP_METHOD *vals, int num) { - int i; - for (i = 0; i < num; i++) - free(vals[i].name.av_val); - free(vals); -} - -SAVC(onBWDone); -SAVC(onFCSubscribe); -SAVC(onFCUnsubscribe); -SAVC(_onbwcheck); -SAVC(_onbwdone); -SAVC(_error); -SAVC(close); -SAVC(code); -SAVC(level); -SAVC(onStatus); -SAVC(playlist_ready); -static const AVal av_NetStream_Failed = AVC("NetStream.Failed"); -static const AVal av_NetStream_Play_Failed = AVC("NetStream.Play.Failed"); -static const AVal av_NetStream_Play_StreamNotFound = - AVC("NetStream.Play.StreamNotFound"); -static const AVal av_NetConnection_Connect_InvalidApp = - AVC("NetConnection.Connect.InvalidApp"); -static const AVal av_NetStream_Play_Start = AVC("NetStream.Play.Start"); -static const AVal av_NetStream_Play_Complete = AVC("NetStream.Play.Complete"); -static const AVal av_NetStream_Play_Stop = AVC("NetStream.Play.Stop"); -static const AVal av_NetStream_Seek_Notify = AVC("NetStream.Seek.Notify"); -static const AVal av_NetStream_Pause_Notify = AVC("NetStream.Pause.Notify"); -static const AVal av_NetStream_Play_UnpublishNotify = - AVC("NetStream.Play.UnpublishNotify"); -static const AVal av_NetStream_Publish_Start = AVC("NetStream.Publish.Start"); - -/* Returns 0 for OK/Failed/error, 1 for 'Stop or Complete' */ -static int - HandleInvoke(PILI_RTMP *r, const char *body, unsigned int nBodySize) { - AMFObject obj; - AVal method; - int txn; - int ret = 0, nRes; - if (body[0] != 0x02) /* make sure it is a string method name we start with */ - { - RTMP_Log(RTMP_LOGWARNING, "%s, Sanity failed. 
no string method in invoke packet", - __FUNCTION__); - return 0; - } - - nRes = AMF_Decode(&obj, body, nBodySize, FALSE); - if (nRes < 0) { - RTMP_Log(RTMP_LOGERROR, "%s, error decoding invoke packet", __FUNCTION__); - return 0; - } - - AMF_Dump(&obj); - AMFProp_GetString(AMF_GetProp(&obj, NULL, 0), &method); - txn = (int)AMFProp_GetNumber(AMF_GetProp(&obj, NULL, 1)); - RTMP_Log(RTMP_LOGDEBUG, "%s, server invoking <%s>", __FUNCTION__, method.av_val); - - RTMPError error = {0}; - - if (AVMATCH(&method, &av__result)) { - AVal methodInvoked = {0}; - int i; - - for (i = 0; i < r->m_numCalls; i++) { - if (r->m_methodCalls[i].num == txn) { - methodInvoked = r->m_methodCalls[i].name; - AV_erase(r->m_methodCalls, &r->m_numCalls, i, FALSE); - break; - } - } - if (!methodInvoked.av_val) { - RTMP_Log(RTMP_LOGDEBUG, "%s, received result id %d without matching request", - __FUNCTION__, txn); - goto leave; - } - - RTMP_Log(RTMP_LOGDEBUG, "%s, received result for method call <%s>", __FUNCTION__, - methodInvoked.av_val); - - if (AVMATCH(&methodInvoked, &av_connect)) { - if (r->Link.token.av_len) { - AMFObjectProperty p; - if (PILI_RTMP_FindFirstMatchingProperty(&obj, &av_secureToken, &p)) { - DecodeTEA(&r->Link.token, &p.p_vu.p_aval); - SendSecureTokenResponse(r, &p.p_vu.p_aval, &error); - } - } - if (r->Link.protocol & RTMP_FEATURE_WRITE) { - SendReleaseStream(r, &error); - SendFCPublish(r, &error); - } else { - PILI_RTMP_SendServerBW(r, &error); - PILI_RTMP_SendCtrl(r, 3, 0, 300, &error); - } - PILI_RTMP_SendCreateStream(r, &error); - - if (!(r->Link.protocol & RTMP_FEATURE_WRITE)) { - /* Send the FCSubscribe if live stream or if subscribepath is set */ - if (r->Link.subscribepath.av_len) - SendFCSubscribe(r, &r->Link.subscribepath, &error); - else if (r->Link.lFlags & RTMP_LF_LIVE) - SendFCSubscribe(r, &r->Link.playpath, &error); - } - } else if (AVMATCH(&methodInvoked, &av_createStream)) { - r->m_stream_id = (int)AMFProp_GetNumber(AMF_GetProp(&obj, NULL, 3)); - - if 
(r->Link.protocol & RTMP_FEATURE_WRITE) { - SendPublish(r, &error); - } else { - if (r->Link.lFlags & RTMP_LF_PLST) - SendPlaylist(r, &error); - SendPlay(r, &error); - PILI_RTMP_SendCtrl(r, 3, r->m_stream_id, r->m_nBufferMS, &error); - } - } else if (AVMATCH(&methodInvoked, &av_play) || - AVMATCH(&methodInvoked, &av_publish)) { - r->m_bPlaying = TRUE; - } - free(methodInvoked.av_val); - } else if (AVMATCH(&method, &av_onBWDone)) { - if (!r->m_nBWCheckCounter) - SendCheckBW(r, &error); - } else if (AVMATCH(&method, &av_onFCSubscribe)) { - /* SendOnFCSubscribe(); */ - } else if (AVMATCH(&method, &av_onFCUnsubscribe)) { - PILI_RTMP_Close(r, NULL); - ret = 1; - } else if (AVMATCH(&method, &av_ping)) { - SendPong(r, txn, &error); - } else if (AVMATCH(&method, &av__onbwcheck)) { - SendCheckBWResult(r, txn, &error); - } else if (AVMATCH(&method, &av__onbwdone)) { - int i; - for (i = 0; i < r->m_numCalls; i++) - if (AVMATCH(&r->m_methodCalls[i].name, &av__checkbw)) { - AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE); - break; - } - } else if (AVMATCH(&method, &av__error)) { - RTMP_Log(RTMP_LOGERROR, "PILI_RTMP server sent error"); - } else if (AVMATCH(&method, &av_close)) { - RTMP_Log(RTMP_LOGERROR, "PILI_RTMP server requested close"); - RTMPError error = {0}; - char *msg = "PILI_RTMP server requested close."; - RTMPError_Alloc(&error, strlen(msg)); - error.code = RTMPErrorServerRequestedClose; - strcpy(error.message, msg); - - PILI_RTMP_Close(r, &error); - - RTMPError_Free(&error); - } else if (AVMATCH(&method, &av_onStatus)) { - AMFObject obj2; - AVal code, level; - AMFProp_GetObject(AMF_GetProp(&obj, NULL, 3), &obj2); - AMFProp_GetString(AMF_GetProp(&obj2, &av_code, -1), &code); - AMFProp_GetString(AMF_GetProp(&obj2, &av_level, -1), &level); - - RTMP_Log(RTMP_LOGDEBUG, "%s, onStatus: %s", __FUNCTION__, code.av_val); - if (AVMATCH(&code, &av_NetStream_Failed) || AVMATCH(&code, &av_NetStream_Play_Failed) || AVMATCH(&code, &av_NetStream_Play_StreamNotFound) || 
AVMATCH(&code, &av_NetConnection_Connect_InvalidApp)) { - r->m_stream_id = -1; - - int err_code; - char msg[100]; - memset(msg, 0, 100); - - if (AVMATCH(&code, &av_NetStream_Failed)) { - err_code = RTMPErrorNetStreamFailed; - strcpy(msg, "NetStream failed."); - } else if (AVMATCH(&code, &av_NetStream_Play_Failed)) { - err_code = RTMPErrorNetStreamPlayFailed; - strcpy(msg, "NetStream play failed."); - } else if (AVMATCH(&code, &av_NetStream_Play_StreamNotFound)) { - err_code = RTMPErrorNetStreamPlayStreamNotFound; - strcpy(msg, "NetStream play stream not found."); - } else if (AVMATCH(&code, &av_NetConnection_Connect_InvalidApp)) { - err_code = RTMPErrorNetConnectionConnectInvalidApp; - strcpy(msg, "NetConnection connect invalip app."); - } else { - err_code = RTMPErrorUnknow; - strcpy(msg, "Unknow error."); - } - - RTMPError_Alloc(&error, strlen(msg)); - error.code = err_code; - strcpy(error.message, msg); - - PILI_RTMP_Close(r, &error); - - RTMPError_Free(&error); - - RTMP_Log(RTMP_LOGERROR, "Closing connection: %s", code.av_val); - } - - else if (AVMATCH(&code, &av_NetStream_Play_Start)) { - int i; - r->m_bPlaying = TRUE; - for (i = 0; i < r->m_numCalls; i++) { - if (AVMATCH(&r->m_methodCalls[i].name, &av_play)) { - AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE); - break; - } - } - } - - else if (AVMATCH(&code, &av_NetStream_Publish_Start)) { - int i; - r->m_bPlaying = TRUE; - for (i = 0; i < r->m_numCalls; i++) { - if (AVMATCH(&r->m_methodCalls[i].name, &av_publish)) { - AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE); - break; - } - } - } - - /* Return 1 if this is a Play.Complete or Play.Stop */ - else if (AVMATCH(&code, &av_NetStream_Play_Complete) || AVMATCH(&code, &av_NetStream_Play_Stop) || AVMATCH(&code, &av_NetStream_Play_UnpublishNotify)) { - PILI_RTMP_Close(r, NULL); - ret = 1; - } - - else if (AVMATCH(&code, &av_NetStream_Seek_Notify)) { - r->m_read.flags &= ~RTMP_READ_SEEKING; - } - - else if (AVMATCH(&code, &av_NetStream_Pause_Notify)) { - 
if (r->m_pausing == 1 || r->m_pausing == 2) { - PILI_RTMP_SendPause(r, FALSE, r->m_pauseStamp, &error); - r->m_pausing = 3; - } - } - } else if (AVMATCH(&method, &av_playlist_ready)) { - int i; - for (i = 0; i < r->m_numCalls; i++) { - if (AVMATCH(&r->m_methodCalls[i].name, &av_set_playlist)) { - AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE); - break; - } - } - } else { - } -leave: - AMF_Reset(&obj); - return ret; -} - -int PILI_RTMP_FindFirstMatchingProperty(AMFObject *obj, const AVal *name, - AMFObjectProperty *p) { - int n; - /* this is a small object search to locate the "duration" property */ - for (n = 0; n < obj->o_num; n++) { - AMFObjectProperty *prop = AMF_GetProp(obj, NULL, n); - - if (AVMATCH(&prop->p_name, name)) { - *p = *prop; - return TRUE; - } - - if (prop->p_type == AMF_OBJECT) { - if (PILI_RTMP_FindFirstMatchingProperty(&prop->p_vu.p_object, name, p)) - return TRUE; - } - } - return FALSE; -} - -/* Like above, but only check if name is a prefix of property */ -int PILI_RTMP_FindPrefixProperty(AMFObject *obj, const AVal *name, - AMFObjectProperty *p) { - int n; - for (n = 0; n < obj->o_num; n++) { - AMFObjectProperty *prop = AMF_GetProp(obj, NULL, n); - - if (prop->p_name.av_len > name->av_len && - !memcmp(prop->p_name.av_val, name->av_val, name->av_len)) { - *p = *prop; - return TRUE; - } - - if (prop->p_type == AMF_OBJECT) { - if (PILI_RTMP_FindPrefixProperty(&prop->p_vu.p_object, name, p)) - return TRUE; - } - } - return FALSE; -} - -static int - DumpMetaData(AMFObject *obj) { - AMFObjectProperty *prop; - int n; - for (n = 0; n < obj->o_num; n++) { - prop = AMF_GetProp(obj, NULL, n); - if (prop->p_type != AMF_OBJECT) { - char str[256] = ""; - switch (prop->p_type) { - case AMF_NUMBER: - snprintf(str, 255, "%.2f", prop->p_vu.p_number); - break; - case AMF_BOOLEAN: - snprintf(str, 255, "%s", - prop->p_vu.p_number != 0. ? 
"TRUE" : "FALSE"); - break; - case AMF_STRING: - snprintf(str, 255, "%.*s", prop->p_vu.p_aval.av_len, - prop->p_vu.p_aval.av_val); - break; - case AMF_DATE: - snprintf(str, 255, "timestamp:%.2f", prop->p_vu.p_number); - break; - default: - snprintf(str, 255, "INVALID TYPE 0x%02x", - (unsigned char)prop->p_type); - } - if (prop->p_name.av_len) { - /* chomp */ - if (strlen(str) >= 1 && str[strlen(str) - 1] == '\n') - str[strlen(str) - 1] = '\0'; - RTMP_Log(RTMP_LOGINFO, " %-22.*s%s", prop->p_name.av_len, - prop->p_name.av_val, str); - } - } else { - if (prop->p_name.av_len) - RTMP_Log(RTMP_LOGINFO, "%.*s:", prop->p_name.av_len, prop->p_name.av_val); - DumpMetaData(&prop->p_vu.p_object); - } - } - return FALSE; -} - -SAVC(onMetaData); -SAVC(duration); -SAVC(video); -SAVC(audio); - -static int - HandleMetadata(PILI_RTMP *r, char *body, unsigned int len) { - /* allright we get some info here, so parse it and print it */ - /* also keep duration or filesize to make a nice progress bar */ - - AMFObject obj; - AVal metastring; - int ret = FALSE; - - int nRes = AMF_Decode(&obj, body, len, FALSE); - if (nRes < 0) { - RTMP_Log(RTMP_LOGERROR, "%s, error decoding meta data packet", __FUNCTION__); - return FALSE; - } - - AMF_Dump(&obj); - AMFProp_GetString(AMF_GetProp(&obj, NULL, 0), &metastring); - - if (AVMATCH(&metastring, &av_onMetaData)) { - AMFObjectProperty prop; - /* Show metadata */ - RTMP_Log(RTMP_LOGINFO, "Metadata:"); - DumpMetaData(&obj); - if (PILI_RTMP_FindFirstMatchingProperty(&obj, &av_duration, &prop)) { - r->m_fDuration = prop.p_vu.p_number; - /*RTMP_Log(RTMP_LOGDEBUG, "Set duration: %.2f", m_fDuration); */ - } - /* Search for audio or video tags */ - if (PILI_RTMP_FindPrefixProperty(&obj, &av_video, &prop)) - r->m_read.dataType |= 1; - if (PILI_RTMP_FindPrefixProperty(&obj, &av_audio, &prop)) - r->m_read.dataType |= 4; - ret = TRUE; - } - AMF_Reset(&obj); - return ret; -} - -static void - HandleChangeChunkSize(PILI_RTMP *r, const PILI_RTMPPacket *packet) { - 
if (packet->m_nBodySize >= 4) { - r->m_inChunkSize = AMF_DecodeInt32(packet->m_body); - RTMP_Log(RTMP_LOGDEBUG, "%s, received: chunk size change to %d", __FUNCTION__, - r->m_inChunkSize); - } -} - -static void - HandleAudio(PILI_RTMP *r, const PILI_RTMPPacket *packet) { -} - -static void - HandleVideo(PILI_RTMP *r, const PILI_RTMPPacket *packet) { -} - -static void - HandleCtrl(PILI_RTMP *r, const PILI_RTMPPacket *packet) { - short nType = -1; - unsigned int tmp; - if (packet->m_body && packet->m_nBodySize >= 2) - nType = AMF_DecodeInt16(packet->m_body); - RTMP_Log(RTMP_LOGDEBUG, "%s, received ctrl. type: %d, len: %d", __FUNCTION__, nType, - packet->m_nBodySize); - /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ - - if (packet->m_nBodySize >= 6) { - switch (nType) { - case 0: - tmp = AMF_DecodeInt32(packet->m_body + 2); - RTMP_Log(RTMP_LOGDEBUG, "%s, Stream Begin %d", __FUNCTION__, tmp); - break; - - case 1: - tmp = AMF_DecodeInt32(packet->m_body + 2); - RTMP_Log(RTMP_LOGDEBUG, "%s, Stream EOF %d", __FUNCTION__, tmp); - if (r->m_pausing == 1) - r->m_pausing = 2; - break; - - case 2: - tmp = AMF_DecodeInt32(packet->m_body + 2); - RTMP_Log(RTMP_LOGDEBUG, "%s, Stream Dry %d", __FUNCTION__, tmp); - break; - - case 4: - tmp = AMF_DecodeInt32(packet->m_body + 2); - RTMP_Log(RTMP_LOGDEBUG, "%s, Stream IsRecorded %d", __FUNCTION__, tmp); - break; - - case 6: /* server ping. reply with pong. */ - tmp = AMF_DecodeInt32(packet->m_body + 2); - RTMP_Log(RTMP_LOGDEBUG, "%s, Ping %d", __FUNCTION__, tmp); - PILI_RTMP_SendCtrl(r, 0x07, tmp, 0, NULL); - break; - - /* FMS 3.5 servers send the following two controls to let the client - * know when the server has sent a complete buffer. I.e., when the - * server has sent an amount of data equal to m_nBufferMS in duration. - * The server meters its output so that data arrives at the client - * in realtime and no faster. 
- * - * The rtmpdump program tries to set m_nBufferMS as large as - * possible, to force the server to send data as fast as possible. - * In practice, the server appears to cap this at about 1 hour's - * worth of data. After the server has sent a complete buffer, and - * sends this BufferEmpty message, it will wait until the play - * duration of that buffer has passed before sending a new buffer. - * The BufferReady message will be sent when the new buffer starts. - * (There is no BufferReady message for the very first buffer; - * presumably the Stream Begin message is sufficient for that - * purpose.) - * - * If the network speed is much faster than the data bitrate, then - * there may be long delays between the end of one buffer and the - * start of the next. - * - * Since usually the network allows data to be sent at - * faster than realtime, and rtmpdump wants to download the data - * as fast as possible, we use this RTMP_LF_BUFX hack: when we - * get the BufferEmpty message, we send a Pause followed by an - * Unpause. This causes the server to send the next buffer immediately - * instead of waiting for the full duration to elapse. (That's - * also the purpose of the ToggleStream function, which rtmpdump - * calls if we get a read timeout.) - * - * Media player apps don't need this hack since they are just - * going to play the data in realtime anyway. It also doesn't work - * for live streams since they obviously can only be sent in - * realtime. And it's all moot if the network speed is actually - * slower than the media bitrate. 
- */ - case 31: - tmp = AMF_DecodeInt32(packet->m_body + 2); - RTMP_Log(RTMP_LOGDEBUG, "%s, Stream BufferEmpty %d", __FUNCTION__, tmp); - if (!(r->Link.lFlags & RTMP_LF_BUFX)) - break; - if (!r->m_pausing) { - r->m_pauseStamp = r->m_channelTimestamp[r->m_mediaChannel]; - PILI_RTMP_SendPause(r, TRUE, r->m_pauseStamp, NULL); - r->m_pausing = 1; - } else if (r->m_pausing == 2) { - PILI_RTMP_SendPause(r, FALSE, r->m_pauseStamp, NULL); - r->m_pausing = 3; - } - break; - - case 32: - tmp = AMF_DecodeInt32(packet->m_body + 2); - RTMP_Log(RTMP_LOGDEBUG, "%s, Stream BufferReady %d", __FUNCTION__, tmp); - break; - - default: - tmp = AMF_DecodeInt32(packet->m_body + 2); - RTMP_Log(RTMP_LOGDEBUG, "%s, Stream xx %d", __FUNCTION__, tmp); - break; - } - } - - if (nType == 0x1A) { - RTMP_Log(RTMP_LOGDEBUG, "%s, SWFVerification ping received: ", __FUNCTION__); -#ifdef CRYPTO - /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ - - /* respond with HMAC SHA256 of decompressed SWF, key is the 30byte player key, also the last 30 bytes of the server handshake are applied */ - if (r->Link.SWFSize) { - PILI_RTMP_SendCtrl(r, 0x1B, 0, 0); - } else { - RTMP_Log(RTMP_LOGERROR, - "%s: Ignoring SWFVerification request, use --swfVfy!", - __FUNCTION__); - } -#else - RTMP_Log(RTMP_LOGERROR, - "%s: Ignoring SWFVerification request, no CRYPTO support!", - __FUNCTION__); -#endif - } -} - -static void - HandleServerBW(PILI_RTMP *r, const PILI_RTMPPacket *packet) { - r->m_nServerBW = AMF_DecodeInt32(packet->m_body); - RTMP_Log(RTMP_LOGDEBUG, "%s: server BW = %d", __FUNCTION__, r->m_nServerBW); -} - -static void - HandleClientBW(PILI_RTMP *r, const PILI_RTMPPacket *packet) { - r->m_nClientBW = AMF_DecodeInt32(packet->m_body); - if (packet->m_nBodySize > 4) - r->m_nClientBW2 = packet->m_body[4]; - else - r->m_nClientBW2 = -1; - RTMP_Log(RTMP_LOGDEBUG, "%s: client BW = %d %d", __FUNCTION__, r->m_nClientBW, - r->m_nClientBW2); -} - -static int - DecodeInt32LE(const char *data) { - unsigned char *c = 
(unsigned char *)data; - unsigned int val; - - val = (c[3] << 24) | (c[2] << 16) | (c[1] << 8) | c[0]; - return val; -} - -static int - EncodeInt32LE(char *output, int nVal) { - output[0] = nVal; - nVal >>= 8; - output[1] = nVal; - nVal >>= 8; - output[2] = nVal; - nVal >>= 8; - output[3] = nVal; - return 4; -} - -int PILI_RTMP_ReadPacket(PILI_RTMP *r, PILI_RTMPPacket *packet) { - uint8_t hbuf[RTMP_MAX_HEADER_SIZE] = {0}; - char *header = (char *)hbuf; - int nSize, hSize, nToRead, nChunk; - int didAlloc = FALSE; - - RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d", __FUNCTION__, r->m_sb.sb_socket); - - if (ReadN(r, (char *)hbuf, 1) == 0) { - RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet header", __FUNCTION__); - return FALSE; - } - - packet->m_headerType = (hbuf[0] & 0xc0) >> 6; - packet->m_nChannel = (hbuf[0] & 0x3f); - header++; - if (packet->m_nChannel == 0) { - if (ReadN(r, (char *)&hbuf[1], 1) != 1) { - RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet header 2nd byte", - __FUNCTION__); - return FALSE; - } - packet->m_nChannel = hbuf[1]; - packet->m_nChannel += 64; - header++; - } else if (packet->m_nChannel == 1) { - int tmp; - if (ReadN(r, (char *)&hbuf[1], 2) != 2) { - RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet header 3nd byte", - __FUNCTION__); - return FALSE; - } - tmp = (hbuf[2] << 8) + hbuf[1]; - packet->m_nChannel = tmp + 64; - RTMP_Log(RTMP_LOGDEBUG, "%s, m_nChannel: %0x", __FUNCTION__, packet->m_nChannel); - header += 2; - } - - nSize = packetSize[packet->m_headerType]; - - if (nSize == RTMP_LARGE_HEADER_SIZE) /* if we get a full header the timestamp is absolute */ - packet->m_hasAbsTimestamp = TRUE; - - else if (nSize < RTMP_LARGE_HEADER_SIZE) { /* using values from the last message of this channel */ - if (r->m_vecChannelsIn[packet->m_nChannel]) - memcpy(packet, r->m_vecChannelsIn[packet->m_nChannel], - sizeof(PILI_RTMPPacket)); - } - - nSize--; - - if (nSize > 0 && ReadN(r, header, nSize) != nSize) { - 
RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet header. type: %x", - __FUNCTION__, (unsigned int)hbuf[0]); - return FALSE; - } - - hSize = nSize + (header - (char *)hbuf); - - if (nSize >= 3) { - packet->m_nTimeStamp = AMF_DecodeInt24(header); - - /*RTMP_Log(RTMP_LOGDEBUG, "%s, reading PILI_RTMP packet chunk on channel %x, headersz %i, timestamp %i, abs timestamp %i", __FUNCTION__, packet.m_nChannel, nSize, packet.m_nTimeStamp, packet.m_hasAbsTimestamp); */ - - if (nSize >= 6) { - packet->m_nBodySize = AMF_DecodeInt24(header + 3); - packet->m_nBytesRead = 0; - PILI_RTMPPacket_Free(packet); - - if (nSize > 6) { - packet->m_packetType = header[6]; - - if (nSize == 11) - packet->m_nInfoField2 = DecodeInt32LE(header + 7); - } - } - if (packet->m_nTimeStamp == 0xffffff) { - if (ReadN(r, header + nSize, 4) != 4) { - RTMP_Log(RTMP_LOGERROR, "%s, failed to read extended timestamp", - __FUNCTION__); - return FALSE; - } - packet->m_nTimeStamp = AMF_DecodeInt32(header + nSize); - hSize += 4; - } - } - - RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)hbuf, hSize); - - if (packet->m_nBodySize > 0 && packet->m_body == NULL) { - if (!PILI_RTMPPacket_Alloc(packet, packet->m_nBodySize)) { - RTMP_Log(RTMP_LOGDEBUG, "%s, failed to allocate packet", __FUNCTION__); - return FALSE; - } - didAlloc = TRUE; - packet->m_headerType = (hbuf[0] & 0xc0) >> 6; - } - - nToRead = packet->m_nBodySize - packet->m_nBytesRead; - nChunk = r->m_inChunkSize; - if (nToRead < nChunk) - nChunk = nToRead; - - /* Does the caller want the raw chunk? */ - if (packet->m_chunk) { - packet->m_chunk->c_headerSize = hSize; - memcpy(packet->m_chunk->c_header, hbuf, hSize); - packet->m_chunk->c_chunk = packet->m_body + packet->m_nBytesRead; - packet->m_chunk->c_chunkSize = nChunk; - } - - if (ReadN(r, packet->m_body + packet->m_nBytesRead, nChunk) != nChunk) { - RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet body. 
len: %lu", - __FUNCTION__, packet->m_nBodySize); - return FALSE; - } - - RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)packet->m_body + packet->m_nBytesRead, nChunk); - - packet->m_nBytesRead += nChunk; - - /* keep the packet as ref for other packets on this channel */ - if (!r->m_vecChannelsIn[packet->m_nChannel]) - r->m_vecChannelsIn[packet->m_nChannel] = malloc(sizeof(PILI_RTMPPacket)); - memcpy(r->m_vecChannelsIn[packet->m_nChannel], packet, sizeof(PILI_RTMPPacket)); - - if (RTMPPacket_IsReady(packet)) { - /* make packet's timestamp absolute */ - if (!packet->m_hasAbsTimestamp) - packet->m_nTimeStamp += r->m_channelTimestamp[packet->m_nChannel]; /* timestamps seem to be always relative!! */ - - r->m_channelTimestamp[packet->m_nChannel] = packet->m_nTimeStamp; - - /* reset the data from the stored packet. we keep the header since we may use it later if a new packet for this channel */ - /* arrives and requests to re-use some info (small packet header) */ - r->m_vecChannelsIn[packet->m_nChannel]->m_body = NULL; - r->m_vecChannelsIn[packet->m_nChannel]->m_nBytesRead = 0; - r->m_vecChannelsIn[packet->m_nChannel]->m_hasAbsTimestamp = FALSE; /* can only be false if we reuse header */ - } else { - packet->m_body = NULL; /* so it won't be erased on free */ - } - - return TRUE; -} - -#ifndef CRYPTO -static int - HandShake(PILI_RTMP *r, int FP9HandShake, RTMPError *error) { - int i; - uint32_t uptime, suptime; - int bMatch; - char type; - char clientbuf[RTMP_SIG_SIZE + 1], *clientsig = clientbuf + 1; - char serversig[RTMP_SIG_SIZE]; - - clientbuf[0] = 0x03; /* not encrypted */ - - uptime = htonl(PILI_RTMP_GetTime()); - memcpy(clientsig, &uptime, 4); - - memset(&clientsig[4], 0, 4); - -#ifdef _DEBUG - for (i = 8; i < RTMP_SIG_SIZE; i++) - clientsig[i] = 0xff; -#else - for (i = 8; i < RTMP_SIG_SIZE; i++) - clientsig[i] = (char)(rand() % 256); -#endif - - if (!WriteN(r, clientbuf, RTMP_SIG_SIZE + 1, error)) - return FALSE; - - if (ReadN(r, &type, 1) != 1) /* 0x03 or 0x06 */ 
- return FALSE; - - RTMP_Log(RTMP_LOGDEBUG, "%s: Type Answer : %02X", __FUNCTION__, type); - - if (type != clientbuf[0]) - RTMP_Log(RTMP_LOGWARNING, "%s: Type mismatch: client sent %d, server answered %d", - __FUNCTION__, clientbuf[0], type); - - if (ReadN(r, serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) - return FALSE; - - /* decode server response */ - - memcpy(&suptime, serversig, 4); - suptime = ntohl(suptime); - - RTMP_Log(RTMP_LOGDEBUG, "%s: Server Uptime : %d", __FUNCTION__, suptime); - RTMP_Log(RTMP_LOGDEBUG, "%s: FMS Version : %d.%d.%d.%d", __FUNCTION__, - serversig[4], serversig[5], serversig[6], serversig[7]); - - /* 2nd part of handshake */ - if (!WriteN(r, serversig, RTMP_SIG_SIZE, error)) - return FALSE; - - if (ReadN(r, serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) - return FALSE; - - bMatch = (memcmp(serversig, clientsig, RTMP_SIG_SIZE) == 0); - if (!bMatch) { - RTMP_Log(RTMP_LOGWARNING, "%s, client signature does not match!", __FUNCTION__); - } - return TRUE; -} - -static int - SHandShake(PILI_RTMP *r, RTMPError *error) { - int i; - char serverbuf[RTMP_SIG_SIZE + 1], *serversig = serverbuf + 1; - char clientsig[RTMP_SIG_SIZE]; - uint32_t uptime; - int bMatch; - - if (ReadN(r, serverbuf, 1) != 1) /* 0x03 or 0x06 */ - return FALSE; - - RTMP_Log(RTMP_LOGDEBUG, "%s: Type Request : %02X", __FUNCTION__, serverbuf[0]); - - if (serverbuf[0] != 3) { - RTMP_Log(RTMP_LOGERROR, "%s: Type unknown: client sent %02X", - __FUNCTION__, serverbuf[0]); - return FALSE; - } - - uptime = htonl(PILI_RTMP_GetTime()); - memcpy(serversig, &uptime, 4); - - memset(&serversig[4], 0, 4); -#ifdef _DEBUG - for (i = 8; i < RTMP_SIG_SIZE; i++) - serversig[i] = 0xff; -#else - for (i = 8; i < RTMP_SIG_SIZE; i++) - serversig[i] = (char)(rand() % 256); -#endif - - if (!WriteN(r, serverbuf, RTMP_SIG_SIZE + 1, error)) - return FALSE; - - if (ReadN(r, clientsig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) - return FALSE; - - /* decode client response */ - - memcpy(&uptime, clientsig, 4); - uptime = 
ntohl(uptime); - - RTMP_Log(RTMP_LOGDEBUG, "%s: Client Uptime : %d", __FUNCTION__, uptime); - RTMP_Log(RTMP_LOGDEBUG, "%s: Player Version: %d.%d.%d.%d", __FUNCTION__, - clientsig[4], clientsig[5], clientsig[6], clientsig[7]); - - /* 2nd part of handshake */ - if (!WriteN(r, clientsig, RTMP_SIG_SIZE, error)) - return FALSE; - - if (ReadN(r, clientsig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) - return FALSE; - - bMatch = (memcmp(serversig, clientsig, RTMP_SIG_SIZE) == 0); - if (!bMatch) { - RTMP_Log(RTMP_LOGWARNING, "%s, client signature does not match!", __FUNCTION__); - } - return TRUE; -} -#endif - -int PILI_RTMP_SendChunk(PILI_RTMP *r, PILI_RTMPChunk *chunk, RTMPError *error) { - int wrote; - char hbuf[RTMP_MAX_HEADER_SIZE]; - - RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d, size=%d", __FUNCTION__, r->m_sb.sb_socket, - chunk->c_chunkSize); - RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)chunk->c_header, chunk->c_headerSize); - if (chunk->c_chunkSize) { - char *ptr = chunk->c_chunk - chunk->c_headerSize; - RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)chunk->c_chunk, chunk->c_chunkSize); - /* save header bytes we're about to overwrite */ - memcpy(hbuf, ptr, chunk->c_headerSize); - memcpy(ptr, chunk->c_header, chunk->c_headerSize); - wrote = WriteN(r, ptr, chunk->c_headerSize + chunk->c_chunkSize, error); - memcpy(ptr, hbuf, chunk->c_headerSize); - } else - wrote = WriteN(r, chunk->c_header, chunk->c_headerSize, error); - return wrote; -} - -int PILI_RTMP_SendPacket(PILI_RTMP *r, PILI_RTMPPacket *packet, int queue, RTMPError *error) { - const PILI_RTMPPacket *prevPacket = r->m_vecChannelsOut[packet->m_nChannel]; - uint32_t last = 0; - int nSize; - int hSize, cSize; - char *header, *hptr, *hend, hbuf[RTMP_MAX_HEADER_SIZE], c; - uint32_t t; - char *buffer, *tbuf = NULL, *toff = NULL; - int nChunkSize; - int tlen; - - if (prevPacket && packet->m_headerType != RTMP_PACKET_SIZE_LARGE) { - /* compress a bit by using the prev packet's attributes */ - if (prevPacket->m_nBodySize == 
packet->m_nBodySize && prevPacket->m_packetType == packet->m_packetType && packet->m_headerType == RTMP_PACKET_SIZE_MEDIUM) - packet->m_headerType = RTMP_PACKET_SIZE_SMALL; - - if (prevPacket->m_nTimeStamp == packet->m_nTimeStamp && packet->m_headerType == RTMP_PACKET_SIZE_SMALL) - packet->m_headerType = RTMP_PACKET_SIZE_MINIMUM; - last = prevPacket->m_nTimeStamp; - } - - if (packet->m_headerType > 3) /* sanity */ - { - if (error) { - char *msg = "Sanity failed."; - RTMPError_Alloc(error, strlen(msg)); - error->code = RTMPErrorSanityFailed; - strcpy(error->message, msg); - } - - RTMP_Log(RTMP_LOGERROR, "sanity failed!! trying to send header of type: 0x%02x.", - (unsigned char)packet->m_headerType); - - return FALSE; - } - - nSize = packetSize[packet->m_headerType]; - hSize = nSize; - cSize = 0; - t = packet->m_nTimeStamp - last; - - if (packet->m_body) { - header = packet->m_body - nSize; - hend = packet->m_body; - } else { - header = hbuf + 6; - hend = hbuf + sizeof(hbuf); - } - - if (packet->m_nChannel > 319) - cSize = 2; - else if (packet->m_nChannel > 63) - cSize = 1; - if (cSize) { - header -= cSize; - hSize += cSize; - } - - if (nSize > 1 && t >= 0xffffff) { - header -= 4; - hSize += 4; - } - - hptr = header; - c = packet->m_headerType << 6; - switch (cSize) { - case 0: - c |= packet->m_nChannel; - break; - case 1: - break; - case 2: - c |= 1; - break; - } - *hptr++ = c; - if (cSize) { - int tmp = packet->m_nChannel - 64; - *hptr++ = tmp & 0xff; - if (cSize == 2) - *hptr++ = tmp >> 8; - } - - if (nSize > 1) { - hptr = AMF_EncodeInt24(hptr, hend, t > 0xffffff ? 
0xffffff : t); - } - - if (nSize > 4) { - hptr = AMF_EncodeInt24(hptr, hend, packet->m_nBodySize); - *hptr++ = packet->m_packetType; - } - - if (nSize > 8) - hptr += EncodeInt32LE(hptr, packet->m_nInfoField2); - - if (nSize > 1 && t >= 0xffffff) - hptr = AMF_EncodeInt32(hptr, hend, t); - - nSize = packet->m_nBodySize; - buffer = packet->m_body; - nChunkSize = r->m_outChunkSize; - - RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d, size=%d", __FUNCTION__, r->m_sb.sb_socket, - nSize); - /* send all chunks in one HTTP request */ - if (r->Link.protocol & RTMP_FEATURE_HTTP) { - int chunks = (nSize + nChunkSize - 1) / nChunkSize; - if (chunks > 1) { - tlen = chunks * (cSize + 1) + nSize + hSize; - tbuf = malloc(tlen); - if (!tbuf) - return FALSE; - toff = tbuf; - } - } - while (nSize + hSize) { - int wrote; - - if (nSize < nChunkSize) - nChunkSize = nSize; - - RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)header, hSize); - RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)buffer, nChunkSize); - if (tbuf) { - memcpy(toff, header, nChunkSize + hSize); - toff += nChunkSize + hSize; - } else { - wrote = WriteN(r, header, nChunkSize + hSize, error); - if (!wrote) - return FALSE; - } - nSize -= nChunkSize; - buffer += nChunkSize; - hSize = 0; - - if (nSize > 0) { - header = buffer - 1; - hSize = 1; - if (cSize) { - header -= cSize; - hSize += cSize; - } - *header = (0xc0 | c); - if (cSize) { - int tmp = packet->m_nChannel - 64; - header[1] = tmp & 0xff; - if (cSize == 2) - header[2] = tmp >> 8; - } - } - } - if (tbuf) { - int wrote = WriteN(r, tbuf, toff - tbuf, error); - free(tbuf); - tbuf = NULL; - if (!wrote) - return FALSE; - } - - /* we invoked a remote method */ - if (packet->m_packetType == 0x14) { - AVal method; - char *ptr; - ptr = packet->m_body + 1; - AMF_DecodeString(ptr, &method); - RTMP_Log(RTMP_LOGDEBUG, "Invoking %s", method.av_val); - /* keep it in call queue till result arrives */ - if (queue) { - int txn; - ptr += 3 + method.av_len; - txn = (int)AMF_DecodeNumber(ptr); - 
AV_queue(&r->m_methodCalls, &r->m_numCalls, &method, txn); - } - } - - if (!r->m_vecChannelsOut[packet->m_nChannel]) - r->m_vecChannelsOut[packet->m_nChannel] = malloc(sizeof(PILI_RTMPPacket)); - memcpy(r->m_vecChannelsOut[packet->m_nChannel], packet, sizeof(PILI_RTMPPacket)); - return TRUE; -} - -int PILI_RTMP_Serve(PILI_RTMP *r, RTMPError *error) { - return SHandShake(r, error); -} - -void PILI_RTMP_Close(PILI_RTMP *r, RTMPError *error) { - if (r->m_is_closing) { - return; - } - r->m_is_closing = 1; - int i; - if (PILI_RTMP_IsConnected(r)) { - if (r->m_stream_id > 0) { - if ((r->Link.protocol & RTMP_FEATURE_WRITE)) - SendFCUnpublish(r, NULL); - i = r->m_stream_id; - r->m_stream_id = 0; - SendDeleteStream(r, i, NULL); - } - if (r->m_clientID.av_val) { - HTTP_Post(r, RTMPT_CLOSE, "", 1); - free(r->m_clientID.av_val); - r->m_clientID.av_val = NULL; - r->m_clientID.av_len = 0; - } - PILI_RTMPSockBuf_Close(&r->m_sb); - - if (error && r->m_errorCallback) { - r->m_errorCallback(error, r->m_userData); - } - } - - r->m_stream_id = -1; - r->m_sb.sb_socket = -1; - r->m_nBWCheckCounter = 0; - r->m_nBytesIn = 0; - r->m_nBytesInSent = 0; - - if (r->m_read.flags & RTMP_READ_HEADER) { - free(r->m_read.buf); - r->m_read.buf = NULL; - } - r->m_read.dataType = 0; - r->m_read.flags = 0; - r->m_read.status = 0; - r->m_read.nResumeTS = 0; - r->m_read.nIgnoredFrameCounter = 0; - r->m_read.nIgnoredFlvFrameCounter = 0; - - r->m_write.m_nBytesRead = 0; - PILI_RTMPPacket_Free(&r->m_write); - - for (i = 0; i < RTMP_CHANNELS; i++) { - if (r->m_vecChannelsIn[i]) { - PILI_RTMPPacket_Free(r->m_vecChannelsIn[i]); - free(r->m_vecChannelsIn[i]); - r->m_vecChannelsIn[i] = NULL; - } - if (r->m_vecChannelsOut[i]) { - free(r->m_vecChannelsOut[i]); - r->m_vecChannelsOut[i] = NULL; - } - } - AV_clear(r->m_methodCalls, r->m_numCalls); - r->m_methodCalls = NULL; - r->m_numCalls = 0; - r->m_numInvokes = 0; - - r->m_bPlaying = FALSE; - r->m_sb.sb_size = 0; - - r->m_msgCounter = 0; - r->m_resplen = 0; - 
r->m_unackd = 0; - - free(r->Link.playpath0.av_val); - r->Link.playpath0.av_val = NULL; - - if (r->Link.lFlags & RTMP_LF_FTCU) { - free(r->Link.tcUrl.av_val); - r->Link.tcUrl.av_val = NULL; - r->Link.tcUrl.av_len = 0; - r->Link.lFlags ^= RTMP_LF_FTCU; - } - -#ifdef CRYPTO - if (r->Link.dh) { - MDH_free(r->Link.dh); - r->Link.dh = NULL; - } - if (r->Link.rc4keyIn) { - RC4_free(r->Link.rc4keyIn); - r->Link.rc4keyIn = NULL; - } - if (r->Link.rc4keyOut) { - RC4_free(r->Link.rc4keyOut); - r->Link.rc4keyOut = NULL; - } -#endif -} - -int PILI_RTMPSockBuf_Fill(PILI_RTMPSockBuf *sb, int timeout) { - int nBytes; - - if (!sb->sb_size) - sb->sb_start = sb->sb_buf; - -#ifdef RTMP_FEATURE_NONBLOCK - SET_RCVTIMEO(tv, timeout); - fd_set rfds; -#endif - while (1) { -#ifdef RTMP_FEATURE_NONBLOCK - FD_ZERO(&rfds); - FD_SET(sb->sb_socket, &rfds); - int ret = select(sb->sb_socket + 1, &rfds, NULL, NULL, &tv); - if (ret < 0) { - int sockerr = GetSockError(); - RTMP_Log(RTMP_LOGDEBUG, "%s, recv select error. GetSockError(): %d (%s)", - __FUNCTION__, sockerr, strerror(sockerr)); - if (sockerr == EINTR && !PILI_RTMP_ctrlC) - continue; - - sb->sb_timedout = TRUE; - nBytes = 0; - break; - } else if (ret == 0) { - RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP recv error select timeout %d", __FUNCTION__, timeout); - sb->sb_timedout = TRUE; - nBytes = 0; - break; - } else if (!FD_ISSET(sb->sb_socket, &rfds)) { - sb->sb_timedout = TRUE; - nBytes = 0; - break; - } -#endif - - nBytes = sizeof(sb->sb_buf) - sb->sb_size - (sb->sb_start - sb->sb_buf); -#if defined(CRYPTO) && !defined(NO_SSL) - if (sb->sb_ssl) { - nBytes = TLS_read(sb->sb_ssl, sb->sb_start + sb->sb_size, nBytes); - } else -#endif - { - nBytes = recv(sb->sb_socket, sb->sb_start + sb->sb_size, nBytes, 0); - } - if (nBytes != -1) { - sb->sb_size += nBytes; - } else { - int sockerr = GetSockError(); - RTMP_Log(RTMP_LOGDEBUG, "%s, recv returned %d. 
GetSockError(): %d (%s)", - __FUNCTION__, nBytes, sockerr, strerror(sockerr)); - if (sockerr == EINTR && !PILI_RTMP_ctrlC) - continue; - - if (sockerr == EWOULDBLOCK || sockerr == EAGAIN) { -#ifdef RTMP_FEATURE_NONBLOCK - continue; -#else - sb->sb_timedout = TRUE; - nBytes = 0; -#endif - } - } - break; - } - - return nBytes; -} - -int PILI_RTMPSockBuf_Send(PILI_RTMPSockBuf *sb, const char *buf, int len) { - int rc; - -#ifdef _DEBUG - fwrite(buf, 1, len, netstackdump); -#endif - -#if defined(CRYPTO) && !defined(NO_SSL) - if (sb->sb_ssl) { - rc = TLS_write(sb->sb_ssl, buf, len); - } else -#endif - { - rc = send(sb->sb_socket, buf, len, 0); - } - return rc; -} - -int PILI_RTMPSockBuf_Close(PILI_RTMPSockBuf *sb) { -#if defined(CRYPTO) && !defined(NO_SSL) - if (sb->sb_ssl) { - TLS_shutdown(sb->sb_ssl); - TLS_close(sb->sb_ssl); - sb->sb_ssl = NULL; - } -#endif - return closesocket(sb->sb_socket); -} - -#define HEX2BIN(a) (((a)&0x40) ? ((a)&0xf) + 9 : ((a)&0xf)) - -static void - DecodeTEA(AVal *key, AVal *text) { - uint32_t *v, k[4] = {0}, u; - uint32_t z, y, sum = 0, e, DELTA = 0x9e3779b9; - int32_t p, q; - int i, n; - unsigned char *ptr, *out; - - /* prep key: pack 1st 16 chars into 4 LittleEndian ints */ - ptr = (unsigned char *)key->av_val; - u = 0; - n = 0; - v = k; - p = key->av_len > 16 ? 
16 : key->av_len; - for (i = 0; i < p; i++) { - u |= ptr[i] << (n * 8); - if (n == 3) { - *v++ = u; - u = 0; - n = 0; - } else { - n++; - } - } - /* any trailing chars */ - if (u) - *v = u; - - /* prep text: hex2bin, multiples of 4 */ - n = (text->av_len + 7) / 8; - out = malloc(n * 8); - ptr = (unsigned char *)text->av_val; - v = (uint32_t *)out; - for (i = 0; i < n; i++) { - u = (HEX2BIN(ptr[0]) << 4) + HEX2BIN(ptr[1]); - u |= ((HEX2BIN(ptr[2]) << 4) + HEX2BIN(ptr[3])) << 8; - u |= ((HEX2BIN(ptr[4]) << 4) + HEX2BIN(ptr[5])) << 16; - u |= ((HEX2BIN(ptr[6]) << 4) + HEX2BIN(ptr[7])) << 24; - *v++ = u; - ptr += 8; - } - v = (uint32_t *)out; - -/* http://www.movable-type.co.uk/scripts/tea-block.html */ -#define MX (((z >> 5) ^ (y << 2)) + ((y >> 3) ^ (z << 4))) ^ ((sum ^ y) + (k[(p & 3) ^ e] ^ z)); - z = v[n - 1]; - y = v[0]; - q = 6 + 52 / n; - sum = q * DELTA; - while (sum != 0) { - e = sum >> 2 & 3; - for (p = n - 1; p > 0; p--) - z = v[p - 1], y = v[p] -= MX; - z = v[n - 1]; - y = v[0] -= MX; - sum -= DELTA; - } - - text->av_len /= 2; - memcpy(text->av_val, out, text->av_len); - free(out); -} - -static int - HTTP_Post(PILI_RTMP *r, RTMPTCmd cmd, const char *buf, int len) { - char hbuf[512]; - int hlen = snprintf(hbuf, sizeof(hbuf), "POST /%s%s/%d HTTP/1.1\r\n" - "Host: %.*s:%d\r\n" - "Accept: */*\r\n" - "User-Agent: Shockwave Flash\n" - "Connection: Keep-Alive\n" - "Cache-Control: no-cache\r\n" - "Content-type: application/x-fcs\r\n" - "Content-length: %d\r\n\r\n", - RTMPT_cmds[cmd], - r->m_clientID.av_val ? 
r->m_clientID.av_val : "", - r->m_msgCounter, r->Link.hostname.av_len, r->Link.hostname.av_val, - r->Link.port, len); - PILI_RTMPSockBuf_Send(&r->m_sb, hbuf, hlen); - hlen = PILI_RTMPSockBuf_Send(&r->m_sb, buf, len); - r->m_msgCounter++; - r->m_unackd++; - return hlen; -} - -static int - HTTP_read(PILI_RTMP *r, int fill) { - char *ptr; - int hlen; - - if (fill) - PILI_RTMPSockBuf_Fill(&r->m_sb, r->Link.timeout); - if (r->m_sb.sb_size < 144) - return -1; - if (strncmp(r->m_sb.sb_start, "HTTP/1.1 200 ", 13)) - return -1; - ptr = strstr(r->m_sb.sb_start, "Content-Length:"); - if (!ptr) - return -1; - hlen = atoi(ptr + 16); - ptr = strstr(ptr, "\r\n\r\n"); - if (!ptr) - return -1; - ptr += 4; - r->m_sb.sb_size -= ptr - r->m_sb.sb_start; - r->m_sb.sb_start = ptr; - r->m_unackd--; - - if (!r->m_clientID.av_val) { - r->m_clientID.av_len = hlen; - r->m_clientID.av_val = malloc(hlen + 1); - if (!r->m_clientID.av_val) - return -1; - r->m_clientID.av_val[0] = '/'; - memcpy(r->m_clientID.av_val + 1, ptr, hlen - 1); - r->m_clientID.av_val[hlen] = 0; - r->m_sb.sb_size = 0; - } else { - r->m_polling = *ptr++; - r->m_resplen = hlen - 1; - r->m_sb.sb_start++; - r->m_sb.sb_size--; - } - return 0; -} - -#define MAX_IGNORED_FRAMES 50 - -/* Read from the stream until we get a media packet. 
- * Returns -3 if Play.Close/Stop, -2 if fatal error, -1 if no more media - * packets, 0 if ignorable error, >0 if there is a media packet - */ -static int - Read_1_Packet(PILI_RTMP *r, char *buf, unsigned int buflen) { - uint32_t prevTagSize = 0; - int rtnGetNextMediaPacket = 0, ret = RTMP_READ_EOF; - PILI_RTMPPacket packet = {0}; - int recopy = FALSE; - unsigned int size; - char *ptr, *pend; - uint32_t nTimeStamp = 0; - unsigned int len; - - rtnGetNextMediaPacket = PILI_RTMP_GetNextMediaPacket(r, &packet); - while (rtnGetNextMediaPacket) { - char *packetBody = packet.m_body; - unsigned int nPacketLen = packet.m_nBodySize; - - /* Return -3 if this was completed nicely with invoke message - * Play.Stop or Play.Complete - */ - if (rtnGetNextMediaPacket == 2) { - RTMP_Log(RTMP_LOGDEBUG, - "Got Play.Complete or Play.Stop from server. " - "Assuming stream is complete"); - ret = RTMP_READ_COMPLETE; - break; - } - - r->m_read.dataType |= (((packet.m_packetType == 0x08) << 2) | - (packet.m_packetType == 0x09)); - - if (packet.m_packetType == 0x09 && nPacketLen <= 5) { - RTMP_Log(RTMP_LOGDEBUG, "ignoring too small video packet: size: %d", - nPacketLen); - ret = RTMP_READ_IGNORE; - break; - } - if (packet.m_packetType == 0x08 && nPacketLen <= 1) { - RTMP_Log(RTMP_LOGDEBUG, "ignoring too small audio packet: size: %d", - nPacketLen); - ret = RTMP_READ_IGNORE; - break; - } - - if (r->m_read.flags & RTMP_READ_SEEKING) { - ret = RTMP_READ_IGNORE; - break; - } -#ifdef _DEBUG - RTMP_Log(RTMP_LOGDEBUG, "type: %02X, size: %d, TS: %d ms, abs TS: %d", - packet.m_packetType, nPacketLen, packet.m_nTimeStamp, - packet.m_hasAbsTimestamp); - if (packet.m_packetType == 0x09) - RTMP_Log(RTMP_LOGDEBUG, "frametype: %02X", (*packetBody & 0xf0)); -#endif - - if (r->m_read.flags & RTMP_READ_RESUME) { - /* check the header if we get one */ - if (packet.m_nTimeStamp == 0) { - if (r->m_read.nMetaHeaderSize > 0 && packet.m_packetType == 0x12) { - AMFObject metaObj; - int nRes = - AMF_Decode(&metaObj, 
packetBody, nPacketLen, FALSE); - if (nRes >= 0) { - AVal metastring; - AMFProp_GetString(AMF_GetProp(&metaObj, NULL, 0), - &metastring); - - if (AVMATCH(&metastring, &av_onMetaData)) { - /* compare */ - if ((r->m_read.nMetaHeaderSize != nPacketLen) || - (memcmp(r->m_read.metaHeader, packetBody, - r->m_read.nMetaHeaderSize) != 0)) { - ret = RTMP_READ_ERROR; - } - } - AMF_Reset(&metaObj); - if (ret == RTMP_READ_ERROR) - break; - } - } - - /* check first keyframe to make sure we got the right position - * in the stream! (the first non ignored frame) - */ - if (r->m_read.nInitialFrameSize > 0) { - /* video or audio data */ - if (packet.m_packetType == r->m_read.initialFrameType && r->m_read.nInitialFrameSize == nPacketLen) { - /* we don't compare the sizes since the packet can - * contain several FLV packets, just make sure the - * first frame is our keyframe (which we are going - * to rewrite) - */ - if (memcmp(r->m_read.initialFrame, packetBody, - r->m_read.nInitialFrameSize) == 0) { - RTMP_Log(RTMP_LOGDEBUG, "Checked keyframe successfully!"); - r->m_read.flags |= RTMP_READ_GOTKF; - /* ignore it! (what about audio data after it? it is - * handled by ignoring all 0ms frames, see below) - */ - ret = RTMP_READ_IGNORE; - break; - } - } - - /* hande FLV streams, even though the server resends the - * keyframe as an extra video packet it is also included - * in the first FLV stream chunk and we have to compare - * it and filter it out !! 
- */ - if (packet.m_packetType == 0x16) { - /* basically we have to find the keyframe with the - * correct TS being nResumeTS - */ - unsigned int pos = 0; - uint32_t ts = 0; - - while (pos + 11 < nPacketLen) { - /* size without header (11) and prevTagSize (4) */ - uint32_t dataSize = - AMF_DecodeInt24(packetBody + pos + 1); - ts = AMF_DecodeInt24(packetBody + pos + 4); - ts |= (packetBody[pos + 7] << 24); - -#ifdef _DEBUG - RTMP_Log(RTMP_LOGDEBUG, - "keyframe search: FLV Packet: type %02X, dataSize: %d, timeStamp: %d ms", - packetBody[pos], dataSize, ts); -#endif - /* ok, is it a keyframe?: - * well doesn't work for audio! - */ - if (packetBody[pos /*6928, test 0 */] == - r->m_read.initialFrameType - /* && (packetBody[11]&0xf0) == 0x10 */) { - if (ts == r->m_read.nResumeTS) { - RTMP_Log(RTMP_LOGDEBUG, - "Found keyframe with resume-keyframe timestamp!"); - if (r->m_read.nInitialFrameSize != dataSize || memcmp(r->m_read.initialFrame, packetBody + pos + 11, r->m_read.nInitialFrameSize) != 0) { - RTMP_Log(RTMP_LOGERROR, - "FLV Stream: Keyframe doesn't match!"); - ret = RTMP_READ_ERROR; - break; - } - r->m_read.flags |= RTMP_READ_GOTFLVK; - - /* skip this packet? 
- * check whether skippable: - */ - if (pos + 11 + dataSize + 4 > nPacketLen) { - RTMP_Log(RTMP_LOGWARNING, - "Non skipable packet since it doesn't end with chunk, stream corrupt!"); - ret = RTMP_READ_ERROR; - break; - } - packetBody += (pos + 11 + dataSize + 4); - nPacketLen -= (pos + 11 + dataSize + 4); - - goto stopKeyframeSearch; - - } else if (r->m_read.nResumeTS < ts) { - /* the timestamp ts will only increase with - * further packets, wait for seek - */ - goto stopKeyframeSearch; - } - } - pos += (11 + dataSize + 4); - } - if (ts < r->m_read.nResumeTS) { - RTMP_Log(RTMP_LOGERROR, - "First packet does not contain keyframe, all " - "timestamps are smaller than the keyframe " - "timestamp; probably the resume seek failed?"); - } - stopKeyframeSearch:; - if (!(r->m_read.flags & RTMP_READ_GOTFLVK)) { - RTMP_Log(RTMP_LOGERROR, - "Couldn't find the seeked keyframe in this chunk!"); - ret = RTMP_READ_IGNORE; - break; - } - } - } - } - - if (packet.m_nTimeStamp > 0 && (r->m_read.flags & (RTMP_READ_GOTKF | RTMP_READ_GOTFLVK))) { - /* another problem is that the server can actually change from - * 09/08 video/audio packets to an FLV stream or vice versa and - * our keyframe check will prevent us from going along with the - * new stream if we resumed. - * - * in this case set the 'found keyframe' variables to true. - * We assume that if we found one keyframe somewhere and were - * already beyond TS > 0 we have written data to the output - * which means we can accept all forthcoming data including the - * change between 08/09 <-> FLV packets - */ - r->m_read.flags |= (RTMP_READ_GOTKF | RTMP_READ_GOTFLVK); - } - - /* skip till we find our keyframe - * (seeking might put us somewhere before it) - */ - if (!(r->m_read.flags & RTMP_READ_GOTKF) && - packet.m_packetType != 0x16) { - RTMP_Log(RTMP_LOGWARNING, - "Stream does not start with requested frame, ignoring data... 
"); - r->m_read.nIgnoredFrameCounter++; - if (r->m_read.nIgnoredFrameCounter > MAX_IGNORED_FRAMES) - ret = RTMP_READ_ERROR; /* fatal error, couldn't continue stream */ - else - ret = RTMP_READ_IGNORE; - break; - } - /* ok, do the same for FLV streams */ - if (!(r->m_read.flags & RTMP_READ_GOTFLVK) && - packet.m_packetType == 0x16) { - RTMP_Log(RTMP_LOGWARNING, - "Stream does not start with requested FLV frame, ignoring data... "); - r->m_read.nIgnoredFlvFrameCounter++; - if (r->m_read.nIgnoredFlvFrameCounter > MAX_IGNORED_FRAMES) - ret = RTMP_READ_ERROR; - else - ret = RTMP_READ_IGNORE; - break; - } - - /* we have to ignore the 0ms frames since these are the first - * keyframes; we've got these so don't mess around with multiple - * copies sent by the server to us! (if the keyframe is found at a - * later position there is only one copy and it will be ignored by - * the preceding if clause) - */ - if (!(r->m_read.flags & RTMP_READ_NO_IGNORE) && - packet.m_packetType != 0x16) { /* exclude type 0x16 (FLV) since it can - * contain several FLV packets */ - if (packet.m_nTimeStamp == 0) { - ret = RTMP_READ_IGNORE; - break; - } else { - /* stop ignoring packets */ - r->m_read.flags |= RTMP_READ_NO_IGNORE; - } - } - } - - /* calculate packet size and allocate slop buffer if necessary */ - size = nPacketLen + - ((packet.m_packetType == 0x08 || packet.m_packetType == 0x09 || packet.m_packetType == 0x12) ? 11 : 0) + - (packet.m_packetType != 0x16 ? 
4 : 0); - - if (size + 4 > buflen) { - /* the extra 4 is for the case of an FLV stream without a last - * prevTagSize (we need extra 4 bytes to append it) */ - r->m_read.buf = malloc(size + 4); - if (r->m_read.buf == 0) { - RTMP_Log(RTMP_LOGERROR, "Couldn't allocate memory!"); - ret = RTMP_READ_ERROR; /* fatal error */ - break; - } - recopy = TRUE; - ptr = r->m_read.buf; - } else { - ptr = buf; - } - pend = ptr + size + 4; - - /* use to return timestamp of last processed packet */ - - /* audio (0x08), video (0x09) or metadata (0x12) packets : - * construct 11 byte header then add PILI_RTMP packet's data */ - if (packet.m_packetType == 0x08 || packet.m_packetType == 0x09 || packet.m_packetType == 0x12) { - nTimeStamp = r->m_read.nResumeTS + packet.m_nTimeStamp; - prevTagSize = 11 + nPacketLen; - - *ptr = packet.m_packetType; - ptr++; - ptr = AMF_EncodeInt24(ptr, pend, nPacketLen); - -#if 0 - if(packet.m_packetType == 0x09) { /* video */ - - /* H264 fix: */ - if((packetBody[0] & 0x0f) == 7) { /* CodecId = H264 */ - uint8_t packetType = *(packetBody+1); - - uint32_t ts = AMF_DecodeInt24(packetBody+2); /* composition time */ - int32_t cts = (ts+0xff800000)^0xff800000; - RTMP_Log(RTMP_LOGDEBUG, "cts : %d\n", cts); - - nTimeStamp -= cts; - /* get rid of the composition time */ - CRTMP::EncodeInt24(packetBody+2, 0); - } - RTMP_Log(RTMP_LOGDEBUG, "VIDEO: nTimeStamp: 0x%08X (%d)\n", nTimeStamp, nTimeStamp); - } -#endif - - ptr = AMF_EncodeInt24(ptr, pend, nTimeStamp); - *ptr = (char)((nTimeStamp & 0xFF000000) >> 24); - ptr++; - - /* stream id */ - ptr = AMF_EncodeInt24(ptr, pend, 0); - } - - memcpy(ptr, packetBody, nPacketLen); - len = nPacketLen; - - /* correct tagSize and obtain timestamp if we have an FLV stream */ - if (packet.m_packetType == 0x16) { - unsigned int pos = 0; - int delta; - - /* grab first timestamp and see if it needs fixing */ - nTimeStamp = AMF_DecodeInt24(packetBody + 4); - nTimeStamp |= (packetBody[7] << 24); - delta = packet.m_nTimeStamp - 
nTimeStamp; - - while (pos + 11 < nPacketLen) { - /* size without header (11) and without prevTagSize (4) */ - uint32_t dataSize = AMF_DecodeInt24(packetBody + pos + 1); - nTimeStamp = AMF_DecodeInt24(packetBody + pos + 4); - nTimeStamp |= (packetBody[pos + 7] << 24); - - if (delta) { - nTimeStamp += delta; - AMF_EncodeInt24(ptr + pos + 4, pend, nTimeStamp); - ptr[pos + 7] = nTimeStamp >> 24; - } - - /* set data type */ - r->m_read.dataType |= (((*(packetBody + pos) == 0x08) << 2) | - (*(packetBody + pos) == 0x09)); - - if (pos + 11 + dataSize + 4 > nPacketLen) { - if (pos + 11 + dataSize > nPacketLen) { - RTMP_Log(RTMP_LOGERROR, - "Wrong data size (%lu), stream corrupted, aborting!", - dataSize); - ret = RTMP_READ_ERROR; - break; - } - RTMP_Log(RTMP_LOGWARNING, "No tagSize found, appending!"); - - /* we have to append a last tagSize! */ - prevTagSize = dataSize + 11; - AMF_EncodeInt32(ptr + pos + 11 + dataSize, pend, - prevTagSize); - size += 4; - len += 4; - } else { - prevTagSize = - AMF_DecodeInt32(packetBody + pos + 11 + dataSize); - -#ifdef _DEBUG - RTMP_Log(RTMP_LOGDEBUG, - "FLV Packet: type %02X, dataSize: %lu, tagSize: %lu, timeStamp: %lu ms", - (unsigned char)packetBody[pos], dataSize, prevTagSize, - nTimeStamp); -#endif - - if (prevTagSize != (dataSize + 11)) { -#ifdef _DEBUG - RTMP_Log(RTMP_LOGWARNING, - "Tag and data size are not consitent, writing tag size according to dataSize+11: %d", - dataSize + 11); -#endif - - prevTagSize = dataSize + 11; - AMF_EncodeInt32(ptr + pos + 11 + dataSize, pend, - prevTagSize); - } - } - - pos += prevTagSize + 4; /*(11+dataSize+4); */ - } - } - ptr += len; - - if (packet.m_packetType != 0x16) { - /* FLV tag packets contain their own prevTagSize */ - AMF_EncodeInt32(ptr, pend, prevTagSize); - } - - /* In non-live this nTimeStamp can contain an absolute TS. 
- * Update ext timestamp with this absolute offset in non-live mode - * otherwise report the relative one - */ - /* RTMP_Log(RTMP_LOGDEBUG, "type: %02X, size: %d, pktTS: %dms, TS: %dms, bLiveStream: %d", packet.m_packetType, nPacketLen, packet.m_nTimeStamp, nTimeStamp, r->Link.lFlags & RTMP_LF_LIVE); */ - r->m_read.timestamp = (r->Link.lFlags & RTMP_LF_LIVE) ? packet.m_nTimeStamp : nTimeStamp; - - ret = size; - break; - } - - if (rtnGetNextMediaPacket) - PILI_RTMPPacket_Free(&packet); - - if (recopy) { - len = ret > buflen ? buflen : ret; - memcpy(buf, r->m_read.buf, len); - r->m_read.bufpos = r->m_read.buf + len; - r->m_read.buflen = ret - len; - } - return ret; -} - -static const char flvHeader[] = {'F', 'L', 'V', 0x01, - 0x00, /* 0x04 == audio, 0x01 == video */ - 0x00, 0x00, 0x00, 0x09, - 0x00, 0x00, 0x00, 0x00}; - -#define HEADERBUF (128 * 1024) -int PILI_RTMP_Read(PILI_RTMP *r, char *buf, int size) { - int nRead = 0, total = 0; - -/* can't continue */ -fail: - switch (r->m_read.status) { - case RTMP_READ_EOF: - case RTMP_READ_COMPLETE: - return 0; - case RTMP_READ_ERROR: /* corrupted stream, resume failed */ - SetSockError(EINVAL); - return -1; - default: - break; - } - - if ((r->m_read.flags & RTMP_READ_SEEKING) && r->m_read.buf) { - /* drop whatever's here */ - free(r->m_read.buf); - r->m_read.buf = NULL; - r->m_read.bufpos = NULL; - r->m_read.buflen = 0; - } - - /* If there's leftover data buffered, use it up */ - if (r->m_read.buf) { - nRead = r->m_read.buflen; - if (nRead > size) - nRead = size; - memcpy(buf, r->m_read.bufpos, nRead); - r->m_read.buflen -= nRead; - if (!r->m_read.buflen) { - free(r->m_read.buf); - r->m_read.buf = NULL; - r->m_read.bufpos = NULL; - } else { - r->m_read.bufpos += nRead; - } - buf += nRead; - total += nRead; - size -= nRead; - } - - while (size > 0 && (nRead = Read_1_Packet(r, buf, size)) >= 0) { - if (!nRead) continue; - buf += nRead; - total += nRead; - size -= nRead; - break; - } - if (nRead < 0) - r->m_read.status = 
nRead; - - if (size < 0) - total += size; - return total; -} - -static const AVal av_setDataFrame = AVC("@setDataFrame"); - -int PILI_RTMP_Write(PILI_RTMP *r, const char *buf, int size, RTMPError *error) { - PILI_RTMPPacket *pkt = &r->m_write; - char *pend, *enc; - int s2 = size, ret, num; - - pkt->m_nChannel = 0x04; /* source channel */ - pkt->m_nInfoField2 = r->m_stream_id; - - while (s2) { - if (!pkt->m_nBytesRead) { - if (size < 11) { - /* FLV pkt too small */ - return 0; - } - - if (buf[0] == 'F' && buf[1] == 'L' && buf[2] == 'V') { - buf += 13; - s2 -= 13; - } - - pkt->m_packetType = *buf++; - pkt->m_nBodySize = AMF_DecodeInt24(buf); - buf += 3; - pkt->m_nTimeStamp = AMF_DecodeInt24(buf); - buf += 3; - pkt->m_nTimeStamp |= *buf++ << 24; - buf += 3; - s2 -= 11; - - if (((pkt->m_packetType == 0x08 || pkt->m_packetType == 0x09) && - !pkt->m_nTimeStamp) || - pkt->m_packetType == 0x12) { - pkt->m_headerType = RTMP_PACKET_SIZE_LARGE; - if (pkt->m_packetType == 0x12) - pkt->m_nBodySize += 16; - } else { - pkt->m_headerType = RTMP_PACKET_SIZE_MEDIUM; - } - - if (!PILI_RTMPPacket_Alloc(pkt, pkt->m_nBodySize)) { - RTMP_Log(RTMP_LOGDEBUG, "%s, failed to allocate packet", __FUNCTION__); - return FALSE; - } - enc = pkt->m_body; - pend = enc + pkt->m_nBodySize; - if (pkt->m_packetType == 0x12) { - enc = AMF_EncodeString(enc, pend, &av_setDataFrame); - pkt->m_nBytesRead = enc - pkt->m_body; - } - } else { - enc = pkt->m_body + pkt->m_nBytesRead; - } - num = pkt->m_nBodySize - pkt->m_nBytesRead; - if (num > s2) - num = s2; - memcpy(enc, buf, num); - pkt->m_nBytesRead += num; - s2 -= num; - buf += num; - if (pkt->m_nBytesRead == pkt->m_nBodySize) { - ret = PILI_RTMP_SendPacket(r, pkt, FALSE, error); - PILI_RTMPPacket_Free(pkt); - pkt->m_nBytesRead = 0; - if (!ret) - return -1; - buf += 4; - s2 -= 4; - if (s2 < 0) - break; - } - } - return size + s2; -} - -int PILI_RTMP_Version() { - return MAJOR * 100 * 100 + MINOR * 100 + PATCH; -} - -const char * PILI_RTMP_GetReqId(){ - 
return reqid; -} \ No newline at end of file diff --git a/LFLiveKit/publish/pili-librtmp/rtmp.h b/LFLiveKit/publish/pili-librtmp/rtmp.h deleted file mode 100755 index d8438cf0..00000000 --- a/LFLiveKit/publish/pili-librtmp/rtmp.h +++ /dev/null @@ -1,365 +0,0 @@ -#ifndef __RTMP_H__ -#define __RTMP_H__ -/* - * Copyright (C) 2005-2008 Team XBMC - * http://www.xbmc.org - * Copyright (C) 2008-2009 Andrej Stepanchuk - * Copyright (C) 2009-2010 Howard Chu - * - * This file is part of librtmp. - * - * librtmp is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1, - * or (at your option) any later version. - * - * librtmp is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with librtmp see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * http://www.gnu.org/copyleft/lgpl.html - */ - -#define NO_CRYPTO - -#if !defined(NO_CRYPTO) && !defined(CRYPTO) -#define CRYPTO -#endif - -#include -#include -#include - -#include "amf.h" -#include "error.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define RTMP_LIB_VERSION 0x020300 /* 2.3 */ - -#define RTMP_FEATURE_HTTP 0x01 -#define RTMP_FEATURE_ENC 0x02 -#define RTMP_FEATURE_SSL 0x04 -#define RTMP_FEATURE_MFP 0x08 /* not yet supported */ -#define RTMP_FEATURE_WRITE 0x10 /* publish, not play */ -#define RTMP_FEATURE_HTTP2 0x20 /* server-side rtmpt */ -#define RTMP_FEATURE_NONBLOCK 0x40 /* non block socket */ - -#define RTMP_PROTOCOL_UNDEFINED -1 -#define RTMP_PROTOCOL_RTMP 0 -#define RTMP_PROTOCOL_RTMPE RTMP_FEATURE_ENC -#define RTMP_PROTOCOL_RTMPT RTMP_FEATURE_HTTP -#define RTMP_PROTOCOL_RTMPS RTMP_FEATURE_SSL -#define RTMP_PROTOCOL_RTMPTE (RTMP_FEATURE_HTTP | RTMP_FEATURE_ENC) -#define RTMP_PROTOCOL_RTMPTS (RTMP_FEATURE_HTTP | RTMP_FEATURE_SSL) -#define RTMP_PROTOCOL_RTMFP RTMP_FEATURE_MFP - -#define RTMP_DEFAULT_CHUNKSIZE 128 - -/* needs to fit largest number of bytes recv() may return */ -#define RTMP_BUFFER_CACHE_SIZE (16 * 1024) - -#define RTMP_CHANNELS 65600 - -extern const char PILI_RTMPProtocolStringsLower[][7]; -extern const AVal PILI_RTMP_DefaultFlashVer; -extern int PILI_RTMP_ctrlC; - -uint32_t PILI_RTMP_GetTime(void); - -#define RTMP_PACKET_TYPE_AUDIO 0x08 -#define RTMP_PACKET_TYPE_VIDEO 0x09 -#define RTMP_PACKET_TYPE_INFO 0x12 - -#define RTMP_MAX_HEADER_SIZE 18 - -#define RTMP_PACKET_SIZE_LARGE 0 -#define RTMP_PACKET_SIZE_MEDIUM 1 -#define RTMP_PACKET_SIZE_SMALL 2 -#define RTMP_PACKET_SIZE_MINIMUM 3 - -typedef struct PILI_RTMPChunk { - int c_headerSize; - int c_chunkSize; - char *c_chunk; - char c_header[RTMP_MAX_HEADER_SIZE]; -} PILI_RTMPChunk; - -typedef struct PILI_RTMPPacket { - uint8_t m_headerType; - uint8_t m_packetType; - uint8_t m_hasAbsTimestamp; /* timestamp absolute or relative? 
*/ - int m_nChannel; - uint32_t m_nTimeStamp; /* timestamp */ - int32_t m_nInfoField2; /* last 4 bytes in a long header */ - uint32_t m_nBodySize; - uint32_t m_nBytesRead; - PILI_RTMPChunk *m_chunk; - char *m_body; -} PILI_RTMPPacket; - -typedef struct PILI_RTMPSockBuf { - int sb_socket; - int sb_size; /* number of unprocessed bytes in buffer */ - char *sb_start; /* pointer into sb_pBuffer of next byte to process */ - char sb_buf[RTMP_BUFFER_CACHE_SIZE]; /* data read from socket */ - int sb_timedout; - void *sb_ssl; -} PILI_RTMPSockBuf; - -void PILI_RTMPPacket_Reset(PILI_RTMPPacket *p); -void PILI_RTMPPacket_Dump(PILI_RTMPPacket *p); -int PILI_RTMPPacket_Alloc(PILI_RTMPPacket *p, int nSize); -void PILI_RTMPPacket_Free(PILI_RTMPPacket *p); - -#define RTMPPacket_IsReady(a) ((a)->m_nBytesRead == (a)->m_nBodySize) - -typedef struct PILI_RTMP_LNK { - AVal hostname; - AVal domain; - AVal sockshost; - - AVal playpath0; /* parsed from URL */ - AVal playpath; /* passed in explicitly */ - AVal tcUrl; - AVal swfUrl; - AVal pageUrl; - AVal app; - AVal auth; - AVal flashVer; - AVal subscribepath; - AVal token; - AMFObject extras; - int edepth; - - int seekTime; - int stopTime; - -#define RTMP_LF_AUTH 0x0001 /* using auth param */ -#define RTMP_LF_LIVE 0x0002 /* stream is live */ -#define RTMP_LF_SWFV 0x0004 /* do SWF verification */ -#define RTMP_LF_PLST 0x0008 /* send playlist before play */ -#define RTMP_LF_BUFX 0x0010 /* toggle stream on BufferEmpty msg */ -#define RTMP_LF_FTCU 0x0020 /* free tcUrl on close */ - int lFlags; - - int swfAge; - - int protocol; - int timeout; /* connection timeout in seconds */ - int send_timeout; /* send data timeout */ - - unsigned short socksport; - unsigned short port; - -#ifdef CRYPTO -#define RTMP_SWF_HASHLEN 32 - void *dh; /* for encryption */ - void *rc4keyIn; - void *rc4keyOut; - - uint32_t SWFSize; - uint8_t SWFHash[RTMP_SWF_HASHLEN]; - char SWFVerificationResponse[RTMP_SWF_HASHLEN + 10]; -#endif -} PILI_RTMP_LNK; - -/* state for 
read() wrapper */ -typedef struct PILI_RTMP_READ { - char *buf; - char *bufpos; - unsigned int buflen; - uint32_t timestamp; - uint8_t dataType; - uint8_t flags; -#define RTMP_READ_HEADER 0x01 -#define RTMP_READ_RESUME 0x02 -#define RTMP_READ_NO_IGNORE 0x04 -#define RTMP_READ_GOTKF 0x08 -#define RTMP_READ_GOTFLVK 0x10 -#define RTMP_READ_SEEKING 0x20 - int8_t status; -#define RTMP_READ_COMPLETE -3 -#define RTMP_READ_ERROR -2 -#define RTMP_READ_EOF -1 -#define RTMP_READ_IGNORE 0 - - /* if bResume == TRUE */ - uint8_t initialFrameType; - uint32_t nResumeTS; - char *metaHeader; - char *initialFrame; - uint32_t nMetaHeaderSize; - uint32_t nInitialFrameSize; - uint32_t nIgnoredFrameCounter; - uint32_t nIgnoredFlvFrameCounter; -} PILI_RTMP_READ; - -typedef struct PILI_RTMP_METHOD { - AVal name; - int num; -} PILI_RTMP_METHOD; - -typedef void (*PILI_RTMPErrorCallback)(RTMPError *error, void *userData); - -typedef struct PILI_CONNECTION_TIME { - uint32_t connect_time; - uint32_t handshake_time; -} PILI_CONNECTION_TIME; - -typedef void (*PILI_RTMP_ConnectionTimeCallback)( - PILI_CONNECTION_TIME *conn_time, void *userData); - -typedef struct PILI_RTMP { - int m_inChunkSize; - int m_outChunkSize; - int m_nBWCheckCounter; - int m_nBytesIn; - int m_nBytesInSent; - int m_nBufferMS; - int m_stream_id; /* returned in _result from createStream */ - int m_mediaChannel; - uint32_t m_mediaStamp; - uint32_t m_pauseStamp; - int m_pausing; - int m_nServerBW; - int m_nClientBW; - uint8_t m_nClientBW2; - uint8_t m_bPlaying; - uint8_t m_bSendEncoding; - uint8_t m_bSendCounter; - - int m_numInvokes; - int m_numCalls; - PILI_RTMP_METHOD *m_methodCalls; /* remote method calls queue */ - - PILI_RTMPPacket *m_vecChannelsIn[RTMP_CHANNELS]; - PILI_RTMPPacket *m_vecChannelsOut[RTMP_CHANNELS]; - int m_channelTimestamp[RTMP_CHANNELS]; /* abs timestamp of last packet */ - - double m_fAudioCodecs; /* audioCodecs for the connect packet */ - double m_fVideoCodecs; /* videoCodecs for the connect packet */ 
- double m_fEncoding; /* AMF0 or AMF3 */ - - double m_fDuration; /* duration of stream in seconds */ - - int m_msgCounter; /* RTMPT stuff */ - int m_polling; - int m_resplen; - int m_unackd; - AVal m_clientID; - - PILI_RTMP_READ m_read; - PILI_RTMPPacket m_write; - PILI_RTMPSockBuf m_sb; - PILI_RTMP_LNK Link; - - PILI_RTMPErrorCallback m_errorCallback; - PILI_RTMP_ConnectionTimeCallback m_connCallback; - RTMPError *m_error; - void *m_userData; - int m_is_closing; - int m_tcp_nodelay; - uint32_t ip; -} PILI_RTMP; - -int PILI_RTMP_ParseURL(const char *url, int *protocol, AVal *host, - unsigned int *port, AVal *playpath, AVal *app); - -int PILI_RTMP_ParseURL2(const char *url, int *protocol, AVal *host, - unsigned int *port, AVal *playpath, AVal *app, AVal *domain); - -void PILI_RTMP_ParsePlaypath(AVal *in, AVal *out); -void PILI_RTMP_SetBufferMS(PILI_RTMP *r, int size); -void PILI_RTMP_UpdateBufferMS(PILI_RTMP *r, RTMPError *error); - -int PILI_RTMP_SetOpt(PILI_RTMP *r, const AVal *opt, AVal *arg, - RTMPError *error); -int PILI_RTMP_SetupURL(PILI_RTMP *r, const char *url, RTMPError *error); -void PILI_RTMP_SetupStream(PILI_RTMP *r, int protocol, AVal *hostname, - unsigned int port, AVal *sockshost, AVal *playpath, - AVal *tcUrl, AVal *swfUrl, AVal *pageUrl, AVal *app, - AVal *auth, AVal *swfSHA256Hash, uint32_t swfSize, - AVal *flashVer, AVal *subscribepath, int dStart, - int dStop, int bLiveStream, long int timeout); - -int PILI_RTMP_Connect(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error); -struct sockaddr; -int PILI_RTMP_Connect0(PILI_RTMP *r, struct addrinfo *ai, unsigned short port, - RTMPError *error); -int PILI_RTMP_Connect1(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error); -int PILI_RTMP_Serve(PILI_RTMP *r, RTMPError *error); - -int PILI_RTMP_ReadPacket(PILI_RTMP *r, PILI_RTMPPacket *packet); -int PILI_RTMP_SendPacket(PILI_RTMP *r, PILI_RTMPPacket *packet, int queue, - RTMPError *error); -int PILI_RTMP_SendChunk(PILI_RTMP *r, PILI_RTMPChunk *chunk, 
RTMPError *error); -int PILI_RTMP_IsConnected(PILI_RTMP *r); -int PILI_RTMP_Socket(PILI_RTMP *r); -int PILI_RTMP_IsTimedout(PILI_RTMP *r); -double PILI_RTMP_GetDuration(PILI_RTMP *r); -int PILI_RTMP_ToggleStream(PILI_RTMP *r, RTMPError *error); - -int PILI_RTMP_ConnectStream(PILI_RTMP *r, int seekTime, RTMPError *error); -int PILI_RTMP_ReconnectStream(PILI_RTMP *r, int seekTime, RTMPError *error); -void PILI_RTMP_DeleteStream(PILI_RTMP *r, RTMPError *error); -int PILI_RTMP_GetNextMediaPacket(PILI_RTMP *r, PILI_RTMPPacket *packet); -int PILI_RTMP_ClientPacket(PILI_RTMP *r, PILI_RTMPPacket *packet); - -void PILI_RTMP_Init(PILI_RTMP *r); -void PILI_RTMP_Close(PILI_RTMP *r, RTMPError *error); -PILI_RTMP *PILI_RTMP_Alloc(void); -void PILI_RTMP_Free(PILI_RTMP *r); -void PILI_RTMP_EnableWrite(PILI_RTMP *r); - -int PILI_RTMP_LibVersion(void); -void PILI_RTMP_UserInterrupt(void); /* user typed Ctrl-C */ - -int PILI_RTMP_SendCtrl(PILI_RTMP *r, short nType, unsigned int nObject, - unsigned int nTime, RTMPError *error); - -/* caller probably doesn't know current timestamp, should - * just use RTMP_Pause instead - */ -int PILI_RTMP_SendPause(PILI_RTMP *r, int DoPause, int dTime, RTMPError *error); -int PILI_RTMP_Pause(PILI_RTMP *r, int DoPause, RTMPError *error); - -int PILI_RTMP_FindFirstMatchingProperty(AMFObject *obj, const AVal *name, - AMFObjectProperty *p); - -int PILI_RTMPSockBuf_Fill(PILI_RTMPSockBuf *sb, int timeout); -int PILI_RTMPSockBuf_Send(PILI_RTMPSockBuf *sb, const char *buf, int len); -int PILI_RTMPSockBuf_Close(PILI_RTMPSockBuf *sb); - -int PILI_RTMP_SendCreateStream(PILI_RTMP *r, RTMPError *error); -int PILI_RTMP_SendSeek(PILI_RTMP *r, int dTime, RTMPError *error); -int PILI_RTMP_SendServerBW(PILI_RTMP *r, RTMPError *error); -int PILI_RTMP_SendClientBW(PILI_RTMP *r, RTMPError *error); -void PILI_RTMP_DropRequest(PILI_RTMP *r, int i, int freeit); -int PILI_RTMP_Read(PILI_RTMP *r, char *buf, int size); -int PILI_RTMP_Write(PILI_RTMP *r, const char *buf, int 
size, RTMPError *error); - -#define MAJOR 1 -#define MINOR 0 -#define PATCH 4 - -int PILI_RTMP_Version(); - -const char * PILI_RTMP_GetReqId(); - -/* hashswf.c */ -int PILI_RTMP_HashSWF(const char *url, unsigned int *size, unsigned char *hash, - int age); - -#ifdef __cplusplus -}; -#endif - -#endif diff --git a/LFLiveKit/publish/pili-librtmp/rtmp_sys.h b/LFLiveKit/publish/pili-librtmp/rtmp_sys.h deleted file mode 100755 index 880457c3..00000000 --- a/LFLiveKit/publish/pili-librtmp/rtmp_sys.h +++ /dev/null @@ -1,123 +0,0 @@ -#ifndef __RTMP_SYS_H__ -#define __RTMP_SYS_H__ -/* - * Copyright (C) 2010 Howard Chu - * - * This file is part of librtmp. - * - * librtmp is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1, - * or (at your option) any later version. - * - * librtmp is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with librtmp see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * http://www.gnu.org/copyleft/lgpl.html - */ - -#ifdef _WIN32 - -#ifdef _XBOX -#include -#include -#define snprintf _snprintf -#define strcasecmp stricmp -#define strncasecmp strnicmp -#define vsnprintf _vsnprintf - -#else /* !_XBOX */ -#include -#include -#endif - -#define GetSockError() WSAGetLastError() -#define SetSockError(e) WSASetLastError(e) -#define setsockopt(a, b, c, d, e) (setsockopt)(a, b, c, (const char *)d, (int)e) -#define EWOULDBLOCK \ - WSAETIMEDOUT /* we don't use nonblocking, but we do use timeouts */ -#define sleep(n) Sleep(n * 1000) -#define msleep(n) Sleep(n) -#define SET_RCVTIMEO(tv, s) int tv = s * 1000 -#else /* !_WIN32 */ -#include -#include -#include -#include -#include -#include -#include -#include -#define GetSockError() errno -#define SetSockError(e) errno = e -#undef closesocket -#define closesocket(s) close(s) -#define msleep(n) usleep(n * 1000) -#define SET_RCVTIMEO(tv, s) struct timeval tv = {s, 0} -#endif - -#include "rtmp.h" - -#ifdef USE_POLARSSL -#include -#include -#include -typedef struct tls_ctx { - havege_state hs; - ssl_session ssn; -} tls_ctx; -#define TLS_CTX tls_ctx * -#define TLS_client(ctx, s) \ - s = malloc(sizeof(ssl_context)); \ - ssl_init(s); \ - ssl_set_endpoint(s, SSL_IS_CLIENT); \ - ssl_set_authmode(s, SSL_VERIFY_NONE); \ - ssl_set_rng(s, havege_rand, &ctx->hs); \ - ssl_set_ciphers(s, ssl_default_ciphers); \ - ssl_set_session(s, 1, 600, &ctx->ssn) -#define TLS_setfd(s, fd) ssl_set_bio(s, net_recv, &fd, net_send, &fd) -#define TLS_connect(s) ssl_handshake(s) -#define TLS_read(s, b, l) ssl_read(s, (unsigned char *)b, l) -#define TLS_write(s, b, l) ssl_write(s, (unsigned char *)b, l) -#define TLS_shutdown(s) ssl_close_notify(s) -#define TLS_close(s) \ - ssl_free(s); \ - free(s) - -#elif defined(USE_GNUTLS) -#include -typedef struct tls_ctx { - gnutls_certificate_credentials_t cred; - gnutls_priority_t prios; -} tls_ctx; -#define TLS_CTX tls_ctx * -#define TLS_client(ctx, s) \ - gnutls_init((gnutls_session_t 
*)(&s), GNUTLS_CLIENT); \ - gnutls_priority_set(s, ctx->prios); \ - gnutls_credentials_set(s, GNUTLS_CRD_CERTIFICATE, ctx->cred) -#define TLS_setfd(s, fd) \ - gnutls_transport_set_ptr(s, (gnutls_transport_ptr_t)(long)fd) -#define TLS_connect(s) gnutls_handshake(s) -#define TLS_read(s, b, l) gnutls_record_recv(s, b, l) -#define TLS_write(s, b, l) gnutls_record_send(s, b, l) -#define TLS_shutdown(s) gnutls_bye(s, GNUTLS_SHUT_RDWR) -#define TLS_close(s) gnutls_deinit(s) - -#else /* USE_OPENSSL */ -#define TLS_CTX SSL_CTX * -#define TLS_client(ctx, s) s = SSL_new(ctx) -#define TLS_setfd(s, fd) SSL_set_fd(s, fd) -#define TLS_connect(s) SSL_connect(s) -#define TLS_read(s, b, l) SSL_read(s, b, l) -#define TLS_write(s, b, l) SSL_write(s, b, l) -#define TLS_shutdown(s) SSL_shutdown(s) -#define TLS_close(s) SSL_free(s) - -#endif -#endif diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate index f93834be58afeb078c2307723e64a3ae081c4ae3..7e60ef0942582c91938e949be495bdc8aef1617b 100644 GIT binary patch delta 27 icmaDA`YLn-3m;3xo~^q#bMUeA0$B;+n}wyX^8)~&`3igh delta 27 icmaDA`YLn-3m=Q~p1&J6bMUeA0$E#UZ5Eck&JO^iItu6j diff --git a/Podfile b/Podfile index 8be53e88..3d1f0976 100755 --- a/Podfile +++ b/Podfile @@ -3,6 +3,7 @@ platform :ios,'7.0' target 'LFLiveKit' do pod 'LMGPUImage', '~> 0.1.9' + pod 'pili-librtmp', '~> 1.0.3.1' end diff --git a/README.md b/README.md index b541aa91..bfe29565 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ +LFLiveKit +============== [![Build Status](https://travis-ci.org/LaiFengiOS/LFLiveKit.svg)](https://travis-ci.org/LaiFengiOS/LFLiveKit)  [![License MIT](https://img.shields.io/badge/license-MIT-green.svg?style=flat)](https://raw.githubusercontent.com/chenliming777/LFLiveKit/master/LICENSE)  @@ -5,7 +7,6 @@ 
[![Support](https://img.shields.io/badge/support-ios8%2B-orange.svg)](https://www.apple.com/nl/ios/)  ![platform](https://img.shields.io/badge/platform-ios-ff69b4.svg)  -## LFLiveKit **LFLiveKit is a opensource RTMP streaming SDK for iOS.** @@ -40,6 +41,19 @@ $ pod install +### Manually + +1. Download all the files in the `LFLiveKit` subdirectory. +2. Add the source files to your Xcode project. +3. Link with required frameworks: +* UIKit +* Foundation +* AVFoundation +* VideoToolbox +* AudioToolbox +* libz +5. Add `LMGPUImage and pili-librtmp`(static library) to your Xcode project. + ## Architecture: capture: LFAudioCapture and LFVideoCapture From 4cf864b79c4bcf5c2ef6d440c3d8d814fdabb0db Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 11:01:55 +0800 Subject: [PATCH 09/39] update read --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index bfe29565..c1a34844 100644 --- a/README.md +++ b/README.md @@ -46,12 +46,12 @@ LFLiveKit 1. Download all the files in the `LFLiveKit` subdirectory. 2. Add the source files to your Xcode project. 3. Link with required frameworks: -* UIKit -* Foundation -* AVFoundation -* VideoToolbox -* AudioToolbox -* libz + * UIKit + * Foundation + * AVFoundation + * VideoToolbox + * AudioToolbox + * libz 5. Add `LMGPUImage and pili-librtmp`(static library) to your Xcode project. 
## Architecture: From 48098066bec70b596598de4d22db6a2745097d7d Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 11:22:16 +0800 Subject: [PATCH 10/39] update version --- LFLiveKit.podspec | 2 +- LFLiveKit.xcodeproj/project.pbxproj | 24 ++++++++++++++++-- .../UserInterfaceState.xcuserstate | Bin 13597 -> 15247 bytes LFLiveKit/Info.plist | 2 +- .../UserInterfaceState.xcuserstate | Bin 95062 -> 118380 bytes 5 files changed, 24 insertions(+), 4 deletions(-) diff --git a/LFLiveKit.podspec b/LFLiveKit.podspec index 169486c8..a199f57d 100644 --- a/LFLiveKit.podspec +++ b/LFLiveKit.podspec @@ -2,7 +2,7 @@ Pod::Spec.new do |s| s.name = "LFLiveKit" - s.version = "1.9.2" + s.version = "1.9.3" s.summary = "LaiFeng ios Live. LFLiveKit." s.homepage = "https://github.com/chenliming777" s.license = { :type => "MIT", :file => "LICENSE" } diff --git a/LFLiveKit.xcodeproj/project.pbxproj b/LFLiveKit.xcodeproj/project.pbxproj index 60276cb1..56555621 100644 --- a/LFLiveKit.xcodeproj/project.pbxproj +++ b/LFLiveKit.xcodeproj/project.pbxproj @@ -650,7 +650,7 @@ DYLIB_COMPATIBILITY_VERSION = 1; DYLIB_CURRENT_VERSION = 1; DYLIB_INSTALL_NAME_BASE = "@rpath"; - ENABLE_BITCODE = NO; + ENABLE_BITCODE = YES; INFOPLIST_FILE = LFLiveKit/Info.plist; INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; IPHONEOS_DEPLOYMENT_TARGET = 7.0; @@ -659,6 +659,16 @@ "$(inherited)", "$(PROJECT_DIR)/LFLiveKit/publish/libpili-librtmp", ); + OTHER_CFLAGS = ( + "$(inherited)", + "-isystem", + "\"${PODS_ROOT}/Headers/Public\"", + "-isystem", + "\"${PODS_ROOT}/Headers/Public/LMGPUImage\"", + "-isystem", + "\"${PODS_ROOT}/Headers/Public/pili-librtmp\"", + "-fembed-bitcode", + ); PRODUCT_BUNDLE_IDENTIFIER = com.youku.LFLiveKit.LFLiveKit; PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; @@ -673,7 +683,7 @@ DYLIB_COMPATIBILITY_VERSION = 1; DYLIB_CURRENT_VERSION = 1; DYLIB_INSTALL_NAME_BASE = "@rpath"; - ENABLE_BITCODE = NO; + ENABLE_BITCODE = YES; INFOPLIST_FILE = LFLiveKit/Info.plist; INSTALL_PATH = 
"$(LOCAL_LIBRARY_DIR)/Frameworks"; IPHONEOS_DEPLOYMENT_TARGET = 7.0; @@ -682,6 +692,16 @@ "$(inherited)", "$(PROJECT_DIR)/LFLiveKit/publish/libpili-librtmp", ); + OTHER_CFLAGS = ( + "$(inherited)", + "-isystem", + "\"${PODS_ROOT}/Headers/Public\"", + "-isystem", + "\"${PODS_ROOT}/Headers/Public/LMGPUImage\"", + "-isystem", + "\"${PODS_ROOT}/Headers/Public/pili-librtmp\"", + "-fembed-bitcode", + ); PRODUCT_BUNDLE_IDENTIFIER = com.youku.LFLiveKit.LFLiveKit; PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; diff --git a/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index dd95fbe7be4f2bddedc05c5b185d17780923ecad..14d471beccc50fbeaa3995d90dc1974515a05060 100644 GIT binary patch delta 8584 zcmd5=cUaTc*S{-)Kp=aPkR3vTBq&Re!u7a_s#ct@=fl&=iGD7J?C@Ixyy&l88ugl z_mczjH1V)z(z0;x}gU)fh}Px z*czt7_Anj31~Xwd*cVK)q3aREUbv zU^D^w5k(xDh~7k#&}1|PO+_=%TWB6yfEJ;pXcbz6Hlrb zSLkbW5q*O$q08tNx{dCjyXYQzfH5YRhXsBdjYU|6)!2@m*oEELgOhPn+#I*SZE#!M z1$V_6I1_in-Ej}x6X)T4JOCHqfw&MCVJ{wzD{&R};SqQw9)(BaNq91zf~VqXcsibe zXW~V8Feyd1B_(VveNHj4M3$YR#v6DFBAo0XWTqJ=sB8kLJnv)i!C22*{Nhaw= zx|1HHC&?n&B#-2i0i=KoC8cB-8AV2uH^_Lxk$Gf33FycIvXCqx%gA!Fg4B>rWGmT5 zc9IXshh$&i9%_Tn(%e8I)9X!|w|ysuFg!V>ae^@^xmjQzSrN6AuA(1PE&ZG((7Zqi z@1inaEbH2_YspA&ml9v%=)&?MZ$)MKkU&>{Q_mn!46Go7Jwrh$7zS+AMqgpX3G8`; z&ZF}w+oW%4!5ifxeA$&=Z{P#|+tz%sIBRhEsC4hZ5yi#c%Ct&v!FezSj0NLBBp44S z06(CCfVhjX2?f+nn_U1C!JBMq5||98u<7w&I+y`wf?0vb;>_44w5b@ zY%Z7w=7RuO02YF`1I;6P3fwfFHl_}m7+4UIlC}git_Dk~lSZEd%fSjpv69JO-M3Sb zx6D^EsKi^D)2Xb;JDR$f5{+`7seTqDgQlPktPZq@G~gPrCXf=3IgBhA_XD7Tfw%# zcNQh%`(B_x7>jp+oq>tMMsQbPtFTe(%O=>(j4+*H%LFisCNmo~rQj@xV7u=Ps^(`l ziw-e82oB|o<2t2x2z-u}ny$}|A(Ne94LA%^p3epfrLqWc6dcPJTb>dqo)gr7kAsUF zW-QNV&H+LF*2lp~V6O%zXi7CWMVm9%@r8NXo?(Kq(a&j%YH*IWqLwnW0c<7|P zX&<(G3y~?H)HU+gy8ujrjbG2qN-rt&m6VqiRK8IIlR!!XlVteHOH5K=bJ~x-PIJ3k zQl~gT@Dt+F2DZ)1%*q(yD;QYnO{=Udc%v4!0nI>4XgUqHV<7QtLH%yR`t53AT5!Wa 
znNHgWc3>w7JJJFgeI9mVp~3@~0l>`9BLw+3dz96E>=Gb<_djR|}m_78N?n+VK<`GE?3TR$v-g+X%;q(iD< z5gkhN^2O%=dIB9wN`2nSF5Wl#ck>PQRvOzIJC+s{S2b*20*3(ms#P@a1(boqU|A4l z;BZ<=%jywkczqngDyC>B=%eM&Fb0kaVhpW#8B+c;c!Xf$Iq(FsH7z=wgA4-UBrpL^ z0ULW~W+e_RsPgt>?@DiJfscVR!-0|16FQISa7IpM7Q>zzI6Zhq3>)CApz&wZ(SZ?B zrpEK&+rVB2=feP802k6RbSxc5$JfC{a4}p0m(mGzGJT8Ap?Q6}dI$M>m3T)5E=Q?5 z_6@~-2J`IPo+ZVD>t`BFR1MbzdDg&MSO-^AKc$qT6YGEtWWe>n$}&L$oy2lOUceqb z${rq)>OIhS)w95cJK_7m6Wc|nRKwkL>eB%DC>Q{H;XXQz&U_vK2jP*IeD^3kMyJym zECBk6wEmOuvzKO1!!vXiolSF}%WX*=TT$=g4GMe#zYOkvfzGW48BAE_FOv4v30@94M8m*$@cU=Md(BS+G%uv#k3s2gz?*adUG!Y~JMjKX^8EyVrVHuY zOg;v_AxyBOmJE1FzTe;?UaH<3>TKuTubU?^^0tg3@QKDN;O-#GC8=kY}HFkb;t

z6Lpw3nidNC=t2oYPyE5rR(T=x?v4ULXDZ{*-%r|jBcdcnD@O&-=jO& zF8dTJQ695buac?~hGsn(>;#m`_JJ=HX|FeMZ;R4^v>K(-P1UF!eYe5Hy$VW4c&pN? zf~8Eu(vGMTgC+DTdW~+TTWU~e)R}H&Pu}wY&hB1bRMkBoP#Lm&qU>jkbLe)O+rXv| z>dVdty-GJ%*E48H<1XckK{Ptya-Y_)7;Hb`L@#QxNW0h;r4 zIfEiJA6iz?{-N=OFN`m)|J`7bchK@@9=n2m(%`Yx&{B(PP(m%LqsQqfdiuHBu0TT}-39<=YJ+5L?6OpumUtIQ5LzLL@s zUkN*WR;}iXoq+>7XJ&(>N71LO9zn;@$LJGu9GyTX(J6Y4o~K{X3-n9+75#cGu%grG z4Eh|MMd#3YhJqJ4_zDMGaPTz_mT+(gbIcO;0oS+a8Us;u1$~FEqVMT9^b);HzpX<* zpzG*Ibc0@@H#r!?K^t42t=U$3AF)!XnpHR>s?jg>JNnFR9-_Zle8B%!y`Xz&V2QddDIVfZLKhKjmzCI`34m{R7CC{=aPOKlj8`vL7l`pOi zb{iw<{d{ry)AC#z*R^AokDd3hp4l1Qv&svHdVPt*UL;AWC;2%rC$_WVg?(Gs5C57k zPJPNLB2W~o@r0^ZoQgZJBgSdC9d3`)=|lP(eMEn+!yWOfz=k`~Kj@z<0{>zM{KPOe zBlGujwh5Q|jPml7w%2}q5smy^hq7g#eEsO z*Et9{2EaXN}3N=a`)QNl=7Nkn0#c7LUW@ zIVj{{6bGX@D5}GLOc~dS929d<667ka=epc83(pG@&BkxxIe0Dy zD*Pn_D_IBNPWO)VmX=p|E8X?6$NpEc`mrdzz*pdY*6lVb96Dr_&*v>K7*RHC=$OL7 zF~ciH7gYL2SNVn(xa(UOH*0Q+Milz07zftMH!7(ZTwdlauX2}`3=C$xM$ecf76hY4 z_OyP*7ejKz=Zkwh4G?o+v_%rr;L?}zmEd`N%RyH){*HseiwK#?tP6I6_&Nt0G3>!N zLk$GTz_;-od^aGmXrmH1=;mM|2R;7}R1P*U(ZBui$BGT1bN(6s5;$lv@M`g|_(5R5 zrLF8Y{CiN(M;v5&{lUQ|Aw8Y{1oixvgUK)HNgzS^E@e74<6zTz9bYt`w!wTPjIhSF znlSH7sU{H|>*;a>604y%5fL?zt|MY1AyOhEa-twgqT*mn4z}W8YYw*IU|SBRaxjg9 z?bZi|f|7mS8864!B%fY_&d@ZTs`U)2GY+-!ro1~0X{wLBZ;v*wC$l$g=2lHs2 zrR~ol(n4UgAz(a9#(+pNmW&IwZri_&8;bn-^=?5X5P$z?wNo$#YuOM5DZN^ja3He@2Mak^w1#wsOUNuToAq!@n0@G59Q1Oqn6+|m z)px%PWYSoZJ+j32M$f=Kr!;WU=}&x{3ATtV4hj~$sjBb0y%(HWjo7VmJ9b~3&GKnK+@IYN7vVuHFV4Yh@OB39 zNANKQ(I*%fe}>QCEBGqA?!C?~d~Y#Ky2q}3AF~VILUt{?gPb9Ecwsy-PsUU5R6HZk z#Y^V3;-&FA`+2>2LwKdUGF}C*lIP=%?X;mzX(cnf)pcuRQi@Rsvd@~U~Y zye+&#yfb{jm+{;2bNEI4@%#XP0l%8RgTITvoBtvIBmO@A0sbNW5&kj$*Zgnzm-$!t zSNYfY*ZDX2xA=GX_xOK=Nx}?a&BD^cdWVe(dn?Re9rj+>$6;5){t0^=4#H753D<|) z!yAP+4}U#;aQKk$((tnIitx&CU-wm0-PKm*9}#l;AVL8Nu0zun2L4J;D`{7STCkM8ueg z)e-yr5eFg;MI4DZ7I8Y_Y{dD93lUc$1(DH_DUoTB#gV0v^CMSA)n9Z|cYc1PWemPNOV9v)pCeIojY=wG5AMn8)F zGy3o7e?=0JR%8&FL>7@v)L8V2sJW=6C{@%>lrHKh$`%!iW{MVwmWwuswun9w9TXiF 
z9rcS&h)#(<6P*!#F1jeXCi+$Mrx=OVVuLtY+*+I}ZYNF`=ZN#g1>!=nS6nQv5RVs6 z6i*UQ5ziFQ7S9pS6E7C86z>r46MrneA-*q(mRKc8l4MCUNpnd{No&bKNrhy*WP-#m z;UsTM7E6{&mPuAfR!P=NHc2*1wo3eaCHo}@C5I)SN-jvQNv=z7NN!0UNE=C8NZUy> zrQM}HrP2Rq}I#N1XI!C%px>~wcx?Z|b`mS`Bbhq?F=||Fi(gV`t(yP+D z(nm5Z6Uw4xVwqGXm+55&nMr1m*<^9D4zhu=$+8u)m9llR^|Fn!-LgG?*aq&!NllpEv;@+5f^c~f}{c`JDvd8)j#JWD=6 zUMTm<2g`@ZOXXBPTmF`Oj(nbcg}ho`D_nV zH}cE!EAm_NU*%5}exl$j!WB^pkwT)7DQt>FMN35+MXI8`qJ!d9MR!GxqPHSf(N8g0 zF-%de7_Jzp7_Ati7^j%5n6Fr*SfY4Gu}V>+s8g&_Y*BoqIIK9T_*ijVaYk`gab9sj z@s;9lrAX;kwo-Odc2Q<1yD57pvy?f?fyyG~Amw1?5WliiS*EN|PF23G+^PIpc}JyH z#i^XC1eIHrq-vt-pz5jWt;$vPQw>lJR28WPsVY<>R3lZhRZCUNRV!6Bsyfvg)ppe` z)o#@u)n3(p)hX3!)#s}7stc;GRM%BMs&1%msfB8dIzgSN_NW`Hlhw`CDe4yLR_ZqD z9%_GUNu!OP_I+JuRg85rhcN4Yh0RSO;gP)np91OCR@{2 zQ>ZD@4APWqDm0avv6^w337V;zX_^_DMVh6WWtx?mYE7+XlV-bSr{;al2bw*aPc)xu zPHR5bT+n=_xv06M`APGKmZy!-3boOGtxT)XsY}K?oHif-89_{-7KAdiEfQ< zoo<6}lWwzatL{DBe%(RcVck*P$GYRXle$lJH+28#dHQgDgkGrE>kWF7-lDhZ6ZI|i zsrq*MbbTj%7k!4lo4!atTt7}fK~MD)^^^2d^z-xq{X+d>{X6;<`c?XO^;`7Y^xO5{ z=x^$8>+kCC$Cky8jU6BB_s4RvpTvF<`(^CcvELZ1h9pB1LsP>mhB1bzhPj6Mh6RSV z4T}xchFZgF!&<|7!$!j~!#%^_MvF1Q*won8*wvV0>}||7_A~Z34lSyv#G|e^bHXSsbG@UnH zHQhGdGyQCOV0vVFVn$}3Im{ek7MkPCndaBclgzWti_F#L&F1&bADZ`?510>|Pnl1f z&zjGhzchbserSoY#9Mk;`dW%Dl@`BcqGhsWnq{VCwq>bhnPr8g+H%2i&GMt=mgTPH zC(AF^D67({@mqD)SZfn&M{9R$mNm!P%bII_-CANDZ=GwMZ(V3zVclTeV%=jsV!dqr z!TPUFW;5DsHiyk+^Vpi$n%Pop9c-`KI@!9~uGsF{?%RI0J+QxO&$jor_qF%853mok zPq0t5&#=$7&#|wve_-EZ-)BE)KN8nFZa`c~+_X6VoVd+#N8&z@J0JID+{L)daaZF0 zb`VFHBf=qc=p6=!*A2&#?|2fA;z>L|UJx&g7sX5C$?i6ly4B>M8S7c*cAD9?ml<-E-3OmFJ@8 wlIM!&uIIkzXU~JAswA2;F=O?sjK)XXZQK+1Z_2ykf*WJs!#AgeKK- zGh0=#_xyHA!3lr>9=r>tg9RW27J~P|Qm_$h0-M1WuoY|r+rbX76C3~s!69%Mdz$5S!0tlf9N}(L8pc+O(4n{#cbijJB zK8%44U_;mhwty{Rs~~I#+ruufC+r1NU@Ghl2f@KG9S()VU=ADw3*ZDe5l(_r;8Zvb z&VftdN>~Bc!A)=*+zxlakKw2AF#HmJ3s1nG;5qm+yaKPnKj9tt7rcu^NQ@*%iexAP z$&msn5sOqvi;T#EB9R?AQP72&pr)u9ibZj#If_RKs0-?fx}ikW3-v(*(GcWES!fg* zgT|s_6hs8^Xa<^z-a{+UO0)`up 
zbPD~1&Y_FwH*^F2j&7n`=x=l%JwOkG=rINuVuYnwh9j^XE3gI|a5VPdTDT6biyPpE zxHWEr<8cB`!pXQh?tus5L3l7u$0KnL9))vp9?r+3aRDBOC*jHX9Xth3$Mf*}cqv|q zEATqJ9&g3l@Q3(gybphZ58)&DEBrOSfPcaMi}(`0jDN*f@Kt;bU&p`U8~As86W_-V z@I(9$e!)l>DI;Sd7&)V0l#GcnGZx0mxET*qi>b{tVj43|m^h{tlf)!5-I*RtPo_81 zk4aK`KUq2W|57fIcD2SX@s)F`G-RO2Rbp-0S0`E)Xu>?US%kaQ%e zp>n}FNxz)zp%dyA4b2RNgiT^yAPYD_5`9L3954#x5*Kk(`HuAIOGc9dGLfXFvuw)n zyyEtOp<}YL0tE>L0e|R`aFMHjj(>dKn4;8zKtOy7NPrZ`z(g<^xJ$t#;wc4hky>=o zjk1_FZ-Xh})^|wlQZSWxNos16KRdT(%nUFasLR1j@E({&e54MkTMp(>mFAIpRE^%$ zKy$>VuMD_n%0SmnS2s722H>b@YTTNl=j&fMcH||{(|vkv?cZj@C`Uh-S;i{4jco= z!S~<dRLAeO%M)veFK^H5i5UHpQ!c9r^Y zI_nO_Sgpa7yfFnCfp&SBfm09x8N}cr6oC7ElT+%2(HcPC1%VuY5!HtPl0uRz7lC3Z z>7ATH6)A&a5L?wIgAwVhCOIW(Op$+RPT&;`k zK@BtlcLmf!9n?bu=||E?e=?u~nxGk4pp^_HLrEqHkko#Ofnh~`vIE6wy|S~0SAOfC zSV@#5YCt-x>(ahMN`6juWltri!i1_0H5d(Rg_m(c7j#1p8AJw?bTXs@s9Yck`hb(h zSx4fh3{4GrG{vb^>NWxmOJ33fHifa2Y_J*0C>~fq#5C90Hnx z*qTK%U?y;vl#tY#hAfyJ)_6E6EQKRTQKfMerJazQ*6`Cb+qhvTwgh*`zl(TgA>d6o$L0 z%8O_T@(^4mQ>jJM2!$(5jeTKi%%T=5s^B{S4~8yEeGEKANqbT1t)epg41OL;ka-yR z1#M1{xq~&}9f4oJ47YE{3{oA!--R(d29J}OWcDk}PQoAFK;;blk-SG{k@z0b@l%bJ zAk@rz9$x$((aThH4vCMB&n*F<4i&Buy#{{=?s9k?{swQ5d1O9WP!4axTktjskqWY! 
zGC--1(o)u>1ablyMS;v@|Jdv-e^FjR=y!uR2=BqCz`Y*+4e!GT@FDyMK7#+k$M6YR zNZu!l$YQdDEG5gxa`M4?;DXQL3j}}*A%y9tfD(8GSxHLhyOfmC&+>5m8flDmC@9D) zsA6b#ZdQlfaDK`hOgs8y7iQD%y{Il!sn9#dEoo|`DIu#W5kNYmryELjLQ4r0mD3gj zh^48I{-^{EnX1JpfXE6vmyrCbcJ8%yN9A{Fp)byj>H_x~78$v(n6GDsh?P?H5El?|UfV&K}L+!~Hvb79#L>WgS*s#c>v`S6wKAT+p2GBnn}bsdUmwy8>oC_wg9$0r&N zJC~zuGy;tzACZ0J)0Z)da?$A50}mCTLb8{9O!LZllhZT~O?;zw5}HgtA^T~s$;_GF zMpNJDokqtVs2tnVtb>uE1kTW#+B+;e=cS8gp@p>QL9@{uG#AZ7^U(qnB8SOm)o64Z-FZXmFLak&CcjqZmT9#rb4!S0 z%?;2$G~=O1OfAbi)2_!@4&5 zAf|d*jIof0IA*Yb{7!C`VG$OSTjVyiXg=GJ!Ai`A{$!&BL10d|K1n6!$>#7DwVyg+QS1@8< zrPSZ)Y<#tpCNz{Y>0XnFsg!t-&UUPp@P#5dQCwC1j~iiHeEk1X8OPz~z=d0qf5;=6 zDE=j>)G^8TrrhqdBUC5KQoAkg^q<2z<1TbqH}Zr$rNf?4FZGPB9qftw{HJtZ+>c84 zCogyih>M4?dYO6aUbOqKg1iQkAvh3rs2>l-890-Nn1>7x1w0g1;9)q6&Y8_a5f8=0 z#X|{!skF|Y7<$*?@D$=PG%j%w4`rozEDz;x`g~&8S3Hael}5MHyoE_vHpoLoDdu@v zU#5ocMmzdUrSXNQ@sK5oAHsz+o&oORnfN`r_m0OC@mzZR3XIG5=VqoB_;U+03bONy z(z3(FeGh-pa2{$15Yr`is5*t`;{|k!Ziz#9VJO?_OH)_w5O@(@Ol#!I13`F98D0Wn zUvH?AT85XWvwFJUB<5wcudd<~^D<~zotImVKfo(M?92YpN>4rSDqI>isDy{QQe4K< zs`&McfY(+TuTN)tR!2%pOO3}j;7#G78+m9f#hZC(s`Q0dYo;W_yQ)YAlJFk<5#Adr zahv5Gd1&RKg@?9R({-q-HSm7?X=tv8qntT_4~AxY+Gq~r&uOm5pYf0@#b59+igLfC z7S|VlgHHkVI(!s=i@(Fi@NxV-K7mj2(80rK9y)nQ0e17y!^2uUti2AOrl(^U{t=(W zKjCwL_IqhMr7}Jq*5P4Y9@eA3<9F(73t=sM3*Vu21-^~{z<=^EhKCJ!*svV`h412f zJZ!|n9z5(x4Q=nu>5d=4&a3gi_%VKhpWVWsFm?NOyFT#N}qVPG82Mc%|r8jns_f$@4trCXJQy?Y0Pl4%NLOfx2SXL_AH6zo(Pa7=Ti#h{mkOM8EjznnI;1hIYS@tFRqjuoagLyLkn z)CKWO0)v@$3|)pgq6-hZQeW_}8xIrLFal^{Ix}79S&RlrEXm|y5>HPjne?Icm4k+L z0zGk$%`O_>E0k1!e!X5)p%f-He1;4kQ!CTaybeMNT-E83>BID`QY28u^a(}8L?a4o?{D6Fr%2<-YG@?q5y$F3{3R++&~gUf1tnV zwU(G(WtnNZx6|vaTG&gkv+CjA^vY^Hy`GwhXW=>Yl4=27NSVJHucen%8}Vj(QMDcK z#Jlk4_y)aQ>c>oA7BSnI@0c^pPt4EE1?D<)mw7Be0+E0f*aR_xhJwa|rh-^Ob3sc% zYeBrAt)RCc*jJDy7$6uVNEi488G?WyOOP!XBbY8&B=}PBlTa+Q2))8~!bD+`u)lDW zFi$vISSTDLEEbLzP83cS289cR3x$h>ON7gW9|%_pON3>@3gH^zUg1gMCE-6JBr=Lx zh`NjViw1~>i$;n@iSk7Sq9V};qV=LpqAjBBLD5dpF44!LBcfxX??op?r$s-DE{HCQ 
zE{m>+m12*$jkvS8n>b0_T|7`cQannWCmt;>6ps-Xi^q$ni)V^wiRXyti5G|$iWiBO zh*yiZi@z0L5&lzTTgnsUUFAdMS@L{&p?r+ISUyL-TwWnx zBVQ-qAm1e4CEp|8E8i#IFFzpvN`6v)LH*tWm60tXJ$->{aYje5yF8IIK9MIIFm(xS_bI_(O3=aaS1>C>2VLQl~U1O-i@Y zr>v{2uWYPrs*F`OS9VqoP-ZGeDJLj{N?!Sna=LP+a+Y$Aa*483xk0&Cc~JSC@|g0t z@`UmiVfI3YKNnEXO)oH(QIX%hqQbu#MPOY)3YQ?Z@_K2eN)P zgAD}PEOtEmHXCBsv4`0U>_zr6dxgEmK39oU3YA&aO4UWxO_ikTuIi~8rW&pqp~_L^ zs`6FiRYdi+YKm%_YP#xu)ne6B)pFH3)dtlj)fUx0)qd3h)gjeU)mha!)p^w~s()1f zs-CExsa~kXYMENDR;rEaw(3sm(dys?b%}bl`l$M>`e*e8^(FPM>Z|HM)c4f))eqH= zGy;uO6QNOPG#Z`8pfPF8nrKaorirGRCQj2@6R&BjX|GAr^w;<`8Jd75OOva~*A!@q zG-EZ3G#fPEY0hc>(A?47)%>k_p!rAhujYy7ndXI7t<`GvTBFviwFb43+9<6<>(sin z3ECdoOzk-ByV_OSo!aBttJ)_zMkmmTbga&%bLzahM!Lqjrn)w|cwJjvH(jDGS=U$B zPuE{JSvOO+ShrNST(?5EN>{2Y*KO17(0!=et@}v#vF;Pyr@9|>zv=b*Ci*n}FnzXu zq&`=luP@L~(Ff=27wMPim+4pOOZDaY)%vaa5B0nBhxK3RkLbVAf2%*H|5<-te?fmy ze_MY?e^-BB|4{$PATS6GB7?*bWvFY2F*GzZF*Gy88QL1U7`hpf4BZVq4TB9ChJYc< zkYmU-T;j-b1;hNz$!(*ex zSj*VV7-wu@Y-Ma?Ofa@Hb}*(I`xyHf2N(w%{l-jVmNDB%j4O;AjE9W(O+u61RL|7R z6lZE-YGrC;>TK$2N;Gvh^)jWJ=9u=FE}Nd2rDnNVX;z!HX1zJu>@s`IwatUgBhA6F z=5gi;=1Jza%*6bjdA513dA>Phe&4*0LjJJd=C6;o_YRfvy2FoVPLCe>c zW0vnNCoQKfmo3*VH!L?TcPw`;e_I|{u~lzvZf$4nVeM`0YfZBbwhpllwPsoa)?Dj5 z)>+oM*7??uwZgi^y3M-7`k{4q(0a>y-v(_$o7g6`DQzm7#-_7bZPB(^TU%Q<+ell1 zZGml_ZI^AgZI5lQ?MvHn+X>q#+Yh!MZI^7n+OFEJ+kUs*vOTgrwmr2yk90>iifj_u zEHW-~Qsj)t_abLU&W$`9c`fqK$iE`*Mc$8m82N&O9OeX^h?8(?E@4d ziMzsG=YHqzaSymh++*$;_ae$1l@QfEsyK>7&5T+YRT@^tlq+V|KG z+P}4*uwSrWx4&=*9h{?yqphQZqqC!%BiYfzG02hb@H;XcA345oeC0UmIOaIvIOVwG zc;tBEcpeR+Rngw)I??r`8%8&Yo)NtudU^E6(ZNH}m!cm!u~X=jI3t`&r^?AWYdf1d zJ2|^Llbk)ADb7C5H0MBPx-;Kd=p5rLc200kb`s}1&Z*9Ko%5X^ICnX}bzXKpbSYdO zS0h)RYm94>Ynm(MTIO2eDsh#&Hn=vswz+n?cDZi2vD@l)xntao+)dqa?w0O0L3e^X z+1=aS&pp6B*d1^WcaL=Ey7S#j-ACMiyPtZnNA59u>>j7by$ihWdzW~Zd)Ik4dN+Hwd3SnudG~ntdVld@ zUtM2c-w@w$-)LWvZ?bQSZ-#G^Z;NlA?{nV~-`BpQzSF*ozRSKVzH7c4zFWROe2;xk feb4KFI_f%xI<~615z#BN@E?;`^{)9ub-4cno@Uug diff --git a/LFLiveKit/Info.plist b/LFLiveKit/Info.plist index ce0bd8b0..8a41c117 
100644 --- a/LFLiveKit/Info.plist +++ b/LFLiveKit/Info.plist @@ -15,7 +15,7 @@ CFBundlePackageType FMWK CFBundleShortVersionString - 1.9.2 + 1.9.3 CFBundleSignature ???? CFBundleVersion diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index ed553b16cf088ab26b69e4031e98d5c6428917c7..e3be43c628bce40badbc2eddaf300751b5f74be9 100644 GIT binary patch literal 118380 zcmc$n2Y3@lw}y9SrCn{&t}MxtOz(uyJ0y@`Y#9u0v20q3VvGQXVqzN-5|Zpm@4a^* zAw8t`-h1!8_uhN%97!v$tYXP_{(JM>Kj576&6!?y=JnEbtD9Qe#*KT4BOK*;&c@j} zq3EunyB7`F5NT;`YF@i&Nb}NtB8_dWi-$C{G_Gu7Kdu|n*1XQmks~kOULbtViJXJ; za9*yE8_w;(jo^0VMslOL(cBnrEVnZ^gPY0i!tKf}4BaA$Dma2If!xl6e#xNErUxtqD$xx2YXxhJ`&xaYX% zxtFqx>_j9waghR2NQy{bGL#G>!^wCu zflMTm$Ye5w>_p~~`J{qWk}6V7YRGP+j?~kA>3X`6ZlZ_KL+KIpXnGtyk)A?Nr)SY~ z=>_y+x|MFDSJ12Jc6tN7nchb4r1#MK=|l8U`UHKNK1W}quh7@&Tl8J}0sWYMM!%%r z(C_I_^jG=^{hQ}_o)>tDS9zUx^M!nX@6Gq)2l7Ms;rx#LXnq_&k)Og(=V$P{^0WCM zKbN1+SMj^?b^Jnp3BM=5H{ZxF=bQM|d^6v|ujlvY59AN#fj^u-ia(Y=fj^l)jX#q= zhd-ach~L6r%3sc3#b3)`&)>w~%HP4?&ELmA$UnkA&OgOJ%fG! 
z%74Ls&40)L$p6Cs&i}>#Yoj*1&0$k)nytX*wfSwmY<+D5Y=dpXY$I%=Y-4Q`Y?E!% zY&+X_u@&1&ZF6k%Y?ZbeTgbM+w%E3ZEo@tAi`Z7$R@v6t_O-Ry_Ol&eJIH3(4znF; zJH~dr?Ihc&wli#J+s?CHXxnVN#CDnOO4~KI>ufjLZn52NyUTX3?E%}vw#RHw+McmJ zZ+pr1s_hNi+qU;?AKE^#eQx{8_O0y)+t0S&Y=7GRu@k$^F4|?g)9$i+>^^&uy^p=W zeUN>qeFytU`xyIp`y~5R`%dH`q7YH`x!dA8J3sezg5K`-%2b?5Eq$vY%_ez<#lPt9_gO3j5Xe?e-h&H`{Ns z-)X`&X@w7+G4+y0LIUHg0X_wAqBKeK;r|JnYF{a5>M0w)lG3WA^r zs^Ap-LO>`IdI`OS{=!&coG@OPAWRe{36q8C!VGbtxJ2Aj+*@oEmy1o}YOz^t5!Z|R ziwBAaiy$5@9wiE@H+gC zUXH$w0gl0rVU7`wQI4^W369B*X^x#8yEuv+rH(m{d5%g)jU(h(;8^U~!x45YbwnI1 z9jhE`9s4@k9Q!#Aa2(_?9EUlMbR6S2-f@!SRL2>PvmNI-E_7^mT;jOQai!xL$90Yy z9k)1cciiQ;*YSYkVaH>RCmqi?o_DEfWCg~9AQ0WNiXz4iVMClaibm=VVTgJ=A^k0LGA|3VB&)J6yX8VTAorI0$phsf@^E=ad9*xEo+wX|r^_?sUFF$wP@XH# zm#gI6zm&;A^YPnf%k=M)n%LmE_%OD>vA0;0vpCF$spC+FvpCg|y zUnFmpx69Yb*ULA^H_ErmcgXk3kIIk9kIPTUPs%UKFUc>C)Ol*9TBC;41?pmT4>hbV zRU_(3b(OkS-B)c>_frp04^j>FF!f0F81;DdB=uDF4E1dFJoQ3#vwDeonR=yqje4DW zqk4;ayLy*;ulj)cu=<$#r235dy!w**s``fdw)&p>q56sXx%!p*t@?xdv-+F*r}~eR zIBibRDLb7`m(%0)Ig6Zqoc*1HoI{;EI7d3iILA9DIj1^za?W(la+WyDoaN36XSK7| zS?^rr+}*jCv%$H{xx%@RbB%MIv(>r5xzV}Fd5H5+=Mm1MoyR#(be`fo-FcStT;~PO zi=A7Y+niT8uXb*C-r&61d7JZ2=RMB*oew!5bw1&I+WDOGMdvHd*PU-U-*tZA{Mh-K z^GoM9&hMQ+Ie&Hj;rv_UG+q-lNmDglb8CfKKVuT^QgX?5B{ZHcz0wzt-(E!Uc~)mpRGqOI5V*ACPU)<8R4J4!oNJ3%{H zJ54)NJ4ZWTyGYxjU8-HKU8P;CU9a7w-KyQ8-L2iHJ*YjRJ+3{aJ*&N-y{x^ay{WyU zy{~mt~+!^*YpD2tNZm{dS88jK3E^7kI+ZyWAzF8 zWPO^xv%ZU7te5I@^m%%vUZaQf1^Qxr4?V0e)g$^!eU-jeZ_(H58}*y@+x0v2d-Qwt z2la>a$Mnbbr}by_7xWkPSM}HQxAeF5_w^6-PxMdqFZHkV@AU8WpY>n#KlDHKe_fo* z=CZpam+aD9y36D8x&p2uS6^2@*C5wm*KpSku2HVhuJNu3t|_jmuAN;oT(exou1Z&x ztJ+oL+Rat#3c2cB^{xijQdgsEnJeO2?ponm>1uPWcWrR(=i1-3(RG09NY_!Wqg}_i z&TyUSI?Hvo>k`+cu5GRxTsOLIa^38@#dWLeHrGS0hh2}jUU0qWddc;k>wVV;u5Vo5 zy1sLL@A}*IkL%w8u0Snt7H9>%3VIjxDd<};qF~2@kp)u=rWH&t*r}kbU{1l@g1UnG zf&~Q&3mOZS6+{Y_7qk?#7PJ)@1yFEk!HESY6`WjfO2GvM7ZzMpa8<$81=kc@Td=+0 zx`OKqZYa2?;NF7!3hpm>y5O0DXA7Pyc)s9;f)@+kEqJfs{elk)zA5;&;JbqF3w|&7 
zyWk%;b@Oh?ExR?h&)wVI-#x^=qkE)#tb4M1s(Yq;7x%92kh{)Z?_S_u=w9Sr>|WyD z-MxozwG|N{fYYv_m}Q(-QT%?a{ug+ zJhDges2-U%6Fd_=lRT3>Q#?~W(>&8X)t(y9Zk}3C$W!O3 z_bl)%^epo1?b*lE=2`DK*mJn&2+xt8V?8Iji##WJPV=1ZIoor-=K{}W&lb-%&t;yg zJlj1td2aLE<+;!Ei04ty)1GHMuX$egyy1D%^RDN6&kvp-JwJJV_Wa`c)$^O@cdy+m zctx+nt9kw2-roM+!QSECk>0W1iQY-x$=>PSnciKz^StxD72Zm3m3N7EckdqFJ-vH* z8@wyLE4^#H>%AMi`+4{GZuElpQ14;hW4y^8W1o#rv!GH}CJ>e+#)nN1;?$ zQ0Ok~Q`on#U*V30BMT=M?p!#(u%fWCu&S`SaAD!1!o3R{3Re`aDQqp=Sa?9;frU_b zRN*Ow=M-)(yuR@E!siQLD15Q-rNWmBUnzXG@U_C%3*RVwv+(`Gj|;yn{HE}?!ru%3 zDE!lB_X$4Hr}#9V&)3U0$T!$G%s0|E&Ns<7)wh#xhOgMSo3GXv^40n3eG7brzJ6uvw~ud?Z=G*nUyE;p?*O0S1K-iU<9#RiPV}AXqrTI8XZgTe7E{;^WE;d!*{3eF5lh0dwlo%9`Zfvd&2j$??vD1zW03}_&)S~>HEs}weM%& zFTP)W|N1#U@l!wV@9pp7@9Xd9@9!VrALt+CAM79EAL<|GALSqIpWxrgKigm9-_2j^ z5Bcl-_5O%|xqpR!rN7C)kAIbaUw@1L0RMshP5#6ENBB?hpXfi`f42WZ{}%sN|DFE3 z{CE5B@!#ve&ws!F0sn*khx`xwAN4=!f5!iu|3&{R{T+_%rZV z;P1dcfq#p*B2wfm(u(vVSCOyCU(}~)K+(XWp+&=rb}SlMG`475(Tt**MY|NuE1F+a zQB+w}Ra940U$lGC-bE{mnu_)sWk>k{_w_9+0Q7mj)yR3P? 
z*03v38);-du^`OFD^^5W<~OyCNtPZnr(ykyNM&SAbIV3Exv*<$%q{xA6Kh(U zamOE8zYc@$=$~X0b`6Vpvg%HgR;YP>OJk&jMHF@o&t_QByuu8|%=Rp9J~!-2u9%z6 zm2jn8kSpWnaC5nGZl2*Wd`7_NW%M!n83T+##t?%UR&bSE6<5vGaJzA}T!^dV>Wv+Y z9gUI3C}XrS#u#giGsYVej1ez#i&(Q>%q`(|=k~ycy_D6|?(E+!k=3kbH$)81C^Wob z*G~J7-XBYI^twpP^3iLeMYhw}u`SWM+;w%s`n8QKLy=ZIC8>=ppE-4T>Bmv@2Wa5O04N38x6xAEv1--^;%a@Ly)-ZiC(~Hd>r*fxp!!Ewq z7!m6cXL4tSU7o5?<@&aUrK=;QO>8)BUfa;JaVvKwcQ)6!M^&i(XQsxv+<7q#^9L7l z7bX8-3%51-2iv&Il7Da|cUAHauI08T|KJAh#^fK|!rhwugFCo8lYejz_W(C+D|at< zA9ueo$(UkH+sZx2J;Xh1>;&X+pbsPuHcv#$b#mBMy1Z$1gpDL)>s#5RF}7jZnx?g5 zE6OU^*uw_V_CJ-fAt(87W1<7nIyOL$wf695qsmjmu3&oQP#c>8*W|2im3}EIJw5Cy zPAARg!&U4sI627VzqN;KR^O}KJKV5s+-uzH+#B4R+*{n+#?HnJW2Ui-v8yp_8}}~v z9``=?0rw&Ikx^{SHmZ#pAcq1u3@5nJ(P_k-qhfPgO-p1$Q}g=P_{0_aVF|8I!mb_C z*pF$A&Px?+x*pTYPQcqXG)49cyGErk4aa^~(y*>!X%n+-inPY$)0>APtJ%yMS!PaV z70r#&#R3Z?UEp!`+3TBDFAGK5*wSJ}R(Ug{bqy^mBJnjtJfB6eK+{N>#r_TV9XD(< z_pMQ~nfu--#lkEoT~HUo0$9Kbpu8=zruBvBn(t@sw=LW++^0)KFSbM7tN zpWI(YnK5Sz_Ye24G1n+J=IzO3%9pWKOw;nFNK4q|Vt+@)%7ztdo7&dn>_4!*$deLm zcN;t_GK<6ii;Wa#6JZOE2=T;;gDBR-iAtPCg;8nDs~J4&l&D|Kk+_LxufzaT+|tsp zaVz_omuqBGee|!UqL27v-8q_P(u?%jOnMu;0Wn@8{YZZ@fK$joGKdT&Ll(D2>y-^v zi`$ykur*-AnsvJ`i?3?gTyE4EA)^)u^iba(Tzq%cFJp$?`L!AA4rCM?X~+n&BN=Jb z8w-quTghlLhKw~98H$qX`+>_T=Wvq-VAhq0%zmk~DhHX62(5>iTn?4LO#Xe>1vjb-eg2>XB5 z^sE80e#@3vZ5ylDDyg)I^~i>emC-e0k=dUHMBDX%xFPF=Y%7s5he;BHEoNpq3mR6h zPfloXN%QK}4eQv7JvpU~14`JoU`2CFG|9det>o^C;krnyoNBq>E;g!MtU0pn6IsCh zw!MITgIY)yk;P;Q*`4e`_9T0eFxi_lkfo%NEF%%JoU9-#NfX(JtRkz)8nTu&lXYZY z(n4BE8(B{_kp0O1WFt9%97r~igUG?;5MmHEDw4y<;p7N%Bsq#4O^zYQlH~av`~hTue5TEo3XXgj`Ctk;};CKfILVZA`g>C z$fM*j@;G^dJV~A+Pm^cJv*bDQJb8h5`_M`pj z06LHkqJ!xWI+PBh!|4um1l^I2q@(C)I);v=aakz1WainpSakO!aajbEi zalCPYaiVdOak6oWajJ2eak_Daai(#Wakg=eajtQmalUbZaiMXMaj~)4*kWuoE-@}O zwi%Zhmm60YR~lCtR~y$D*BaZ6>x}D-8;l!`n~a-{TZ~(c+lDa6nk6pg?#aHXwE&0uT|11Be7f2BH8_fjEI^ zKy)B3AO%3&Ks-RaKnj8QfcSv~fD{4g1*A8SK0x{c=?A1gkO4pj0vQBkFpwcYh5{J| zWH^u=fQ$gLBao3mMgbWOWDJn8K*j+X4`c$6i9jX+nG9qKkf}hX0htbjEfRMIG6Tp= 
zAiDtB704_g#Xx2QDFIRnBnYGo$Q&SZfs_N82V_2w3Luq0s(@4jVe29`U$KcN1f&i~ zJ&*-J76MrWWHFE>K-gft2ar91>;)tYWN#o1K$Ze&1hNcB1juqAD}by7(gb85Agh3^ z2C@doS|H6p)&bcUNDGiwAZRYSJ#2;<~cx+RB9L;pou2qr-yYit^HMu{HUl&~P{@Zf%XUVU|OoU|qPTxMY5~ zvbd_eEEuW_vz0@*rZ!kszQ_#cSaceda0*t=W-+w8hQj4l%%i%rzN9WZyS}QlB4~P^ z5YI?FxnNcC?22Hxx++{6Wa*WLYlETc`r49UA|ofG!}vr}X)IY{td>|El~;x8iYqF@ zb;WZM?x&&q$gJ*|wP0Pz+;DYmX|UD|^h|UtBC!cQu^8F3DYUrKS~2HjFsq(D&uVr) znhi~(Hn+GItFJa#Qe9aS3YV5A0=NiW`o&$U*Wm_~)z9qu@`}>%g5uip;;OntF1Mg< z?~b-}N=m{Dg0&&mg3VlAiiTr58b<4{l*Lg}T~!yXs*C=+m=$L@w6MG^(FiY38&SLp zXP4KNRF@{|`6{#>-Z7y$!Kz?waa|A_M_GAAdDR@2RJ+r)=rpUNQ=)Lnt4b>BOM~IL z!D3cXp>SDkbtP+FK#u{5v0bY-4V>+r8*VI&$mlR{S4hI*p++y*Bi)+eRBdl0# zmeWn>JE5a*sHB*gvKAI=qhXeXAPb|qmNmU-Xo(8B75#@2JW?DGYGT`qwJSDl!mNfv z<#SlVRdwZc6~RjO?}W=8=rSs!i`g;4we^^2O>x~^GuXS)b4Ulz>bkkX+Gy#T1htl2ji-qdrMS#T9kI z+A6H^IknaGHD<(*XS1wc$Qp@h`4n3A&Y)RoX*ukpx~4i*$BK=OT_IM2rq65W(<{lx9H)|s zNd@u_+VxAThs6=b8Zpy;A8ox!wxz7vEc41oXi=18VMb?dVV|Pe&?K|U;*#pnA~tL% zN~^B8c1|!cSbTvlqmx|BCLCfTG@7yjr!=^ryo3c^Tv^`U`MypKDm1^m#vI?x+9A1)rzbP}ebH!WyHV-lit^cPT&`sOt9&+_1M3!dn56ju z=+ZyMrM|Wn2c-@{3`XO=7GpL!#)p0E0X6MI5kCw~hqs%u`FB2>BTCCdZ27o2gbTho z6|5^@5hp5R1UijscVe?~C|J87$XcP95U#&MVO+hj!9KAB)d?N6gNxz|VYWP*U0o3y+3{EVtj^CtOG|rhMA6-I4mtfeXSTL+TGqv{qw-`l+ zw2RaR6K$oemNoQ+)wT1JTl^mAF(|D^tcDjS+lM>1$GJ3-t)*yhY3eLnY}iDTa43j- zv*_Y3VIDzqONq757ty934zfL}SpzH4*3tsZ8c3?yRUOg~#deWwR%YcN?PY8)S5t0k zu0`{r_TmV!YLB;4YvbD&%`D?kvRG0(X+u*>6O1-KR%PX7i^G;B9lu|PK}( z$sdL;W2`PEwLxs$0th|%d3iO&0Pn7B>MKU`m%YHjX`W^j!u4N zl^lb11FUwzMZuE#y69dyv8gewk4Nji8LU|gY2VlLC!y&`t7&`Gtn6`!k8MENApsj4 z6WeV5RCF9@bxf3L&HOnil2oo|q=eMo7dy;vXQOFSnY4#w9emD1yQFe&w~GygEUWS5 zIoq%$^@t16cd#{ji*J2hO+6du*x^cXqMvQfl2}qQ(fW&?Atn0rC0P=S)kb_ZW!B$i z=#W$bvDQ#ooZMKiM7yL`(VmO;piI?k(0Z6P&)Ak@Tff*09}3ScV~fq=x?wM{&L9!UjSQ$L#>H6+%?Y0Bnd z^x46h3T^^ezl!!2Go4@+J2|kd;~qn=qz=8XmW@lS+*A7glW01egdz>>WOG1GL)%I` z*j4frUf$v)Nf%bgHr}$lge&6hFJkPw8z`>eC@6R+RDD zW_{D_H1DBh$0~>xdY1C}5S=`vE_#oT9Uax-IV{_jB}Sl6&|rea04JGn^pJy{-r!Q1 
zEt1&8QH;|%^DQebuc&8BcQdNb(SHb85PLlrJD!SfFj(J-F6}xD#9yIDza)>iMRaOQ z8sEP~+o73lWAixMLX=dP0sVk312VhBhDOu+=T59c^<|c6?YE4f<9HWZ%oenjrt+U? zJ1}z`(fO}^==lfDtBTjL%asEvDi%}@m^x8raQ(rUN1^Y@b}g zwpU^HNrBY@>wN5Ry{>!->w0+R%FbHZpQ>4TmQ_?QjE7<)sJ&~7cJxqjPGoJQC3>Eb0<%G@VM2T6sNhDS!(b8fI6RiH|j?v7T z3)N=lsTS>GX4axTAyHHw)SHx>Ug=^s1+f_;x;rf~Bl6{uzw)Yhx39%BcI&RnR4d9O zf3d2M9)+?&VGi3av2|1RLi~V~$X_4S%b}5C1;!f!J9MZDR^ZuRu%fyK<;~#wqu7p} z7Z&!ka7A6U={E@dCU)+JLDU9un2yi0rod1Xn39_Ss~xu2tYjT*Haj53&w|kpdA1!; zqquX8*g#NSQ&(PDz9iODS?NX>7M698Z6wN#?OYD?RL1t??D!_}Nx(J+ou}pITsN1^ z9i^e-GAtDqPkY^tN0l79d9-uJ4_?x@x=E-sA-8zyt7@y+HY7B^PDTG5`gp8t zv4Yc;uUS(&p<0?gxxTfndCdU4D$g!HwhSm+-LQf!m*QVitUCtV%nT)yRiUP$o}IMS zhmw?Lp;DTbmtM)-)0QM@l%&s3MZ?+^>l;?Evs0{~685PvTpnV_v-k-LKlZU7m?ct% z3gc7iCs`rheTzet;rYSE?BJx-bT3Eu)ap%kFA3ueSkDIk#F3flUxEIkI!V5S4LmIA z&|)@UnqQ)A)#y386VLLh=yIJc-7QCNwpw)EsT0@e0ao;w&m5dPp7Po1Q75&dCTBig zc5DV?!)Q(7FvzwDol~cqw9ahoW!;laSvA$=_;o6J+_*Sg!oEl*I?C?3=&=+-CV zutin;*sL_Za1Do8t7zYA*!Dubsht#3y9_F_g;(+p*4BU`omFpI&t<{cdfq5PrJ2pskszWfH%$+nfop`?Erw@s>rWyC4s5d>2xRdk}VI6@o zJLe%2&sMm&zOEVvHZz{1QK%%FQ0(;&yZAq#BsQDGBVu0xSS_bi?QtlWI$c{DYm#8B za-u_RRFEBqv&B(jk8mQY?UHkNF*R#|Y`tn`^%T@fU9M)2%&N68Sez)g(@`w7C$Q`9 z>sp!{@yQil`-pqTcQ4TlR)*$;f>nv>^DNZaEhU2;bg)CQL6WT%@rnW4nOP1OS(|3} zv_foFXYDvX94FezxhS7n8w(p+@LtM*n&#C_jqKuZtYUG6!9?b=ZKpNma@xA<%v4D7!^|MAiY9e^kBmb0?#vowYAl?iEy@}*!+~Ti;K0gzFE9_ z^?=p!rI0m~C3A!9mK(d_gm=F}Y~`DDAIG*0g;RI-IS3ceW(S0bOmZx zD}$-fb*ru#r*Hggn&=ZYq*+-kiDDYgqMcpAzQ<{BbVV_4W zg_B6(MNA=|5iCOrY&NT_#_~ugzknNLgI+J<8v9xS(JHp z>^`^Iw^O_I$CyGxo((uQtFy&-{2mnhsAiT3HipEaBXz$243pclYss0JFn5U24-k0y z4!0$#Nqvb)RpyygboRqM#FiJ)&3TxOTI^h`s?NL=XZr@FyVUF1jk(x$cQ#hBdm3yd zpJ+MXqiE_fK64XKw4C~?`Bm(eDGRcA4!hrD_L-khuq=-Pu=E*riWg+pouY%b<&L-Q z*KPzEUErFPX9oEP3U;Z#)K^6X*~HTRndEO&%V$cplwnj2FLPG1Im0|!vvb{zk3HI` zH~X`l?{4u$pAN7$7xCh=3n-Ynx3x4H%eg|dakKp%o8)nW#`;g}+fe&@+%9#R-Kbuo z*l;txpuD!Oo}Ie1%bC%sxkZ;yn_FJWZU-a;&9-8;XuFPjSzB~+bg^l>yejr^rn;<* zU3{r7V@K^JwdThMyBjrAPiC@|AUmEJx2XMNeImL-RLVN_rP(&eHU-gd-FPXlgq`sg 
zv#ntJH#U0!WwUOPGRcNZ>tnAaf??LC=T?{EM}`vf;eow33TIt!WfG1BYL3Unb=COU zi>(FNC5F28@AdY6-Alk+#>9meN6(JgLa97a;sf(5nnXKe8)J5m6q~=;sdDr|OI>wI zwfVW#KBUW$TlS5KQi|?OW7Cw`GlpXVS!a->vQI=FKZaoU17e?dlPYva6wSK6O)tux zc(G6UY}rxDE@8xk%{WJ+WY$S3NzyWdM01ai6~gR?iS?0v911SVHYr&I2Q;i*Hh>-f zm?1#xj=2V~PwAN?*ohF{ zFD%95pD-JnEC*Ef>6k>W9W-rO;kRD44=OKZUu{b8DN3Q zQL<~KkwJc+S&H`JJ#rv`RV_SGXHQimcF z@RMxxK21&4oB?I*<18LzS#ITGi;7;WVgEhp`h|TV`j6?%9|uKt4V^jTwPmvqmZ0zA z^0jS@tHtneG2{aKE*xSUwv9`h8(Nm7ZR_l|JNw$2Xya^!9%}-43Am)5JvL#tDA=8! zL>BkNWM+3Q8P+)K*c--hRr?it``)Nm+%>&4O}-K3W^_$1>8RGe9F=BvO{qiMYC@@9 zx+c}JiLFMh@ztT?_=kViv}PmerhW8^l?qzKYBgy{Xh!#mS=|$kY|V%FwDG}n;<=l> z1qEh=7}&3=aX|6fhSeL{eK9<@#y>JvU+jQ>5JJNGPA+1 zN2TfS)h3<}+xJKDncWgM<2(?hN>inxm)ck%l()97$L|rbFwI=D z?E!-?;u}Z&1M5fa*BR{xqvGtm6)S_4)wS%=0ZU>|bZj*v1k|g@TMr+MRlyQi?->I*~cbm}* zIe1SkD%?H}Uyjn{SxUBZX?DSi9iFirL;FiC`&B5rs9Un^MkfXz#OHpvQf6O3gX}^w zew8rG(@^9Ij4^|dH{FML`rC zf`l-wbAX%;T z7hzXnmQXCr7D|LtAt;mybA-7@Msf>?!OegoV9@24ShtC@d2q!g67Suu^Ce_7PSItA#bfTA^83C+sV<2(3b!uwK|8 z>?iCmY!nU<4iq*C2MGrYhX{rM!lA-p!r{UZ!jZyJ!qLJp!m+|}!tufh!imC3!pXuZ z!l}Y(!s)^p!kNNZ!r8((!nwkE!ui4l!iB;`!o|X7VT-U;xJ0;A*d|;iTrON8Tq#^7 zTrFH9Tq|rBt`n{oZV+x1ZW3-5ZV_%3ZWC@7?hx)2?h@`6?h)=4?i2189uOWB9ugiF z9uXcD9upoHo)DfCo)VrGo)MlEo)exIUJzasUJ_mwUJ+guUK3sy-Vojt-V)vx-Vxpv z-V@#zJ`g?>J`z3_J`p|@J`+9{z7W0?z7oC`z7f6^z7xI|eh_{XeiD8bei42ZeiMEd z{t*5Y{u2Hc{t^BaIgyA|uQFAfj~ii5<#;t+ADI7}Qa?jVj3cN9m8qr}nT7;&sPP8=^z5GRV0 z#L40majG~?oG$Jp?kvs_XNtRsyNa{KVsW-uB9@9lu}qvJ&K1kWdE$JrLaY?4#A>ld z+)b<%Lt>rC28AntTnXeVAXfvq2FSHQwgb5i$n`*O0CFRcn}FO5e0=W&y?Lh7T zawm|xfZPq_9w7Guxev(wKpp_{AdrWEJPhO!Addoh49Md^o&fSBkf(q=4dfXh&jNW4 z$n!v60K%r#mw>zsUl9U$)lc@N0@Kt2G%rjd_;uxaEI zAfE#H49Mp|z5v2zjIV%v4dfdj-vVLN#P>jc0P-V{pMd-fQA0Yn%<$w~P6ethW2GkBz04f4?0F{8sKoy`WP$y6gs1DQxv;e3Zs0XMQXdzG^ zP(RQB&?2C{fc6I32WVfQ{ebodIsoWEpo4%8208@jP@uzr4hOme&=Ej)1UeGvD4?T( zjsZFr=s2L`fldHA5$GhKlYveFIu+6( zgFwrG&H*|XXgSb%K<5Lk09pyO3TQRZ8lbxYtpyqaS_iZq=mMY%fi42N80Zq9y93<= z=$=6L0vZOoH_!&4OMx~5T?RA)bUDx!Kvx270=f^-RX|q*T?2G2&}N|PfbI*l1!ybK 
zHlXW)ZUDL;(EWjK1bP6_1A%S=dJxcqfgS?X017}41$r3J!+{ft~>LM4%@DJsIdJKu-mF8qm{$o&oespl1O+8|XPe&jorO(DQ*_0Q5qj7XiH( z=w_f>fNlkP3D8S{ZUcH5(93~d0rX0sR{^~m=rusE1-c#RbwIBNdIQiKf!+l4W}vqK zy%p$fKyL?n2hcl#-Uak-p!Wd17wCP7AzNIGLw4sB$mnw~aSt@kigpyk(baTE2xBtZ zQCx~fd7vG|2s-3|b`)2lVV-D5aTPjs1??!Vjb|iRw4=B$I*iZCW{lr;VaEaN$dldq zt82&Ih;8WJ1+=5MA3Ao5b`%fDV8%|UEC@C6AT-Mh?I;@PlCm_2ku^G?9mT`YHV3q$ zcqAHj5$z}*lQyD`itqt11+$kH?I_-kKAoc-#k(@wB^AiMXqOw>QG5Vxv!NZu zhtZ-_w4?YKn&pLd6rV(wE}BkLi>Es zj^elIkptRM`~eMfK|6{+XOpy`9mU_!FgLWL_$S(0Kx*mGj^aOPWI^P0j&^hqbjcCz z=&+%&1*wLgJ=w(qga~7MvY;IuBAVujc67+-)D^U&!`UGl?2$k!urc1=9WFE;*bb0O zwX~ug9i9%_*_dw*9~x&tJ35Ncz``WN1!V`cqoWVncxyw(2kBVXlH4+2`EZNw4-A@x>$;l-L{E7*R-G= z9h1<;0@lldc63Zd+o|nsf;FJL(Tvfb}klqoPCl+0l-UYBcW@?dYgQGs`%X8SUt(M^j4^ z%z$=uEb0(wcC@2o_YQ%^ukeIBbb!6k%u=4L_$_TaiI(SRK-(nDTxV!U$1-$DLJa1N zc66*j-z11%TC}5MAGAw?_+>#mI@X|d5(&;?&00u11Iw`vO}l`0bhM&l&S*!+hLn)n zpVcQnv~+Al)1)$K2NqgLIF3zdmsIZUcCj&zWwrfTbW)Ev1by>BJ30={l2}qQme<$j zOm{?<#A3CP490LAjSfjA5PPEE0qy8G4(*a!MSBUf2W5tFB3dVr2s=ePI!-~KJkgGh z)6pq+w4>uJv`^}Dtk$xk9UbSQOO9wq#|3EI8QRftFEJ(*a>6F;5F8mcEd9ffJ_oGrSEJ(*gXxXvO*uDMQ1brt>`8(@yok1Wu^=6G&)!vFt2J$CoIU`ur_JWbq^AbSy~6H>lJt z7Np~Qlt_IHni32?he|9B6MUWyEJ(*ssFwORGP_!47Np}>)JsLDq}4NVV@WJX#~*p* zuTvJJ#IriRjrS7Nj% zA4O8Td)hKi%Yu}uP$6|*N>)f^K}x%!UyraLr8<;K9gEEn*b5sAvmqM`Qd)>gnLBG5 zr6d-lv;_S#_Z!pSGU2CXK}vg~PUdRKpkrY{N_(Til9UJAmNMxP7NpdO>16JtEYc3? 
z#9L$r7NoQs^}5Z1l$ubc2Uw8OY7|O+uWu>G#Otk`SddaP3Z_oinFM3Ab|x02)Pia~ z!-AC7qgLv2HG5?7>Wn^9vpltz_D8YQp3s5e8285ScZ_5o6-j16N(Z9Of5w884o3OZ z+K9tS@?=3ufNH7THj7#Z7Nm4IDx}sydWB9{kkU~omKAi;H5R0FEDERY>@A3wOv0VA zAf*#fJ0Fr-tcT$0FPxPHDV>bU8S6Sv7Nm4qzH#E9AC0mT7Nm40DyMdx97;Ed1u30_ zI$7x@9csx^lIdBH()lQvmB^D>lD*CiA@ed7R4854jY!k8Af+uRnGd+ilKHeONa<2k z%%{;?6-~5HdKRQ~IcnFX%+{H!+@hTm3sSlYQ^<-w=}>ct=Gx9ztBqoH)3YF@YcYX* zT3aH4Oe{$0dX&t{^vM)+f^@_fJjpCb>85;(x<*j*kl>8Gq50~H@gvJ6t-w?@6E%!cTliP{Ur?xQhFcN@|jXCWf%>wBMVacsJro5 zS&-7F-7UTjEJ*1K6inUQS{jYzTp_x&%gTb3zV0-;$+X3+EJ*3Q+@ecVLMJRp=||Md z+M<)A%Yg+c{eqgQCo@?}Farxx`W=OLMBsLkg`3$qDi!~&RCGLqsx)!#)6a;Od#tF zl2rDI$m7QlCbBUJ?kQ_1nst4fUNkccQZ7Krtdmlbq@_(q+eiuvQudoFFjJO|}^f(0qhL!};JLCTdV)iW$exdyem%z~6d=-v$$q`UwHdV&Qh zFGi&_u&NZQWNemYCzV#BWOf#$ya$T+2n$jUqf{EMU6)yq@={bxgCFcfF%t_?j-cLu z!Ge@mqH-EmStpTppiHJ?LCUL8y1Oh$c`d4RhXpC`iwbEfBSG0qtoU+bLCS5Y*DbtY zM;4^KA4;a_DY=K3nFT2yfZ{#Of|L(J(KLM~_lP^NAY}s;dzu9)ABL*ks;rJINcqTa z2bzWjDIbHXX*yz}KszVdv5^5z&w(fq+B@)g{$OXN%C zZSrL@gX%m8^dX=R1AXKY`AYdJ`D&S=bsh!!JkS?_{|6~qlG9v>$wpnImpvSXd4dpu-)=<7n zX1JgyfIgeR8p`*{4>H=Ie82nv&?kXDwMBkNW`*`N&}WQ!doq$``Lf8`wx;Dxk(RK_ z#s0p4l}Ba6inUE`>z75?vx4^W3Sn{Ke=cu}tZ8=}JZstBQD&hT_fzt-$w?Abex3mi z>2p!Q;ZL~%4&_%E+E0EJDC2h-FOhzvKN-L&@*DD-@>>jWNM8Z^GEheL0yc4{100q{ zT359-uNzxYR>6KD?EI#-(#V?TneQ zJIWs!^~M6AuWpq;kw29`1Ns`!*MYu)^^GIR##&l}X~^ELW&|H5^A4ks zKKUEW__shApv*9%`9c1jF%9J(<)7rA1W4}kv*p@!_ADARCoN%QK}4eMGX%aWmm8}T<{L{>DnMCpb9GqPc4h(&9`W@8#E zjwqI}NRbp-Q502iDw?7zE~P+mD;~wG6e>Q&uLP7LrI*rM>7(>j`YHXD0m?vSkTO^q zq6}4rDZ`Z=lo85~%1C9DGFlmqU^5h zq3o&brG%Bel?G+0(x@y`BFb`Qg|bpW@U@ARk=jDRN1Cnrd+OEp4` zRQ^)_R{l}`RXLTYROMBhYF7nSR2{0M%BrHOs#DceU3IAis$2D_UbRs5seUz}7OB0| z-fADUui8)TuMSWL0{sx^M?gOY`U%iafwG?cInXbFehKs|pkD+11}N)@-vRv|=np`D z1o{)upMm}Y^jDy)5B?7H51@Yn{R`;dK>q>yFYp}j1b7NO54;U{JMaSVBJd92CE#V? 
z72s9ioxp3r>%hB!F96;Rya#wM@P)wpfcFC*0KN$LUcmPTz7Ozyf$s-=f8YlIKM?pq zzz+s~2=GIJ9|rtz;CBFi1n@foKN9#+z>fxg4De%t9|!z+;3oh-5%@{KPX>Mp@Kb@G z2K;p3cLIKA;Aa3o6Zl<#-xc^-z!w8Q8~767OMwppUk3af;O7Eg4*Wde=L25>d?oNz zz*hrb1N?5l*8(2`z7F_$;1>YD5coyFF9v=I@Vf)Q2k?6WzZdXf;P(c;0r;iBHv+#5 z_z3XJfnNdqO5mG--v{_rz^?{=4e)D$Zw7uH@cRPa0(>j*ZNRSwegp9P0lz=+8-YIn z_yd981pGn39}N5~{*8qPl@Y{jE4*2VVzXA9gfxij(n}NRt_*;R$4fxxE zzXSL?fxip*yMez4_KHW2gH5Q8M~58Pgz6+T%#%&1 zPDQ7#unEqB?4jnpS6RNFflna|s-H^@FisDl@ zqGdA7EiIc+-GqkuvI*5g&?{Fqp?WAXKX@s z8(QVTCRDFLhveqm&YH5Y3Dv97Fi$q2x;;%e+1P~Y4d{^EpgUs|syCxg?rcK!Hnh)& zO{m_99yzcH)qBt|7dD}Ke>O=An^1iS4Rd1?s*j?rg#?z4O{hMBMi$0f=WIgtX>`ev zO{hMH#ug@6W;UVvBAVvOCRATRr>?LG)z>>@BO9AgeG82(z_#pcLiODa+S%BI>IZ0? zg-xh_j0P5{UK%!``Wf2f$0k(2M4zl|LiHOo$;KvBzekH)*o5j&XlZGW>DYwouV`ce z!)0I-s(+xJrQIeV9U0k#>fh*MDMt2qxFeg;$)Syf43~vX=;YD1n`}a-fFk*@37ryp zC^MVTIS@@PO)vwS&^e?-pxN1k&fy&bHL(oIY(nRbXl5zTENnvOXtYh@ zq;(~Q-({u)!c7aXk+!Y;jW)nJRr-YP=P3R1wX;PWA6Zb4^Lg!qxODgwvyH41I&iUw@ z2b<7Yl_fD&qU>rj`}NDA>RR>-Qp=*R*w_n1vy6Akl31)ZlG%jLI&?@XfwXKw=R&kg zY8CAz&>oZ-#uBtnLg{tNCUowJK6$bUoqMBG?rcJ5BibkRIaZum*@VvJ=#nFw(Ak8> zov{g>tI;YSHlec_J(3EvQ#PTq1%2{m6FS$US5k-0#3pp^kER_tcb%{aod=>*UTi|= z!RV4Zo6reppCf9}c{m#P1ZvQER63=upaz}CqEarXLFWl**|7>bK@B=jMyD*OLFZ{` z&~?Od+e>tEAosXkl4viE`Ki&{Bq6VE$p;#)?DW$MF zpaz}KqF*;qgU%OFAPw*>57eOZWzg zYEX0JqL({rP*YH^o2WreLy^v^H{IS>E662z^9yta)S%`?k<{*T)%EWbx{ZzJ0f#2DM@oOYI5yq6W26)cMa)gW4RFPpyqO z3nNd|pf(THQoC&ywGOC3tr8Vd>ma>CC#XTK2F0>+G&Q+5sq;6&aH$<^)J51vRK0ly6aYh|`qLh#J%kOd+2UEJF$@s6p*8 zRL-aSWl&B=4QfZCZ0hMuPOUE$HK-la&HQIX4Qj`uay}z?BL8VngW5@`*2PYj6KYU9 z6;t@1p$4@xFuDH;YEU~Hllo6kgW7p0-KAci88xU~h@z>>_{>c_(Q?wG2DQy7m=(;{ zVQjLD*cniR+9llxG97AAy9@=p)L+t|2DK|uEuSgXQif?!gW5ISjn9f2)UNAp@pV8A zYB!=_>fYAUX!1f0YPWQn-DH?wR@9(&dv4JsDxnk9pmrDPWo^;P(dB>|)b2&i)RUPk zC71yEm9`g?5IKQF%-_a-pVAL8){H{vU>@b%b1L) zLG79RiYC#{IztU=&v!ZU+)#tsOPD~`86>If6Om^_4Qj8VXx8;@deO|NLG2Bc%sMG0 zNm|-;deoryHVXdNs6p*LOyxgC4Qe0uOcGg8gW4yUM6MmQ3#dWub4;Mikh?UfLG3G4 
z>}k}X_ARP*A2q1`fI2;c8q|J9t)4&)YQLdUmutcLB|3gvMEesJyN4Ro{y~jyq6T$> zB3(xf>NfQ68fs7%(f2<`4eBx`({rdn-HCcVh8omeDAyCHLEVE&J%Sq4eJIs4s6o95 zwYrQN)cc@&H&BCme-!8m)Sx~HmC~?~dIB}54@L1FK@I9Vpi~-cS|_+YeA*PfM23H0 zkG5)q=3%`)5*5?X?K)A+gc{Vxpx%Fh8q~+5avBs?Cz0lW8q_DDbazpM`czcu4r)-} z2^G>*MgroNSn=hA8q{Z^UbpCV9Z`e&ER;;sQ*sY6Gip#TLGd0&4eDhmnx@a>9&rcM zpk9uOJ&hXFD^RstmDLe7s8@G8&@`w)y%tr|bi_n~b`FlqfEv{6QM$)bgZd(r{jX7j z`tF#@e~22?_re7JE7YLgfa=|=Rx1zCf*RD9p>(!Eqw^MHl}MKyZhf#y^)x5qB zYWJycjkJs|jVy0k8(Fq#Q%!YgC_I~8nJNvl!z0sj4O;rsSjJ!`GNh%Zx9XSZm+IT}%k<0jEA%V%tMse&YxHaN?fP~4 z_4*C^jbNJwwqmfAgRKT^bzoZrw%x(D6l@W&tpwXDur-6N75||3YUZ4Bk=X0O;^LelLtFSwB=A8fe&zYGsv%AKT(l|yM$4R4<#!1rnv@}ka z#u?K1yfo6LInuaD8kb7rYH55UuijzNQWljfjX2jf##64k@;bfAtX0pri;-L~!v+ZPqJQ ziX+Ou`BjBM+7~azdDF5o_!d7;M#JI7i~mqBUf~dUx8e)G0kP=Y4};RP2d1WE4&jxA ztPD@$@Ge=y(-XW&!#r6HhZHaRW4-9o%u@ZkMe~)2vDt$LQRxyT@6}5#%k7DOx8%E3 z_@G{F`2nVJ|Ka|R4v&bA42_8n4GW714G#^84lbS|{_YPANsNq+2r1G1pXqgaV1TLZ zKWyrq-&Ogy32y!)*x@j@XVl; z&O^Luo=yo3(+gfIwp2okcEuLROdb}Lkdw*Rq6W238JLlnkrU(_Hb!|fhGhn|^k%0e z`!?OsH|l7w2DSDiCVMh6gE}U3$xdtU^`@8HIWFr>aB`;XZ-RE%$qY(N9-fj$MgHY? 
zn-=WrzAv%$j4E0B&wAP@5-F=URUf_Eqz&?x=weeDz2NHH&-;hb3`*kTJQ)<*Ix!=8 zRAPpwd3JJ&H$Khxa*5-O=>fg)8vk$h-L4%v-(~nVS@fc74KTg!ulIj*-)-LgZ};Bj z-F-nSIDshH15Ujqb$A}b-;(0H@V)Z6vo|?2C_N=LB`}qtgrjm0uLQ-VYG*LFy`jC^ zFsazl+Vr5lO}>}--#z&W@gxlj5B;}uJR~u6P)tOLlOI!cy;2V|-Te;(;m>XTysTvI z)z(W3{LdfX!=fT%!Wg}RBSXTX!h^$OLL%?J7l$V&4lJ=3Kdjd&=s&YR6d$!r!Ftgl zOrrk7;Q2;Haz2TwO_rEio7$M#n%YU@BhvV& zG&YsSX4r)O?(gK80xA>+z) z*Zved&fk>oraoFzdYF2edYO7lW2`jBNn;CXj9+5vYwAZ+21p}KY~^c8>;Gy~O7Ge~ z{N;0cQ(~gV^~?2Qv^h0}T?6Ab-HKps_(oq^a{U7L- z<{Kgx{IO7POL$CScjTJ#?%4!snn)A4qp1UvO%@bqE4P_nsLukpEJ#*)EA_&n>2R!mD)oub<_OKk-ry|O5*g&jgARxP%SL7 zuV^R-g%x)&@kE!4x^OB%^4eJ}83<2OUx_50?Ag$7etXjE``VR6e%tF_`*m{yur znO>8|zS7uF8v9G*fF-6irnP!;k4s~szqtS6PSvjW+RNn^6V30m)MX{d~F_stCt4UYD8U`SzcADi~xv$%bx{d#eOq;YWZ;x@DyK4Dqk z+^CS?5UseF(8A)5n!dbee;qfS(2Glv#-YWEYbrZ_czSbTagkv~sd~nA{+_kFV7f?g zm!vUO8rc!){xm(Qwj^b*S>7i%B+8ed;lZR;NK9A+H7%^>Rnu=;xpy6rem7mG+&`r; zT^dLD%FXbVTYA@9FF7Cf^+#xMFe5`qRA{l0!E7+A_iDPiw7HDl^ek!2F5dL97mv4F z+BY{eI+TtHjf^hbpUf4_Hm$h3rK;I(c9`0lnHC!*jiY_V<@l=Du6OLoxu5pS4GE6X zMy;^msHkxSA}lge8<~sBu4=Av&#Klm*V3z+D~)4|S9RQn)nnydtJ=UEa?j#I&0$nE zLK??Q;{;!EdA_PPmCr;C@22niVIg7kQFyqPn}vO3ZftIL&r+M4WA#c-l*T7YRB}td z$;bQT#)Q-SP_FO%J#KDe?x>Y^w{hFt$=sPbGTc2Wjgx()J*8*T=1<0#{i<;M?3o*( zRk0|HJHjlYyFmugg^#Y%h z#+k(mY&x!8gU+q*+T%}{pT1}PrkJPF^6ApZc>kiWxLJBMcl>O+|Bv3ekulUQDmX^l zEt#k+9?<8_Oef!cv^UQ-&(Ul7vNX;vUdyo;D-AuacM^w?f^!b@Li19JGA}acn-`mx zNaHKgI9D29mBx8X%*)Knso_d#oG*y-;ER&?L-WU4ncK}f%sb5= zN#inUTrQ0(q;cgE^DgslJ=UwFk)x!3Wc@E53Y+guwWN9Uw!{=39n43}$Fy|0>puOZ z`8Z+uN*dQl<62)6Ue~j5!$bRyUt<4@iq`hO&`|AMCNwgN!$VPQ&X|3Nox2?#%oofY zcFY`h-jv3-dn z{F^kcm&R=+?h%-8=+_kQ^6Zp zqKq`YD~+2ETbvdbC3Lp9EtLkC%zQhdf3fzSw82?JD5=E@w-&FujP+kElV9O zdbOqTgGH9Q()i&&iCzOOdV$io{ch2-7%bub0KG4bJ4ytIcKf#+ZD}Mdi!59=ezeHK zW#h;EpcXCc$o(Q_<+$rn^2eS0t2MK<5tbE}=9XAXoTY^&-qO<2%F+5NXg(5Gc%KK)h+&pKe+~&Ec(mMGm?g+ zWN|IZ<$2+YdLbcAoYVOz~Q|w6@o-9wFlyq%6-J2QHkSU0m zVIfg5;mJv1q1_TQ zhDU~X*C*$D>Hmpr5bBR^qQ%3{c3B2mk}RB`e=dz*NaL}^mO+-mmLbykr8F`i{H~=) 
zFkRI=DJx}UN>)x6Pr5fFE6zJSJ=LS#B1p&{u3hEpy_nz`tX=98GS?lebV~Q#QP5Mj zWNBSfvQj%r14v6yeN&A3xBH3GSb2s#A3@R%VkGP2>QLWZ}qT1aPeX?ch-P$wFGTp+7)fs6#D~;zE*_-KQ ziN~sSi_K0+P4;)?3l`2*?u^H5%Nz^m8sACdd1<_$t#8r`=+fvO6Q*x?J2mxi z*>dIYf1pCojyz7H4Hw;OCTX+0eLE&J&*FuF*;$^o>K!wb2TT^`<5ts$HjUz215~ z(^#>m{zs2ikhV;I-TIWRKfrdlL14LtLBZX)!__L5M)_)zke-<2X&F*2xLU9<8S=_e zU}%`Xn)#vi`Flhp3%5vN*Yu{b6Xr)~FGj~O3Ka`XvL3ShM)mLd`T$eKjtT8NIi6(S zKt@*_eB{xt8D4KzoHDh5P>38M2UFOPIM7{#6aN{BSfai7GuQ|Vw!kK%n@_N zJh4#ZizQ;2SR>YnEn=tGFOG_n; zHS{$MFeDpB7_tmGhGz{k4a*Gg8#WrY>2bc#p#N7iE%Oe z$j(Y+9B)DPEw;R2c}pY_Y3*jwCRTk*9qh);-uQJ)v@wca3bJ|MdFM6c*TK4Hd@#I_fiX^Ye zB)Za>$s$qYh-&;hOH}JvE}^vu;h#oyda&tY@Ae&9b?(+?c;euKG5QeC?T89_rL@O( zm>jlzCVpSVGc?azj#<9692c!EUx_%;Mzr)j2h&g*|CGiX(s(QHA*qNwS6Kpj$KRCl*{9c6Rny-9?M{E9LcWAkTpq~W) zYWYjlUSavo^1J1_g z{tyA26)EMVa=%m_km^HutfMk)-k(>mKl#{$vtI8g-~D*Lx|A0>WpJOz zT@wcuJy^qG{!wkEyS_PqOMn(FLqeJdhJ}Si1%`*Vhz*Pm35f`75f{}wGB!FsI6S(= z-xkm!JT5#UG$uAsyU`TLxlV9k^O)G!z?RKpTgHaB2n~*i<4#9|;x*9gUsS_xX(=PJ zJw?C2$~OhGxbNh{bB}79+**OYICS-TQwJtyw8<>EKcS!16ug%>Ft)^7?%dqSNG3X> zBgZwU#wD{hbsQ2F96fHFZzr)@4G-pP-Br**g+Etnby=DIT4i-xD_JXBAGB7nR<#CL zt68gC*ckSqIS=@ll%F&7}{MB2%?80*O&6Vh>5{bkk8YzR*N|ex7 zdxoopXJGc=!Mf0SQDfnMVV(h-(qn-{L}|yMF$o z|2-jl;Bc1iU2H{@l{I*>GBG=q-{7V7dg!VZywq7+oo8p#vN#RT@MQ7_{W%@3Mg74l z-U-^a{jX~!9kKSZ_O|vB<*j{1Z)<<+0L}=sBfZ|sT2nguu1lPQJ!vV~1wYQ~d+=v; zmi~?_UB9cR*SXkRQi;hEQmG-8CZAaotpnLck~P`tVLN@TLkyj*DTb~+In_%|&hVsl z=R}&i_es*9DbkOc)shSD6zb<8+OfQ3EBkP&?r*WStr)2^mP*Z|)?wCEHaFawX7yUr zts|@%hIlgY_N(CjMdN^$GQK5-C#Iwoz06dC?|Qz|Apg2W?@1*>Dz&8&EtOh%$$U+t z{$v#0rKdTy)qd-_g;y)GVd1XAxmULD%1|E#Mj9SmW*uc6ZOyTcvF2LGN~Mle9+pZ& zsRT{#h@q?KZRjc$9w}gV$P;>=tz**0+F#op4S9OS)>}8|XOe}fSz-t- zIRfNcH){3LPCWm~;`$agTetAr7yn?DeCt-N^nbFbe=PsNx}BLT>xWW#bdhz3RGMnn z_O%1E*ZPO`FJZa+F zxRV~HcvBXH}U$Vroc=&E3V(&oC z50VQ`=4%%ouG*z!HPpM@-vfnUQ2AvY|U-4Hg+RM_#~;Y19_I%;%zMraW?XHkW^B&F@02C zy`JqogR-;}J_=_1C_IBHJOk>j@8jCJRj)27gNOJQF31gACtFu-Yn^S6*}B-s^ubaY zB9#=W3|(SsYlye?&?bVLYWwdn?Yxa21ni!ek)oYmaANNnrJW;Xc1axNo2}!Lrc%jw 
z3-;NBB%XNYXC+F!>06!IP|ja{Hx0ZQtnVnXh_7^QH`Ajy!0_P0qW#)7(3Z@YWFt7k z7uh^g;h6#LOs;#^!VqZZ6q)+EDdP8h+fdsusd%N5RX7)FOS6p-mVBGnmM#_kKO*0j zVat?ChEy_{Wv8&B-A+5qcJHcHyF=pOw3MvuWKUu2YczYJu5W6^AJ;LqakrH_-Znuh z*-{zF6nbGd)M)n9`z6+!WSgut>Pe}LUSwe0^tFjI#Y}Iihk=LZyR?4vZ)KWodWlc* z*k;&fYO6mdmE1+P=cO|C?_%Ny$^0id&lWHj~ zM?Wi0i1U5?FFR9DTaNMZY1+6!yKGtx`R*Bdl70KAejTXI znXo?Ru>QgI&Q-M}@;6&wOS@U39g?+UDwihgA!+)lx<0*87@HCsjP>x*i-H}Bn&~AK z8<#^Pv_~75h$tGD|FGTQE}ZR8sl2ktc2g>I{W-0Vea$nqLJ9(GH`tBbA-3OVoiCMF zr4pWRSL~`(=1C==sf5CFeC-ih|0?C|4`{2DH!!<2Kd*hhy@K|q1$pi7wSv{|W96fK;P(ka2e)IfW3VM5qB{=AXb*8kQJ`%p$Q`+ao&B0I;NH;bB_W>44N zDo-abvX79;TSaeW*+*(`ag2C-k*{_PBo*~sDg2;G*O455{F@nTA74}hjxO&O)nKB1 z5@l)W7rw~OsQF&eW}dcl&{%ApW3QpLkFH;w*M7168T+%^U*DI?MlEyvn|R(n>-J83 zS&QKMqD5Y@&%1pQvQ{b^iWbSYucV*t&9r_dQ|M=3|2CkbwI%;WKl|(QmVMnly2Zc5 z2K!E?1MM5_o9vtITkKoy+w33MKeTVR?~ux7scex7hlg!a`9LZkN@crLczvf-K3Zk} z$PjPeg+2C9vQu^FemEtcKZclsPH=pO5-z!|L&EPaf z-nhg}P6%t>bw%#qr6#6ldXigZc(cO!uy>iZgfgzk5rCH<&%QXTHAlnK5K2)cQftdqkMj}jrJq~HT0*|W&1U*ZtXwW zuh_3jWv^8BNoD_H`_J}Y?7vEdL^&dr&+^*(Z!dD9*fF6qce@8?B&O?~QgGXn2Wad! 
z>^JSV?0-3g!{E5jQOaTDAr{{)OYHzw;`SxC<>I)`@ z{mvc*ug4OkVOkgG36oJO zM@{$X1v?!sKD}OePqAbq7CW2{w>E=uM~sd*Dmxx@RG|h{9RZGNj_L&M?{6aCZ<5OA zQu%_3*gJ0{>vgH=s8taAd`C@@q`z3lQJ3rH!V2D==!+fo9QCy--g}X|-V8qCXh7uy z9S!N%AVX(EX9DOS3Fuhg2Q%XMjAj;@sXy@~mE(GiLL8wi?AtFBJX!gUFp*U3?}xOs zc0|}ONaZV%w;*#JF?oYKA94gTo!8aD#pNlzs7D-+YSRt=JG%Zd7dx6dnrV%?^Q~fA zD#&<8tfLJBwj<8b!V&Li>1gHPn)9?&zLClqshpL{IjMZR%+c1-&Jb^E>*#1=5``1U z^9+?tA*crJl954DJ2hhjOd6&=@xqL|HjUvsTOvJp%Xb>f+Qp-`WE)Cg+gU7KP@2Dy zJstfy!*}#@^mg=d^mX);3IV<-mG7l;Nh&`qbqsJkuHExVz^%GFvxUXH4`x;u}{FC`N|Le_D*^QEOwCbj{^k#JQ zjOv`2&FEJ!rxe0+trJIj+Hl30l9-yB<1Z(!b2t5JN^4lbt$~7ds`|_JACL5S7EC^6 z_75J-*AE1H1`Zk+HgHg|?;6XI>hR_}hC9-v@{?4qNabqDvs4nk#93>#TD7W$=n0qU z$RfK79v~ol7dtW?*&>POG5CvK_$Y3AJ4Q?8=e&-^=8hO=##%=>#ycix)31!A(Ng(E zs?0`*6kf%YsCmP?gjwz0*+RXXy zQn~J%I)5mlT40|$Zv|A-rhmGpBzwG#`k9o1c_YUR$8*}sTm#%-<(^%KXkAw=#Q*ki zeJ1}W?Tn$m|Haw*i+}lEE5WbM`z<@DE`qO)%V->gX9E;d{7dw_XmO7R>mOEBBR!Wt@`kYi1sj5;fE!8qoEi2V>QZ2ud zoq4rmjf0QRINor)$-m$B?a<%V|!-=8Z=)#Uqw*}sd&XJmLY?z~hplZA!Ha_tI@ zom+d&pTPPhsdg{YH_sEBqn}OqCb0@8jtYCfVAaCnv_*MMyWOl`G2Xd(OYzO?rxGOs zT5yRM8C-Ca*|F8(sSYA5yI<)qs3$#MDMgEjudvdR7~S3x;QH_Dp|ji{1+;e7D#b zU1Etxg8s!PneY4#h+;SM?s1Q=ptqbxXGJa+oQhL*mUfnLmUWhMmUrIoe85>jsx_oq zQ>wM3T3f1hq*_<1^`u%~st-%G!78UoyUXpgI&Dt7(;)(!E~i_nfiyW;stu(YCe>L| zeN}sQV1ZQmKtk~w?0Sc4Eidd;PUZhU`s6^#ha!3uei+y{0pP6TtjF%)BzQrKoCGge zONo0-&^jA3$L9=k21_+Ws-cB@hcnC>Su#Ow#pd|@UD??A=p75~_RsA37m9OoPP5e6 z!Wr*u>1^d}E!A+TMo2YMs!>vnUg~V?Z0Bt6?BMJu)flPr?}1W%QmT`+YX9G!&~tWo z^7*A@&K}O5&R)*m&OXk*Qf(yF#!_t})kmcIs8pLSbM|)*a6WEo>m11Wn%YdNZKT@H zHl(SeUU9&@6uFf8rOu* z;ZkkB$mx}8?BC4EIWwJ1ujD(koY_*1lWL26=P2iBsm4pS)$LPk`kiup9?Ln7FIZqU zsD_gnmc>$SneUvyAJvxpKd)YAe^n+sr)Z^5a6avOt96N`N|ZOlIkTwJ&r7v!;f1Vo zmh+{83)zBg&vDKzTJ}|`wl7+Cfpek0Y>A^m3 zr%i7-yaV}CpOGHl#TOSDT$yN3@%T##(yl|bQk)05##H+kU1K_ra*gRv@z0&dv}F83 zs*f*neks+&zuVhRI!~2M$;NtW?)mQU$sg96p#!{ErWzo**Tz08uN;S)O z6|8a>^S^%$%O`e{Bl#Yvf@@gUgIvS9R2QeAX;Nhx{VER|mAJOju3AIG^~gNLHK~j9 zyivDZle+3yN4V;_>T^x1=4jWXDhG*)|MoShE0AkaS3_5jE7--E%^0bUl`4miyrOGT 
zSGeyIQOzaSwWqnN1mFFVbU-Fkas3LeD_t?JMqG=kcyt{ ziuJ!3r@tr*w)H=_9Q92b6ovd=hn;_LuH;X>_O9++p}IOazjJkRb#^`G>f%apb#-wF zddfE*a44N3)u~dQCe`UueMYL!a)s*Z>FVX`&42s4_`(s_0N?m9L#oe7)%Tfam18O& zSZ2_8QQOnBefIyGSEvQY^1>@r*RW#Op02dwe;HA9h3d*G{>G>hSE#NruCaxuKv)gEj$NzO?FKwd3x7q**5N2yP&PP37{EE;u!}a`a zOP_bW=(`+!MO*rX)g_jm?cxll_yZV@Vi%zPL|N!s%*mB&kt<)S^Q1bz#6{-6zKs0) z%g_e@>LRn`7ajSoZCtOp)@iA`+O@{D*7dsU4cD8lw_I<#-jV7esZz_uQe7g|rBYoc z)#XxMA=Q;y>b|ExAMM)c+T_~o+TzR7RoaCkyQ8{CsvK31OZAkNw-SWN4S( z`W+wrTHZGy%52|%|3T?uPjdTvYmaL$N#^=Qs;@0_?UU+itvUar=M7y4U56<`f8Nk_ zM5^3`_Lu&->o_-KTwl13xri3$u&+z?jm54LhIrRWslKTNl3PRn>yH|`&hku`>zq{I zDtaWzb>8(8&sDfCxGuWBcU^M*;QG;ZS*mYKmAqId)pw=(o>bqL>iXrbE3T`qYp$R9 z=huoaNOgl$H%fJ@RJZ&7|1lr``M>w5p{3wa!;I{KIf439TLr!1zRz8nXAJN9B@ON} z?y{z~?($OIB-PEnl;7fe#!&lSgJrev{9c1%Q^anw+hQHzwi&oe`$($Xx8-=e?caWc z-0k9nByP96l2kvG>W*TE;Ggxw;&$I*v2Y?of0o?+knbsKb*J{kUH#%uQJaE&Pf;6r z_T2Xr^*?-2(02mj);=5V4wUL{s=>Zp{GmYK(M>dKc`qNJ~3m=N}4Jq1q;_teq?iLj7Zsu<8 zj&;XLb-z>(NcB^x9{j)b2%x{Vo!pNxGPt>FJ-p~&J|O4r?(WIb`V^GAmsF1y9PiwH znQn6TbN83(XHxy5@KEPYbn{S0@xzfV!#zlQ9#H*UJ9x>biyyq)L*2t~TY9|Pt3BnX z9@Cb7a#x9^Gu>GwE{WWu+&R=ve_GEyMyehKa4Q59lp**nAwh*?O5s|649W)EKQJu$#@!5F&)&I&)Evg7x)q< zgs@TvYi-c4eEv^Z8z2QEF&bkq7VGg5c3}_p3Slb?2VAIx2hj_IFc?EH6q%TU=P(be z@ETTQEr_#i6Nt5qSlhI+=_4CCVIwDOUx8TLPJvvo5m(z~5L+9uwf!XoAr*GwYcB;8 z%&@?Q>L91>HBbwo2tzoC8xP+Jdt0UM+UM$z3s%qPCV?? 
z*-m{O#KXaM9Bju?2lX%<#L@8-h=XGqp1};v1i9xR_Z;jejwM)z6(B~AwRi*MnX^0^ zAq8{5ww>gi^9sl>=dZYqKXFqC7cp?rFRlO(0~hsjQ7;$mcSRuvjnNFTXn~gK3i{7Q zJzXz?K66nw*Xwu(@8W&XU#_kA0Nb$x^qcE5oWmthFSh~rp%fHU0CjU)LA>0=$sGdf z;%G?khhgmL7!Km&nwaAm9j7rqd`n6tq1*5Sw#c1M@LXr<(}vb z@~5&#h$?fzI#m{65f)=9mSZL8>ngOb3NfzoCf>$6ya%>ZWg|9&cvqp#RcLb++FXS; zSE0>SJ^`_>asUT$7)S9rj^Q}I!YQ1_8Jxp+xPb5R11{qVuHhH_hU@qfH}RJcRShTw z1*K6I zfKGS}3Fw9%=!HJ$hXF`L5~crgN*$i^t-AQ$5>0TNH(Nj!xqn1*LC12gdg zX5nSb!CcJ40xZH}EX8uH#A{fC*YPIa#yY%*_1K8b*oqIZ9Xs(ccHWQo!Z$dF^Y|V=;tJS5tNw;Ra8oeBiBce^1IprlRD=a~us;PsQpdja%Cz~)fALLc~pQIHaJlURZtBz zP#g8o06_>v1ftOxkD@sk$EvkLTXaAt^u$0UV=`u75omig+FXscR@;Ol_zI^$U8_^a z>PFC))#=0PY_EC(`l3Ic#!L|3>L210>;vtsPCKjL;?$9PK1BT?;fylPJX@v1!))VKB~Y{7Ay#kWG#DTVt%F4Uoq>rnSPjX*hdD5nnP)ENzl zCqUlTp&fPJg|_}>ToIz~1E4*1iC*V~M(pbqt@Lp|zHzarRv{Yr>IELvb7$bJyXt#H2pu)Tf;Kl=H9^527j_1-boj8_!2QpX(0I+*b%fT zFbg9wALL0Oc@p>`KEXcx2=Y4cx)2S+&;+!pp$BP5$1JdZ!-d$1{rD8rp&@l>NF9P0 z6N1=&(8GvFN6lw859$%J3v55+Fs|VSZV3@u88tyY zLg|-K)(@p$LZ8M=ynr1bexbA>l)MaOy)XglAI5rN)IY2px}rO1e;DhBO~pIdjIB6} zA8=WSa3`waA+!K(52x+nLy?JWyo#luFT?4B@M9n+B2-j_86ju{`Z8h=ychx69zok9 zmS8u?r-&o?6@Lj4X+Sfy0d1{&Ul$^#F^Ex2OAyzX znV_yQv?+#d#C*lH0Cj3q6~wd=Wj3PBMwHozdNkUEE%*iW9mR>p>^qIwcN%vDWi=+& zjpyTQ(B{V93(xo&Qea#l)5Qtkd;?|sUn?H;|WFQyg@E*3|LtMchxFJMrB$}c*XkV6pbz57w|M$Fp4^Tnw_6$z3-Y=pF=#mhv+)W($Jh8qh*q?v6>VunTUt%R zGho?PAK?HF3enn%2T>K|RO@U|$JV=`y-#1Yc?iU?4KZw!jvNrjHhZAGe^iLJ^h?{y zpkLa0kOr1*y9ql%TiX$bcEq6_acGx_pBur2S*9b)bP5H_b$SHkWv6*q zAVlXU>0q$B5Tsb-+IHm=~il2HUY0`-SLY1M%oWo4U}K zU4~&e-T`gyvQ>zL`%oSaAOU^RAB(XXYjFjC;D!)g>C3Kd(H^v?>wGN4DV)beA-d7; z-I{`ByUhT((T&{b_7kq-Pa(QLhTdR%-B;mltP`RKb?HI9dpJN2_DBKs?!o&#Herhp zJ&mBgJu6};GLema_zYhN(JL4+Ailk3g6;Qu6=!e>KMK*C_j|{Je(EhT71KdEy(yASx4UEiZPiLZs|R|^e6 z9s5yMzj2rV+SZRe?MFZNFAEEZMgMN-58~1PMa;tjP-cJ1>`$2k0zh64sE@H=EEw<< zC~v^0I3&d5ZqSa$Yl5~so&mQ1I63q9UhEelu`0-`#Ck}^aFAb#%kVnh#IN{Eh=B&Q zK^Jrd%M4_hfh;qScqGwJNh&&^J9>h=N?MJzpqwPiNv53S_Mn}~J+K^a;4Rz|!c!W= z!$W;M)W<`8JnYjR>g%CxgKD4wi0Pp5cnVW+9B1*Z5QC%84D|QlM36UwQ!yDcKwJkC 
z*TI{y1FSdr4r)H#LyErqtE&|fJNFcDAUX^@L4^RXPnD}}hG5Ra61@Bu!;ZtTTD9KmNGXH#fz z%5@=zRss1iGzg)HKqE8(^&i?CZ7~ScV`v7lF&e~g=yc4$^O%K`_zu*6DE&K>zG2rC z!%CqX9)Jl}*unOP5u;(9L9B;S_AtsGmJ9l6*fh+>T+r5Gi?9T=dDtqvhxOQmE!c+b zAcn&}26ahg+o`lMm2ISs1ZAaCMk-~bQbsDVOI-)bN+mX_r-krRH?IqoQ5Dru6LmmY z-arH+43UTdG4m2LFYWQhgFg4t=U)2U`xv@{-1YWGKRk{k5L<5wXup>jdx^1^ICzPJ zmpFKdgI8h_p2Ac-gXi!9Ucwx_iUr8WQmnvhSPS+yFL~jmjou9)hrPtgORT))iI+U_ z?!^Hd!chx9N#$XJ^aHJy>Bawr#n1G3R5>I0qp2bYOh?nsS=3yZgV;NRrHD1SCScmtq z5nJ#9cHm>|!9IM7!}tuxZ~~|B4bI^_zQ>Qaf}iml{=iKkG6YINMOoaBim<>A7b>GF zs-q_ApgsZ-j4(tZ22Icuv4}@&v_nTchOX#=-spz`7y{y%F&(o&d@_hZ25rySfFt+{ zv@26V1<>!A4G{&lo0)`Eq=C3*63@&P*pJUaY_iCgEb=6aHf7P@S@d%jb;_b2v*^F9 z(RdN8pS1wQIg9x6Aw`jO4L3j@W6Tu#^?wTuhGPQH1Qhk0regIGUni2Yz28X`Xs&s`(#czQ1=}2A%}8uC?|(< zauU!N{V^G|BWEU9KZpFx*^Bd_Jvqd03}uWVc4Nq;G4be#&KQO)&u^Y1= z^uZY7ky{E19!5AKK`e5~pIq`Mm+j}y#w(x>xzr(-I^!{Bc`&v$ zQoznImU@gOCS!@oSYk4ka>i25SjrjuBYwkmA;!7!5Nd*UjO&43cpAiD+zVKTEude= zoy8BhEW~&-DuGyx4@F~8m+{>}d&VbXDkx+8EUW>2GyZ)X0DU|DOHhvq_rVBaI)Qwg zP!F_eLMJ?i9FQjy$dd{4una4(8|3wbBSPd=fD^PSuNm5aeKs#0te=;Qg;u`cR^ zdQ9vJVl#0(p28Gtz;=)$6TiXtAihtSKs%nO4C4Ak3lNVd=+`I4fqFc#7;OKEwb+l( zaSXrWFCivTk4f~)B-Woqzf4L(D$=k3#BUO9m_%MqV!cVnLH(a(y(g*vleH0q5YYZ7 zS^r57W?(j6!B*_T9$dwrxGBWs0MPcyw0&|sI-)a1ArJKBWcpz8J0K?}pTxo>2BzOmq44JUW9F+&z}AS zzu=Y-bcLAW0QH_y9o;biiFg|93scDBDb#n$W^4uH%9Ni#yQfm$spRg|YKQ~do=Tgi zdNCSfFdvKuQ&-{-A*K=IX$o3_*i7q!7cmdibsBA&#x|y15MnxYnocaIHvnZ$r_AY; zIh}e;C;rp-2=R;o<#0bbp(lESvYuIiRk#G&{LF7cJX;fi2m<~3EP3&4I;h{XZ-KTx z`@0Y`D0c>Roe_aY(G0nmgvr>6{rFUf=V;G!6<|UF7+0U`k6EDo&n?6$oX15WX4U|0 zn%NN4VJ3B$NgZa=zccCInXlt4e!yiRp0A2JsE1w{h-54R?R$PTPJ*~SPuyOh+!vw{ zgYkF@Q?MO-u^;5?i|qF=mO(7qp#x~&i_c&NHi34&_^}YPN`v;zvVgH^7BQYhJ739~0aRSu$pd-lZ*~DP>Tr9ydoWUjhD8wAvGKaRzp)GTs!yK^eoI^N{uY`Cd0CiCxVH8j&nbQ_Wo%hUZr1Ntpoby)e#s2mVI>>4uZDMBM$S3 z!#v_JZ#YJRZO+>b>NJmS&VLx;hy-z(Pu|Rb8Ej`hIWwP}S-|@X9zj#Qgax4g7yN|l z_*005t&FN36MC3Z*W$K*P0+6 z#P_v%VEeDFz-9c7KZID#`>Q*Gep>wuUczip&T7h8O*w07paICiHPmIzc;sO{KEw{( 
z5@IcFT3Z(M=~}kAwhidJwe;QECvh4VaY=~RL(mA+@pa02eF~<5w!KcCzD_^CVMkRE zi#HN66vX3=g;{<1={geFlftL z3j>i1@@n1t*dWBal=Cj-yh}On_5|&GHwkZJ6SfHPUO8A{2laW6`n*Sd-eaGBkNUnx z+ujdCG>GZ@Q}F_3;XC|!1i>avl#Y@|&aY12m9w2?M# z90~emBe}Yf@;80~Vz-glZ9IiG;vKw)4IrPk zZo_tbgx%PS12}}EpiNtk<0MYwEa$rixgxE$7Zc|VO#Cux>m|=qx zl|an55%X;|P#g8o06`!|+aeH+#&{IX(E{|%HrlqW1L&h|^wBo@Xd8XBjeOZg9JdXG z2SYFnX&8YljKUagp%{*IWMU+8FcuRq z5l`Z2OvAI7i5KxQUco#p#9}PNO034~cnjL z4ju6rx}pbqqaPke5(Xg!sqi8L*%*ynjEBS|JcX%v2G8LIyo5P;6$_A$rC5R2uoiFP z9lVDP*owQI8Nd;&f+^<#1Hrh*YGQ@<4+;B8$mp`*F^}3&vs(4 zowjf9j&$UJc5PpTRiNLu@4;cP-R*w~v7-#ifw=A zeArDnyD4WkeKdmaMq*b@lW-!lk9FbTA05AoYW8GDG`9&%~VXadqO?41m@zjr3y zzeroT@Z`?`|&xb%l@mNJ^OD8aiBIR<3I@7fWA4<6~jQ^9vBJg zabP|cf|wp49}nyVZ94Eheh}hQJIIqy$&*iG5Q`S@fV}=R9jov*Xw#=Ba1QLV2k!^# zA9SJ#;?WA!;UIN5NF5Ho47PtTADh7T57Ive+5W+w@v9Jr=+{G)Q3a2nC0c_z9HI_~ zsKcQbF%R_dA^PUf4txaaap)%yo5Ph*1GUf{13-=(9*?I$d=IY%?KsSsb(pvw{u;#N z2>p7*4eD{EDcJszwit>`WaD|v1@$;WzZ_xxBlOFWzl1ni2IbHg#P29=I7(g~Wxb=L zK>d%h-cjm*^h11teW3kES^qNw9!5AK(Fe&GglF+G=3pae`)9QMv(vbUOG12Z#e<+P zKc^2q?+kL{^KqDrr?CQWg1-FxD}0OdLVQ8nzo6}3R7Nw9PhYgdaE!!gyoYU|?#HO> zG1_{J{y#>Yk5T7i)cM%sNWm~jOvQA3fIZlYYq)`1LVOv3x~LE8`Xz1tvLEt5Uwt_Z zYr(d^T#qkt2Iqu0UKSSEK%N{YPmZVKEfAyQn?YPplmT@;L7PsnjT2o#oleXGF+D+< zCn)m-Wqw6Hz6wJGQbFH+H3}QRzVp>iAx=`(Nn(A{jvk=RCkKM@UwG+R^m0#52r5UiV$B1BLKMQfTA)*k2AsB&7tiii@AGGi6@AyNAb9F#F&xIlbwC~(FFg~0k z#^-3~IdbG%1*Opf9YEc`C6~XQftmObzu~$N-#v;}puXQd31acxbJ&m1K`hSG2j|JR z^Yrt1a{D~FeSS37gS8^g}7)0%U*m0EzufN@d9Rn zoVs`k)babK;QjCEtMBK47=BL-zyA(DfjC}j4&J}i9`wtl=Rv<*I)QUw*&o8t1hn-B z;_w4;_<=b5@C8VUAK2!P;h;`Gvdte~!U8M;ak@<2T)q!%=Q25SnVh-I`l@LD} zP!9C}PkoS#L3j%ru~~>KEOUiruCUA%mbt<*S6JrCO0e9OcZ9e~US74qfiZXj^vPA) zag}&nrOnrxfxf!d4#ex)e6UYk`xaF6+7%&wZiG0*V;YFZ&$Q`h`ts**a8`(49tLgx zB@#K9h)LLk!#FC$ua!^(wJ-!DkO}(o*AK8=h~H?>Z+1A*9RrXE`u(?e!Lq-rAUA$D zV=&T@fn7KVws*Zc>Z1Xu%XR8~{TYyh*S`k!{)6}b2tx#NL4E(2jMKP?OG5nF0v*r^ z%kc(??~O8G`!}rUjU;%$`#08ue!B6y5H}4d1U zFOK4KVK9`34Gy$NH}t?uSb#;ih8wsg4EIsqeXY<2&tVSc;%i*M_rg%B5#kV!S(uN7 
zIF7UURv3&`P#bkI93wFr+prs-2!p~h3d<-gqhuf#g`sp+)ImLDV?2mQY3fs&`jn z2W+?825biLDo0z({XZ?-XLwZwwg%v{u@ia?=^cUzkdQz~gESHXNr)+g1ky;cVF4^4 ziYVAcal~G@I@quy_O4h)Wo+0GaZpt3ZLlDE-??*ro>%^az1I5H+Q&bhynx*Lnz^rB z`o76F^w!s$eLq5vea+kV^H30$fXu>V8+JJ3$RLZ!=ryc>X_PP>J%_oGuo`3$)`0%Q zWD?d&2VL}XH1-j80*kSiuv1yhxtz~Mp`c%XB8Vb}c#dT$r*S%V)$c{#<{iw`&rJRO zY=3?CpFk$rsL@}I{%Z7BqrVyh`XPVcnGXg;6NlagEMqxmuo5)}s4+l|0c!Zpdoa+? z4oqh}@);0SkbDN&<)GWKzd?6#Zzve-8H4@oV7U%1 zri``Rz0Cu!Eia z9twQFItaI~aL)>FN6+EzG5kc%z-@%vUHHYwFZ_r4%7=bYq;l* zO5kwZz^DQ$sl&XZ^z0XS1f$wGl4CdyHAgMsWY+R9FYqnDgaZFr3?lk70+~ld5sO+8 zYDJ7A12rQiGlgPip>D(i>?7iM7Gb9mr{bO?>@~uy5oV1rYlJ(ExC%2y+{q(6iy0$i z8u1ESk!gfXBV-!!F+SHKcBAJA`;I)6!3<+05k!+hDrv|+(mNTc@5ma=AE}>6bt5

YQoy(smf)Qid?k3x#@4n$2y z{V2PMatBc|ijq;3jG|7%Jw(YUN=8vKigFK8?jdR&H?Wb1d4}hCk$>T z=sWsqZebIT^Az?Qy#@P?egk<&%R5@$(ejSgck~zR4FxfM7)Usg#Nb_uNg|yIOk@&M zkabKcwY1X9(daow&%U=E#4Kk8diEXdAV$wIvX1#1*K;cm@B~lu953)PZ?cW|*v?0| zj~Mq6<0fMD8ym&|++*x8MiPhnh;<*a?jzQH#L7EX&au;&K_hK+(#??^!xBzm8K-k5 zdXJTJ>{Z;%y*z@9V`UusEHaMWii~4r94q5k8OMIeZhj60aRGhl&me{}f>;trW(ITfl`xyR%tOz|xz zQU;=S%2>>tV&)Vxrsz$0Bspd>w$O$Y)Ua4oW5;Lb>$IaY=J4&_R)J;6dBj_#lNnT_t@3J!# zq?Mp%S`F%@$u#X4>>|y*q{%exLd=)8mdo)zr2U;+Q7cWYw0pP@HPbfp0d6Ec74xLe z!?V-PkZy)_x0!w>YNXpsy1k_Pd+B;ix9{}l*n(cuKjAa>@HOA@BR}IkNOzOz``C|~ z#tMPh=Xk)H4Tr884^t?JS^&qgcl2 zc<%VKIS1K|m)-b_v7_eOu-D9u+|9k*&jUQfqddV=Jj1K( z#_SXOGMvLvcVZ`UnyAjiYuU`}yu~)&V>|9);{H&Obs%=1H4(jM$;0>7gRF8YX=4#) z&N6eB-m*5~UCHvUWO-Mz++o)DxGmpd53&!V9|IYJ{<6b~WDII$Paz*Qvx|{`w*0fZ zkW;pM$i5XBWXm912H7&mcIUqH9%P#(`&~ZZQ+~iq*?ZB)Bz;WM$D}X@5YIR=P=C@S z@~B`YvYRv;_ch7gO)}r4$9MxZCcDAOvYwocpPgJw9gQ^6f!-(E`Q)Qn%=O%aeNEQy z=eW0=lUd4gR&W-pa5p(u;JG<^%(fn+jK%XjaCDfT?Yo~Kk{{wZdfVx}qG9LX^p$B8WA6qcd4Df*eR z4z;H|&&PNNbLEt4##}Sznl0CCxptgehP-oSms`&qnwd`<>gAq#8R-Cgc8sGa*F|KK(3D|ZL_$(2Ly_x!|O+(w?;$a5QcZX<61k;D*3A`{Si zUN$-CJ5S$va>|oao}TmcoYzh#3$d3x`^ejXyUn|UO+3gWJkHzvi!b>GedXyZPhWX| zg@XJ(9K<2?#XFQgoOJY&FT?yA+*^JNt#r_Z9`a9M5i-oTr+hb*FT?x?co+5Z&7c1} z`$Iv28U_6s#85^MP8wz}$fba3s9m6TfxQ*XK?VhOR$%^uZhBF-;5^oH4RR>BlY4NR z1rPBkPw*5k^Cmml#csYpJ_TQgg2E8`E3~6RpCyHZv8Td#k{Hcc^joOM!Wnos3*A?t zJ_{RJfV(Pm$A#{=a4{z%o5Isr%^I#{J$6{=oho!^g?Dow_E@-ut!(Eb?6L4uK4%X< z@(aHs+d})C+7CTX9mYt!hf`xoK>t&d8Al%apIX9nW>Sqgrq(f!qgcWzEaP;}Fx&PGJlgn7!ES#j-7~MW4lPq2nr!<`jOvL9-X)!Y}OKA;uRoX-gt+dmL{gf`@WXx2024~`(C|%8E zT*=j3$Nz9Mw_v`~%{BcX ze5RH~lZlzia>&EZ%IvJHlyWMmVm93z$$6;+<@zhPhw@(J zU9N|6J(RCx6&JFGwOo!nD!&2!l|PJ3%il+a<TJ%z4OCsnjo5vaKC15H z0rXL&kE$Jf#-E{}+FaFYRTrQ}wHnoGRI5=viyDr=tyMR13~E=qx#}f&536NXt&eJb zRO_Q!AJyk^GY{cDs_muv$52pnFbPb?vuixF#$IadrN%yLdN_(j*h|exoPmARxPzJt zxd?OCn6t*5H8)}ZHMb%28kyHT$`d@zb8O*Fd_L6ZrDg}a*v%Jw$^K9|T~3kJ)Zww!58u71y#JJ2HEIi} z#ItHWtJbq>k3{aZa<7$pt=wx*!SicR=Q2FM_DZfsF12#0RikzTw<4F?ySRtP*vjXj 
z;E0io#SV_>VJUWZ#1-7YM()N9`F%yf5fAYQukjAs`G|k<3E%T?eq~=Ms0(ob2XiRH zi6ELdk{N?s>)d-?E@f0OlWJ;_bDd1;WKt)Sx+9TEolNSy2X%kLeb;ScPbjEAj1kyf zy!MiGhr z8}#pYE(Hyq-Qd{`cG*zHY>uEE{WmPa4jb&S!44bFVm0S+0T<$48m?zOH**^s(NDv@ zZ01?E@Di`EmDkyUy)^9PKYf485B$t8{LUYtpixeZeF?){jmgNTu^hcO>b+6#jdjSR z(VaAUR~zl3u@kd2nxWAQjk0PqN8>8a#T<>^)y6ekiXAns<8gL{f;j^ijm+nq$(6XT zIr?u3&{vb5nj(lM4*P7f$EI=Uu}N=DdTY{Clb)LN)N~9BIgZmfgO%v3NncIpb1m|0 zx}7_?6L;P80x$C_|HR&#+*H$h?B)x;;v2r_C-&mr=Y}J@xwGiyWY*$6n!5q@=5FL} z>~8MEJjRnejT&?9r8$XoCNPmnlp?2Qb2gi^*__R?Z$1+9HmlXVl;xPWS)R>jvzohb z11)_Rj67RnNg$aq6e7p}$+`>KRzeWEo`fquL=XsI0 zd6)P3kUv7fyu;|nK!y-S42NTv^UONWtn;Q(LKzjPJMSb`aUDU>~fcP2R?Bw0*!1^wMUJZF*=QfZepagLYZ9 z*K-ONas_I&tJSVnyISpPwcm%`w40@U4`1^gKk{=Z=m@Z*4s&#Pe#cFO-?zossF>8l<9qM(Mt;1{`^6hZ@9Z%x+JIvSd7Vn^+jt_C89cJs;&FAQ+ zQ@)*QcaBGUCa;Ogqi#cen+e=Im6rQ@u|0I@Rk`uT#Cw=XjNEe8f-eL^F0$xWw_66?Rm?^Y-7@G_yIbw#?uy4cv_#bwAD%$g10py8p?WY~wxj-u(&Qz3#90mhagg z3VQl*5N@`|96f^=%5db?Gn%oalSwu?n61Y>^;Bb~o;n&aN6(4aZI3y6mT?K@=rKo+ zIeM0Co zLV8a~NFcrUg!JBf@BN>%tCd%lEmr-HG=s7`Hmu-qG0FvS3{6vLk}^ogH=K0`2uH8|lZ@<2qa0TnyTH)y+=x z+YHC>jGIYd5}Aq2BxW)*g_+7sW0INaObT-_Gn1Lc9Ky_I<}*u}rAz~}l4)d`nHHv< z>0pjx)-dat4Gdw9V>U7;F{d%7GiNjBFy}KDGnX@)neEKA%#F;g%$>|V%x>m!=4s{` z<^|?O<`w2u=40j)=2PY~=5yu?=1b&2PUJ#vlzNQ)=qL0S`WyX& z{>2u|VIEtt6T7e*AB0EZQ8)>Y#^Z4o&c-=77w6#__+Yk{UC1tF18f7kf<1z5X4}{f zb~U?}UC$oN9><=@p3I)cp2?oWp2uFuUcz3^Ud3)@uV$}hZ(wg`Z)5Ld?`H31_plGL zkFbxkPqELkFR(AOud;8jZ?o^QAF`jYpR-@F-?BfjKeNBFf3p9W7!zxWXPRl6ZOS)!O~s~prgGC^rfSoC(?Zh{)8VFi z({fXzsmau8YBzP6)|l3rjxiC_38s@wr<%?%oozbTbb;w&(`BYBO@+8shnR<(4>Bj2$C@XYC!434Q_Sh+EOV~;VDlknkGar1*LqnQP5;=EY{ed6_wAUTI!sZZRKe?ld1|KH9v&teB5CZ#17`KHYqld6W5k^F`)M z%~zN=o41*Fn6ES6XuidKyZJ8jJ?8t&_nRLwKWcu${IvNw^NZ%a=GV+`n%^GTt)DGSxEOl4i-Y z;#AyF7A94L$wMhoMFiNX{iSx6N! 
zglr*Cm?_K_@&&I@EX)(ig~NnuVZN|XSRx!Q)CL;=dA-OcE!Fl1*|*Zs`DNm^4BfC5@5B zOOvFj(sU_J%9L`X8PY6ij#MBONhMOLR3TMKHPQlUk+f6_NDa~o=?JM=YLhyo)zVsN zy>zT}oU~DTNP0|qTzXo1MtVVdQF=vsReDo;OL|XwU;0@3MEXMdQuLbzHOrc3onbx1 zI@?-cEwmO}ORQzqa_d6tBI{!766;c{-+H(;U~RUxSX-@a)+4R$)(&f@Rk0H5an|Fl zCs#f$?tb45YTOY7KXnn}~u=Nq^3)UB{FInHU zzGr>k`nB~N>$lcFt$$hnw*F%iY@$uFS#3jY!)(KC6KoT0lWdc1S+;Cjj;+8}X!F{N zY*jX&t=e|DEnr(_tGBh<+H6PK+HJ?!j?Q+|-w(D%y+jiRS zw%udfWqZu_xa|qsUfV0SS8cD^Ubnqrd(-x*?K9iwwl8eI*nYMBX8T=cWRq-`1zD6G zvQti!50b~o6XdCKnw&0Y%QNLe*zEHkMzF6KWZ}qpJU%-zr=p2eY^c?`wsi<_B-r%+IQRc*dMdMYJbiCy8R9N zoA$TtAK5>)e{KK9{*(O=`+p9z!{Xo^Hiz4BkYl_f*)hxEbu4i#b@&~JI|7bnj(SIf zBj{M}Sm9`P9O*d9akS$E$BB-Oj*}ecI5s)XbzJ1Q)UnmE!*R3Y7RT+5yB)h74>}%k zJnneX@tosb$9scXC-I?ypaArDloq5g~&PwND&MK$RS?#QG);i}q7dY#ji=BSw zGH1}a(%Itdbgp-9a315_=sd}Jvh!@`InGVaOPrTFFLPe*yxn<+^G@eo&YjM?o%c9* zIq!Ae=X}KZoO7@973aIo4;>dcKa!K2pEYC+JXT!k*LtH?FiRqQHp9qOue&37$u)w%qxm98dNo2%2c#qgg2uA5!Ax$buDc0KBP%=NhIdDjcB z7hSKpUU$9i`q1@}>vPxlt{+@Ky8d+i<@(#5;7)WO;2z>0>K^7E?mo~x!hMi?q&w4{ z<<55JxO3fk?iucb-80>@+y(AJ_o41;ca6K&J>R{^-Ry30x4PThN4neH9qvwdmz%hc zb06Ffw6G!q|j~3DXi%6S5NyPAEv2mr$3m zFkw-`;)Ep$OA}fWS`*q5j!bAz=t$^HI67fn0!cV7;nalF5>8LplyFhP6$#rCwkKSl zaC5>f2~Q+EnebG?(+STcJe%-b!t)6)B)pjLQo`PZ*Aw1KcqifggpU%wNcb}0+k}4; z{!3&MQ6f%c6HSS7qCL@(=uAvZJRosg;`qb~i4zkiB_=0MPfSnDP0UN2lUSJOO)N<~ zG;v;HMWQdUI;)X;e@%Y4ziKi!S zO1vcT(!{Na+Y)a`yd&|>#JduA){JZEYHE6!5g3t?7%O9ET=UC)(^m!8b}XcShNAx3 zarI4sj*cyijgb|suv-`h<5Wz9{Y7cVt)?^t?j-}`e{c|YkN&qX~$w!Ea%b=x#`5vX}|mm^3Dx$zU=SS+Oe)#kq~i zW^$NZCQorG83g}B*c+g(nG5P$8-mjdgUh;BtO&NxYwVmJ2|ayople02EZE%IzE&^J z@5s^A)c>aN}`hBcg$Fmyav3P+!kzKp4_auY(`2-yV{nsn*v=e^(%eB4%iD<1((mtSzezS z3@poO2;`-{#Q2%RnEKh{Q)~R@-27#BC9>@&l<}F_yVV|PnjUEJEeOh`TP@fkJM&O;O;*A{y9&L}j zhU{QYWPMLn@x~7hZ+S2yJtGid-&rcjTI=PqBCmK(^+qIlZsv57gCnQ+xr%4BUs zT)+{WA<`N3Vx&d3jTG51D(u!nhwnXr|)pLu|JP{~xX zmE5h&!^|Vhqsk2BSb`pioNJy^-J0ch6fSRU3et%rrKW?f87YB==Ejzk(xOs2_0T!A z`)`GG#)z9BEm>VV^o2=V&??)5s~cOpIzp?J_F(~pB!0)V81jDYvw}cdU|C~RV`pQqL(}m) 
zCdCk+-l48trF2Q3-a&JrovRyzN5w|=1)Jz{8Env3u+r9gHA0AEh;DdwpnXL!wD5=Q z9;O+R-__XE;0t!rP-8{NNZol~G5;_VZgx1CubFR{Z<+6y@0lN%ADN$+pP65nUzy*S z-GDOO69LzQ_-sZyqtD-}wma+p#DF#j_DAqF9Y5sOU7 zj4X&lJQ9$IBxFT4Br86pR++CXQ0kP0$|7a4vP4;`_?5$zfU-=fR~nQcG|<7S;nO4h zsK*ZxA8IZy=?pecFAKDFOmD34(RpSN+O?JCrH!35uJ8;(xiHWfp#7$kHk%sBL((fo9M5&%O+84F%HMhOBYhOHA+}aHD&EP!f3oH+`H_i*L ztzQ{vq_OavLDZ$Br8C%0E9B{FXl(U6vIhY%-&5ds%o>E^TpExS(6~F;KDdG_%4orZ z={EBNO?103xDm6j?xw!3qyhO#8t?iY`GfFUexQEU3fk>j8p>!~Oy{?mg8(TG2AVoo z(w-NZ)6f_+j+w9-ja8~Qqwz|OGI>!+M`2?}eN!vlLGilO6f`YTO8+7irN{mv8|B9S zVkSBy_80l6F!mS4=+M|-l%vYnUsR*o*k3F}i(`LrI9e9_i{)r#>@S*7OYAS&QD^Kg z)}W(fe{l>_Vt;W0+8FzbQ_<=0#c9fl&FBneC9DF@!rE#dtd+HNt%Nzg<0VKTpmWf9 zThJzSuF|NqD(!l^U4SlTTwBnE=py9^Wz`mR3A$8iQks>PB^0Klfey~b<&7{t9rWLT zSQc2((%9Jro5|7Lt8+ww-D+d!1m`gDe_G&q5!$i^0!+Q|ZD>bC;n$#Rl{V!_rKNK0 zoJ~)3E25jw%}c}as;9j@uy!l`>=ve;?i|&BYE`@q-LVCJ_M*GcPINb|_C079 zx)J?Cpa;=I${JuR5@B%r%bNZ7UNni5TU6Ebuec{M@UP;!6}5KszcE;=vlwx zAWb6_g7u7lwxZ|I^GtnTVst}58@Q*kR?02t#Vz3MUX0VLxfi`j!%FlDdKJBfUPo^z zib9m*l;f2XwxhSu+vpwiE_zQnQQ4@RrJPOB{ey`s_c2iHCGv|EvGx*I#tMi-0d9=o z(eLOputcAuFJU-;p`5fCeWje-+i?C4hV%F62jvvyjPBw5Gx`li)Gz2)7EfMq6hI1<$O9KFW8EQ;o{LpCfJH}|ED@KEccNmc)v+t^pfvS&HE>Sk<_DsOZ%!I4)L_7&k##8WAJWaV& zxlFlSxk9;8x$0^>9jD+_^?w=4W`gb|Xt)0Vd!UnyEog0OqLF;CAxSkfiJDLIWu0r2 z3TQ%gMQc0VXHmQKz8oo}em5>|HE9sg))ZI^u?8*3Af}g58))jHNd$vbXhw?RGY!S) zQyrz8#)1YAt&_||=iTf?L-8zp2%e4SU=Pm61-KAyUla1(CEEw~l8 z;UjT7?!cY63$MmU;Wc+uGB3_cbsnBe2^@%RLMBHoBk!YAWX@TvGTd^$b@ zpNY@HXXA75CVVbF51)@Oz!&0+@WuENd?~&RUyiT9SK_PiX1oP&#oO?9d^O&Iuff;i z>+tpX27DvF3Ezxw!MEbu@a^~xd?&sO@5FcGd+;uNFTM}&#(VJn_yPPNeh5E|AHk2} z$MEC$3H&5}3O|jX!O!C7@bmZu{33n{zl`_dSMaO&HT*h$1HXyi!f)eu@Vodu{678w ze~3TAALCE(r}#7cIsO8FiNC^M<8Sb{_&fYP{sI4pf5JcGU+}N^H~c&P1OJKt!hhp` z@W1#!mSGW#S(Y`iX4b-TEYAw8$V#l0wXrg5XC17Qb+K+XflXu&V27|n*WGAtc*(vN)b{d<^PG?itR5p!GXEWGLHjB+>bJ$!q zkDb9D%+6$Iv4^m;**UC-&1VbPLe|R`v2)pCwuC*DoyV54Wo$WH!B(<|u~n>(tyZ=u z+m!9f)yfX#8s%E$I^}xh2IWTOCgo=37UfpuHsyBZ4&_eeE@h{3w{nlNOSxCMPuZ>P 
zQSMhBP##nsQXWltNG{L1_e~6O=(vCP7&QWfPP`P%c4v z1kE7mV1i~6G>f1^2%1gM9D+Oqw2s)M^g&;!EaRePt&T}#k)1YJ+i4Fugt&`kv0OwcU^-Ad4H1l>;19R%G;&|L)WBNZ99q+bm8x&0A1_glMYMnGB^(a8iBY&!1;rFiGol_uhV&Gn>^L1=80 zP)%iJMOC%8&|g{VsV=IhDhs1Fg2I746>2@DC53*EvG^tsoQOOf9l=hhvd`zO_E&le z=K0G!bGtl5(n1QCL$@?a!|%FD&)y zn(d*Ago^W)d-6-Y{)%#cp_i6d=&$nnDr%|u>;kae}dS6|cv0;vhBUX`rs8Ni7 z*!UP|#hxl?zAA4)MOmfKUsw`0-~><^8B(cegM=S#pZuDV(n5c&r>ewLULCIGNg#V* zPuaNz1^!xZm5=sdy;i4!U`kIxwfzdI9R(HT)!y=I^-m9Vr{6cfq$u18&xmPJs0s5+ zstYO#!|i-F$WH88&|Gi1x5`uPh0ak_Qd&|zmloBnbS@~(>8TWUPDyz|X-%QmU+neJ zhVuD~sw&E8_bMr+9aigX5e~cn(5Cl9^Hf%rmK1nkSo^&TXl=ngeotiy?S!Rux}Pov z-SnQiz5)*wr9DjRqkdWiFEyj0igrEKv~Yu52KwU>oFFIp8tL}GWySjSP*uOLWG*eZ zyt<^i)LTaX469rTD%0Ys=mW!FRRe`qda8?cW4D0jxE`7n)y3W_)w^1eMWu#Ou^lwV z_Ryd{tStd=K(lBK6mHIIKw$_f47N43u7xWRbRbk$l~hKcUJpVeB7{7p)!wRdXz{sK z6*ZN*#W(dPSuvk>5?%6EkUTIBW|^Vy+yR1<;s{oHtIA4zP&e9K&}cgRPS8q<&?@v+ zmR8h3)A&m(w2q=n?*i!w@uVv&eAU!#bnfy|59&I*L1%b`jy_LCxa5KOauE)B801FA z!~=Kup+)qv9|PHh2-!l~Y=(8^Nst&8A)#Am>|xJ<*!T#sGEYH;ZvmaQ!``a)RL%8< zCyVDnB{@Px@4`MhLxU)ta09TZVQK}BVq zzp~ilqvH&^r!M+Bh>nX8RlnCE)hSqAu5^vJKx1eO4PSL#_iFYoNR5q<>b+_ofI?(< z_IWEk>YNy1@y8%IF#@@yvbdt$OV6F+`uwvP=G6A4@FgfjcIf#Yx)k=N^9|^XK{OlQ zKu>{TR--=Gr{t=da(@jC8w$Nex+lK}`EhaNL$YeG4G+qnK;u9(FWBA^Y)Yd0->#=vc#?>xFj6qfjC_*mzIfNyRo9SY!Ly)jHOC{62D zqN}mbTUG0&y-+U*;xC^cqBlC(heJ@4vqv>(+@e)ppJ5D5X-WX$(cQ&FOZxNau-3gg zqzATlK`0QW;aPq~sW!{&!oxw>-HlBRDX;KXmW0s`0)Yd%1)xHHx?WY6l!s-KKxRU> z3^>m}zofdD&NQ^@o~lAWZP+Rrn05PZEa)V5>(DuqMw$NVy2>!@1Q1E+7ExnRb&b-k zp9~Ts3=-k_Hs^&%Z zcn@fdiK(Hra9yN)Vc+twmWHb}7vv3HomPuZn;_!%d0}s+26ticLqXi&vF`Ok?dpCn z-J|L)PzJJw9-y~CM9Wt8DBq{;BI&A3{jZKPx|gdg(J_4>KCIgvKHBV|UTW-owIF7g zha$lu`biy#8oHp``Dl}s6xI0+A)RS)4?}Cp!%Y`!nEQJeI^R>JhO#{dz%mds_>;Qd z(6=iiEDeI}cvMM`j{_^zIUcqTb)guXZgSPd&lpjfR)WfOqe?-Q7dm%Ic~J%3deHqe zcn~%nR6oC@+*76RI!voTceqiPuA_7gqEoZF_~}j30&+=4Iqw2*K~1%~mkw`gbm=2O z`k*+{w1;%>>rI^?I@KuJZ8h~hO!3+VlqL!2>8P7P zdV;ae(3iEXpSHsL{KZ8y*z{C;be;1-XQEL@pSkt;uc)Gw#y-Z~sp%q68e>%IHeXja 
zHsqxsA2H@=s~Iv$etKn_Zl8-vy65;SKxLG%2wg=h$`~}7HiPicSi;75WE)70i;z;s zE?DZPkwJ8y*Z~?54y~-or>TZ;Y#hTk*MZI?V=1r+pyNs%EqXcLa+*0X#Bn!*R>Xjw zUq$C7>i4Lze+!6CM7|)rbWW-abgqOPU+H`Z_d<(J4GrfaSDf2HX?zc*?g1KWk-7_1 z#`I9pI)z?N#2R=H$dBzIudlAL3b+pxM)gpr@-B>tem@A$>>=!3pr&Xb#aK{7lVUW4 zH5BCc)#TIUo4U3cBDjZQA%&7X14DWgkVf>diTXa&J?LQ8N7)k~*|W{mR_eu}PlJ*h zRjcnjXs)Rm61Q|87oM`71A%md0IW)WH7i2XJrG;dsEV#M9*6^|Zjq;?w1&p|x>YZN z{y0>ty`k6gS)m;X9c*eW*kk723mPLMG(r;US{Sh)yaux4A zIn&dUl9N0Q4fM#NBgq$_SA`mn4z|;cbt&DC`RN%1Z3Q}7X*RyPWFZ}^kff!_S^BpM z>d&Imiut-JUjX*(C~P$gIXBo6Y*(-M>d1xO{F=FHQ^3*&0ZFO1)*JTk*8>7ukef|+ zrVtrYL^_Gl%s*t?bj!X2u$ld}ER<4K^Q8q)NzdHyCgVpy%IX^llHV|y(me`2^zax` za;9GZVov`c!eNZ+EgH!{^Q&7-y=uP$TK)jh)FGxfj7zYGU`g5rCB5KYMD* zt19TBOld_$W!Sf)K))YjT=Ok7a4i4oEj0$vVvNbIj?UKRBzQ_mU&gd26*UD`(D*8J z^kUp%n8(NQOs@!)r8P7)SL2I7ngmEOdR}ZKeG^;|fiX39eM$o@E4l(JXtEU=sDPeL z`b&H?p9^PJa5_jo&^S>@J-Mo?N1%6loYv|-3 z&avtGnV>(dkKzmH#6yer)zS4*Ke{&OfM#+ZnkD6Gd{1M2L$1y|15{`9p{i!Q)V!cR zIcs5UcTj1b1vt?IHL~&{-_aF}PNS9K?2&m6C`T_hF_r1uONS?2vMMV|;5bXoPuBSh z=n-akpcM26k4C(7$kRbWgQ`%TtuPe0`hB!lbZ<_~MSz#nhm*Qt0Fee>ky~DK34rv~ zyfHOP`%`?#x9ZU`O~Mq_lt)A><_Z9b9_}%HT1-$`jufyrf(4+!wH3?Znj}2%mM>TB+ z07fs@hRzxRs5Oo{)vACre@=rVeUD&X4QPk-+dK`;XdsPO^{TD~tmtsHcgu`e^Sz$1 z-_`?I^hluh>)YB}>)}a0+!G0DhjuS&1ocB<<7mVwaDu>a-t+rsHrwIS$~XTVsBp9WJsYRkqTneDn&m*02y^P>^DJm+H)D#MZjFl`X_R?!{^hy+57W2`_H{t@1`Ah(g-r4s9*pp8)inMj09VlvH z^on#2V3!WSXsu7HZBtlLpdOQHI|tZ_&<#}B9p?dZ+_sM44g<2ctdgEy=T{Wg>7(Vs zf!YauRJF2*Ui(ucOiuwEFVZ9j^{~OtO8_}~==8(8G(af~=Sa+#1CDoap3J8wu3%YY zqRV_0K$Z*+Qm0AiXVIfHo&od8=#a^lIFN+zF6#2V=9I?4|dO=nPOyPkF+#z}*9Nbq_mrXnHC` z&r=P~372p$lrWGPEKUh@HLI=we}s|u0P;YFUmWCe?|jIG=}q?_z(!|h;!ik+o=vYp zYu7w$^fa;gk$tScx`m??qqY~Pnfh@39|z=t%;4erhsGVfic}S@_EUg1$U*0$=?Xfa zD{ANkaD8k?59?>4gunnhu(qnx;5&42ik`yh9)Zpf4mzUO`xl_NC4(zYuY|rsR1YBF zwjOLtqKkSNiYgmmQR?akbGwri|3{Hqtr>;a4FpU z-aZ(q2Dp0T=|+ACfP)$@HRUQGU0A}$B<4>5Z6Hgk!G|gu-2N=1YeqP=_W8crM_+l= zgvaw&`|3XRtpRPiVL-jFgnRUFfY)n}jigG zP~DwcwOID806mNe0oSRyV;U%x=pMHS19MHdpV5smO(bdS7tJcGFJP)G3M%yDR*QAe 
zmK*ksVK1pWQ*D_FjSPz&3g~qOiST{c@=zLr-Ve}@cOx3q1)#mgx3NL#n=pEmPs5Hv zdI>`V*6mCL$X*wv2uQ;UqShW>H~8s?;rPfh6aW|WwkR0@lL9RbNi_YVCjiyKq5{(^ z2->08Sa{eBGT+vy6iR3f0$!@X$Wc)ViYk{46;s{LW2% zOh}UaOK&-q(16l-pUW~45Q_(kSn8q4|KgG|IAk*1|FTR0)ENUr)eioQ zCvBEwz&WIE96e)5-=O(v1_W*=g`dmlBO?{CX7(G4PBvBAJU|ypdhfKPG(6d40Meja z!FW=t+m;Q8*?rp<(XMpkOke+m=hi&H$Qd983{ZON5eAtFAn9-n|jK@EMKLT^#fJ3rwndZC#7!4*S0XElBF=r8ZS zlW!>oJkQ|pVs!a=05@}Ra1p6mOF1CT8XQuOzI7Nt9Wpqmo?WaOu+l1gp3uQR?OOT3 zeAqpEXWntWWZ^1e(^;`(lu{ zhQCrSXDva$cKv#Ijis%RkgcWxlZQTKucLR!^bWfOkSh9t6ca_gtPgLuL$GSE4Zk(E z91h^K_6c0KvmT%dqoLGGZPW=R9UWb89-*13*OG1zC^7|~~Lom8lhCT_W5G_rBT(+;sTB@F|oyPg9 z6`*qm3?0g+>Q!$Cl*|F6%q`Hfq?Rr~h|w5Yv(la&4$zXrfmyhb)!@uiO>cSV=S7w^ zfai_2sQ-B7zJelkF48Nx4j^NUl>S3%vA@18GG;I>#{l@;eX*BrBp`}5oR(V%fW{az z{kK@X``CTE%W?uB7VnFhp#%!OW*9z?vz!E|`_xzvBda-Qy|GT+XG25LcR>X`Sk_U` z0Mr-*G3-!Zu)eFElIBOvL-fHAsJ33biZ5xrTF$+DfD4@<$U@it2^Gg7z(J| zZv~XigX`P+b$iQYfWB|dYIIT+y_chV=1PF>ZPMtwS&h&%=q#wzk6JBT0K9y^d8L-V zaH97VDD9N%CH@aO)K6P^KNex(fC$=CfUNMzUH?m@*^cHM3g$z@&k~E67M-B1sIjvGW&H`)@*NJawi*%YU4O zG8J+Rhd9i!oQX3N^d>=X5%e}e?-2AZLGN9SQ#l^k3ONa9s62)5>uiM|5~LCq_QeqR z|BR}zf6R$S-}XjW$R*O*hC6_u55ltzHyoz6K4u%$-P{P3?Qj@(5I2$=#U*j0xiQ>W zZX7qBo4`%vCUKLwDcn?U8kfvX=Tf*-E{#j)GPq1Gi_7M6xLhueo53B-&E#frhj6pG zIh=>f=L)z&&dU{XbGc%!ggca*$CYwrTsc?4RdR=MRh*Bj=4!ZFZa%kwtK$}Oi@3$y z5^gEy=MLur+%m47Yv6+1a&85;l56CS;8t->Tr=0gwQ_CTkz701!F6(7+-mM9ZVk7V zJDOX^t>-py$8g7T3P-r(xZ}AKxD&aJ+)3QY+$r3t+-cnD+!@@N+*#b&+&SDP?p*FX z?tJb7?n3S&?qco|?o#eD?sD!5?n>?|ZZo%q+sbX@wsTi=JGg7OYq{&V>$w}a8@Zdf zo4H%KTe;h~+qpZqJGr~Ko!s5rJ=`wtUhY0_H@AnopL>9Nkb8)Gn0thKlzWVOoO^QJ!2QVm#Qn_u!u`tq#{JIy!Trho#r@6w z!~M(s$1^3jyC$!8Jt5kYi% z`h=iQ3Hpqn&k6d1pf3sfilDCv`i7uy3Hpwp?+N;WpdShPiJ+eeq7%rk1pP+P?*#op z(4PeTMGzhD{}A*qLH`lV5R3?>w3jTwCW6fbTL|U|<_Q)E773OJwi0Y3SSHv`u!CSH z!7hT`1Sb%jNbms!44Z)zIFsNkg0l(EAvl-dJc4Hsd@#W?37$pp zAq3ARcn-lHg7XP3Ah?iVFTq6w&n38+;1YrlC3qger39A|TuyKW!IcCbMsO9uK7y+W zt|7RV;Q0hEAh?d;g#<4mcrn3C2wqCCpWwp@4iLPI;Cg}^2o4gwoZuA%uOzsU;3EiL 
zMQ{_r%>=g)+)8j8!ABC@PH+doodkCgyqe&n2wp?*T7r)zcpbs(3En{PF$5n=utG4Q zoTCIEPw)u@pGfdVf=?p&WP(p2_*8;VBlvWJ&mj0rg3luOY=X}rcoV_r5_}%P=M#JZ z!50#I5y2M|d_HP3Eo5S{RBTi z@PhZ2B={$Se1oJyv?f#+`d^9 z)z?qFnyBrS8j)9Xv3*b@@@mp`0Mv-Qnm+9ZH6pL(MF&WY$gA1U!B8XeYPOSBq`%aN zyqeQY3oFn>Tu%~3{U#RulV$+v6*vPmBCn<~2SJUA(A34G zM&#AJPT$msyqckjEf?VsHQmxbYD8Ymp7chI$Zv;DNT1Y*{52ppAZkSZdQce@H6njg zv{5mr5&2s|tY6fK{2d@V5NbqzCusCVjmYl;sR2+U^1DGHvO9NEUBPjyK7T#{f)fWu zjmSS7!<^oz5&6eJA+kgFMUBWm2|E3!M&zFX`GHU)^3Q`tKd2G;mq4&T)QJ2my+InN z5&73auz%Eu{97PvAR>!JjmWZ(8j=4Gbb6&m6flVNMvW+#L83p@hyo9ihW@A>&_^=E2@(hy zc~JM>3X3?6o!CoPIsT6 z9caMRh=Q7C8wfR`a1a#Re`-V_sc(5BEtD`8lNwPt6yzdE z@Oq&}6v{w4f=SIFO?yZ;C6rJJqJyAD6nvoAZ)!xLHp-;#SLTuL4uv`pjqph~;h%vR zMpz7T5q|HM)8;r@)$Uiq5hKD6x&xp_6qfZ;SVS>~$H<|TP6+l=nARGRJQ2c5P>Aq= z_F}yUHKMQzcA=?EsjKB*CfBSB|?)QCbSDD|HjQ8)_ZBgPzUwO*+a zg`+{GpVWxL1`zIx8c|R{Y9Q2z!ttOH;m|&*5rvJQGf-+o;S|t{7|`*k5rxx1v?tA8 zAJmA#S)eo^YD8fZsPvy2Q8*vu`^j`DTm-`VgXvJXG!_!Qvjo4mF(9Tx;R-rbFRrK-wp!L*Y4qh<@rAWehy_2}gQi_L&|`hr)}17X9+C zceMCShr(XKizWt%iKmlkMKB!-uMMDneKH*iZw{b-aG+A9(rUmfw3tkX!aIQ1k4_5S z4|RmNOozhz02a+C73HiROozfpptlc9hr*`-5QA?He%i=LDhjoT!*nQo0T|J5>7$I- z?r$64mw>1FBa3G=m5<@F&&C- zP>m+ri8NYGVD-v$C>{Vf(E~LSCp7i-WI7awfpYY66H~b#Oo!r#{@|&P=K95SD2@WW zePlWm#{fuQ%^Pd)D~|6^@%jlg{74=U*7aaI6ej^l^l*>q7ch?m=47>fQp`rbrb0O5d$MZZ%l_`CLqNhtTB)xm=47p(2qZEbbZ5uACu`&oB=rT znH~2C9HmP4shhixJMlaX#0JT*+9@C*%0ciV&=}@c!tmtsHcgsS}sXoFoJR}!u z04#bW^k66qX~S_G%QU0ep08gs3Tv=IW)pTAe8L$Mi< zqfE)-dlcDl4nGVI{0C6Cl-iW9Z*TiNz6i)=~>ZsK^ zev=!p`^9u9o(v`QO6<|2<-*;yn?0;bC5?^EbSRz%1q`IOg$szsbSRz)ki9b2#Irff z;}gYnD4sJ=SNE_}hmOm1D4qu;3}gn2Qv%I=L^2(U7XtD?hF=`yNTx&a5`c})&h)GI zMKc|Wm+xc!bzZ+HrbF>6Kpw~p9X^9Q6Hb_P`nL5qr>?4T|C@#Vly3zcLHFqyl_3{Cc}&!hv`tfdmoHc zsRZ@yeJGhH-V1<(8ZR-J4#hoyHjpLN;6v5Co=k`0gZpZqk?Bx;WMAFagXvIw8~~&D zwuVk)$SbI!U9U`s;!}NAHz5%ekZjs`__Re%Dz74>=##`}#)m?Cy1}+}c zq4?gu6`+SPG{^u7#b~BO@xy_+Cfv{ZVmcH*8MNj7V>%Q+hXQ(CK_Yw~wmg)E&tfb4ZqihwlqX|<0;F&&CO0O0?c=}`O`O8K8M9g4s0pCWo; 
zIu!qeBKkW(2f=hG{sRRJnqw~p)1kxw;{IkjlvqIBccw$h0yz7J=}-~?Ykx2uN>)G` zbSoH7qC-KIWCz53V>*;vfU%EEhf*Sd44&yw8VdS@V>*-$1l|8R)1fpHirIfmhtg=k z+iy&V(l~(IA54eRL_pdvOo!4GfZ9Jyhf*?N4VvjtN(J?OU^ ziZfALWof3;$d}kV)1j0H;QNK?P?`x)F<9FM&2%Wu2E-WTdVL_qV>*=b0q=jobSQZN zIR+C~AC~suP>jWNC=~~nH?{;D)~~Ou zDD?R?HW8XU(&K&tl0#xh!ah7iJSQD5oe;r$D4is2VVHv_=!kP$c zAuLZ=k+4?6%7k?g)@>noCu}NV z(+Qgi=4_X)mUc+jNY_f&N!Lp^NH{*08m#`NQ_7cKgPS|aPy+)b*GP8h@m^x-5vq*YWVU$FI|MNSJS(Ch` zp58J}ZfNWXP*R-cWld{mq@;9o2I^P&m$wI+gR`3gT`l!1eelDXtu0l-<+E~@*QcfD zX65DPrl(F&C5u3?IMA_jR$69uPEKZKdR9()R#t9$PHs+Wnn6ZaPYW)u&kAOx8jX5N zHHt-aQ5Ym!VM=Xd@Th{;md^IpCi+w1)0>U(&uj23UBzO-mll=MA5xvy*qPG(x3;cj zO^qEZ=_kI<_F$m7s0TpcN>_{mLt`0V}wguZ$Dw_hG%Uj!< zJ5m}eSGKkUD}2-2LLV8Ve1-E20v!#jQhaMWI)lwArH#wl1MO>5)M?|W*7j8$DMhVa zEe&eX)74oAK20eO1{#9x9Vz9$s;-vO*4DPj!ST0Ng+vQ#W1qa!8I(A?NU4e40b z+1l2lhlEGBq4&@WbF1mh5rJ++8v2m`!5o>BmztK9my?&4nVyrDk(Qm2l^0`9n4); zA5m$CX4F9g7+$T*jtJJ%E~`02o4rd~mbXSWaiRu1iXO@J)0-)!{D6hK6_ zz&NcY3`)6c(muj$C`)wtp?iGNh-dbU;rj+v5 zO6y_PDyxsMIfTt6Y#w1}Y_rx_YteWs-I*Lr*qI95q|5@o@1d3Ljm>m*FKn&vYNl~R zbz^goZW5Z?=zH%TU)45tG%jlj7OV`k2kJY6?N#*m;Dbww8ykZ3UXPXsq0hK`DEWd- zbZQSaL~2oX^4aU(^L={9f_16WlCrYXHq^pjQgwoG>oV&Ks7SrF!5XwKC+uv(&LOOa zu=(4pE3J*RBC7~nK-fa4i1+`rBHR&aj?IYj8GU z%L!WnHK{Tly}N}kuwEJ$^fK$^6!c2MRuR?* zpw$}aeOK(x{yhXr`%K-s+`NowNg3H;OSfCE1JIcL|9a~Uny+gKJ3r)WwI6=Ca@dr` z>oU?)DOP$;YGxSicI(}Mc8B#&>s{8Jgsmg&Lc%U0?BZ?Kd#t-Ov?YXH`hVYY^A;X{ z`l=(;mdj1g3fJIK>r-)^{IvBMtpv7TE zu)e9G)f2YCfVQio`Pi!$ugghGO#`&N^f1~7)=%TM)o0evH8eW(S9GJzCgl%(use*F zozdM#zq9@v*S24*zf!c{3EN26BLHoc)}8j$cip&S(Ymx8wP$9g(hilDmyt#73>*2c zjRn|P^N`JCGo$e~jc>X*ubJ`M_qgx#HE^=C&nh z^*)lY?S^`<|MIPQ+ZV4(&rPRolAfI#9!9nUZApL@tB=}7+s04>Y0j&Yuw8(*S~YOq z!u)qHePPMEwA4J9sWMV?ayC#eWMpT26MMYg>(NA%Ml2sBMdEi>ZZ6340u2j|Z?5v@W#!>0^X< z!V~6#by;9ww+kC=jd3B5u&ttywC`*r>`4H5vI;rpS8wIgO^a!JX3tGar*%)u?w*Zp#I`Z6BTurO ztl^zO*fYEFW^b4`t+F^~Tc2e+FRpdx+b*EhzKF1N%{T|pHff%G0;Hsc?;+Gn%;xjLQAA&XJ=9iGqN*`OTF!0+XHcNAGAHBS$HX7FYC5& z-oh;v37Umz>EY?j_N47Oie!7r_O$I8+p~ndg0OTsyo#`!x7nVzy`Uj&A?(%=(*I!~ 
z=Xs{d)e=;FoR-&JgSTw&gD+x^rVnf%QXhXz*zJV98fvgZ>jS&SJp0zywCm^OLf21E zhqxgrKu~^W-FZatgf3S-AySdEU&lqMVE@!_GzKHK1Fffb#MS^KxkaPtMHD zD4_pI_a^72dQy|~^K{rtLBc*n*oO)G2w@*3>|=y|oUl(^BOk_$LgVFXxkj#)=QE=S`=nxpA@LMppC;@x zgngF&yQQu((7qzrNprso8#}yBjVoY>M?I>2OjF2>^}&*k`Hk`sf6xo3@E=snBt zaA_ZV+Ur*~cGCSG-2sO`)If>?&5ccKDZi}|fWCtdHUX#a8|5Qpy03hmurCny#jSF)+#QFcUFO84!p@Jo%9(X)5Ke0tXC3$_K?1N0}H z{f^1KOKWAQgMaKCUj3mork+U}kC~*{{rn91oLKeQB%dqOwf!x^zD?M7=(IjZ!(t9g zs!@MwSZ~TpH75v z1)BYRM6F(uZ)YU=4*Aac<-Y0kj3iLu#DQh>sHUScHFYn-tjTOSc;l--;RlWwH-2hbW=_qxmae9zmjzL>+T;c0^wI^) z7PaH*A(6Vc+~?_}A1v$Y40b4(j%~Ki*HvF13^oKCUUs@f_W{GeqE*4Q9SiB7p;EUb zBq}Dwyk*Ex#i9s`R9o&Vq6bJ+vY@r8wSDhFBS(ozqsNS;pZgk*4$@MW7c~V|bkKjp zgf(`~1g-K;n?vCgo_6W!>XTjV=%}Uy!17z08eW<(anj_4nyD>Rae?;NAOL~PmPu16 zwszC&{-)1dim%Zw>8KleNA<-U2rwZR5;CZlcf;;cie+gn>Z6~yq2lNrv8 zWs;e6CWrAd^O!1T5wnbGWsYPzm@ej6W+QVZb1`!na|Lr1vz6J->|m~CZei|b9%PMqgL^Ws$IvfR2JL*D5qqEQ^v;*CP z?n4i1UdBlKlbId*GxP`j(Ma}FZNc0r-@T;VSJu@Tpc8o^?S5P3o$@_QJ@qsEOCKfo z$`8>W;G?s~Zh4Qa$PdVL8KHB>=Y;)&P9KE*lCWQ0Egy$7CJpCAdX?nW=Ae*qLhDo()WZS<58R|8_D-<&v+M$zoU9%Q zy_GUe8AI5gl%dKvWu!73=JN1m5BX~?GW>b}=Crkd@jLlGYVn|8WjIWObjt6DlCSIQ9;KcTQ$9@kK}9<~c?&kohvQ-xxYdDW-B&&6s(1(}#r*RBYws?= zqsX>~-|vJfJn3o!k|046V6fos?h@Ps1PGEq0>L30cXv;MyUXAXgC`+4%-|B-{r;;v z9SAd=GvD{zd%yQS&wbDPn_<$`wbx#2RdrdDHXJTjs**o{`8)*+7A%^lP=QJn@)XOT zzi^&P6^oWHQlVI7pF+hd{l^tlDpavh;R3}gzeLkuG^7{as|+TC*Ql!rG@z4yNYH{GQ0-cN?fkp(m#yhZ`Nujp z>ct(wUj54RsE#imkG=GVLF}tbSe5?TPlZah`nkh`SrDiQijq}F-XOdaJ*(HXQ;^Uk~Ne!RFH}$6=Q^q%QKCAxAX#*E0TDp;;h45Z#Xl!U=XliI?XfBns zQb{KjH>tQw#bc?VrJ=w;|NfM=IGkF;cwrf0UIRt+xRuG=$9$d@I@>7402D`)~t{qp!+=`sXaOvgnOuJ8$;yVC#a0!Tx+$>AOPS z&u+wV9{TsU8uT^C-zKxaOFw1MJ5)c!qDvVn(0<@z-xv}ylW4#S{ygL3pRjuSnydrt z_tn_36ez4;T4VPz$Z;LaFw`)dQ&Gb(snFIVq>?=>kV7B_w;ivCln>EY;z;ip19yQU z3}X%Bq>@7_IU@`c3=^gDn^f|JPr1W- z{QNG*8|E7pbKvc?!m!Y=h+nd-Y@fo0V+OA6 zEn_Vw41X9-8crEb>kAZ-3cHkIQYkK#61@8TfAN!!pL&z{XVDtj9%5wJmllY^1$B5SqH|;^`zqDE@MR@V4%-OtF?W*+}Ro9c~Sop5tfnKkBdX>sh 
zukQ={2GsR+EcMjz{Cn%XkV?6*K*vk34R7_`N{#P}9X9PvY#CPZE}`t$)J&aEg%d!rx+R5Y(r-ex=f% z!%+Po@b61a1)VqgcKcscmx_oc=u2O0Gz2O73p!jF(wP2A-3(J;qP!(Np5^L#EjYooO>+E{IzHeQ>cP1Gi7 zlez3`JHyvM?h>8tb0%AnlU6RV>n}OhE|vCD@zXe_gf>UCkJ0993wX;y(Og@sg=-O9 zD&c6+mkTl>L|L8-q=EWJh3!H?ExsC2fi!j1iyS624z>L?XZwnmXiNEKM8b)9;#@>( zOSEPBg=?;lCw?VHTcNGgRuRE!ZH=~8TSwad^L3Zhnp8STg^TT`zh8N=$Ffn|6rcSF zZKH^v`4(*}_X-jM{JGU5wQbsVJ;Yxx@{d=-3xfTH7N- z6?u5L7rkk}OCXi5_AvKq`&ihvW%~N}iqQ6p_CNf0l&!H3YQ3e>o%VI~?$NNwdQsZ% z+=f`C9hXXweN}&GCv7(*;xB_Z))}dt(oXA>;$Fp1Fa0o*qvy|RvFx_BbJ}?=N{iNF zvfb}?##bC{r!|QVSM^31G>v7NDLAGFUA+DGk^RKlbpr7|?}K`s5BaI~jCkm%{<<(c2sZ$@EsqPgQ+isp^v zC(fdMBQ{?DUbdAR6|NE)RjCXQ3%+1XZcHIk8B-cliRMNZqpQ(i)Yus~&JY~iOY85K zAM{gI+Y;UV0s_DPn9bAH+jToS-uwNvFd>zZQkg0h4(z9p#e{RHM5)O8Pc^I*q>p0{ z-0ZYviP2)T8q*llN@bK(II7~Xe-hUrjP6Dc=8@i*!I-i6_mLafVv@>esf>}zSS|n- z_RQ1b?k92xB&V6V^}1 zez}YzGH+vcBPW03r80p)nl|jL_nBw@_?NYh&OZ9fVYWk@v5q(8v;TCm{imG5mFl2h zP9R#_SE@WY%koUr2xZJezS;lJK8M1_BK(>r;!Bk1@4<3(!Pdw&Z98UaFh!LMrsLA71G@kiTum@o!C7oxUhP z(=Wo=Pq_a+youKmmbV{UCCX;}8FCSy_$zP5=Gy1Q><8={LaQJDqpj2Q#qk$>jqQJY zp`-mj9O(`)cGb^B3hS4QjNPQNG%TlmZ9z`cBaJ}PzuWx%npf6x z({d+z?-=9wpWZvc$OgYwD(f7BCtOXl58k6(q7_Ut>L>ih>BbpS*&vmT5&9>H{ZcW% zsMl6m{ZlFa<4V6?XTR@5^)e;A`om{dq9sb_`!C-(`1^OY{(dFTxX`#l-wGBP7aPNk z5ynX465~?iG9wAzB9*OD*(R0kQrRJuol^N-D!Zhzd!=!uah1rVpC1|58P^*(a6Me% z1SyJ7MBAB>RNhK8l~irl#G7}sT^_Yxgwda0uqWL1Db7a}U!Dr?pkI5`=WBn7OGu6F zw*>9~?c2LucaAHY|L{i5%sco>u3ztNy=)(R_G^}&@jt4kznyPv1@=n3htanb`{$&8 zaKe>Jx=0_}@pFAmF~&W{y~cg)(f9MYbkKN+PdxqJm;FkwK9xGQvzfG=`~w4ehu8zN z9Z>Y`Wq&1I5BtO9e|t$PXY|jby;3?4(v zQaNDzaNJMv_Q%L-MBaPyMszkW>!aJ`?}>LbLr7)o{XiS6`Sb1V>po{)5S|!~VfkFww`= zFMhhxxuCuVAN^L{_ce^O|1@X(Pn~w?4*(h88$XEVQaL4+(_!NOrtf>$KMB7Wzy9+> zI1$Eg`iI^>J)e`PKTJ+^$f=~OMDwKVyZBBpMGX_S?3FOBNgG5*^K5hhJv`@dN<{*%IFGFkYfFqx$i z9d5EpB}V@&^|EhC@%LJOwShZU;b!vCSK%&|3*n~pQi%=A*|0VTa*o$MP1!h$H)S?u zF=ds?MX6kp%H>Ftm$Q#4yHu{&`s%g5kI$KCSlc1BFSmCBJK6r2aFdL1kIzgKWSd+G8}I#XV$Tur!{Wy)_V&c%CE0aHO!AyZ*f5mQkU*FxE(u1n>H 
zRBlS;PpSMRm0QbAB}^qvrA(!bH%(wkF7!|__>zU-;=$2q_l~Q19UOj!P`2nkJs_`se2*P4!I;oV-k2uDdIhd-{RaeQxdW zC5t2JUDNS6e8ED+xqnz7zfWPFmn>3Pf1KTC&<{I4Q*%=b$BvH^xrYfmK6^FVvKuq` zncA7!n>v{MO&z7edEjHI#7X6eRGv!Z*{^nEjy===!TnY9Jo;Hg{EcVtgx$D)gUViY zM;Y?^+S08*ZT7?McH?{7?RMpqJr(2h#^iM2P1vABM~3+e=ub@ZHTi^lC#K1!8Qf|x zO)*V1O*2iG$~&pNm&ymJd|YChX`1C+(8NLXC#gE=J6kmc_bh7pcZ|QuX*-~G-1AKM zByVot%=Kf0W(@*5b+#?+*mf72mg>WWna$sJPD@1|W+O(NB# z2}i%Cy`}?+4?s-^O^2kaNL39BOvqx^a>H``c-Uz=ZaVqX+fJEIOEsBPlk=QoLPE2a z8?__R`=U%S`d%|yswu-cG~n_L*)wun6B5)d{&t#P5=S8}o36xPYPH|aF@gH%o5=lswV_fzmsOi!h%N!3X3eB#)fIcvGGixUO^ z%JlZ9_q{W{m#SH+7RSB39}n@emg}4N{a?(@tLCI;(w0W5 zX{DM@s%}zsm#T+U(@QmjR5MC7lkFJ-b1HLcvy0i)Y!I2uMzcxJU23U%N;N>L-KBa? zsu%SiT#@QkJ*ofx8G@g0%l{95ir~jb9^(7NoXO0Mp_OJ&b7pfEb5?UUvzOW1oZXy5 zs+pylMXFh)noX)+QuUT3rV%GREtQps8owdwfJ(gzqzBileshh z>ym1*R7*&;q*TjDwSw*ctMEM8|GeWIQ~YsGaPM~g^4LEw95oL$56};A{^?6M=7Hux zwy(6PrKDP#p69#C`T>sq<(Ojc?O!e7%P~J3(wK*vM;Kkrqu61qm8HsI;8dxWrLzB< z=W=E~`6JBZ%@d?bJ623M;4yc$oph0N{buOcCXO$m+fUTYGi)bRY9;##mHn{?`&m`$ z(zdgzB>Cdcs{ZAniR}Z_yh!gGN?uJr3`z6-a$kP?6{-S!C(Q8-hk1#4rTsW$x_P;I zh3z=xqf~21wdNA@D)VaRg66eSttHj^w&M`rFmCzVE=}0>586K;i8xBO$-JF)n>U-c zn75kw=&mhQHmJH%t@l6apu-XELGxjKr^2B^gMaZsZa!f?sW1MAR2znyPf68wzvdTD zbD7Waxotjgj*@C)sWwgc+%{h@U;63u_n?`NJC0SF=ucnC(La7#n{Sx^`f2G?=G(SI zlxF(UBaSCp`o5V5F%~cWzF&K6exmR4Y)2Yqjt*MJA4!>C=+V72zmjSzskZ(e-COgA zpT_&)sKSx9uNJ4D7W&~3BH@~lC8=m`Np2*$oj40u+y4DZjwPvnCCB2Lbd|n9C;jq^ z>i5GYYtbx5$0plRs_hasSzFIr(y#$q(pos&V1L-*R~umb8HXj4eZ%wrZyR2ss|l7I zKU_^vJO6k!!J?m%T9Pxdgk7~IzolS8gIL%-cZpB8r6}pP6tfhUYFDXtOGvk+l%;Iq zO#b+2UwpbPl`K{Nwh&Kbn54q{UY7eRQlxpx& zi?5}jrIDqvrHNESq&h~b3#7VVst5GX(O*4-mvHs!*NY@P+wfoRCUAh9_=3Fct04dC zF+EEgOGoajTH0FtEbT1qEgdZWQtc(x-ck*fY9Fcgm1@6bmQI$=7XF$C3*V8IYJaIp zsgBeShDO`(tN!9~yT3n5r$6A4aIHMyK7;<4=083!_aC3~``a^t|NZGcM?rgA`taoF&!S`XB6PGIRA<{?#)&|NrAn z`r}vn6Hh-r-t@otjM$G4azxu7WU^e)A7qj$HzxzWKeA-GqW{ICX@c7E_s50$+cqJd z8{lCB{XV&4ExYw6OZ2r^Zt&EI%GDUhQzI6Bk4rsA``)qK(;M=xROf|T?n`z4f7&u1 zTjCOZBwL5<+iC8{q7xjJE 
z61~0n_dcE&(L3xvKdbTo^phgLd_*NaLsr$Af=4E;Nv+AOd_XRh>N2S=kF=(=rm}K! zze1|3q`KF()v~X(U2RCTpS6E=!uE69gPcLVd90YP7XSFf%fEe?#CE7)HEEy2tz6>Z zrs+@5XgESnYt0}`ORdQX+TH45<+{akTjZ;yx<;yNms&GgxhE+ySve73Csn@bpkJrh zsF(i#^iwH(akWDc{>H)hr&6prc`C(fw&v2Ot8zPaD7Q@#J$|4+hf<)BJu~@u%Eam; z)lEM=WnwL4bhQ?-7S)f;xWmcLaI;i7SNh+4D8yQVheE6+t);A`t(=f{w`I% z79D?pW-V_&Z`!8sO!a3#GWh)SGazk3c($&U<6Tv(Rk>}Y?vN@sUdJ|ze_*3PAuY9I z3A25@EA8ub{FGCB{e8O~qx~;DvtfG_=zG$C_1W|5W2S_|Do67*v$p5TkF~kwwY8k9j-d}r^@vpY>^UaY<5E4rl^?6WwWGBY{|>Nr;h){~9R%kl zC#8By|Kn+?p3(n(R^QO{P4@qrk66TinkPJBVeR|F!xPp4KmNzy?~hnm<&Q56PxOd| zb(D1s#~=3dI4d9j=i?94trK{}!aC79$;uG`CvGuPjZHXKw@$OpOnew_>}Q>=KVqRq z>!)IU+WvSdW}Rj_-oL^t3gJmR@S*80W{Y_!xh9g2K@yYpokNcC)Ut zu9xa1sa{U>l*E634&pyQN0Ix#Jtgtu->B!fsPPZqezpE?-POo;-;6K73XvRc$bwub zj1u7MsHr{)k(y;vyCMxTgA4Sj^B^DcqX>$jBub+p27t5V)Uz-L^RNJm5ROPJ#TIM> z7w=Q=!d@J}VI0E={E06@xTFN*yRd$jj*u9NNtlhfn2&{6jFni8wb+2o*ortIT={~t zYhCoiND!YZ@wpP8EAhD!pDXdXE(h!3OXI?o^|-DFvAG__ar}W(ID>PDLJVSY30H6* z&+rM%!}XgG1}Bt3RWv{g_ zUEBltH9f>zyu*8Z6vCVg$&mu7kPh^@*&XDKEBV6gg90doA}9^!VlIzLr~+R!LK842 z^J35oX7Xy@2x2x8JCgxxs7J@ps(?9Oi!F?09U^{k#{J9?o zd2uH%?&lDN7%+eLJD_*miN~Fo-JgJ-acBE<|0aZoGZZ9+8RWx*e0X>u12Tbmd*nlY z6haXcLuFJ$4b(h40dw_Wt{y`mF$|M24Kpwsb3x8L$fF0j z^;ie`&to&T;vnb=4|>Am51hgoP=S%fnG>&MOwIl zny1f+T*wO_6hIX)$Mg-+7){X}ozM+|=mF}Qp1G#)hXJ6b>6uUZ{Wt;Yll~&8P5OtR zF6pUD`j;Tq^l$MV#G8ToWJm>fWCn3%AeIb;Q53~N{xc9$2J)VPJZB)548)RwSTY1- zC&*cbvxq?~E`jYN!%fg98OT?LM|dGb#?)ZEj9F0;tUn{`%@_vel5rwdVjB+N5U58+ z>XGpTPJ%HqGDgNLxCZKyk#RCIPA2*+QyFvzdC5fmGCjZ>Fz-wsL4RhVKRum5Z9FrA zcs#w39pu5Y2uh+f%AyjektaRrNz9&2(G`=xIG*8%#8Rxn8mz+xkR#6vAWl!>^dwGC z`p5Gvh{=B5Sgo@9$KLT=<&?-cxG~xxhF!<8}vZtevnv(EjWkEASam_D>J>C znc8P&{h3*RX4apD-poSXv#?!eNec3ng&br_2e!*B8Ic*;;Eh^f{4C5f%T_RtEUYhU z5+S_k5wGGPPhRB7i#&OiM@3WybMdN+ly`3%NLE%1kKO_%-5?8{Lmi$=!5`} zXD{mM6$EnNMGm}rgB*B~1Fr!XgdrdwUc)c~qc8^JFagZLYYL`e24-On=3xOAAsmrd zise{|)mV%5*oe*8itX5mUD$(tIDkVqf@3&=lQ@mDIFD#tz(rifRb0nS{Ds@Ni~D$p z$9RHgcmcL!uXmshUSEWuOu{<}sHb-du#I^eV1gCY*4qQr);lw(t#@|(hCJ{=LD2Kw 
z#Zd}nQ2~`f&wJNI9n=T=1Menij#g-k_UMQJbVCr>KX~^>Ukt!tNDRj)jKu^@1~u`X z3F_fJAJoD-0!y(1)WDnV$(#Ip)9c>k-uriucklfm=iWy_zP(R@Tzf}>JbPaPIrhE* z^6PyE~6*W*B_27%fXoi+(gLd!-+h6vs2t-fxLLc$Aj~v9BgBWvM2DQvd z4RbPH&K$^(f@p<~Aip_hVKE}Wd~-5SF5`>ei1Rn%`^|)MsE(TGgdPaSMDYIK=HLMS zz$rYzTQL7z%qXrL3UW4)SWPmqvpbna#8A31+tRv3?EW-*M!#PCZIX-}TEpcQD{n)P zue@zQym_Z!8a7}jcHt8K0=&aIejnNcgU>^C#f%oS-jH9>( z=97>7`LGTja_5r{Wl;?^z;@vi2x{On6N?ZIa_4g#)WC;)_ajk$=#QV>HCVkklkMc5}4VSK_DMcu*pMX66w z#xL3c4bcPrFaYzg1j|4SMTwy(F%*4?PoT!ds72_{ye6*{6b z=79W_U=Ahd%M!e=#03z43H^PG#I-ab1oNvVD!Rlus<<=79DFAs84z7{587KpVx zb1Kgm8l5qBTHN>Pp_|h?-7d zel=5reN#uszipgpnAH(})G}*0z9L)Xs!f=m>IA zn;O)n-)d9m+VplEdb>_CR0DlohaA+IfrVI%>$rypLeyn0b(u?D=2CYorh#Sap27uO z6r!FRvLGAisd@uIjP=eyf1O&@&jE5+pB&Z?#SoCk`sbj(epQGD)TKdYP?rW>(F-iw z;2=(dx%!d^U-IBf9(+5Y2N=`$5Qxc_F&lazFML3r8qzlnr+{%9(lZU|nMSS*9_|G${s_Yf=&PWs^uO6{2Yhm_SXM zGLNR@qbYN4dKT2G=@lWGk*{WXz&6pWHwGgNClG}gA)32`d^BfH&8cPco(RG3V9w2t z2+<-rj4-1a+MxqhViUIDoe(VroIx#H)+2MYOG0$c1m@BCH!zpZ zeZlyh>6y+^h!G+n8|bTmeCUD@&|d*-uoc_!S%@wwlA$h|qB&Tm3(It2nXcrcD|PCc z8op?W)}XJtZo(F@o^GtC8|&%T0L-&nE3Cyf>=2^60vC{v?!?obc)Al$ced&7#M_;@ z1?EHnkki27n1D(63y%;dL{Mo|L3PwZ9ngb8^n4IG2pWjNAa6nA!8(FiN6>tb>!1}_ zjdj?F-?0b#aR^6o9Iu4vkp=Z3F%eTR9rR(3xnP|===mP>bPxKc#{nD$>*x^;YSn|< z_PBxDpuRmG;xRr5(UY9^q|QBygE{rA3Fgz2F?%)!_2@}1dy>nZ9YH_zWL`aqr6;lU zoQ5S>j#VJvJ=fzkz6cRa-GYrU!-{mEcY>K$FntzG4+Z~*y6B32V2t1q7>%)DuEC5E z%oxFp5zH9D%sF^7sBJJg3ueB-CvXa9K@SFBz(u?hB82fnN`U=D$VjZl7HkLk57~_) zIEDv!Aw+0)5L;*-kfYFoC<1a7S_*9Qp%qXW)j;k-$z3S93vGxdXby50%DAEJK@LL$ z&<#Nd20a+s7Xv`wgi`BJdM|Von13k!6FM2wFcWh?-9r~40!y(1%qesoHiEf@GPh9X z7D{i0(p#bADU>{glBZDe6iS{#$x|qK3Z+j%>61|UB$Qqcy@UIBgeQ28S9psL_>6Bt z^l^rY&z!O>Fjhx7hd|+GaQy9fi5@k>xl~5HmP#g8&i^gb%mS}@^ z@JDBKMId^j7y6(-1|bZ?K<)dC!FWu&2gMRC?9ILPv8?YJMuoJtn4+n7s z$8i#8a2_$Zh%2~`KXDuP@DOo$hL?DQ_xOabLiBZlf@DYu7icge4cw3(nUDou$bnqQ zi~J~rq9}pVD2Ixug6gP+x@dq#Xo?nS4L@{1Cv-t~^gswg(GLSL1Vb?bqcIK>F$L2x z3v;mmixG)sScx@Qk4@N$9oU7vIDo@AhCgr`=MarpT*fur#4X bo4yufR`!$*7( 
z{GG~3f}}`+)G)vVE7HLO8Ic*;kR87v4}4G%MNk~2P!<(X8P!k|bx@!Ug0y?Cc`p;*u&_DFxC^sdcs&wSZDM^2Zl2FF_iupO8*RH z{Gn^H9>g$|7={wV&~HKvWBg$$krM?_2n|3V3~PnK7zN@OMoxy2lVRj!80#6vdWNx{ z;YpAhuE>j`C=TW^yc+_s0OVl!3jBd+P}kufgc#ur1>VR9axtPR>Va5BbOrMn5sF1% z9V1rZAgIlV)3^g_JK~uTBZ*^VTDXCnj-($)mH~4b*#R9f5%kGO`efuL?7|-00DV33 zp%A08A~%@Rs77cFw%Ji5!23r{##Zdbeh|YbVi-jXqct%8Xb%(yjma9mVMS(@;bH|$j4afI(90EW9$wv{@6pfjW|3NVq7X%Kpf+!%Q)UYj=GHN3yERa z0`fPGIgFz($La5TF2s1^AJ6;76aV;|T1`=q9gIDt0DRFBtuYkiFah+*6#8V!Lm{T-0y&ym1mtz91hGzKPE#3U z>S-aS5z{nRkke_ba~kWM#yY1FhyKXmv~{=(YB%k<5YyRqrnBu#F9X&!om@|!h;v}h z)2|9KgZ;-0Bh07{`eH^y5Z{cMV6HQ+f%VQH)|qa|f^47;GdrUTR$>#jfceaPiPu8R zDhjrnSryO^%zxH!9KtD_5n?udGMhQgHh~ys6T@s`m^~U(Fb&6X9??R~aYib*fS#H| zKhCL!5nz6ECgUK;+Z^&Xm-Wu|KnAo!S9Hg6Y``Y`g-3`JVxBkhAwQVkyq*ZbVldBn ztMM4jZ{AxW=Cgm8Pmbp^&-wJo{J{vr9vlU+&!?9cBu7egLJtIEJNAQk7km|BA-PzX z0##8D97eQ70LWv+@6caAB19y0iA)9R z64?l?!LpHSuocWTk~}OS4@=0ylKN;4#$2)%#I%Glml}``?jTP~>6@hkz&K0knWgm1 zGG1Sn8ClUE!$AF)#o#(_3b8yF3ZV#CW;x3&XPM5buU>LTqe~ z_VC9Z9K~@VHhCf^$nPcz#@{p^(YS^iLTu*s%>_W6Hupq-36d~6?%377=dxt(=xXPr9?pf7j0qZ`;4?C1m5w_`sJ3b8W< zn8!{Fn9I&~VEmo*%+B4|E5z@vNDDVKL@Ur=zt6-XgySBb;e`;pilYK5fn|2F%r2JM zMLu>>r(JJ_*j*Y`P!06e?)g{<*0Y=S>}EZCN`ZOqsft-xj0il%JCKjP#Iu)p_7cxt zw&}gZyO+7`Gb24RA`pGh4<`|WSRwXjhYzUt{`#PA_P0bRhJn29AAwPrj~ zui^&&!X4ZP+r)lqzW+I1fw}G{&i$Y9O^5@|P>~#|KyD8hVF5WlK&=jBfG4t|HT*zL z4s=2n&>IJOAOysEfO#Dlh#?pXVm>e$<1i6ZK>P>D;eokW0CIVN?dQNUtOVQ9f%Vvg zEjWNfID+Fs9ITGUXbSRlkenY}0djtDJ9c8X5QmaL13h(!IUQn7hgkLyaUY@w4^fLl zJ;4}<7~>FQ9Ab<^jB&Uc8lefAfm$3Uw!_=71HWUB5Jwo}2xA;!j3bP3gfWh=?2!O; z1^GEbevS~=k;6C&)_;WBAMFq7d2|fO&r$Molv*CW3+i|DA>xEM#yXC%>@o6sY%nBF z;|i{Ux*z*Xh~w=L1m<*{dK{-7$9IA8j{kvEpl?p3gBP+RCvt=3PlSP*ofr<{`lB>z zpcd+&J~m=6_TwPP*U2jI1^GHjzD_pBbc7=kOR++TQ#nu=MNu53FbtD16};!vEHJlI z%TVmw2PXKsV>&XU`+1&>hs` zEOR|O9+SX0XNmbNF`o?wm`=8~bnohjA2VK|e$hZxr!Hy#e`(B41JDD>@lc zf_z0AU;^<+6MuA05N|Z`Mw6##@)S*;qRCTqBao+P@)X?x5uPT~^C@r66Mk4GTa7s&C25BQ9)Lc}_O*kj3YtOs%+ zKMJ8JN}x2VfF6mZM`Fo$EcuT0MN1HGY$%8~b|^-GxMPVsmUv@lfqg(M`Ho$La1d`S 
zIgZ_jQy_`4#2kASH$d#M#2ib^vCr`mZ}3iti{$%a3Zy|MWPulm{bDW<`$b~ENbDC& zp)AUy2AaSRf#`$&pf@gtVHn0^5~hOKFU|tJaxns{u>%Ld_IHt9zW4`DBNmr&4a9$u z_%GfDIle?}mkOc<*p@DJ2jgC1yi06Dm&Sl`FEQ?=*_elgpvIRXL5(l1!!8^_6v)#h z#<|2em#Fb2YJ7=2U3vv-eCa(t;foNLlR|?Vyg`mG=LL1XTm;2Io-UU`P1FYafy)gr z3gqGPOc2{;V!M0}*YPKq;ZMYO3tq`{#9~*m7HHC=T{%$37+GX5Z4rt zgKPBXwN!A05f-EcHMr&hdg7W7s-rD>gZW>h&#vvlF(Ixe0qeQ0fw^AK4(fKDx?QKo zuNOrblm~OZUKOnG`asOW2JFOP=<8z+*H7adqHzJY@DOo$2DY>7AMhF9gt*}h=5!+u zT3{3=gZ1B_FK=uDW8Bz{eK?2{IEAyIS8v3Cez|cIcflAp7~=+G++d8GjB%5B-b@Vx zOdwx3sqIZ_dy_feEQt!JjB21yZ#Dq6z1bARcC!^af;!)%wl@b0@z-t8pMNp8zrF}@ zi@DsQF1P5*Th!nd{cXsy+vMck=I+~^A`EMRRgtA55AyZZ!zAjL5KkJy!95$ z<90G=Fe5$a$=j^|HZ{3TytjGZZ9jAXao+BXo}mA3kHL6M!cEzPd}j?_R?Tyv93x1bL?f;vW5cFDa8;?>$=bK_o>@`>UN*H z-Di&Xnd5!le?JI)F$iH8h7n-C_sRMFxmW<|dw(fbfU)lHz)73}+s^%4xQ|D8f|q!M z_h8-+nC}Dn;Q=u{$cW5fz7Lr1gWtd$A27!UCBb(5pgby}3IZ@5tFaF^!8{*)!dD?4 zQuBujk|8Brz*rCIfrnYZI1d@;A-Q;%8|3vNd3{J9JgkUz=m+Nca6SG2IecUUu|6W! zN37!!>v%-0k66beVthpJJR+x$$mt_;`iOi!Y7IZMM^A);oIV)YWQeB4uLoxox&NMLlmxpTt9jSa{GuJJR%2=1(F~&3^0LSeVi6&4FYqDV@`3CKt1B9N8D`81AP#;3Tv?eo3Rz!!93!q zM;!6T-N0YCgZp4Uac`l|=QF+u@x&Pl=%pv=z+9eWgeS6tetMDz`H&xFP!W~EoSxK1 zT{J*L&|6PBqBFXp2ZGTX%H@?W~6}|(jyZJp(skAG|GYbJ*8$( ztAm<7r4OIhhcAf#DY<_di)TVSV_nb4!?QLR0&4VZA5MWdpAqM?3%Gw1$Jtm{oa6hL7V12uTl0@UD5PxQrLkf%3eF#(gX1lw^Gm+=Pg z@DX2xcq@hh|{iAKIfM$khjW=0ga=FbpFx2IDaiv#=21Sc2tPiEY>g zV)<|YM{xqDa0Y)tkMA8m7fDG^k@qeUeK9)ooR74dpkB{{J$JQVp zAA6x624V###pStA3}U$j8AFt8>qo2YVe6MK2d{D)Zi1j`a}&rRX}AlMGG*_ zCu;nO9DVAD0FeJr^62&O4MTaX>h zD{*}#uCMg_SLXbcTz}0Ca`ct4zcSue^7M5rj^YGPg8uk=4wrEaH$lIAeI&#;*7?m1 z)c;#81cUi}TYyzyUEelfGnmJ>9rzPZ@Eqjt8~OW2{=R(@L<<*KkQVO9fNaQ)-;f9S z&;lVChv}GwxtNcY*nwTxivu`}WB3E7aSbO@S#Ow0lGQ>mZ278|e`JFp9Tu^+^&M&nQ11--2jw@TdV zD-gH(UN|MC2a}rN3F?=$7)qiH%A*phq6VmK(t7YkXLLn(^gswg(GT=q(!rp{NvUyC z@}HDkC#Akgi7hEHBqLAB3ZN{=MKV9Iu4Mf&0h2Ke%sClzPBtH`BN_EdM!k|T|76TJ z*&eXYWCw8s$8i#DcgfC!K28=3`X;$EG-L;3C$9$PkbDTng8fHw`XV{ob@IiC1Y;#X zjVrj0KXDuP@BnY{8Q+9c3f7-OMQRvef)!~|5iLNTQjnh%jG1C6$WIFTF$LqLV7wH| 
zuo7#q9-FWg^kNEnF$Hx^K|CqWAsWmr#bsOt@uYYyoKiA&%B(1hR-mU+Qje75FdfV@ zCG$+lJX11mO2$jccqth#e{FWU$jJP&~vFfpd*MW^+-^I)JJg>%rW&dyaaVl z&GwS|qi}Lz3>ObDf0xY22J+yN6QxlFHBkrk!I&;>!Q5RK$E7QJfL?G3MPEov#5BwV zJ@2v*;aGxYpgu0-#AOfmVn65w7ka@Z8uWq-z2HJGxZDACa$ycG%)y1cxqKB)uH?%# zDae;A^Kdo7f;7mA9LR;dD2O5`j*_T_x?oPO4bcS5!!;Pa!8}|CU^1A8EAwz=9SE{)=3rnB2IgR(Ck@6v#h?$hWmt(dp!V7(>;q$IjHNM_ z_6JVmEErE?{Te-Lq!*3VK#h#l#5fw`K#v)xU^=LeaRsP@aR+u`FAm@kSf7zP7;l3* z80jbDQ@p@yP!r=j;bc-_f)(lD0cv9M1obfafO(pVfL=0{1hJWFq7LeVelj&idyorL z0J7g#B0t9Z;(H8ZqQ5S0w|24C;@uQOq}L1Aa~|7crTnR^q{2-TB8rhtz{#&fSg(m z;~1!!B^tzNA)gjFSlkBK0cR&r^bi8+{$h1iQrcqyFHxF9{4cbZC|UTNxpvC=d|3ozF-e&_(|mS!;M z-!#lI4RcHri_5qM>XC-IrX}xb$$MJzo;DR+VFY8P%>?F~HXEozTI!INIj3cAX-9#a zrrnPHID+Fii8HtZ>YbK)r+tMt!YN%!q=o_HG#&X&M~|kfg+?I$bi|*I_|tVl7jy$T zOE(bYEZtBfpOg!&y6{|{f<3g?rx0j#{AuwzuO-;jk91}x39vSaMsd_e z0H}>S>vw0p?$pRV5^Jy)8?Y0M;l3NJ+x;j`APUsdom#rz#yvbl9G>9?=wA;fC`g8s zNDVh+1hIHz1NHPE4<7lDA0F sQmn!ntOIlNU=AMCB7G)g2YE?f29-GD4fPU)-sUz(}cLI3~& From a7595aa776cf39788d8fa814b7b7d0c14346ef00 Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 11:24:19 +0800 Subject: [PATCH 11/39] modify podspec --- LFLiveKit.podspec | 1 + 1 file changed, 1 insertion(+) diff --git a/LFLiveKit.podspec b/LFLiveKit.podspec index a199f57d..43fdebbd 100644 --- a/LFLiveKit.podspec +++ b/LFLiveKit.podspec @@ -19,4 +19,5 @@ Pod::Spec.new do |s| s.requires_arc = true s.dependency 'LMGPUImage', '~> 0.1.9' + s.dependency 'pili-librtmp', '~> 1.0.3.1' end From 5ee942d4b0b5f2af085706b0d37421696fc378d2 Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 11:50:05 +0800 Subject: [PATCH 12/39] support carthage --- .gitignore | 23 +++++++++++++++++- LFLiveKit.podspec | 2 +- LFLiveKit.xcodeproj/project.pbxproj | 10 +++++--- .../UserInterfaceState.xcuserstate | Bin 15247 -> 16189 bytes LFLiveKit/Info.plist | 2 +- .../UserInterfaceState.xcuserstate | Bin 10986 -> 10986 bytes .../UserInterfaceState.xcuserstate | Bin 118380 -> 118716 bytes 7 files changed, 30 
insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index 382f8343..3a6477f7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,25 @@ +# infer +infer-out/ -#CocoaPods +# Xcode +.DS_Store +build/ +*.pbxuser +!default.pbxuser +*.mode1v3 +!default.mode1v3 +*.mode2v3 +!default.mode2v3 +*.perspectivev3 +!default.perspectivev3 +*.xcworkspace +!default.xcworkspace +xcuserdata +profile +*.moved-aside +DerivedData +.idea/ + +# CocoaPods Pods/ Podfile.lock \ No newline at end of file diff --git a/LFLiveKit.podspec b/LFLiveKit.podspec index 43fdebbd..a2603571 100644 --- a/LFLiveKit.podspec +++ b/LFLiveKit.podspec @@ -2,7 +2,7 @@ Pod::Spec.new do |s| s.name = "LFLiveKit" - s.version = "1.9.3" + s.version = "1.9.4" s.summary = "LaiFeng ios Live. LFLiveKit." s.homepage = "https://github.com/chenliming777" s.license = { :type => "MIT", :file => "LICENSE" } diff --git a/LFLiveKit.xcodeproj/project.pbxproj b/LFLiveKit.xcodeproj/project.pbxproj index 56555621..ef57f710 100644 --- a/LFLiveKit.xcodeproj/project.pbxproj +++ b/LFLiveKit.xcodeproj/project.pbxproj @@ -30,15 +30,15 @@ 84001FE21D0016380026C63F /* LFGPUImageEmptyFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 84001FB91D0016380026C63F /* LFGPUImageEmptyFilter.m */; }; 84001FE31D0016380026C63F /* LFLiveSession.h in Headers */ = {isa = PBXBuildFile; fileRef = 84001FBA1D0016380026C63F /* LFLiveSession.h */; settings = {ATTRIBUTES = (Public, ); }; }; 84001FE41D0016380026C63F /* LFLiveSession.m in Sources */ = {isa = PBXBuildFile; fileRef = 84001FBB1D0016380026C63F /* LFLiveSession.m */; }; - 84001FE51D0016380026C63F /* LFAudioFrame.h in Headers */ = {isa = PBXBuildFile; fileRef = 84001FBD1D0016380026C63F /* LFAudioFrame.h */; }; + 84001FE51D0016380026C63F /* LFAudioFrame.h in Headers */ = {isa = PBXBuildFile; fileRef = 84001FBD1D0016380026C63F /* LFAudioFrame.h */; settings = {ATTRIBUTES = (Public, ); }; }; 84001FE61D0016380026C63F /* LFAudioFrame.m in Sources */ = {isa = PBXBuildFile; fileRef = 
84001FBE1D0016380026C63F /* LFAudioFrame.m */; }; - 84001FE71D0016380026C63F /* LFFrame.h in Headers */ = {isa = PBXBuildFile; fileRef = 84001FBF1D0016380026C63F /* LFFrame.h */; }; + 84001FE71D0016380026C63F /* LFFrame.h in Headers */ = {isa = PBXBuildFile; fileRef = 84001FBF1D0016380026C63F /* LFFrame.h */; settings = {ATTRIBUTES = (Public, ); }; }; 84001FE81D0016380026C63F /* LFFrame.m in Sources */ = {isa = PBXBuildFile; fileRef = 84001FC01D0016380026C63F /* LFFrame.m */; }; 84001FE91D0016380026C63F /* LFLiveDebug.h in Headers */ = {isa = PBXBuildFile; fileRef = 84001FC11D0016380026C63F /* LFLiveDebug.h */; settings = {ATTRIBUTES = (Public, ); }; }; 84001FEA1D0016380026C63F /* LFLiveDebug.m in Sources */ = {isa = PBXBuildFile; fileRef = 84001FC21D0016380026C63F /* LFLiveDebug.m */; }; 84001FEB1D0016380026C63F /* LFLiveStreamInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 84001FC31D0016380026C63F /* LFLiveStreamInfo.h */; settings = {ATTRIBUTES = (Public, ); }; }; 84001FEC1D0016380026C63F /* LFLiveStreamInfo.m in Sources */ = {isa = PBXBuildFile; fileRef = 84001FC41D0016380026C63F /* LFLiveStreamInfo.m */; }; - 84001FED1D0016380026C63F /* LFVideoFrame.h in Headers */ = {isa = PBXBuildFile; fileRef = 84001FC51D0016380026C63F /* LFVideoFrame.h */; }; + 84001FED1D0016380026C63F /* LFVideoFrame.h in Headers */ = {isa = PBXBuildFile; fileRef = 84001FC51D0016380026C63F /* LFVideoFrame.h */; settings = {ATTRIBUTES = (Public, ); }; }; 84001FEE1D0016380026C63F /* LFVideoFrame.m in Sources */ = {isa = PBXBuildFile; fileRef = 84001FC61D0016380026C63F /* LFVideoFrame.m */; }; 84001FF71D0017590026C63F /* AVFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 84001FF61D0017590026C63F /* AVFoundation.framework */; }; 84001FF91D00175D0026C63F /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 84001FF81D00175D0026C63F /* Foundation.framework */; }; @@ -342,6 +342,9 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = 
( + 84001FE51D0016380026C63F /* LFAudioFrame.h in Headers */, + 84001FED1D0016380026C63F /* LFVideoFrame.h in Headers */, + 84001FE71D0016380026C63F /* LFFrame.h in Headers */, 84001FDB1D0016380026C63F /* LFLiveAudioConfiguration.h in Headers */, B289F1DD1D3DE77F00D9C7A5 /* LFStreamRtmpSocket.h in Headers */, 84001FDD1D0016380026C63F /* LFLiveVideoConfiguration.h in Headers */, @@ -351,7 +354,6 @@ B289F1DB1D3DE77F00D9C7A5 /* LFStreamingBuffer.h in Headers */, 84001FEB1D0016380026C63F /* LFLiveStreamInfo.h in Headers */, 84001FE91D0016380026C63F /* LFLiveDebug.h in Headers */, - 84001FE71D0016380026C63F /* LFFrame.h in Headers */, B2CD14761D45F18B008082E8 /* LFH264VideoEncoder.h in Headers */, 84001FD61D0016380026C63F /* LFHardwareAudioEncoder.h in Headers */, B289F1E01D3DE77F00D9C7A5 /* NSMutableArray+LFAdd.h in Headers */, diff --git a/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index 14d471beccc50fbeaa3995d90dc1974515a05060..6867e013df3a8be188e8fa300666d512aefbbc8f 100644 GIT binary patch delta 8787 zcmaJ_30RXyx1KXIfdm2xAv*~PWCKD1A*kT*E^4j2xS=Q_prQf_xG$Nnb+^_f+D)wj zTI*V?UA5M|YHe%lQmxfm>u#&oT6e2=Ld50v{x{$AK{O~3$fp=jktc3Sr6|9CeumL`Vk6{aJgI%y2_P~Dl8VmTjMtP zCEO8b<32bS7vdsZhKJ)3xE#NNU&WL06g(AA!_)EWcn*FOzlGn%@8Cta3a`ZL@P~LK z-iP<&ukiu=4gMB?hY#ZK@elY2KIX$G@G1N&zJUM0f8$&DHhzR36GX&BLc&NmF_Ktf zB4!dt;)#V=NhG~y-cq#8^{-AEBTUaBiqRivXks02g XcrU zCP&Co@+0|;oF(VTd2)eVB$vo#a-G~Hf0O&<0eMVAsg%m8iW+Ggji+|%pf2jBX*7do z(uTAtZADwtmuV;3nRcN)X)l^XOK2%Aqr>S4T23npX(b&=N72!A3>{0y(aCfQol2+C zIrMG%4qZeS(x|FV>tLYlLmVQD%rJvEAbQj%C_t9_YNqUO@LQm6Q={b6lUZR)j zReFa$pbtez1R{}0A_@~l`W@U?7g{;MIM z$i|?tXdIV59!)?K(Io%I&~|1IL(Iz3e?$xkeCieSDw@ov$D?UzI+}rI`i51T7Xt%$4cmmnON1KP+b#>v}i}oXX723z@SD~+21MdDc;i*l( zL*EBR53-CZ^aIOex!pSC7nj!Y97R7NMGg899Ye=iL-rrms0N+jPCLmOb05v(IjNTp z$o?&0*ICvi;MicQTtL^5eI>exE}_fl3i=)Wfv&QqtQl+0TCkQZi?v#buA@KE4fGd( 
z{zkW0Yu1JdY#M*2vl-mBgVGI|2ttwKJpjN!fPx4@*h{P}dzrOk?OBKSKnxNHgK&^S z1nbB;vA%31^D)M)8WE}Xe;3gv+1ab0tgy)2xx8#hQNfcm&kK&V7+E~5u=C*jilT~O z`ukf%di+_DO3oR8jD-V3bt zp#jTby;;xBmS$5EP~cM=a%cpN`*qCjFtRd#z_6m`<>mQfYoHOTk1~SOO`%zyJo+iY zGi^0KX!^Xu7SNvWWoQXm&^YnTs!#bB0msYkb8k;BA)@zmJ z$;@okyk$mat5)?gGh2IF)vMpalgXzt(o<7CtuyL-viQ4RmM1ket$xe;-qz2iGX6dF zB+UN`X;yJX!LYK5qQZ{(KE5lt@-%nqGf55pEvZg_z5hOy`F~7#>u05Aq-V5h-YTm> zW@cu~^z^jmnVBuJdW}!zCAxuU;$dHKXTSj9MReIRmRlG4fiURBfgvz7I8eLkp%jK! zK^YsyN_xIjlwVj>UJ(p+1yu5UdXfp*MU^#B0V7dH_m0{8Po415FgB3BV_2E53dXVF zfjBvWVR|C$na`d&tF{OhRUCmyC>(si&`c1}ba1-#P0 zLNqB1I5&=sWn=08^^|1!*W=`Ca{ToyU&w*$y!+Fza}?} zYG6Hl;QvFO;qPQL_+TS!3flG@0Kq5lX#jv=Gn>j@sRf@Y!8m>qFnud~^(p$m_5k{@ z$uELUwC{27b2zZLI)n*e2}92yDNo+Oe>*DE_YGhFw`c+!M5}ss%ytjp)vOPHmlqA= zVH#D$!DA+K2SaiQ4)fgL2vrS-0_B1uIvfqy^dozXMdrzs;{&;xIx*QiJ|lzw%1le0 zI8jpPpr7GaWM2*^;S~G=r`a4fm(64IqnE=O_zlj&Ip$~Yuo||Udu+R+N#gJDXMp?< zxC+Iwz%EX)wgFG>Qr?KsOpTnun*a$ zXR5m4p3lpI`Z$Nf#zxlD(rk*h7B!y45BJ6W{}<%|PWe%QvUJ%qN-GM-1Mv`Kufc=x zU|h^TVV|u9?LefE$j=n zm3_&!y@$u+2|VI9?8A(0XJ7HCzs2^l{anIu^~S6jcotGr;hAhl6@HEFWIglbGTwFe z$R9ScsG@mAxALN*I_u|RKi{5s9-hy3vE9{p0baoN@F&+lP?OSaF@E={prvdd>siNE zjh6>}IgjnCs%5CFx%hp&`YFd6_VqK4_4tEeRc^!E@ecmpg?IDsz-o`OAK5YfKF)sP-zT`aMcQwC zhhX?JRuashqtE?54skrOgX2FxH~vfQ@47Xe!DpZ5$2oSgE9v@t@B}9!qZEzt}JAG*7sbI;-vuzW>7X1N@MkVZZTNojz7ifY9fsiHI|wt7Yz@ zZ$@~p;h&_}w;@uZAND{9d#7-O}fh4j&*$ws=ySbb=NiuN}H~X7C62K$C ze>lU}F&hSwdZYn&38~L+RgnyKn?ox+t&6azwg!%V;y%)pwB{-MUt3DjCeTunmq|O` zQr@X;Detm>cuSe<|JI<-t_=?9%&R8p!tOl>LZln%9snWsfZY#3&l7-o?R1^q-lXsI z07Lq5fO%L8FebUmf6<`IsiQ0;eEY2+MPwiuLQG%_hT zO#MP^Th+7SSA)YLdGgjzggX7PCh>C=(}NV^JbAMx6w&?xCUfd6GB*&@*T`)0I+-It zm;m7dNCk*kPUeyM$VL_j5GjC+y(NI0d%IcO1i!;`ILOd88-_M zD}X7$Xs%^koVuIvBhqrRhkQl$l6?Zi3t$nzDu8V{`I;QyOy3G%7r+rZ$odd>T@|aEb%=!d{&Wv1b$?QJQ`>jC_*bK zP|W*+`{Xam!zC{oGd#bvuv>Y4X+=SK@$kx=;=n<-bAIJu0U9zS7Qib&#$hU^5-wpn z4Wr@yVyiPJv$l7j5j2vw6}9bLfTxUK;Fk)#gtspnl=b99T-Pwx&}gbe z8Bgc^kL0d2s;2yORZTTiD?lRwnp9Ig9(;`4g9PB| 
zR?I~{PFmquMmaeDn|1|#FQ9&?0K)zcb@apki*_U36p+g+;qY3ymS*zWRt9x# zXs%0x&nd_7f2;YDZll`;C>NkY0DgS6H2pKkT7b+pjLe7V9u!W$qI&~v^`?U4q1ZPv znC1KF*L|O!yRz~txy$y`11O^hKiux=MR~)3@Q$IaK57#>KFj^XY6KWBz(n3i*91;#@q8~e=SP}R#g${b`a7h|bD!Zx{YK9Q zj0)T{1z_e*(TTI%*%N}+ou?Oqh7?uP^L}ZntJ4*Tt3nYBKWQ!KHHceD>eD~z4f+?o zN&hClvs(fP0=z1~6al6RFr6189@A$aO792v*(?7)n$X8VvdID1us?+jT0c0P^_)bJ zI5;*f06fn&v?wAtJmY!%*ywRmQItsGKjYE(dXm0m2q`BM`StJ&GK=HMe6oOF5x>Rn ze}ClnynpeV-P`0YzukRE9#KSPG>TUq6)!P58pCgSP5gGZE04!cdX8VQ#)uk-nu%J9 zT8Y|-I*a;<3Pr<2BShmw(?sux7K`2$EfZCXmW$pKy)RlVS}WQn+Tj!J673P~73~)t z5Pd5;DEdKkSae48cSuA?~6!LS(sgTnl ze}()Vay#U1$o-IqA&){)C=R8eA)!&BNue2`okDwu4iEK*t_Yn zW5h;reQ{H9YjIccNUhR-Yq^RzA3&Xz9YUTejxrwk|N2F z{72GQ(p1u3(o@n`k|)WR6i7-Wqa+I@??^tE?35go{46;yxhT0T`8~`WmKfGFEGw*c zSV7q9Vg9f$!;Xdh6!vr2sj$;wm%@GzyBc;q>~45`xFftvc<*o)KE)TlCj7JTE#X_k zw}6rL_Uy- zW#O_2nOvrjDP;*VkF35dL)K8%NY+-?QPx@3Rn}9MBkLpUD;p}CB&(LKk$oiFA^S>p zSaw`?LUvMiMs`+qUUpG-Np?eaUoMf$6j@{8jlB`84?q`7HTt`5gIY^273LQIeB?`X=hTsGp-wMxBZ}9d#+{_o%B;*Q4$zkRn2%ROl5BMUo;}k)rS@yo$bxQpH%s zRK*;{JcVE3TdY{ASgZI{v0Jf6@s(nq;;7zA1szOv^Rg_AjGN^2-1XYqMS=CaNrRt*Ut;$vPQ}tI3Pz_a;sLE6$R28a`s!6Ih zR8^`qs?StARJ&ArRC`tXRfkkZR6nYYt4^p+s_v^b>W1oUbvJcibw71~b%}box?DX{ z?HjEgtDdcXL%m47M7>m9rCzVzr2bg_srn1`m+I~6o$3SXBkD8i3+hYiE9&d&8|s_t zTk6Leu|}oQYV;bT#-xeU)YG)kwA5s2T5Ga3Jv6;EeKdVFd78nR5>1(Agr-8nG*dLw zG&3}>YvyX^YZhqU)vVNfr1@O4MYGkX*{RvB`AV}-b3$`g^QY#f=9cEJ=Dy~kR-_Hn zO0|(%xz?zS*IKo9tyAmLrf5CdhT1mT_S%lx&f4zUp4uF3A8ny_gm#Q}oOXhCl6H!A zns$bEmUgywgLb#}oc4zHkq+swj_N{mVqKU{qtoePbVi*?7pJr6d^TMpT|3Mf%11clA5;-|5fj&+5G>V%;cE2Vm^x5AM<(Mlmf5hC1 zxgB%Y00wNJh6qEXL2l3)^ag_=(U5F#8$1TDp`M|Mp_QSHp{=2vp@X5fA5* z3^5Edlo|xXOv9Uo#fEnc%M2?FD-Ej*YYd+ob{c#K4Mz+=8jc%I8BQC{7|t4Q8!g66 zV{2n~V^3p_v5&E@G0&K9EHIWED~+R!V~rDxKI1FKDaL8Wca2+&`;4b!6|vUXrm^i~ zyT|s7&57+3+c$P#?BLiTu_dv?W6NXL#vYA*Y>F`@n4Bh;DbMe0YHnd}W$tXwHuo^+m~+i}=6rLhdAPaU zJkq?xyxP3hyx#m_oF}eHT(h_qajoKfuf@F`ws_14=CXCS^|KAOmD_x_>9$$6*KPA` z3v5el%WO5a6}Ie&CC*8lpSUn7A<2`}FexXgf0ECav@oeUX+_feNo$hU 
zCv8ajCh2I>@uZ)VP92uC-zTsTyTz3ZOa;m&sFx(nPT?s0CxJ=s0YJ<~nUy}iX$a8 zCBw7Cv&^%~^Qq@c&mPYq&uPzZp7Wkdp5Hy!JU2X#(jbkbg``Q+!qXzsWNGPXIccw^ ztxNkP?aQ>iXqjX3Y(qB)Xn?65%Vfyj(E9uwMZw32# QjDv4cbnspGoqp$k0FI4}6#xJL delta 7976 zcmaJ`2VB#~+rO1SNFbXG60$=`A_W@-gG?zIV_3KF|H!GiK({FO-DrBm4dBJa)2C3JYg7 zt?f$%-9Z5u1PVbBC3cJG|uovtD`@()O4;I29a0DC)M?nfX zI2KNVQ{hbb1zZf5z@=~*Tn^X6uiyr_5pIH;;Wqd!+y(c;chElhK4Dv&d}XCv(VLvVbfm zOUP1EO4gH2WHZ@Lz9HX|z5Z^@1{Y|me?A_|)6&mr`c?yhFeom*rrQ`B=a1qo(ral1 zT}YSHV^mAeQ#VcT(JHA~N@nl2nZ0xJ{C9ZrurxN%EIYS&U}0imPNqMUKgX9L2~R94 z$}K9+%rCB-Dr%cKeBj{X42isL^R~G|a@yn;#|+CFn4MElIIzFqA`pNOAOt-?4zRBQ zJwX~s2N|Fj$OOGX7RaVLs;80EK#kNy&D27zYd{~+7g#|%_WBIufdRlqZS)f+q?@`! z=x{oV&JHN#=LhW&j{qaVD7O7*Fb4Pl^`8oAW2{91wbMG6!C3G)n;Hkkg9&VUG?)yg zfT>`bzfN%b$Xc|v1kxx8%gz~KCR_Fem<477KbQmNf-n80!JP#j8cl0b2aWN+4vtS; z0BV+ih15yIE`lXsDMKt{0+w`dm7SAcoZBZir?5+_{Op`z)Wzhiky>qw3m^{E2IXLd zf1$vDOTkM2e1TiIhT+$ObznUh<3B4<@w_ya0>55f+iwiv<4s_*-{%bHm4YoGKJZOY zxyv@N-Jcaw1Mcw83h~tcP`#Z@!O4t;#(-%wj_F^Uf(sRP-xCnq$LtYSxy1o+FhdgM zm+&=yg;2sj1mdfHHWe!PN5Ihxi6t;AtMqD0VNOmdI2Kq{F>HB1T;E2HQEcn<8Vz(eo|JO)p|Q}7J@N)u@l+LR{IX0$nN zu?qYKeg`kWOZIvN{-7;sD>{G{u~#u2%w&5Y+Ef=o0g$bP7!t^Xd>90SX=~bswx#W8 zd)i?o41q!@f}t=BifKpMiDuAVG?(Tvsm_O{`KN|?{5{#fi=hhtnXsVnCWCYHvXgU) zi*xh)7FDK1hQ#joglYZXhsAW)zzAjtsHL4tppGWf^bCnKscTAdL0)cg*W8?;t%`F7 z6jk^N8lj~WnxL7c(C#$tozBn(9Y9tJ?J$aVpew<1#AQxr?*dTH@G;ncV13n zVPWR*GS~>z0r8d7iLePv@9G8bPb=fY#K3`mu|k993y z1J=QIsYN+?ITeD^EZUoTX+KNip7wuSKDZ~nO5%<%8Q4o1P2en?DVZP_6&sp60oE#W{s- za)zgMDDIb2Xl!b1o|oCTsA{iV*dN%JFQ@4rPy-wQ^8?fX2hl>BUqK6ln7&C&xuSr= z#dKgbF~Fe#VxR>d^MKs{z$8`4&x{c3U!-U$WK0FefiZ9bSm%GHQ2B;XZ>8-g z!zo?bCwJ-GwiHed_=AxtoEA88IvqyCG9-$TBfV~;ceL9$GCrPtt6S4MdUTa4v*4G& zUJhqNKb!;S(h+nd9Ysf%!+CH%TmTo+F?2kgL1)tRPuu47sR+jmiIS0ivVXiXCb|T! 
z44_M487zk@sE<<0(Xr*g2HL?jz{*J2O~)}_PWRtbj`3{)HJ4XsD%=jg4me~7olpXI z(ux0H*zXR6{T{fNPNGxahy4L~_@mf80*}(kbP7$a7S)n4l2*iUg|;o3T~5I>A7ReI zb95S=PE%7m=l1Pap&tVkyazGN_wY(U(986T63~t*lByj2+lRi^wA{#2dE zcd4JISGwYtfbjR>13HJ!doTQB`0OJ=eudBJT>2#wq`IP75^{TeB*=f@pVgSqK%)Nz z|EBZl0+#5P90ku&+c;do7gMeAEEW6bVe!F8+D;uX=)YhQ`DW=61AWkN-6+V{Daa_uWCdl-ToetjadH@ zAO)tQt5Ys2V%-et&-BYf15iF1hzigkR7iI+Ex)1P(p_{n-9z`%eQTJOgV7K)lzk3I zBiQ>Wrt5xsfF5F>hv^aaew6L1jX3R_geEVi2P@JZO-0ifufJntR$a>2$aE0TsDS;b z2t73O-EzhVXg0JgrJVB=k5`3I3AB``?^sPoN6|5K9GyTX(J6GAUZj`k_w+KoLa)+mtAG`qMd#3YbOBvNml)$*=inzC ztjEEY9L(ile`dgm`h9Nn1G>u?3*AOPqC4m(dV}7ix9AV$=x1~f{ete(+w=hkBRFVd zfK`fm{`HYVlF>87nu-$ED-13{ztJCQbuhd{|F9^A|5u-Y{)IyH2EDE56Yj7+;U{{x zqEATo-(kg{mBi!{-7k{5G)LoMEVQ8_r4Gw_#@$+Rj>rhKP-e;!3yF1 zib5z>=-OZnHURqyti=&nhxIs;KBSN7Q~E3YZ3Q-B6ED*=C2oz!WKc zKNnvy_uRkS)aHZHh83f~`(K;dL?_^8%sn^}H^EJD5`9VkLtoK9%5iht0@!dX`X~L1 zrNiIMIa2E=|4Xycos2&XAX0D_+!c4D|I*j=4SidVQ*n2O=*dCAK}bL6AbJPdZ!vlM z;Lie}zPKOG#r-)*ILPB5pMyc=I1jV?1Rlu2U=9ibz>o^yV()M~CIA|NN8(X39a7iND~Wf`dv9hI3F=j%Q;( z1J323nuD4EFi#T{PGL0t(hoQoszM=b-r_-h=nD9j4%Yc)!11lq=Oz!9@5VKGdUqa>d;y zfGNd?LHvgUm8*{8W9(|kSi0@NzDWbK1`lAjqP7FGG6QXC89t6r1g;S8=KTYmHEQBh z_za71d>VhpK|2Q>rT8p9$3Z6tV?Q>;_mzf-^6MRQmG8stdgX9*hNM$vEC&1CE>lER z0^P#50~Y&%gRT<%BL@SOomote1Gfr%kApQ>I^qYF9V%#tAK}ONiGNVEHpIaH=uQ zB;iEG!TKC*z`=$bY{bFF98BO~A_tqSCK`5iwh_gz6S%1h!{K_{`KCaJ~2Rvc{2!8T>2HmO4xy|(2bi;!+iEoF@X?HfYN3et!) 
zCJ7{wG~r-74yJJMQx5jvV6WG_rq|bj_+n4kU?3>EL?iKr55+oHp3+P~Vy26K>cNE!#zX}YEH^GawIFxn6>ULYfYkc=dwy0uS^*&65h4EZuD zVx5d3zO?GvoRnFdS;mGai0{fSV(eGdxns#>Rz8?1#*y))C7DPjRTw0bgT0weIGDx3 z?3JW7TtKFg>8u@J!0bb3a4?61eOXsNqv8_7GAfZ>nTF&R5AW=ctT`*@OD5DjGCv?x zaqfVeiW^kU^}Bu!_NkO^Az4%@NKPqP=)YC7M*C&ZQUXFLdeF}{$N_TD|7)De7lmuE2X{@_BfAvF?mciCdr+5+`!EiliC5ySjD`;5 zql`#SGR8QA&*9tn4twHuk3DmH$TIgSd+PRvJ!8va55~5UbL25Eh$rF6c}kv&XXLqf zal8h+L|$uNH(q~U9xtC)z$@ex^M>$-@ka1Q@n-U7`FMWbT;4q10^TCt65cXi39pQ| zk#~@Hjt}^9eiMEdel~wJ-_M`JFX3xU=pkkwg=Y; zt{d#@8Qd?pe{f!KesDo>VQ_Kqw@DR9 zG!i5UnhRPAS_@JHX@XG#POx0CMzBM0P;gpsMsQAWAtWe75@HW=g(QZw4jCLWB4kC# z-jMwv2SW~r91S@eav|hW$mNjRLV++$7%xop3Hu83gtLXqgr&lA;VR)8;X2_(;g7<* z!h6C8!bidhlY*|ofYa2_00{P z7rG#HQRtG;wV~@nH-v5q-4ePjbVumU&emDZOwl=hYuNJmS@NPSXH`lWQf zbfI*ybg6W?bd7Yqbc1x0bdPkO^nmn`^gHQg>0RkP>3!)#=?hs6Sv{GriLAY>qpY(m zMV2PZlJ%1fk`>E_$cD*g$`;F3$X3bL$kxfelI@V~lzl7PE!!*GFFPT-BYPryCC74+ zJWMW;%j62VUT%<^*+SV-*+tn+nX2re?57-{9H<@GjxK!?VNtg!c>YAD$PUA6^hXG5pK$?cvwLAFH&gD3w#? zR(X7?SXC`mGgW6*H&v>thpLyVw<=rJM^&I2tQw-4u3D&CqFSaZRh6q&sWJ z7j-vvs=9|dO`W06RA;Gk)Koo3y;|-2T76c1SN&F_(6}^ln%bIAGzpq^niNfUO_nBG z(?>HX3lxWH{>or?7+cjTnzR~Q`9M^oOIjcFZ zxvaUWxvsgX`Bn3$mZuHTinL)`xmKxFX*Jp?tykMn+eDkBZLV#jZKv(v({|EkYiDVf zYQNHM)Na;p)o#~*t^G#3OM6m#T6;!&UVBM09aB=-cT#=(F{M^rQ4+^i)4qKTbbEKTGe|&(+V@FVZj7FV}yi->BcL->Sc%f1rP) zf1-aDnIAbaa&)9Gl8ZbZ`F-S-$ZL@|3|2#|p_ZYx;S<9M!$hCq3&U)~9K)A}`Gyih znPG)tm0^uxo#CkAso@`^#ppKHHa0f4HFhy}Go~7Q7}Jb>jJd`{avB)^XxZQZj z_}KW;_{J1$Qko)7CX>|^WpbKond+G8n(CVxnG#GrOk+)7n0A^Dm`<54neLb#nVy=S zn_ie+nckXx$jmbbnM2GXbCkKgxu+dh{ALZc zhFdjOoi);0%i7%9(VA@SV(n^8wf3~;T1Q*I@L6YD=USIq*IGAPcUccxZ&`n~zP8D2 zMw`v%u(@nrTP<51TfD8At%a?Xt*!00?TPJ~?YZrRy@fr+-p$_Ko@Vc5?`s#{dAsNASYQ8S}9L>-PgA9X3}O4RkJTT!>8{&5gTkR!z5 z6FKw_gTw5wIUJ5rj`5D!j=7Hcjzx~8j^&OGj-8Gpj?0c~j+>6#j-MR&91k3i9nTzZ zqfs=8=0^*nMbYADS+pWLJlYuTicX5|5nT{HK6**?j_8BYZ=A#_bSj-@XSCDp^f+re z>pB}a8#|jglbl1Hvz_al+noD+&V$Y)&g0Hg&NI$)&Rfo(o%fxOoKKxEoqst0cD{DL zbvayJU1MESU9(+_U29yMTw7f`T;I6%yAHXIx=y%Gxh}abyKcH3x}LjUxL&#basxMZ 
z^WE(KdfW=P%58PK+%fK0cbvPvyOBH5o#bxr?%?j~&T#jQIURF0#& diff --git a/LFLiveKit/Info.plist b/LFLiveKit/Info.plist index 8a41c117..c1bb3453 100644 --- a/LFLiveKit/Info.plist +++ b/LFLiveKit/Info.plist @@ -15,7 +15,7 @@ CFBundlePackageType FMWK CFBundleShortVersionString - 1.9.3 + 1.9.4 CFBundleSignature ???? CFBundleVersion diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate index 7e60ef0942582c91938e949be495bdc8aef1617b..1c23949a69ba311b6b22e8c3b8deb7997671db8c 100644 GIT binary patch delta 27 icmaDA`YLn-3m?n;#huSLbMUeA0$CQ5Hw#N&=LZ0y*9xov delta 27 icmaDA`YLn-3m;3xo~^q#bMUeA0$B;+n}wyX^8)~&`3igh diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index e3be43c628bce40badbc2eddaf300751b5f74be9..575000bb1915dc25f45c97cfc55023f7a6cf1fe5 100644 GIT binary patch delta 34612 zcmaIe1$-3O{`mbfzzmQeNoICtHiRUEBoIh|1OgFUi@Uo7FF}j5IaqOr#oe7kaV<`v zxE7}rw?bQ>$n#mGwEf-xb6@wpU9z+1d~IeWr4MJO-99JnI5YJ*=lSu@2<6$jQqR*z zobkKiciZot-+jNweoy?K`@PW9>FISZ-CNI~XVf$4nROrCS2y)M6PtQDTvha{dNsYe zUPG^`*V1e2b@aM=Q@x$uP4BMv*9Yl?^&$FjeYBrbAEQsuC+co}hCWlDtIyLH>5KK{ z`f7c>zDeJv@6z|{2lS)*G5x%LLBFV9(y!`I^=JBT`g8q-{=5E%{-^%ZNMopmW~4PT z8m8fI1R5bOBizVkZmQP(b3V>(aX`>(Z`YE80r}3nCh78SnJsAIN>}|Tt6!Tki zq&dnQZH_k;bAma=oNmrAXPUFj+2#iGdvl|?$=qyiF}IrA%SqnKhFYVnan^Wi znYG+nVXd@QS*xuz)>>xlKEb=XBV(z?Sgh8JI;=` ztJ>A<>UIsgrd`XfZP&5u+V$*)b`!g~-O6rbe{FZO``WI4_F#L6J=&gZPqE$hRC~3( z#$Ic$v%j;~+Z*if?Tz*(dxyQ#-e>=0AGc4~C+)NL9s90*&%SRzupiow?8o+N`!D;A z{nnYFe}!noi4^#TnoXc4l*iIm4Z~oVlHOo%x*6&RAzbXGv!%XK80OXLVc$%#JSeF!MWMF-MQPj&w0ps z*m=x(YN8`kF-4hJJJYZ-WtU85~_GCbob5zR}Tz z3bD5ZF-SSS+6R$H!aPfNJVMu&K03-R+_tSMJPSe)vwv3Wy_?bR_?~R zluiD1tJJL9wq5t6MpbH-O7584wPxq$Et5*-6F#Zk`En~$TrM$Gj1kksOfg%`6$`{7 
zu|zBrYsF@8kWl`Y2>i?UtWBR>2umGbeQqS&W@e)@;9SEkuiCQ`|+s4r)(FkSAiqm+a) zu|D@t2`}O8cT&8ZCo-j^PO0+o&u7KU<(VgBp6GYM@1ox&@s;0YQA(5-Wo4Q?GEF|2 z=Cw@omrV0EB}Arq@BTAN$&v1Eib!$DG;iF&nUs1N{2qP$T? zy+d=)vJ`JeJ>f0tiw2^h-)|odRK@>1FkML7(<1$c2!Ho`C&HKd{o(hg-%Gz&ey{!h z@_Xa=R;sDo)$=Njkfna_b)hSIDv`YDRYR%{shZ36>>@}hqlfD`^w6Ao zgb31e>yc8mQZl5plIlRI4wss*)B;inHb`p0?k>^Fvxf0{DG|O*PtX(f!g>+Cs9sDj zu9whDO4XLCQ>y+_%_7wRsb-aGpj62?c$r>WFXP#Z-OB3~^opK35M5!Bfr(H443y%k3^)Y~Z4 z^tSHb@+;N-8O9LchZyf&UzPj;{r-;uL4pHlWHNUhPvOkP~u&B z4<%r+{*B&K@1^(F`{;e88ZOmbQjL;oUa97jYK*7Q`T*U!H6&LP#DQjKWpV^k_zvwPR1=3m#SRk>Qtfbi? z2`FE>Oo^^7+qV0rd5exo?y0d#=G61`VeX-^N_N-BT}J35b!wkms`MhFZs#5?I=1W9 z_HztlDIt&M@l-9MexX_X5Yw`%L zmfWh_r{lu)SsxF~|KY$|A0ASIg|`x<&({}-22zccYC(6Ef=boYOZ27gxdjzF^)k`Y zbH_~oG0PSDO82FLTpU)p(-l(k7W+rD^fk1rjZ#ZAP-;mv-qX;86!CGmHz)z~^v0eJ zD=8lr+N^JB>XXC0Hd=AI#}-n2l&$W$g_M}o+w~n&;*@Zzw$uHtkdn`LxBi0&pQrDU zYLR*RUa1y!7mHJ3N*~k@dwzUKs>SB%N2FRjC8Ad4=IuItK5Kr`PYJ&T`f>e)ep0F> zq*_v{r55O?sqa~-miE-Qi95Kt(z1pAvnROAQZ4h3lkS>+M}#lYuj@DToBA#N7yY*W zt5nNMwVYJHl4^OWR*-5%sa9H|-_`Hw_w@&i&m;Y@R4Yrhid1Vzwc&@!X(ZLg?mqF# zhSab0ckbu}#WwW!hVZFiq>^e?saBI|ib=J)yK#aN>iT=EjdVtO&sZyEq)K0EejMtQ zRv)TqWMZm~%!ZHQYxo(u!TCc4qcdGv$;?Fbl?arSsBTdC)>3UF)wWV? 
zx6r48QpTvm^(aj-Ps`i8d$m&fx}O$SwBRO6z}zn#H<}wQ87PBQJI*sW^y`UDw`M6| zv=uMs8|{qtQtc$wZYfnh&6n}D(OLM-H#!-~Qsw`h=Nny&u2StH)vgm)Z_Vx=Rz&g7 z(%b0wpM&}v1Ektrsy*C0iYQsV2N^>=n+}$0Pq&t&gk(2H7$Zga+_@J#kGj+*0i)Ro9oerW>>Vv*m1Kj#T?gb%48fQ6+1Zg~pQq9I(__ zCe;+FO82^=N|ok|Fhx_W2aPyNOfpR zg=*Q0kNxVyyMfOM?KKYkpTjwgIeb`3h3v)MwTmg4S(EsS!A3x&KPHnbH;h8Qkrd2y(Tq9YN_0vYAW-q7}pHX`DTgETO zZG-c7oK&gc1gTDx>LjU7mg(PGB|(TA5O@rA8uDmb-h$KxJMUP?)bhk z{t|xkjMq|~HqUq?)#+|$Z6!;fLpV~4@c9l!pC#29Qq4c#k;b7)b*5D3r9`-8gu+La z^o|UkW4x3MQk|7jX}%+)=icm;N}rB!IQ$%@2w$LNa2O6pN+qe%v$<|(NhOCX(211@mq>M~RF}CQlvK)PuIi}qVKGN_&jBmkB}yrQ8S6P3dP-2=llUrk$5M*3QFBMj zzboqClv|Th;d97s9PJoFheAo`IXL;&ehKw!N3!Q3FDg0D(OIhNzC6^;(Zll)=ht`c z8>N(RS6@f}FFSDNeg9<#=@`VZo~GrW=U|{Wep%*Q2j}1deX1kG6D;+fpHgXoW0YgG z=Z~ADy4BOS&ykLIO!{x-rg+lc%psLOzKe8BbIknjG4x)lTfQta&#{!^I*NIUOMfWt zhoX9_>pAjsakID|msZlb);TtODJJuzWKc2?)IY-7;@HQ#M#omiHph0y4#!T%F2`=i z9>)(3UP8A^b%#_rMR!Scw^a8?^#`f)^Le*365gr0rt0Q+=&oE&2?=}b_>C8H#}mg> z$1|z^DAl7caGHq+^Y9q&JXfZ{WgR8M_)e`0!@8Tj#s&r|bFZ_)CM znaRw|6MRGi)6djR!`=ETC2xi^KB+z)Z<>~S9_ywyP20WuE5*v}Z)PzAct%z;&P@NMlIkx~y}j5hZ}P(HQ^Bn4Q$ea+dG9bfywTBw zdu|0~aiO|qV=nJzJ+r>qz-(wXk}A>Mlj?n`K9K6eMP?JTspsRA$;G0DQsp0?A?Rpdy|dZh9N>A+%0Ly8>I<%f6RA+hf34HdIax1>(3>a=T>l$_AmNB#63$sA*j z^&HI0$X^^>uU1<=P_zn;{`_#qj~@V@cuuv5&p&j3{NcL~KXh-YtaNNX$6Ut8G;=Nu zm~SpH7n+OA#pV)osnk+?dZ49|nkqF-YH6jGPHO3;=CzD|tT0!atN7;{b1k>uedvj| z)c6FL@x%R0Qp^0|j*mNMV`ZnibXCRIwbSf0mok5>;hul~VV`(8_tS@W^WZ-pIP&oi zpDvQ-G4nVV+mCOq%o9@c`}Cf|JS|=>FwdB0&2v)IrDjUa`S%+O^P+j#^G@L(Z?eoQ z=2fW~QggWPR#n2Y+%kXt_j?KRj(JyVmeg!_L^UN_mPh8({~Ykl{7q{9Qp@5_s-}ed z{As@CN@~6|Ur8;i)B@czswvB}rnWM1^|aDhs-;mS>(7A+@}ow{$KmiuYGm9xJcZ@<}cF?=o01R_woXF#hrW>QjZS1gpsZIq1;m zcU7McDrs@|K_vDL(C>QljL&bhB~ z&X+Q4ktu_41u_L7R)t6&OTVvd5 z>nPzJCRnpP-JWPovL;(oEVnh)nr2P6W>_<&R#9q|q((WaNUf^Ws!6T7)M`krrqpUJ zw`MZ{b1~mq@bR_MS|YXDoTgeG&rLn4)&KNTsWnXD8-!4IP+7&XX;mGijuP?ZT6_BA zOQW^X^U^4_y8n6+w6?h;>nT}_emp#nR4;wFf?K<|f@=-FT*0lqT){t=XuoyPllB3r zHJWD~l3L>rHLiWsI`;4KoUqP&I(pJNWu3OpSZ6KH!=_SeCbi~LYaz9kQfsxCUS9NE 
z&8^F3`45duB0101JcVmJ?_dKfD#32Mz9PeZweIuI$+~0RwaBBj)Y?d`?E>q8_0W1G zwRTeLAT^hJXMLqZM(erRX`b~$YP`F9=JvczPOx5D?}XnX>mCdIWxcW9^5hSzFe+b5 ztna4;^R}> ze|YMrAKK9$f7t)ydR*$G+&vm8HJT^brMceQiPl-Wh+Wh!W*4_h*d^^!QXBN4OM|60 zL~28&_N~;0No}~)MsU5i%i87aulQ#LyCS!hKXh%R)J92d^oRRnq&D`$opGM){c%_Q ze_X`vM*sXn(=V5LyTv~rNcz{M-fnBR|NG)*)9wkMF7{p-`WdB>*MT=%DVj-I&kUq=tM zxd#06v%2~Bi@@Jk1AB};UW9wkx5wJ!q&7`z)2B351~#5%FYuIox;?|5Y0t7}+jH!> z_B?yO)MiO-w$$cGZLZYjNsXT@klI42En3EWF8cUwsJ+ZyZm+OcdR_w-dtMmms`iuA zcn!NIwOj7%O_iO#o9(R>%-$lkCG+fUQd{a?(@cpew#(kbk3W9FW&a?xWq*HGu=m@C zc{^+$un*dt+RLT3LTW1)*t}i0k4kNoXOp9z@3vYsKgE|T_GziD{_;hKea^nhM`QcE zeZjtHU$TF;FWXn7wnl1erM6CL-$`w~)HX=%d%oJTuiH25oAxdHm&|-><|?>RYFnjt zL~1*w_9I`*d%oHlm+{k8$9`hJ5Pt4m*%Y&u{k#1K6ZcYTo20h+!-#F6H6?ab;Wr;` z-~RVEACbZN`v2p+wBOn9_0CR(@4vLeQsV;)C+0TKm%0fqm2bbCnlr64oin}D%jqq( z9a7sRwLMbXE4BSnJNS>&^Oom3T~D(IXr@9pgKua`+@f2WJ^K7I-3Op)64&*2U7gg4m9rT2!^ zZhi@Gm~-TRHvIc*xW6ws&T-C(pAK?;{u1%;@8VJ_oY6o1ro=f_G;mJWn@H`6=T(Xi zSpWSkiF2yww|Ewt z?p)zq>0ISpEwwvRyDPPOQoAp;2U2_Z|2(7q_o?&wr=L!lk7vy1AO7n+`Se4V^IxaO z=R@~@JQp7Qzs`kEr^3H}XXO0pAHRmso>Gv%e+}V0?KuyodrIgz4>DJ&Suv@1QY(I8 z7!@C|yR7y4ijreQ8tVJo&_J9dIn8%II84d%h{tWBE?=E7hu zT*f2rP;TQ1o`KRD?}czsUI*oMWQGqYu|r2NvLOVah(-+ZBNio53Z+5i95vAZjnD+m z&;}&q=t%$hl2tgmU;t7u5Q9PF4l3fHA`YT<5V1)`Og1vv$Sj0-^arIh-JlZY49vnD z%mV{xGJqy$i@5EVrxpb9pvvGal| z*aT)1m`w%jaww0AsD>J-g*s>jT4)of?OMnUeY1(q-i3WQfJ2~X_HmrVX`BJQvwy|! 
zcq4?9z?{DDgANA*L2%COpkhuc`>!TqW(|;%Za4rEQa_#^DI0?W>0L~*| zb0?cS&*D5T;yUgK;hzBvsDD8e1DX4m2HE?Qt$%a0L=xJ7p8I#e*XVIS8S7c#;JR5^%vf}99I5JJGU#}!01 zf^s7dqEP^pKPUl3P#mRD7R*ObB~%6T5mX!XKx=|XGKlI0wF1cok!TQA3+jw+_y)bv z56o~7#S0q3*%U2WLjmHtfV6?88AY3PHzk z0#rBX9H?y2Wn9Be-1bCr7Z1QSCFmJm;7`29TOoo4QbR*}WI$#xz`+jK$bvv*Lnv|} z7ow04`B4z@D2!q#30E0zzCuM*L3J>Q!FAC9jnNcs(G}e>1#_?tWFJh{!DJhJ6u;mR zn2~G*n~fl|8At$a%*KA%TB0L5(f@2yxib%xINK@wj4MKfkZ}l^hGYVfh7e##W%R{h z48?q`z$zRD&ks3+H$r4r!60VOiD*!P>?D_+q_Q`{7`W+w_G#SNioMtmBG3L3uZ0LL z0tPRX3WYK}p##C@p64a4f8Imk9_I3{2cR$~)bBkThG54(X| zLWHM+H!>nO3V@!4vq(7898Sr?$AXH5PX_^qAH`32fEV~fh#Wcsz&zxj&p8M_M+LA> z4%W%RIyr`6JX{mGVS00rNsa^XEbv~4oC>mnjB--KoGg-)^5v|FJ{W`{;Oxk`474HV zNnFBZP`!xMpbZgJBq9z8Xohy^0IK1Nn8h82DS`kZj^Q{6B7z`t5k#&a5J0Y6D32Pb zg|1*0a`ne-ECGS!qAIzlN-nCBi*<6bPHxu8ogIZzG)uK`6?iDyT+e8*qV%B$&vNAfw1B*ag;zJc65`J&})uh)M(68?2> zO*|-T6jK@11Y{L87(=ld3`-Ql5_Jkc;|kshk%#HdlOCl|31pS02L>Poi@@{qti@Se z#dRU_5=35t$V(7;U0fXU5I8wEfhF|;Oz=f}{Rn5meD1vmpL7(){IRr;TwC+7D78RX}Q`N<%E zL$pF`P=Wj`kbgS%;TVqN1>WMl5Cw7|A7W4oWM6>n3-rMtaHbSkiSI!>3($rF_wc|a zM68J*WJ3v50R4>p2A;jR;7^>J`gea&XGkj4E^+E6j3AP~F7Gwqr5_Ca= zE=bS?UkOnt71F>TuI$`|p#fSV38OF>ZfwRL>;+R?h^!0!Aw--Lv@0$Y<-q1~HP8+H zFaYzx?8L1Cvl7p&#HU9^P^I`f^go``#?J%M#*^M9Q7G8c#viiGK-EnCnR4Xyivbbfy1=yK{#K3vUG3 z7Jef{5f&~&v_;}k5~V>aiVVXD9L6b}5uzv=74?IGy5Q_7+5*!+1&S`jEj+|yA&N08 z#Yn1H4BD}1v92J9VoR|O>v0#)@LY)EA;^s=Fj&PI%HnOY2xM4%EpCF!6{m6~Shz$Y zieMl{Vl)oo1WpN2Qh^s-8MrBns;CY!EIA$%u^VJs@<$;`d4mi~nczB6iZYiX(^AYy zso9u|EBF-zUz&j~ofEn6EyiI2&f_`=xJ)4ZFGD%XM4$~igKCta4P_X*GW5Ev1}40$ zFKU44E=v{4&cRYF#~nPyZ$gwKnQ|mkj%3PB!YpuXxpTON8$x`Q9g)aG|G#3ez8V6e z{OSTc2hgwbF`$g)DP#E*3u=aAs5|`0B$j0;h2qSB0n%3Mx{Aq-xO48hz0p`#{z;jtf!K4_WAc&8*yM ziH_)m_1KP`Lex^=g$$sdwHlxi$f(w8ti=<&#A_jH)BD<0!LhaHfcdD+d{7KgCq2B; z9PPn=b+*#~I(xaZUx>OyQ%145M*Q4YU~=#%~+6KW2Ut+ zy>8+}2&hJrBy<85X|fQjum&vJghiXMXw$r4IGZM56u2fdb%XVqUdK%#nni#_niT-a zG#dy4a5ZCanq9_KA)4m_!__<<-O(RZsQE_h!X6=7Xvhp-)JF@n0>`xAm=+wq)f0tMUn_8iGY#_D2a1B 
ziGY*Hu62GC24!tM25wBl1H8Z=LbNG|YM|$Bx`5ef(+^Wvtj!!y+BTH7%`uz-Pi)KN zw`KFTHVB|C1KPF_N~0=RtZi-7Lqjw{J22U8lhGAD&-B?V(WK0DE*={v9fs(b`1uD{RA5P*NF5)t7;1~P~2Dcpgy)3@fn)-{E^~#y0E(r(H6$kxZ7!M{o>OE17B~Gb_o=O7dk~ z!%f@<)o>+0;Lc-s%JKq~H2F2&3PA#*b82W{pgLzjX86GY8=MuL1Cb4($bnp-_nq@0 zKMEoqg;5M8Q3hYp|IQV;Qw7yg3w6-|jX_U4w?u2SLq{Z|D|(<8`eFbi2IE_dz-Wxa zL`(q#-FYVFfPwD32%c}#J2NnyS7RMEU=y}t2XEH2<@T*VFif;+g6 zM|g_o_ye!-2JeOFk_swZ>A3MmCiudD1%G5kFtQ^Y5r{-y#2^-NNJLSTKxvdi1yn{g z)I=TBMtxngrOLYQ5cH}n2f2If!Uacg;;{+ScSD% zkB!)Z?bwAMu%G^SImDeG@e@ws49?>cuHZUu;aA+lLp;H6_#H3t7v2fcRY4l0g%>iy z2RckR5r80sAk4*0PUJ=&M56!-Apu2D9HmeeN&>mkS8NEP7 zyUqlI*Odx&r3zg=zg_Km1i#=B$gGoE7358`+Q@wa^%B{>>!J!W z`Fc{eo|LWUa7+M;^jwea*oiA(9(vvtqL+ru;Oyze8okPZiuI!Wy}F?X2)Nf0EW-gD z2ea1ev5T8O!MWMnA4K0f3@p@}g?h74@0REY&XC?yKqkHCfdzUqq`fcWr4W6{s1FtF z!y0|4ULWSEPi-_oGxWt^(1t!M@jYCdxS@P~u7Wo7^@0uu5>XmuK{fg^M12{ez68*B zDVBpE`VvH6g6R8Nh<*gnPemxAkQbG~EcB~~-WUi1=|@#u{U}R6%F>U8`msbpbGsLU_X8Wz3u-3Z}DD;0ojlXRAWF%R0P2cXbln?(1rdF zn9dy*8L$XDKz{}t!Zpy}0r!cLKwN$>Kv`W3r7Ip}_AF)$r`L7NA<=+D47B!EB$wgTlDI0kM^!x5YYb29KZ`~^xpC>Ufi zh>O=CN;{|)sK}tf7!3j$v>pU7XeX}X4(24U@^#g*gBj5eH-?(5W`huf)B!x7tv^qWOTt)a7GMg zn1>Va@MAa*t}i15$b19=k6?gDuk*n9+8k06i{!@!XvR)g!ph%G{l^a5ob$@O4l zT~M8oEwB))K(HgpY9yPCd?myvf*M6NM->KZj$+MGtT~E6M$v*%mxLJY3x5RA|Itmk z(-!T)Vxu=>E8Yk(hOEb=MFA8+G0@vF%*L1$5a5^}K(=GkfrZBs?AS7>f@&CvNtlAO zxQgpSj3c3Of%Jb|Fn3ylOYFE#SOh9CZY`eRC0+|LK0nB6d{Gd@c!C&D5aa3bczQg3 z7hZrfWBhv|CgedO#G@U$qC3`s1Sf2V=lVZ^(oLXr6Ipm-Ih4m3xG@c}L zgDTD#g~_0jGk%5V@n=HJq%|`OA)fxvl-wBs4xV`dH$b+tsKhKPF^fvf>W`sd(^(fm zP_x){b|OlnEU46MW@q*iu+ePhW;Sy(hsWnsMpZ1v8ZZOoDCVZ8|8u=j7tPQD95k1M z=5i2i67$l7W9C%>$IYt`hI8H)Y!hOB5ORXH%qNlgRAfF`&%X`YHUFs)3#izFQYeFw zn1m^CUE}61?hCOnAL2n)3+d;=Z!sJvK-LR?7GhB#av%c9=!HJmg+n+Z#Nv!FVS{!q zZjH7ep~YLV9e)Y2Bn|yvqH%}bFKGx4Ua|rV$Pxx*X$Cl8;cI*YHe7lD$8l1KWdyT~ zz?a3KABKRym+|Fkko4a-9@pQzOH7| z)%0~W{aa1{R@1-L&+r;=gjiDo6+n<{SZvKS%mCS~VOZDD>$PFX1FEsM3;KYHtX+#O z*ap^I%bIIhb6p}_Oy|0?mvA268KX+`1kN<<@Q3iQORMtz@=M 
zMJ7<4ZNZ=d+t_qlZbTs;VvvZsAhB&OqS;0?+lXcxS#2Y$ZDh5LthNmWt=Y!JZu4w# z7nE-s<=gfIf8Z4urfu(q*sg#Lx0ggU)JG#U1=ZV5qTBm{q_+D{vBzgxFyq0S&-nJ6K~E)!4NM`*0A<$}VPV*9n})Ib6hLT*FP= zrvJOhYS#li#xuMC{o3^!Z-t=3Vs~n2NRJH23~=Q>ecIg;twEc1cLb%}-4#913w<#F5`)1N zaQ6s|#yBujyP2uo(=ZcrFdr^Tyn87q@9x!DhYi?-t=IuduzN2K;4qHjI8Na#F5qWe z#SQ#|J789J)2`i5@f^&{?pJt&_d@JRMgR9OwR_UR8=2q>`n$)1Ke8ei*%6KiL?SPk z<~^~9Ln4Zz1WKbEDxfl|p(g5}J{qAZTG0PJN!)3R4(Nm~=#HM~gZ@atAPmKDjKWw< zz+_Ct49vznEW{El$11GFdThiNY{xGA;NoUK4&g`qgp)Xf^SFd7xQ<)+75DHEPw*Rl z$4mT$cS8K2APv&O3mM@99VVOzKoDFZ+=L+~aw89-Q2>RIfFdZ4QYee^sD!Ghf!e5t zhG>H3XoWUtkFU`g-S7>1qaR4#^}|5!48bsr#2AdnB)BmhvoIG6uo%m*5^L}szQ<;4 z!%pnMJ{-gm9K#8m#yMQXWn82GKiuTbZQR8JJjOG;z@K=Hw?ga{NDU3?;SB?nbZ;C= zfC}xU3VX?ZZ)>DrI7n>oI&1~K-+KwSaGMhB%M2U-ptSoa>AqU%gF&D=`xu&iKY*w2qWwI7e??I0{ginB0L;NsFpT>T;UuWU0Tr%H+%OymqLF~Y zAo2sr=zb zRFymR&;Y%`JRBN~`B(u?pF=Ei=msd+Vak7)iXHX^0Us`fGH8JgVAu|iz(jCr9^MFo zKg>)VW}U;VbJ(-adm)abqyI;|xDx|1IZ^~HaD*v6(hHM7Mn@>&5f(W@8b=tYBhT;} zZ-n^KLNI8zQ);Lv0IKj)F*HYe(A%HJVH##&2M&O0{B#v}KrqKsf&L!Ph=O2^<0ViZwC8v$ z^abrbJ`@DvI=-4aYe89$GnL2Bf~=1Jg||YS2m`}%f?+vP9yL%4J-~FINWoU@1zDYV zh~L4fdoln#|6~qSLT%IqL7XIrlSAqM$tBz&fRo?hCeWiPKc)bS~saELy|!`?%8#%IPt1gA$+K4l+4? 
z5R~@xQ&5pJ^!7{y2;>ZB%^3nX(*S)i2tzO*D?lJ;Xw4a(e}>ka<%~IN!ygqv1<#Vi zSr^lJmM5P57G!XiC!QsPv#0Pgt_X3C3Y=qsbG}GKX_Q3=bjLTCjU`xyA3^r#$o||j zyv7?L&Sys?Xy;#)%I*j{xj^Bm&*@+N@f?4^QS@}5yKY%Lze2o5IrnHxB5bb5M zy38h*TY;c1F9Kz~%%YcB^fHTHA&@JjPzL=#`>uQo&Y&wy+Lg0HTxG4Rl>2HJ+JLOD zcEubl#d2JSi{ZHXK!|HZcr6NKd#xK-_!`k(TZ=8&23m3Ly%5(G6i0bf1Q}f)j*<8Y zoITes3UR{+PGms?v_uk^l^Y~=V;TLwK@>L#;s!z73_?!iLQ}LwdrZT8EChpflcBtM zO^90oAj4ZZP#;w87L~ij!neM|2D}pDmsCiDq9}**=mkmte;LG`9XNnPAj8|~;DtgU z)7vF65M+3pVZF_D;x=WzO{TvxC%-yip%xm0;D2SHf1QJQcqhc2v`CLCs0#wVGug!r z<+w8!S8)ea<1THu%h27W*LRulyG;1qVK@w?`z}?uXCesM&;)JJ4kUArWbTp7eQy}x z*!z`H2lX%=3*dSGf1km+{{}?)pei`v0sVTg43zN!Wqk0b5Dx_?<-_XW@rR8-YaY%A zt$D}+4;j#h9Q&vgDuHYtQHe)X;t`d2bQfF`9x6hhsh%)A zPyE0}PZ*ph49*iCf3g`{g?Q=1m}eaGjANc{ z0mnVtNB@6gI)4jAI7VP1CWB0Vqap+v3?dfpIJ?D=YNW<37`?}d258S$b5YN9q~ zfQq~zsTZ{K#cy~a#P5k9>)*>_I402l-zVV`ZsE2Nf8;`b#G)4@27!M5aT2G6_>+wO z3_}jIMl!m9-v4<39Q)D)^YN1Tc-a#v7>EnF0rq?4ismK(g)sp{{%RH*#2(^Y#_~65?F}6hSfc23fru4Ep+xP2cSU?R!W2-qXJKjgW*kSc&gJ zknjHzE=8mfiqKF7RZtD%Fby+s8MkpqC`uNDA{>p7gf>`=HCQKY_g8 zVi{K8DPG|(mrznyKuy%fBCN(*JirV5A(S*xD2O=p$50Hz30%a_LQy$JBuhK za;(SqLP@V86MWDD9q=`F;vf#=E}r4JP+VRah2mwxi2wv52O>aayx7329_phZ+F}7H zxfkX1S_w+;^&JSnYcsZi4ZZeYAK2LI2#$e@d6BHwIb5XwUS#b>X}oUYHi*uPOuZh1 zD80zo>rbJ0$AGeVQ(o^x6h#S?MmZ3ycV$#VP1FHFdow5AO+huhlRyIARK>d!y3l{` z?&yg=peNob7zA45Jq(lK#x%?nN`|b+g-GN$nb?HC zDXnDY*vv&x3{)sH70OItnWthpSU>X|q4-px|3388r!gpz4<+)UpFUea4}ErEw@`dp z#FvA8DXnh})W$rl##+#O-|vOuR}iH@Qhv0AzkjUwjRhO}%?78SAG7245U=1l|NY(y zh3k%@^8~#X=$p>K=>%r@A`rm{K^TT$0w!S!sF=eaIY7l6RE+i#PXbqlZb^zI#WM`6{Np>dLSqd`22YzsXK&>RSK|6HBM(o8=9LFh~ z#Rc2|?XvFRK3tEuc_I|s3k-)%&ukX6JAo5b+WEA2AnZY=D=AQ4bT!K>~a#dlc=4^=!L!*05)|F#!$?|A)LcIq4=i- zRq)S>a8P#tNaO{Z`LmgSQIr6i^0#Xh|MIATMqp$AZsf6D6rGp>TN`cqbas>(?z096WLCIXm=fJIo1Z8(IJID_-Jge&+J_vnAXL+&tu0l(n|-U=madg$P^ z%NhZCo0Z;XjX^BvZB}}ll?r7ohYF~K+Gv6{=#KtK!5~n@tiv$@lQ9+i`@c%o+1w%W ztjn+wdvO%UL65VZ1wGDs1C%@~CC^I9vr_V`1fKP!Py#jhAqY8<8+pJP6<7d8P#mRD z7Ue;;1Nry=mB8lcgr4XFq7EeLK%x#Dg|Q&&K%x#L>OjgJxB#oL3HxviCvY0)a1l3g 
z8+Y*lkHL%tG9y9EMGyf8xw3E*2&OV96giL&3`r0}62y=MF(W~gJcv>URYQHWMl!mh z2YR6|2IE_dz-WvE!3WL2Qmn^r90pZ(1ySapQ=rU2S3#MBD02{H4x-FK5AnNDg44hk z{>X}8WJfsiA_lRDLn0`7Fr^NziDqbrjv#3MBC`@q(7^-ff3V~ZQ3n%sFi{6n^5Dss zi{+ri!9*Oq7Y9J(!9*T>78md{2t1g;gKvR=gMSxFHcr26-pB+3&t||vFbF&wfoF@L z|JfqJuw-K>vUSD)EXEpa0UKvKfs43|YoNE;=xsK7n(Y}lbF%%3*WmmQp}!$)9Kwbn zp@>30(AE&z8bVt`O2YH{AMzEL#*iwgh7O=-A#-sECvge1B;+Zc;}5(7oAFnzmF($2 zw%OS5I zgp+kRS%;H#I9Z3+Lqkyca2KTyr}W{JJG>YAfGERLFbG3293w&0;cm>wHk<;%gp+*^ zvd$3&1}g_^<>(8lnPU{DVJ;Sc!OpP^R5r(Y`k!MXcea29b37JG&h!XDZWIIy<}8e2 zD2XyChgu+uoMe%cByy5O&JO5=F6a*0nsW}0;gwJ#Qi1g&I4dGTk(>TUP^O4z6hI=1 zq6A8#EGS(>b<_o$M6gK&n?$fl1e-*VX~Y0X3fxY>xfm@fK6bQB4{yx*-n@Cy{$pA&thRErd1vZJKw2_oHlF~*} z(MT#9Nkt=fVh@<7$o*i`$cI9S@&(C8xstdcnUtfc;e=Oh2sb^ZY9=$VT_|Sam-EJ zPEfJ9heC;G!}tJ%At!Q!zeugbCvu0);wyt*$J1;6ioX(11>(sno~+_Kp$odBC;Fg2 zQZNVv7f(R(v%%)^$8b+537l*RR4Rca6G$?FpL3OfApnM69(F`ro8g0Rb z2?IdZ39~>u5*C1r6UaD$awcrR9?+fyhA-g=*gW9`PU9SCS;DXI1VkkgUf~Vi3nkIT zfFv>?i3~_01CnSV6gd!qNW_Ea6N{n*h&+*5;lEv=BvPqFf=(pp#CGU_Z$NAKZy6~3 zw+xg-hBt9NwqqB5z z@cE`c%#^eULQ&AlOL+-WF?Dx#cV>2H@3XtJvpaj6ot-O$0)nDuxF{+jiW%w(-q1ur z4JGpusc8^l8b2|`%uD1YEdp;zn%!PhobwvKoP*!b^SqzuGk@rDnTkRx#8~0Ww8GL= z6e#m8&Oi|r=W#w#tT3tydsHFCibvVb$L3!tfy%uYg+nUGaS(?viNiSw-B(7bqXCCk zI=s^8DziwT(%32`P&t#?IJoj^R`7i!QTYH5V?iq)ANH|E{_UgB+bu#5MQQ00fA zz)x)m!U_xW)Wu#SOQFPUz z9A^Gi?qk)_9D@$3j^|`z=%A{Z1f6uFld67-%wawYSjeR;<_bnw!A-10{8e|bnx~L# z^=R|2Hs5N?RNab*s%ry@o=9-`KGhCONU+_GyA?_M+*Zdj#QS%pe^HC^> zjKZN2F-I(MWD;NEt4KK_<;d5W!tkj+bRW@uM7)t+R2ms1#|&m-VIp&x$GM0*qUgxA zh&r-@TeuZ_6A^Xfeja8$kD`&tMl=$6l5OT6dELXCyv+`Fp`(b7qT-A0#s9Gn`?4RS zIRMKP{W7|WoznZQI2M@O;A=qNUo zQ>effP^^g#bQ4QsT(Kbvl$p*9j3_prJ@404Y!QobGh$1*ft3v3=3^CiaS!*R!`Ndy z#TK^m9Oe~!k(c=+JJ3~3SFt@^eZYtOH5AnD&A#lBE?%!U|i(!*@!l-h`1u+mvbf8Fv1FM!p_9+ zM1k=qkY4;Xq!s^1D5zIVy<+P3vxN1dJ*cK$O!WtI9H$bYmU`NeP<@73d>hMJe?2R( zsPzu5zZK)G--r>`zluZack{1M&@hSv_&i3^Fn-vF0~@ANi9;KjC}SKAI&ARbhWq#l zo3RrOcB0`G-exBsg@VR_z1as1H9DkGQ;ml(iKDU1jZwPEF|3=$@9cTtA}--Fmar7t 
z+ITZMYy3WUum%g;xRxh*9kDiz=P2TIGKfQ(icDh$nrb=^XEr%=xamR<3lV3NgPVTH zFL^%{G#`nVHJg94mo*nC^DWN6a{AdfLGyAH*8BtR=MjF&&oGMS7kLTeX#PEK@Kz{j znPC1cU-4iBEt5F`qi8t=QMEMCOo9&1LG>+)ZE*uyuHY){LCY#uWBx4)X<5&sD5OOp zEq~yhP|&&$8f$e{Yu(89CxQvXeIBh=>kwz*m{u#+Iv*FWRX(kbY;|O-BU>HWdNqd7 z`Z%_s)eu@g2?dD*IEHG{cxS>85{8fveqt7LkX^zM5*HXkVvz^oCrmwY4Q_BkPYFFG z^pv<8(@v~IzzG2-Hn0gBka(Ic2sz;!A)%OreiJ*`#d~}Z3fe}YKtD4mXgeI)w;g2& zZO3}}I^W<#A~d0cwgIx_nZ^us(l(ogNTkgMv{}=(Te+PzD7wuc+V167yb}uAozY%L z67OpFuJ)ND=Wg)dwHA2vy7o)C0k3Oc$!*A?T@LMD*)E6n`;bHXgFMVeUf|tO&~X?i zAoPw|T!dkD+=9Q~v4KsvUM>OfF#r33UDmO{O%II%3$z(Hw(AQd1CPN(U(& zq`L9)l$WPOn<_F5-vz02na>3*VJRX{S>=?&Qnn*?2Y0d>S2Xny>v@#N8Q$RI=RC`= zaj8<<*o854iN8zyUE??iJKQyiFL4Bl?^1l1;=7{MQBM<+>1rcM3e9x&&`*X-%)jey z4=-Sex(}uSF?JhEw+_00iNd-S)%`ku;E(*75BX~-NGmd}xU}NZib^Xgt*Ep+m7dDU z7+KoL(v2uAt*~^5`KJpW&SVaAIUC!XzKZX0E!T4cH{-IU*RqZu@gP6u5q#^VpWydM zuV*Zislph0%FM)hJ!f%_`S+M#&xKsfA{OJ29;4~8ggv|Yn7{L{P|!OXN%iWqSEs!? z?G=CT6m;C{u-*u@=(ty+z0I^T4|~x26u)Iyq<)B2(7S_OycY`k_Cch5=H53Bo7#6A zzDE3zt)TBDzKI*r=dSlz#6Au5X`oL7eV213-{mG&avQ7oId-k@d46O5eJ^=?p&*9+yFfA0hjB8oV-K=FD#?t=~>#^zmHoae({o8nvm-!v9g+>-{ z4#o|C5DEsIF<_|&rc%oQXQGh-#SeJbfOiaRD1^)r9*i<$l$qn1OeMycQB0;DO=sFjk|M(lW;2(0oX-WAMrI*b zu$-H*QkmOX!#&)KuriOi1R0%Wd^u&FWDCz=Oc~u}bTRk^q9|Z6Pl@TAjujZ3!`WDc zK}8HIV(@B~aUIvQf*ZL7i4Ly9wt+f`+o1$^IJe&l-Q$n6j_)CT}xrUPI#${ZJhVIuP?vnj#Jy za^TRnna=_iaw&_^;m{3;dPqS-q8_@FyZ8a(9#YWIW*k0bFLGm;V*WX^&NZUbTo*m` zGsrY_p3`|w=Q)Ms<{`D5gLBIeWA1LGkds1A3b_Z-UG6>p84B_Ndt(vv7BO!T^9M4< z{PTx-_%cWGRjRRYdGpRIDzB)#5#$w;S4e&~bCE>;a<1emzQ>KM!II?_lvhxGBU^cn zZ5TuTRd(Qi?ho>+%zqFH3a&&UOdN?66j4w_K?(&6R=9u*xtK*bydZ(Xbu7mS3OBP7 zi4->Q3=S;3%r~AB1MgUttO-ax$lye_0o0OIMCk zhc3#xDEHtdlyy+nL3tK)*z?a^gprhuq8bTJdRjf5o?iFTGw9yBHL-arhpVt&U9X|n z)NARr^*VZ8y`ElQZ=ko(JLvIx4}G9MSRbMf)ko@My`B0veWE@|pQ2CKXXtbEx%xtV zk-kh{rLWaD>Ra`l`fvJv{iuFSzo1{#FX@-{>-uy3h5k~1rT?kF*8kGq=x+_x(2V3p z3L~9i8NPE899yIMm{6I5n&WIiWO+Ml++O(avaZ zbT)bzJ&j&QZ=;VPjX}l_#t37iG1i!5Og4Tt<{I;i`Nnc%g|X6DYiu%h8M}==#$n@# 
zaml!BTrsX1*Np4NbK`~a(s*V3X}mUEe;IF#x5j6OaHMdgbYyUNJNz6O9hn?C9625N z98r$Sjw+6-j%tqTj{1%UjuwuVj&_dDj_!^=j=qk54(S-?nBbV|nB(}>vB`1LamsPp zamI1han5nxalvuXamjJnano_v@zC+a@y7Ai@y_wyR87rHZl*TVnWpLEGBcY&X0Vyl z%wy&^3!0H;5woOO)2wCIHtU#m&3dN8tZzn}ZOwLOdo#xDV0JXSn%&Gev!~hDOfaQ6 z+#GF=F~^!enn}!wrrVrmPB&+mGtF7%I&;0b!Q5zWGB=xB%&q1&bGy0A+-vSP51B{J z)8<9ABRm-Yv)v@YY(Noou{u~Ct!`GF)z|80^|uCFL##2@SZk6s z#hPi&wdPq{tgY5IYrD0>+G*`_S-Y)0)?RC$wck2y9kWhYr>%3=73->X(|T^buwGiP ztUs;S)?c<_C$W>-$?Oz%O53t++iCmQzIK2eXlJ!^+M#wnyP#djE@Bt8i`k{@a&~#U zf?d(BWLLIZ+O6!?b{jj|Zfm!*+uJdA2fMS~)$VThv|W8{mp#NDV~@3evfcJ9dy&1^ zUScn`ciOw`-S!@Puf5Oy&E9Vxun*eD?GyGn`-XkfzGdIG|FGZM@9g*X2m7P_$^PsV zPQ{tlna-Ks>E$$?mebD};0$zTbp|_gICDC4JM%b;IEy-qIV(9UJF7UWI;%PBIO}G2 zHg>jfws*!jyE)^WJ)QlX(mBF8!TFPOnsbhGp>wHorE`^YjdSC~tm%p?NhU_88=mZ~ zO!n@F=jk>)Pp541HvU?8<>i%z{qlqc%s(-CJo=|Utnps(+ zDsMJy%BDy~8RL!$Q8p?q+^<5E9%{XEEn_;lyX8geyH@#9O{&!{9os23u6Ea! zt)t6@2jmIJBfJ!MshrABP5X-<#3=ETm@1};8Dh4WE9Q%ZVue^Iwu^n@m^dr0iJRiK zxGO#>NtJYprDRe9l^jaAQcx+QR931g)s;F*6Q#M*M(L)+D}9u)$^>O0(QHw6x+6jr ze`WWEx}i!|Mg41-J++eC6{bYi$m8{(QI)FYYt*gSxn;ZPQzDf}?R`qTnIqCABu%LH z<HJTh4>ne4qx_CY55l#p4f!u=^s$(G`N zf=F=4WFOsm(kTs6dO!K{$ItGuxs(PeUMBuYRoqW!9ALaHgHnrg8wbVX01Cl%?u-wA)=FV)miO(WH`Qp=pc z&uANd%&nAFYCHS&D^Q?dXh5L?140AnS>b%40pa=b6dEufF(+MD{N{Q#{JMpba9X7F zB(K|gkO*F?J9Qu3SNGF1>X~$ZJwOlCGrMz#E57-pYAw~X2!Exlo=wlLhv+$kzaFZG zN!3nBlh8)0KS*_?)V!sZF(HlA0;Cq`?i;Q=Zyc$Y5y4CJD7}bYR4=9%*GuRn^-_9i zsXC?VBUN9i`bjmTR5MAHv;w3WxI{0jm-B4JW|j2HdKFK;RJo{UC(m9jrP@cTeciw1 zRW_$@s5jCZlR*>FNN=V$caO`bR0$}YAfy^3)gtHgmU=6`+gfj2dD1`IWlqBcvKE)k0Fun$V`XS8jK)#)|2_ zkzeu8+f(sdsQ1!)>wWaTdOy9tRI^Dnr&M!EHIG!orJ7GtTivB6u&&ex=|8x`BNRP( zcBzI)HHSNQO~p#>mGqQoE?Vfr^x^Ia5sH7(x%vqAxCkXE`peIa(nsr*HB_pUIY<4j zJz8~&>E8a^s^h5+-_7kQNREb8%2kQ!8C^Lh{(pY^+ZU%Qr9D5BH=&g$h+kVPj<7FV z&HVC>{E6SFQzN!b_tQ#%NUiwm^YrX7_5c3XmvoowE8MpWC}GMQ` zi}d^Y1O1`?i1B%%Kb2}VsaBV2ZK*bqDr3}Cs?FRhS}Nn+dZdy?eW!mG-tL|LifI_a zP`oM`Nu^ptsx@V@;!>^UZp%TgzsKB2X{7Ruxl&fDjipMhzYKX|rH%9qx8Y@EFuV=j 
zFbs!O>q@nrRO?H%fm9nxwb6fvJ24{9V1FC5e~<3hueQ+tHM8H2{hwd_*Q9 zKL*qt6Qz{0=6$))7Wk4(Buy|l%Uig2M=3$MiW^*x<{Kr9l13?m&T%5NmMT4qo^O;j z$|L#jO!DmAPX)2@Ad zy_o1;K_!N56rTV4w(wwMs3(pgQtj=2Qd9}bJjxg?f@jZ82>EuzIAen6i1AYGH^=x< zs{Ioqb^YXt)NM?W>Hw)Ge2;XRG4nrx%ra(6b)ZyT1X8g~*|Ke7;$!22N{pQSuRs+CM4gVh9W@FobcG+(1km^vW4oj$1BdEl<3jYdfud)CC9L`06!$%}k3Mx^x z%fE9tYMd6q3yfpNapQz>(%@1sN~)u!I!3Bvr8;hbamF}noHNcF7o;VHBnN!4;NPkR=;6*E(^v@NIzFUBx$(LvbV%!E+r+^qEqfF4g>V9jZf<>I|vQ<>v!}72dI=a-{VflUm`G z#>|8&a~Iqn5iXl~yvj8akSI>d?rO{YvVfq*2mP1j6Tfa~$2Iy88RK;vGFb z-|CT6s%zX2N-M#x{*Hm)x8Rgt|9y)=j=>!4>1O^p4o-v(-`5%L;5477Pjv)&5}*+f#~798&emP0aDLW5$1vVJM`!`TH_+9ZRUX zqlBmGG>)n#Ry&lkdyf28^`IiF9qax-YWpps&5nJ%G<9roY;|mNY~!pM>~`#N z>~$9_tN8NDCq_vs!V-MP7!SL@g= z@o}-8>O}XBPkcvp+Dzg2C{mj#^&zIHU#)ii_A&9%rSkB)>2%_2A2W@amMGZsq@~T$5M$5RC?c!Q? zZC^XSPp9ZJrmyK|W@L#>roS0r2D+bCP_h<0FVzdpy}Wak?AoNEw63uFSazr^I{;xUA!UJl!KL&p~V}R&A3&V z4b4VoW3!3bRI0@JK&lU=`bese7nse>7M?eCCa(~lNcAaia~RW?o-xhFt9HjWoH-9h(n?p?Q zg?~wvH;>#$K7D@;XpS&PdfryNk?LE|`-)msm5}m@?<>r4=6KI{xchwII}Pfz=XTyE zApGkKImvU;Lc-MR$fKN|^g;Ri${#+2&H-Nt$!$$2@bsxxid# zE;1LJOQe>}(-KXUnkKd6QcEGVlu}D2wbV;!&vJ8xxsw0=#r&1WHHj_a9V+jf(^AAHx7UK!`G{pdCWY) z_4vz+KJ%p1^sg^h%roN6JoBu1&O9$QLu!`PeExpTVqP+@dR~P5 z^Xf`){X6C#|9+Wb-ZSq@&6b)op;FVoUvqh0HwBfL82ZnbIOY@c`G0=)h51ryzEbmZ zcd4NSd%ZE=b1^pGn(w5RNoxM?`8AZKKFO@~T!1ar(yZiG3M-|R%1UjevC>-U+#PFr zJ{!{81;$I2nKJnrMQ6hBukUf@}|tvpf-ms;Mxi)H1r3j8}A;~y{Z zzO5N$75hI29sKsP?%P47El&6aRvD|TRn97JRghYKsYOVwfYb`gK&{XMtCCgOs$x~O zs!6S|)Oc(qwINa)`rlV`%W5kFOVqQPa{aLCTMev+RwJvi)kJEMQj3yW5vdiGS}~~= zUuZS6np-WrDp{?#erP46R#9q|69=)XJEjh=+&WoZxFK1crB-r|6)Uw;o>y*uuI?7E z3g%kzRu8F_mRgy)Rxhi!)XGY&d_qXmzdr{k6WuxX%NzMGH*ITxmEgJ223fprm?yPz zb1fH7wQ~G7A*9B)N({9|cvg2=!xO(%;a^92)~o&P^_ex!n(#foAEj35?;EBy+4|{U zZ!D~-*0k@(PM2Dh@5j!vX1h<+Rf5~jw^n(^Y=O1VT4XJ@mRL)zW!7?Qg|$*@)umQL zYBi-+OKP>HR!3@erB+XB^`+Kesr3t!w;F4$bzg4&)+VVnM30n*Y}&k=`Z(m>wxE;F15!0x}RG|6WTTRGGkkHkB;jZU9V-Q9?|7u<0?n@snIgN z{kL07ZmC{QyaQP$xdUl@M)B^sTh#t&5%-T##CeIjVI@YAq8RTKk%H{olp9 
zW!?9T&u#0Db=Ugc`op>>wboK2lW3{6m0CNgwO`2KJoMattS4rr#HooPRnP5bFgGLj zjt0uStZ%FjyzaB!TJNm)QtKeKj#BG1&-!S6vOY_Vxnih(a1U#!bWCfjX6ziB%O`i0 z7jDn1|3Y>u+e>&au-;p5?X-3}o9kq!#8tXVt((;17T6hVZ_jPZc1W$e)Z!CwTUt+d zl}5^>41V0CY|Z8((nV^#GkxKH-bnGwm6EZ{!(Lb6TaVo?EHzh46WanTgHOMO6E$5_aD2k9m)Mi z8z42_l;&vo_5PDLzgL>Cw;)^l@}pf6fArfIOMm&I{IVXmAXm#Oc0KMsc2(;SySiP& zu4&h@Yuj~f#&B3-M>ty-xpDv#DDsF>vMqRWNHb-h4IZtZyrM5t73#GP5YKx_|#8c@FUp_Imx7b_lZT9xW zqAvaViLrJ_YMgMFrFPxDxVf@D!y)?!MY0b|ZP^_AsMMBwKKlzVVV|^5^Yt$u{@G`w z#yiumr>}k9zRXK{`+|MZW+|>otEBeJJo}1L*1jgSUl|qcfai0+)~)P2eCA}|mD=j> z9|GC;>}R~gx9{5z?1%Ov`?3ARek!#!QX{W*Qd=*z4N}`EwM~oc=k^QxrTvQkyiPw+ zYMZ6DMQS^wwkPqw`}zGt;s<}3dA*(ZnQ-e|aXnh~3G3cEE~abz*KnOloXPn>@I;_u z)_10Gru3@hOf9voQrnhTnCF@jfmk-9B znG-*C*7p6MPo2H;B!23gj1SWJ)OlhJuMl^!Hj36bl)^g0q;`-UxGeYcM4l({HywE+ zh&()R{sIFAe5*jXv%r^+`bsf`!J%a6==7IYR;N;@N^M(`YuvyBp# zv$(SiKjSRnEa@!eEG@NTQadiS6H+_*KR@d0$7jKbzdvzSch+QZoZO^Ndp=vs>~hv~ zHsC8?Ud%WfO6}~|^U&FZmov_$&Sp|OC$$TIpMuVoPCh^Q=P6^vIoo*~{67puT*-=jsJJdalze4uwm|N1K$=Z~V1bCTX%YJYgH z^EdwQuV$hcE5jft<*opU`yIM2z& z;kMN7Fe0vR*Ykf(;ny!ZmwlPOyZ=Aa_pjd|IoJN<7Zln(&l{$1zo2k#@(kxBPhOtk zOkb^b_8mNW3sE;m~ zjJfy~Yp@O*unAkR4LfiQCvXa9aRHZc4L5~wrMu0OcS5AMkOc&go(<9`U>c@l307k* z)`QQI({IKe?8AN>!ciO-!b?L5#9$(3g8;n<(2D@Q2+)fFy>8JTM4<#qp$y8Q4rrlGptfrg5A@9@I(rq?Vm&s3 zp4mIF3wy8^^v*tqyLbcwb1F!Jq)?F#1n1O2#hg^i84iMRmPbWYMpgRnq#sTX*~&t#m1Ihcn9 zScD}T#&=dKCvg#YrqiH;nJ(fAuHzQ&;vOF237&)MX8H^7@KNv%2UOXgc>Gf$4bsCK zoF)Dg!`~N~5Qr?G`u;f(Mz#I(@FX7!fcfw*g5qF4{L7*OXpKKf`cpmsIv`np67_G2 z7HEyOU{3uzAr=(RzXvFse?JU_#9$1=NQ~iZ^B>QXiTDXqF&$LQpZW8rTK=3F{>!iu zzhW&mfKl+@hMiy>{C@+Z;C}?iaSCT~0hhrg#s4Pm;14{&V;2w4@Di`_79a3ghyVsS zKt&3qMml7G0Si9JhyVm37$FD+vloyT5h#Qx6hlcchXLhL2`(l+pgJ0$7204Z#$gV~ zK7gzP$Tnar&fqGpgJ1&*GB6qFXCQ41WW&JPXo6-KjtQW`fxB@SM?u1YB zQ$ZWD@4_J*0p-j760{+NiiBiG4%9$HGzQfO83Sf1gaAUeVFw5zgdjo)!j&Vv5IG1S zhYt#)6w06#n1vjjF&1tRNDiu!gR119Dmhpv2Mgt3p`1GWkO{@Wgy*aX63N*g1F-^B zA?HS1!fpDWlir4=gf|QnKygrw(E8v46G||l{Xj;cL$L}h61o|uKzl;3;uYv`=w~6q 
z2qY{ULO@l+7|O6}AgQo;^u$aT4-88f!xFX|hjA27z;uVb6(UzY6aiV~ibfZ7#bnIH zZ0y4^oB%=OB8XfBk(+BnZUV@i6J=2iv?w9Ym93BA@2`>RE>k5zI2_*@qx8ZX^ zAmIl<0O6Q0{KQ_Ew*6??%*+=3X$JJ05YQt$UZ;W=kI{-=z-~20NRL52q7Nu##Y6BMa8`&)w5F0TXicSPbOFa!+KPQ3+sag;GL@)IB`UW> z2e4`7Z6K)1Y+A)1*^vWOstU7HWfV@kcwlgHNf@Wa)YcI!2 zJg5J)-|^&w5OwHhol2+zGO9BZvvD2w@IZ*V^uBI!aBSUiU_Re-QE<}T5AmRq;(E;7j14nTV7lde-2Zce2 z8%_WLG@Opxc!VcHG~)Y>N`Y22k{F56V4X&+(};B%TL^IRz$7*%n#OU6#|o^+M!W!7 zHU20>6Z+bOO`8-2{cA%1n$W%`XK@XjAx*O)7YMQ`Yc(AJ_Gn6SO&Qjvw7OYJc!O#* zbJgXc87N7!Ntl6IV9{nQ+Kfe;`-93>UxtmQ(i!moIZ&qB0PP!Y9I2OQIiV_I=cD=N~OR<+hp z74^^n3|H&rSP9l?%{pyZr%e@*Xq)<2gkP{4pXh&dav`EANi-2f6Hzn)MRRUP6LB=z zwGBcjC~Mn+7={tJj63*Uh;{{00`$CHOE5d_I$P#M+G5KMOa=4gdzv7mE#1M20r(F!m#L%)B z_K6vUAs7w@GG-=Rlp}_c#ZcN9N)odcyYL$h;s{RS49hBT(e%H=IG+57$zb3*`~foPK#x0o5~8C(Qlvs!ctM8) z1kjNxb*ur(-I2vRvUtaSpj915VG5>!Y&*`ue2{d;Yx$xE}=5 ziOoBaWhXZ2)DtY$iA6fGNGBHQMD;qY!v;{DPB(>!CAe4{zQ}|?WI;BtR%{sZ!1LlW zwg3u)vc*!iSTc$&3;G^Q-(%@}Y)#Yw6CB$JP0<3aL3v|iKm}qcb1Y?!r4q4JB9=oG(EF|d z2tqJI5Q^N$iwG1#6pEoF%Ah3_GbJn4>}=!5>CD%}QQ2!>-6#$o~{!Hu6W1G6y? z3$X;tu?nlP4jZur+p!CKu^)$Ul>T=+!IRTChl{v^>$ruxxQB;$g6DXJzwizpg@_YK z3Jocd2I=7q2WXMs}3Q(^uYOA-GPAHXp7@IzMm--A;0Xoyx| zIC>1hSd0ge_t=AdLi9|54A4;qRl(*x2V)GzVFPwzH)vH)TGW#bdj%mBB-@MX^`dIM zs9LW+^uO02p0G%-d038>I11*W*9E-BXCZoX`t)Xz-VvZ;y(xchD%Lw11l-$=saTI4 zVAy(J!yn+(?2`%v--ns#!#aIfr%yo_54F(*%`g;X(q{r#pbt~p=LqhBjQUW*zAVz0 z^7Un)`j$mC)IdkXgEsV?js;i*s@L}zXhUBr(l04gAh<*go?*Ry)-=9MCH;@tjC<Hh?O z;Vu0i;1ps&W@G`G45*KW7!Im1U?SFGJLv6zJ9vzzLJZ6RA5e{fc@YJI8CVY_G_WN` zf;9$C#tP7$fg5og7wG@MOFZ#Jk_5>>SzQdJD+FZas*alI17^j=thi=iAr|8xm~Ph@ zArjKT0!by5Kt*usCUgZqpU@Apu>{L-oc<>;WC;Y3@LmW>08&E$2td*!NdU3|8iC$Q zenv{n!BQ*-K}dp-1R>uDF-QS@9z=TvWk(JW$e=o)I)esc7+fQG*o-}3PzK$?Lr~%$ zyg(*D_=3{@PzF@w2YUO%01(Iz^FRPUti&;#$3?uxXCVd?$Y5GCn4cd^YX-MQCv?Fq zsy3Js4knAiOy^*JaPVc2!4Q6M2pJ3sMs9?I3JhU^A<-CzDfk&XZ~%w!0`Kurh@t)< z`=Mk%v@EKj26`a@gXsTI`Y?1Y7>uDe@c@s67?uVO(9dBdQ3+K*_QS}2m<#j4Pz_s! 
zv$%#ELJUuW)JO|{W_VrH$4?;Z;d8JP2U%qJVf=|tLeLE{!UU0z2t++JM@tL`XT%7m zc?1EE*oGb80yE+n$b2LLk7R&H2B0+9d?Zw}Q{V*6qu@A>^LWuDs zG(H`?P#auf$2Y@dP=WEYah?8;zsHjYLQDt(Sxv|Vf|x)M69{4gJ)S_1C#=F9JjPQY ze$0ey2th-%LL1Bh8UDB&*Ffcdq;eBkcVa;l#y||i2p11~a0JJ|)K233pOgZnP#IN0 zhLgr%9JYc?C+!zva&nO2WN&bhnoOA|lj&sUWb)6L4$i8{w?Obe`63HAtA6?cuF*V< z#c5mt5xadrHQZUz8l6Bj+_b^X(7EaL6efHM6Fwy^s)6aALKUV=!+b2nZ9KvgA*Pbd zRFauWGE>K6Dmd0P^%PGoSWG-3HJqy}3_l6MjsMx%Gh(JFK#!wu`1zZwh zegLS*e3F_^JLmU;=dLz?C&+sKVIdZzg9Rs=qdhuc6*gisJ_@l=LkiH(h4s(?WVCQ8 zmg6B_;k6Kp=>4L~;Mhgez27AHqjv;`Y3UXLBvCBzbfSwiGXGJ`=} z(gQ@kgzqmofI~toA-fnF^igP$-3EVP`3mb1``AcTTJTtP4^1|k8Qup4{vNr;sswK64W>q<6VSrN2v zCGA@|1UK=3NLM}*VpTXIL6oalY}E*i0@vtyzsN*e1l5(y9TizetJZm=AzIP@b!~XUaIM>j%|fha zq4g}Zo`u#o0GY0DjWyVc?Lurw1sx6$&;|n9KtLNfw>J>*2D00j8-+ksH;%+aOvWF0 zh8IF?Doy`4)!<1@)IuGw;ieAg3>M$i9SqebTD55q=-npDxoH%bkxc}?iA6V?D2Ljp z2Qu8;1kJ&kn|pxjZl?d6hk_Pw9>t=Yr+|svOdmJT!(uGMO8f#c-b`kj|HLODwosid zUZ4V7*mR38G9mz(krNEu782V+Fk1*_3&Ct5sVyY6g`~ES)Rvxb(VHzy>=riIasiZY z3+3B#9lzr~7^W?pC0m{gv6T(C=0yoqL={vA)!Ry@TN%i$v4{g1Z|x0Qvz5%ZQn9UL zF#!`nJ6v1cJei7V*oCV?Y)gh5s00?<#v(hY#*SaH78|e`4AqXE*n{725Jzwvr*IY| zwc|3b;U?~YcI|k8$Mk>4GoHM}YrMq=d=_FSgSb;g3Q+Q$>5u^iEck%3@1*QIgAj}m zgd#Vn(#{AJLKKRjB+7vH>?FCJRb4#Lr=9d^CwbVL_)Lp*w+ zF9sk14AoACYUc=y#yI>4O1yIlsP4|0n1lIPgr#t;;Ncgn!Fp`MR_wrT?85;Z#xb14 z8Jq{RvXgf0yn)+bUUuHcBRs_m{E0Vsk55AEQjiSEk&6EBO3M>3=wP^ZIpGI?FwMKN zB0F*-7s8Pr1rdp&D1p)_hl;3z>ZpafXn@9OhL&i9cIZI=cX3MW>Wc2@i9YBLTC-~q zhF~~GVJs$K65RM1GcX(Tun#z}9upPUw7yEI@v&m7Mz-gSrMO?vk+`?Vl z!$Um5bG*V|c!!Td&?&JyDKw-+8l;Ce9I)YwObA34WP>XQ4`IlId? 
zhGj2V?Y#why_cZ&(aU}GaGwEMw2uY$MS)W9qs05VU>v4^VcfS7yFexO{fSRPP!RE3 zAaWoS1pZrdw1jIu4=b<=H*g&kz#<1uf|4De{0BY@aZmvfAEe|5BTx&C!LS|d ziyy$Ld2k^J{vb1PkaZ5S&Oz2W_!MvOPKZO9K_Z93z|S9Qi}n~y{|}MTAxe0NMGjHE zLk!fRn|OdnLL5#GFVKd=g-{Zu(Gnd%8xB*E!?Q34oY#j>;1sCF5oYL!1_C&e9|b@V zM+o9bdlwHwK>$Z4;1_Jd7F-0gaO92GSme^bJswGxYY169jUmI0)cOC3HY{^uUjp1_C)lYtGP`GqmQ+XCcm} zKq^Fm3Z5m2vkd21e(r29kic1f?(8h=#$mXQ@<0X7vcNe7IguX)&=_sd7Gpsv&rQV^ zko`HbKX(%k@JNXBI{ZL8&(ntUHNae)AAq44j>Y&DwDbH``hWg+p4=1S0@+_6`wPA( z0fy>AWpqYQ^adrnuoFaokzg;9?L}tbB0*my=!*n>u?1q#5fURY8oO`^NAMCK;Q9Uk zC4V6 zf%+ipYpuX_p8PKdkN=>J_Zy_*+tAj7)@z=h&2Wxh+McNvu5RU}6lR0Yxh&P4w{4ioSMf8nhV ze-s1L{|5p8F$7fOkMTH$^Pn2{Xu~~b?p_Z1e~$^j$AsVOjZI*>?@@*O8qy;Js-Zp_ zf^6=S&3&@D|6Yg($-uD>ilQ7UU?e7DGMK9ek3f(Qi-Ye!q+JiE!bKS$QpSgW;29|8 zqmtl&M^!*;9{mVf^XLlhf@2@&LlKbeV=D2ON<5|#k1v41c+93x@`IqBu<4VZFbi`) zrJgc7Pm|F9r)>0;!FkHyJmr9=i?LLQX9`k*8F!fBip;wM(QaD8<1gh~8JG#~Hdp%9<4APl(?1G4%Q5BmCvO+T##{rkj3f1-V# ztDr9GV>%XqAV0I#=T~?w6cK@9D1iiwz$oET#1WpH#d)D9sbRoG71Tw2{DfJUgO~V# zPeMt;f=SAwBF1AXrr`$egsfm4jde8E(e{8uwPQj_o(b6y0#^?n27;0#C!*wc zKtL&qfQVC&U5cME4-4=oJ_#kIKrJ*zQ>?@WY{CWH#BF>ON-7O0kp}5t!3iqkO2r1L zDxe}NqX8y@lBc4asiuR{raOSxT9djr{ZBmv!!QD)g_0&MeBg&n2t+6JK|c%t?MkyB zXK)T=m4>X+a%@@xPn!pM5rN)dkF@NOmOaw4N4hki1?gPAJY+-wh%Q}U^v6I*ut_>L zNyjGX*o40+t)%DJ^kK*iDwLiIr6;iT!!Z(Ukbay{yo!OAdQ}A#@}fdsw9{(|Xrb2% z`tS9NP%^Mc1`f_ZX)}~USxmr8%m%&Bus|r@!N>=)@}?#H{bR+O3VO4l_gHWWdNVuT zS8yK>@fgp9qI0}n2DD9Q-gE*p6#8$ZL1xn_iVvbCp&yHxc zLkv3M5#HiGJ_;@)D33a*2Y$wE43aZR&YVs3CfS)}XFkU#p>WCPUj`2qL~7MVeKbT9 zEW{dY#SZMoJ{-VF&@SsdF5xP!!$2?~HvO~5fKu9& z(xzYbED)qkkoICM1smFAYqPL3Gjf7Ca27=+)CL(l37UVWgW_zC_UMQ%U{7Z}{de}{ z$pmb~Z+L>ggu-1?@kt94l-O)z5D67v6aE0@^3xf8c z=e{YB0R}AiAR~eif>6)`UrtS50{5-%;(-eI5|J+(`>w-wu%Yi6u%Ry-`m&)f8~WbI zBRs_m{E5GW;zzaosFoia`UQav{n*ft4gJ{AuMnb83?;#t;8za((=Cc0{qbWM{3w$j zW%8p;e*M5q_)#W5X2Opu`7slI%!J=$%)~Nm#4hZ`ejLJ4oWn(2!FAli9X!Tcp=3-7 z&N^2{Cr{{YMtYkuGqQr-W~8?nDN)9Ph(r;TMK#n%8+1ljbO%+;*aw3!1j8{3V?p2< zr(z-2U@LZDH~r7Jk0A1LkAFaCW6jH(3$$Xc<>Ck1S+B)nxhq> 
z(H}unCmee+PDhD*GP;RraUK{#4nYD!crz@Z>ID2_-;5 zDx`%MbeQl*W@JTnfuI8jI)I=9`eO*j<7fIG zK#2p0IA9IdgUI=t&Pu>O9Kc}^c>s|IoCX00+y$p!zpS?AVU~<1Gn)z=xyLbP_4j^LdmRw z%`&q|W`9tn%vnK|GKV8SXlv$36h(0~g2XtuHt?_$2XPM9aSLRa`5xFTGn-}p3-7?D zK>|sT8k`M5Y#PKxCa4gY`k-Pc38p`&DriqoZPWv83Tlitpc1Yi`Vur4BQP376*LKM z{EX>Xf@N3%S`|caL6kp;z=B@mtx&S$0-I-H^DJzhh0U{+M@2+~&9X2HS>nN-S=cMf zEc%~?#j`BON~{9y$g&Q*K}E9M2UW=O1XLkwQlx-^j3@v~o0aUcc0wQYhYN!+1jE3Y zk(GV2vPV|-$jTmB`TdWQ^(cKsF=|W1N9M~j;O+wftgiS)&B!o;uI-@JPgNlU=galcKOvgMd1hW)Ek3-gAJvM>h zLbhW+j^RA63njEP%F+MO>R`iA${N}cv?G+E38k!|qd2w%{b#q#zSjka8D{0LK=riQ1@#251R_FGzO_c0hN`0GG&uw5%Wj7kn?2LIS}c zh(ZKWh)NW4QHny8q7daT#Ap@ji9YBLf-A(iRA?|p;3u$fA%ZM44-3J_6{0nTn43cD zK*b8(1{)UE;Rk_@_<;QB2Wm#8AY5?)Fq0#L^T2-MzugIbOw!xq5)A0C}0$`8nqN_upXPR z6=!f05AYZS7DZrD1QzvKC`FP$MRHKIBAhry90&t}6rpHEs)G?L(iqLas1;$UgeQfgIPC|DHtsx_0!`_imHx<=sG|M1PDe88kZZ~=?Cty0!Q<%qmu3{1P81+VqI<4qp zmSC09Cs@u(R$-OVw|Iy5_>3>@Kl-%?m&>*{UT7Vp05o!JcuswcBIzeIxS zIQ2A>Mv&@udO4KiIFVVL%3RK1J`1?S{;L;yF#hUmxQ<6L+M4yT-x|kMQ-=xFOsAVZ z1~9Ff3X0X7gbCHm;WWOus9mWif!nk9aj-7{5#};rIm*Z|?CLOzt zyX-%<#KYZ45_{wIUd`olcVkwWT}&-&h+bE3c`9d>UW?XKezro?m+ML`l+R?CF~Zao z4|b8-2ct^W5T~9-jzsq<-KM+)QfKfh+(GJUuEYLQ8c8kTZZwk8Na}Uo=DQ$F3zk+Z z9YKw>8tHmnq!mfGkYO4QE}i3W6i*+s@U=(%mh5bgHjus-r;xUR^k+es8O?4SfS1Vl zS2H${v4M=qXDS@ZOl%-?JaaJkjE!f`XVwBE$yyd8vXinN({TN7Ta&5N~_7bYI}(RYzYFVVJ;D-ga- z_%<)oCVZRsUz_l4cXAgGu$*^-aN4#^!JwyAI2j9?b{W@lJBx7;zDH3w?OyKVue``g z1D&?Y!#{YFPx&`r@l6o62du|vHewvxu_L=MnY}Q)cGuf}Fm*K0Oqwh0^xK_v`^7ld_7%JrgdH0*9vkW~ybc@c_z6E{KYmUO#X1!0P^?3-4#heY>&P;L zVN9UIg>(q%&q{=yo3jw>S&^eLqk)Tt8&T71Prx)+EvCb5&Ok)5EI%jesE}`>O zPG=rxa}MX?Dmt%W5jSuPx1*uX-*FGakNWZiPw@=Pd6w6)n$FdXbpA2_c1$JfR4DbGpmvE~l%U zu5!A{9nK7n5ThMMn zy9MnY`K3?yAxPY;L=#ijDf}TfsoWJk_FYyYm@ofzE$=2+^PW%}A?3K1x z+TI%C)X{*#y~lGVSF@C5`2Uc@-ZdW9@-Ft$`!WB-rT4n@UgPZBoUItkcqXEFp9%Dt zK%WWpnLytZ6z%I`2D7<@TM(>Im_D!FH}aa_@ZE3__HW2WY|1u_V_RHn|E^48DiZeZ zi!t}tlEAq8jk~{vPI}2xq|6Yw{bzB`$lssm%cWe;&D_SF{GNNTp8orJ zjAvPaBkNzu8@$Qem|p+Ck*t4R5at6mU_-1eFI--Td^5AqK>kYm&tKzV5jWr{^0#sa 
zjw7#$ye9Hb@eIq+ME*HmzfQdjL(Cx5MoUQNeT)S#xQ~H@!?X~1-DR0Q(+EL7L2)YJ{KZv zLA`={1>p+!*hj&L3*G^Rhwu(4NLYBD7tv7RHC7{CVJ+{Vp`sBN6)*l2V=W%a@hDha zh^ZDO^&Q;8qNx@YE}CXhy`p+W^@_s^79V37D_F(9_=5lNE#C*>p!9>rIaoz4j&CqY z6VvIYmpnrphmj9DyusO=#(WlV3Dz*U)cyzUcF=AIm-Bbt=gS~0Nmp8r(QM3SjKj!F zQkRUpWaK3yFFCA|w_Rx(J@iwcgl0-db1bG)Itd3@(o|{WzeAVq_27s~QkNAiD_T~x ztZ3P;%bT++d$TV;;{XoEG|CE>O`)8k6-|^)p{#gW@$z9D#Y{9%HkI;R+(h}JVP7ug zavVo_5q4I-k;T|k`OiFvah0v9{5PIs1uvud@|$=ImOtQQK4o1HRz|S_4!0smWh=%q z4wI`4@9E1Frm`PD=Rl;ZII2n-sVePck)(1A_FIvpGK&ki2tg`>RIWmpio>hijv$pK z+|46A&XYVHgflCyyyD6$D{*`kNh-RpSVzSXR=x_tp)qX3cqXzvlhFK-=7%&tR84}W X;D3LuKkA1cCqDeckN^J~4z>IjoETpP From 322b74ae2eeee3aef238df5f756ebbd85a504e47 Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 12:13:57 +0800 Subject: [PATCH 13/39] update... --- .../UserInterfaceState.xcuserstate | Bin 13643 -> 13708 bytes .../contents.xcworkspacedata | 10 ---------- .../UserInterfaceState.xcuserstate | Bin 16189 -> 0 bytes 3 files changed, 10 deletions(-) delete mode 100644 LFLiveKit.xcworkspace/contents.xcworkspacedata delete mode 100644 LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate diff --git a/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index 769c914bc51effbe3b2a693f28d26f3ebb3944f4..f879a919dac67a9780764071951a5e11ce9d1e26 100644 GIT binary patch literal 13708 zcmd6N2V9fa*Z&<4BY}{i1VRW&2!ucavWK?H616HQ0UW3_kVl9Hf=N)R>!z*U(^~7V ztyYG+Tie>9-Q(9?ZEb&@cG_jP-PQLzc?d&o-@g67|Ih!=N0TS_o_p@S=brIB=RVDy zZkNxWo&74ph#-nM6o$gN7S1_4Whw9VxjY@SQ#{R!_!hr!PKv|Z(&mEZx)i^sQ-$zF zTkcTeSttTUqEToxibZM2g3?h2vLYMGL|JGIDnVn>ICK^&N0ZSMRDw=x6i``W5|#e#Zn;9ElZJiB)(M9*xyF3G1-| zn{W!Y;4C}_XX9dAg2&=<_$*w8C*rAi8lH|FxEZ%#C+6`&+=|<<3opWpu^YGJ4!j(9 z?7r%!;#9!iX@VEFl{t^F*f5Rt5Whvbqxl1~aq zAt@roq=bwm<475)BGsgZ%pfyKEtyLi2~QT1R?N@)`M- 
z{6>B!e~=U8Bt?``jz&-^RZuleqIzne7Me~oXfDm8Q|LK#DxF5B(`s5nXV96nmd>S4 z+Ce?kN0-s%w41J?tLgdlB6>N!nqEh5pquDs+E2IAJLsMCE_ye;mp()PO`oOD(dX$4 z^hNp-eVHDnZ_^|6Blk3+GhaC~hGd)8vU z+cy`!2Xwt5rN!;=`FfEI#W2EXFH#^S<5&-?ZBmS$QaN#!$Gh0q>1g3AJuO}Be22eD zky4R2u{fu)xF9pPtgI?CKd*3NW_f;Lc4k#kQAJ5feol7L#N5!tYLtY`+ff{fM;erX zv?vkjSQrau5iF8Pn3P3rM|xyHMwE<9CGY>yE-a) zpWoHt@PoNGDYB`VoH|8%7q=K&2I5H z`799kRF^+%B(JP#p4JfGCWZO0DD9puZwp@mteO->B4O||Z-&<+cs6@ir?8DvCm2B! zTJ_mz0y6iY@l4f&N?9z^9YPhLN0n$IszQ?lO=<=KHNv--cY{Ee@(i)jY*dpXcX{S= zVLO>_PwUt*V=9V^b4n%_6lNA@7lX1?73F1?m*iDtmRFTjRu+{N7Z+rg97N}!sc0IS zK37!74o5pbTYThw_0@KeXD@VD&#t!D1buX9l`~MSu*#WC-Gl5bPLQR>&bwi+yfY-U zX`Ytgf%^^yEnya#(}QNSc$WAY?57dUL-WxB)CBu-ph>7@j^EV|O5$kmY*Hxr<(-ZW zXPwv4;cM}_I{l5#0V99~%>fPIm(Otc+nAQ2NH&UT4j~>b1g5R14Y`^W%4++R5Y05c zqt)LAEQhrwr1BtGni_Ovm|}$X*oT%NFRK32 z{wBq!%7(gAB_s0nqb@ksGOyRs-HZIFT5Mg0mcyWG`}8iqquI?@y1)QE9S(1IA6kaG zQT4oP`%qixSXLm0%>8I3T7_1lHB8Tp%*0arQ2?EfEe8SB8JZMT z3tes=G-ph`4{UOb!`bfY7&EPE8t539$FL`$^YdP4m^7n)O1q<#pV;2%@2(2AX0;7h z*sFrPlbaOd&cqwM!eR*Y!#JN|wvi043o=Y;QcU<~4DGy6z#Jo)ZVWOtHz}(Br%ZwV zFC`%cft%14;aF})o6u%-3$w6vmcgw3s0a0;KGe@_ER$uiF@hDv&+xk1;gmzFQa5B3 zDW`NbxO^_qjEXjg*8z6tt#x#?^7ALVoP3$vJ;mqi;=xU(o-!z8q$5X}%J$dL8v@45 zX06D!8H$TnMK-J`wi&Wlg%s-!v{%@{o#-xfH`>E;ST4(B`Tgh~bT8~+A1h#mu!Exi zr#nFEs@m7fW(zyWFV0)lAUq81;4$>{8F%mu`Zw(0Iab2P!VbnU9moQyoetkca|8yt zd4&b5X0OOAEE&%16?9l&b`TvxucFu3*=#(Uz)Jhk>jMW`_W!|wBK3>&m;EPnfFGcb z1c&`0tLQ-=vxy@;z~@2zuWV9`{U7RoRxsdnLX;a5n&=q%P7uvk=xg*1`j%C(No+Ek z(vQAJKcM62M|KXI#b&c+K~pRv0-zXuvwXo=It>s(mJd<~|56vftVv-z1L+JeztrXF z^3^%Kt^5EGs1QKanaM+%AF`~F>jrW$xQC*(xR54s#^KCETl@o^6l8gVP3^&mO%t}F z1n+JamfHZ!o#N-)#h`~d9MOxza5$UJ?5tsc0hZusr0T^|9L1_xO)r+=7&e2=WVQ2w z%@ilV8rMP>?}exZzXgud9jzTMf0uwSj6(t%G2zfKQ>pV@B>ahiq#MWf3IQWDc^pm{ zF}W5evN~4JYG;^AFB&!w8*%c2!T1vlPkq=Z1gIv3>U7}*gFH@!AdkoiF$r$dmR zkD_n}wqhI3oZ}1P18}}<9&2QCndARR-vl#_J-Tc3fzZBkFu=WDoQw0=e72w$7vMtH z#Li{92Jt`}#G^dp;^z#;ouJI{*?4@DLN`L^PF0&eJOP&qviNJVGY9_An#*woGH=-; 
zSYt3!;3_;Bse14v=Ip^!7!Mi&c%vFnnceSob+itxGc=$Y(qq*QT!UxenYb3)aUHJ5 z4Xl;5F&A6J7Be?%XB|86YykUxxDn68^YH?tXC8q2S*(*SVP58g=h`{`pc&aKr_Ocx zCc0g%LTmv%BQ{rpskQJ^e6w7DIw6DtwjbURvLz6%*jy$g@_vAWU0z@^&?qvgaGDhTZP08 zgcX0N!mb>0B^TmLg!Qh) z7vYQ9d29t+*@rL1m*LCVDz=)f5!P!yZH5uc1AyT1LaNoIu>Lh+xua#Vo$my+0cl^8 zBK@za#W{jd+f*7E^hc-#8Bz%{oUX;|@y643@h^NMzKI3c`RoD+%B3O)G}BOD-sN&T zL;Bx?x1Kg~8{Uq0u(j+WcCj#0Ar7q_dxnW`$9JDLd=K7>?_rm+%h=^Ge8M*))_xE_ z050u8{E(1|IURmSjgp+(9IA0)Lbao7^-EY( zL$$ri)8Plo3Xj|4eN~sFml}-8CTO?2&f{Uqg;j1xs}Ft)9>-K_7I&_cDTJZ{sA^VO zC*!%3J$~|u9L35fV{Y(kuz=XY)jG_F|mMP5|lMSh5?WA(pRB$DBq0CX0 zW5_mSBL!wsV3?a1T4rx<2K+849Fgzz76^^SCE!|y3C1aktaoh28QY`46p55jB{4x!F&7mMxQpzyZ@ zJW4l|_pZg;pp5q*engZr!QwX*07m@b$JiBOSUP|oo2$6 z{BLC520q~l{3N~@KaHOO&u|sHnyqBluxq!0e|Qc*k6(b#=fF#>W7o0k;d7nfE0X`( zsO!2r!JYp%M6eVfO8aY00+HR{Ergi=hEm9Yf|LjGVKBYd@f(7!3)O;P*0R22?k#M+ zD4#d+TcRlVUi>Di_9zsn4o=XC8~}g}@aac}>L~lJQ16B9`xsl?1~ zWw){0*&Xaob{D&w?b$&rNKZ0|mDoroJdI&{LEQJSd)dS6I6R#c9D=$G>LpN?3sqWu z(;VHNE~v;VU^DhM&$4RAQdg^h2pXXQ#5~Q>%)6TunZjPrDE)fB3-YAyGN>&NCNLtq zK~_TFDe@n2fKWGbwRW6=gRmfxzl-+?&{>530;K}MW`fYcAOcUrAm$H=?JP1rl-H37 z>^`OwLQ>F4@@n7Bzh=^!5V1bdP_#hzx*uzznO zONf{FP#@_6BYu{>!VZG@zABioc&RZlSl8v9!3??1%kzR|57+?I^Myh`G=x?s5Hu+g zg_erx!aYNmuPRt?gG^{*hfo!T+X%r*2e&bTe>uO<JH>61qWx0hXby zt|0+P#Ry~1^^o(~^TNH1u(%+i^_BSqb|KP>2!QY&axr_Mhg`~DWV)fC5+b>hTm#uD zxr$uPUScl`4_Cp%OH4Ooh!$kDLM&Q=ndcTj#i_V3Z&j!UM>dih2N!e`JH%>3yf>3u zMr4tsm-G)(Ze_0xQSKl+MasW>-nWtaka;J$9nSksau>Oq>>+!}J>*{Y2AukT*qiJv z_BMNmz02O)38#KPd4N0!-}}h{_nTxg=#>+zl{c&MU4 z7a&5nqZ6)xpj#+l3T=bO8Z2rGIwKgmue@8tBe6omz~zIe3ZNPTLkHOjq|he#VDbD< zpI!xhgY=WUo~}+9aN3dN<%b;a^W=ps?1LcmATN=Z0ig~Q>+QV14_aP9)c|zihv-KR z4UogNAUO;ugS^f@9zYr7O~J>!Mc!tgu+N8l%zNYm!7m&k@3W)q(_Xxkd;}@zXRNl? 
z<*fC0&uk58z~|)Ha6#fz8gCD@5qmW-}s*VK#sF7*)jH2ANh&=OnzZsvv1h9 zOjqXz`7|zso6G5t-a|=$j<2iPHBdy>+1q%x+h7mg#Tce^EFHMnF$B-CNwKJ|jW_t< z#-`b`+~D#VIy`=ZtHa>y?r1SAbNSm0e(0a!aV~@-&2oEM7H1jYPSB9n>2U%bL>IR^ z&EWAGx;mWz3k?upyoL%-i^l?igs`(sezG>!_zu|cm+wKS3a%zhD!UkG@Ejz)+mrqa&R z;m(;FY08MfsnpDVW50_l$L#3_4UHW@iP_0Nj82 zDm?;s(f&FCfYK!a93vpm!CZwd6|#T;mj9!h=}~J=ihqZM8Zv66=g}2tVgRcGcoYLD z_fIdo=^Dz;T$Amk0im4xr}wn<0(v3bS)3}8KReG}x>jKKx9Lad6}=cVtC^IbiKHZe z)q+06F{tN5=%7~!(w+^<$P$C0hh9Uk6_e;QOdLy`K`mX>^=%F_SuvWkbsiFlAyfkr=;PG(NxY|>_j|j=liMSl988uxn5ohG=);g2(|z=Q`T%{9K1BD^ z0|9Ib;FJJP4PbKsrvE;QT zNvH$~KBrM!VQ}q$y8cX0!MTnwaiR1Ft|H_k&d{d~^-Ww3{!O?|A95s(p+0y~1728a z@w8_-gqmTN(=RxatY88L{v+siMc)`YrB~=70Ifr>l++G)eXr9ug0Ana0L}^E%#f$b z76to`AlP>UIBTQ>`cQO0WB!r1IL!ea6&+CSKX*VLg$Z6$>_RT&hMJTCs!7z!9~MtTqGyqq+C=0j}PDp0bClu zW&My|SU5S-16BcpDhC7uUPIg9@VbP^3S||3SujPYbu1GTHh67qWbc5*+FLwANz*W* zGdLVx*n&Dk5b}D2dXjnsNszl>WPRxRWyg%IAz+ zasWdln>>g*xm3YRa$~sM5rgu$`~aR3z~{hS-k>O9 zROfK4$(3;9U}+qj+O!^G0o(_2Q-U@6Jnn&1SvZT(36yeWBQi^_lB*Ji!}4o-xJdy# zBRKxdm*3n}VY|~fz?3rsxNc~>GdTN*mDh3g0bCovc33%RiRiaXrLJ>Ftb8svf5fN- zTvGtUz=lv}J{$|0O6NTi9DRnOKgV+m>w|7FSohb!3y@ZLF>oxJ0I#I}1h1h+LmH4o zO*ECJQ5(&o+3*tT6xs=|m|j7zf^fTzuBRI)1TMOh?xwfFE2ekRJ@g)M=J&%(rN`*6 zoDu}pz%Ajt+#2p8?h@`Y?h5WY?t1P9?qA$Z+>_i_VV1Chu;Q?>VP}U;2rCP#4r>T& z4s(Vr3~LM96t*qwuCNEf9tnFg?8UIR!rl!#682%($6-gqX?S$FJX{$*D%=vD8$L07 zW_V-x{P3pmbHkg%o#9Ku*M@HlzcKvg@Xg`d!}o;m3x6W~h44?pk41z>Bu1o0WJcsh zjEk5VF*|~f@J6hNSQW7*A`o#w#1#=&MO+iHE@FMehKRn1tr6QJc1C;`aWWD|(#WvL zipUv}wUKp^4Uroow?=M{+!?t$@^^_uqLPf3s3q}|1c^zKDoK;1ORSPi$z+K~vP!a5 za=YYS$$gRolIJ9^N)Aijki02*Tk^5wsN^%r7m{O=uccCHj&y>wRoW@-mNMy;(hbs$ z(i^2WOE*imOLt0lOK+3jA-zlbfb(qE;&OHV{; zqDrHxqn1XkiMk@{>Zog@)<E{(Y+W^2qHG5ccPh^;wek(}jq)4iH_JE6x5#_t z{qk+{9rC;7`{jq_#}q^nrx>G{tf*65qu8L>q}Z<5t9VecU-7WwQN=TgXBE#YUR1oS zq{<{^k+M`dRXJT*qnxRuDVlopXz?qgR1?ihgHw0UQ)fGIuctMTNPUqJ3rPLyEt}v?Aq9iV=s-pJod`i zt7ET?-5k3mwl}svc3bR@*j=%=#=a8!quQW8M{QR(s-5bEYL9xUdWCwkdW(9OdXM^E z^*;3j>c`bDs}HJQRUcNrp?*{Sw)$Q57wTi`uhrkimB-b@&5X0h)yLfs*B`enZb#g% 
zxKHDb$Nd!dOWbeqk@0cyns{xzE?ysRj5o!n#^=Wu#uvwrjXx`Xe0*tqd3ofbD8D}%~hIfH0w0$H5)V=H8*N@Yi`rrp}9-5M{|$n zKF$4_2Q~XOhc%yRjwhrh6eqMMoR@G(!et4433n#!Nw_CrU%~?k4<$UA@NB~K2`?tR zobXP<2MHe~e3I}*!m)&}6TVG2sa0u5X%n^S+AM9hHdkAuEzyqCo~^CY+O>1F^Rx@J z=W1KEF70A%yVj#!r`@J~N_$9qMEimEBkd>JPqm+GztsMs{Z0Fa_GBVXq={jP5s68O znTZn;7bISpcvs?q#77e!PkbuznZ#!k-%k82@yo=o62DFSKJj?sNnNx~sT-w>)g|Z> zbxAsd&Zx8M%5^omT3wxPmTr!&QP-w(=@#kSx^CS{-D+JxcY$uLZk_Hr-Fn>y-MzZU zb%%9t=-$-5t$SB@ME8O2Bi$#uPj$cP{?MIF!bvnKEGZ&Mk`$FBOOhvLB#lq1O>!i4 zCtaDeGwDFmYf0}Wy_fWU(&tG(C7sZ7dYL{(uh47siTWhHL7%SA(r4>)_4)cTeU*N) z{v3Ucex}~8uh-AkH|po>7wJ9vC3>HJx&A!;O8sj6<@$B{_4*C^jrv>kJ^DWVR{eJU zt@?fX$MhcrXH*%rMvF1iIL4S`EHoAy z#~RNv))?m*Jw~sw%edUQ!noQPFkWc9*m#+7lW~i&&$!jN-MG_utMLxw-NwDfdyS78 z-!y(|Jee#_HY8Uh&q`jDydwFU6CO?z>Z1M}qFDHMN{9W=drZAJmly53Dm6*;l zO)%A&T1`t$mzuVkc9?dXZa3X!+G~2p^oi*+)0d{NOuw1_Fr7>xDMcxzDbrJCq}WsH zQ!Y!nE@fxR?I};BypZy9${(qg)Uwpd)T-1esZ&$yQfH;kO`Vs@r(T`9KJ`HA)2YW& zzca_1Q_U8$)tqI{G3S{Z&28qz<__}`v){bbyuy5m`D*hz^Y!M9=3C6Y=6>^b^G@?? z=3{ALX)$Taw9#qmG*en`T0vTI+PJjwX=Q1ZX_L|#(&nViOKVDNPUF*B(-x(<)4J2v zrfo^voAz?rS82amjFv1*jwR1hXeqXgwbWQ@E%lb!mPX40ORHs(rQPDOthB7QFv|s& zeU?WpFIrx)ylOd|9+h5~?nrM*=hNHLH>Gb&-w>n7_h)*kCU*2k=mTc5B#Wj$m)Z2gb*ZR>m1_pQgR zCv4OfZj;!eZ1FaoO>Z;W%(ir!&6Z^=ww2kcZFXC|ZI*4Gt;yDGbJ~{L*4VDKZLr-O SL_CNP@tbQEzeAt4&Hn{L%Eg8N delta 8293 zcmai22UwF=_rK$Xgd`-GgoK2wHf?>YCJ-#Pbp@9YsLhtF5w zQOxKCD!V5O@BkmkfEY)L$&cQ}{W& z0dK-v@LPBr-hto2yYPE>5B`Gyf(Rju7!-tp5fAZE7>Yy|WJQfp9BP4DqPD0bibn~k zGg6~u)E%XuUMK@)qk(9M2j!w+s0bCK3RH z(T8XY+J*L^L+B_vfj&cDpws9aI*)Fko9Gt$7Trb<(IfN=dW@c+r|55tumB6O2!~=Z zR$wKLz&fnQCTz#ixCw5J+u*j?lY~3r&Nv09;l8*Z?vICJCmw-|a4{~y6R-yp%;AZ6 z5}u4_;#qhBUWnhp@8b7xEnba3#GCMDd>ns@PvFn+N&GoJg}=b3@t61-aYQ z3E#&*;|KUD{vH3pKqizCGZH3@X~cvxEMtsiY)lkmXBsnYnT|{n(~0TKBs1NZ?o3Z6 zjmcsLGPz71lg|_}#Y_n^()$R-;~6C0+Z>M<_9mIcNhXp>g!S&iht+YwS__(i=0r>+ zwV(xPNy11hVROQ)-sOzOdz(p(Z3p6kwGOlg9Y9CYh{%be4kUm?kVGPgkyr_P4kQC1 
z=mNTeZlJq2El3$GBhs9(_TCNrrrxiW!ft0lPml_FfwY0ct9q!7je09c50*H)+=Y%Er=bOQA)aa$U{{Bv-_mLvg`q_tj7z~Dh9HJs>5=k^GffM9{JdjVcL`U?*Ko|cYWM&b+@3^KhhNji^_LYLNoG@8h zX0NJBXKs-z*_~JEE-i7EkEsKt;7ySJMq1{}wsRl^2m4O~J=tcP-cnc9FIufpiMk zuooNv(ps<&>?iF=`&w`i93mY^M+&q@YSx<;+GL0?>TiNiYkl4ejQR|mdTrDf;511j zNhH3vDRJsh;QOl&$9ZsJ$ctbkm6tnd9L|G_AiZB&rvKABUmTut)*pqhz||UXg>)fl z=fT(DI{1c0<_5S4Zh>zHRJe*LqG%)sRJw~@6_w87vO)Q!c~!-(l1d6Z(w%f8T}dx* zq_~$F+yy`Q65@MskEDLU<8%U?`t=iNZ zd=dO!>$5Mg(m%m7U|q4o7n^#lJcj^CYrsDwqXt6KhdL@jVn_*w5+Gd*d5{l7pa2S? z2!=v2$s*aLFX>16lL2HP8MGFL0X@irGAM`PPyzJh4I-jg8BB(d95R%?$8TLL;X&30 z9W+~E6tu%=l1uVPejSW~jp;gEg!8ZD%dq0kEx_(ouqA8-Tf;W6Eg43NNExXhqhCZ9 zc7Tb#c{;*)m_Q0hA#vBiB-n}0Gn|a@&qEq{Jq=5PJz@I4qiT7Q=&h# z0Av`lJ5{-h@&hCL!#Dmlaxff1^}I>SeR@oZVf91%v@-+-<-PABku&$=Q7z@Y21Xv9xP>+luV~LxLBjZ<7zi@COoJ5}-^-eXJKnQ(SQ`DRZXT7Jw zt6PP^vv3YIU@n|T;T!8iqA6(~P~>ix{zN?W#w>sf>-D;7;R29e8Yb~hP$+xGfp(xd zhzlF;hdO*4?5+V~f=Q?P?{~oNm7pVh4=#htiGoZfQ^|B+6kpUxSm!SXQ(h^c@|T@! z{hiZZR_oA#FFL_3e`nW#8#cm^fprbs1UJJia4Xyfx5FJ|CYeQMlR0EAnMdZYfji+Y zxEqM!79U#40LIv_2e^%T4atZU&H@HG4qo}qp`2hYO`WD$9byiFF9C1fdihrGKMUIKdf6}$qk z!fW*HI(d)kTt@!kU@!-J(D&_x9h8#XB`emMo9fJU73GA5(a+4n(ot#7k?sO#Woh|f z+R!E6P2ZBrUCx}a#`N1eG(zERq;`AvsUo4$dzGi^=g(&N1AGFk>)?;@CwL$J3?IOU z@Dcn4J|@e_`(y>FA+@BAtR$<*>UHo}_?yor`u`_<2LGa-Swjx^ZQ4$*lV8biG?Q8H z4t9GO3PA!S1VSVNU6B|`yyJ{LSh9x(VlCN!5j8?m+9yMD6i)j^C<4wxDx~(_GxpL_ zseNQ!7`T8mNK2)3NRJGNMMh+T*S%S$3=i2!)|1`j1F}6QtV#d8(tKAeU19z5>5i9D z=_*bxEq1y~V*8brkEkef=D9K}%d7G#tIA#e@E{wwx)MbpJBmgQ@*&wowvrt*R45iT zp&FZ_W~lkVlzduQ-NW3j^6Zq7eAj655!paC=7cFdsa?DE&Q2+I7Pz_;msO7G>Mp8u zmB$u_A%6m(R;YDOn1))ERGwGp9_ibJsd{%wfq$CbPIq}NY6H^$hfKhjcBnmSPBxP* zWMki9eog(uc!ADDc%61`qoCM0Ahm0%d!(zUyE4$(#ozhye{}jcmFu8C>Iu3MCD}#l z_bHTy(!KwP6+yMAH{A~CTS)zC`k+kjDRT^ivS{<)=9q+6gNORk@I@dseX7Y`8r^-q z>1YC=K{Nr7m_|FTewrLKG$+g!0OBT#SXhgkJ}11=78Xi(CX|QrbHYq7B?VN{<&%81 zP1TTAr~tX?R*wqF!5TE29HQ`xvo=XYC8*5TTuMHvL2r`7gw5(j@emkPg~kAB9U6&7 zq0!_BIZBSzp|R9kRAt!s2gTW57BZ5g;qDvsU!NZJq1fsJ 
z`!=*4?LZ%s3*@4QTq2kM1MT(T4j|_h%mtcWL0X{cm>*wX{ZD*R68<7dDLiAn>lukU zv$U!_&()dcZoQFT`h0zcd`;548|=DhbOBwVgb!Urm(XSO75RqTAUDaamFOzE203({ zd`s@po#`?4=->8^(dZ7Mgl8rC4&6oHqkH5wxkJ7qcUOWuYU6#Nr*&yN`JPrP*6WTQ zpN)P6S694@>hI{!09{1S$Pa|AH|aU0i|8K=$dBaV3qpZ04yMEpGdPI+MDEvO9_Evu z$%6*EnBY)##S$!iO+<#}E-0+*>voMYC5|ieZ|n`R$0{62ZNO^sq=puK zTI9Vojz|+WVD>fgh0WONle3VgHP}Xer*b}OiyhQf9D^H^KgeG%ZEcFp-{d)o&k2*z{7ZJ_RuvSu{F{g=@r{G8*_({J27HFQaS${( z4fpUF(i5k0kkSa|r77vS&uh9eu_udzh=bTqnWsd&Hv0hl#(zi;@e2o$cvE7@iq~Xw z@i0oFa30RbE)McI$md{49WKCy*v&x!2Q?hj`i_Ez*HD<|C6T?7aVZ{2IT0?yZ{l)X zfh%zp2ZbCIaWIsFVh&0;7`7UZ!lUsRkcY>CJPtPEppt_TM9)Dr1s3b&Y1(<`X^h?> znr7Z4jW)n0@f19b5@kG-NQ006TwEiE;84hQ8mcpeAC z3HwS(uczMFi{GZa9xp!4bL*S(befP;}QWk13j>Sev9k*31H3|sJCN*eH1>b3272mToE#JliryoZB2 zzwfAP3>;)RXyl-YgJup|)=~HE#|Q91`uhn!OrJ-69<*}M#zDL9HJXFneZMKxr4rpq z4?cs>Qp2PC8wfs+FVK@~!=X0QRar+{=qZIByXl8NVe#dFJl%lsRZ1}NH4Zu&7$*J( z?5@K%@J)P+gE1Uz%E7po@VtY+_W|xZe3yfbIT%|De(HpOAQZ+;NPHi6exJ%Q83lpW zKg5q;7sgNUuN-W~!REdQ_kOVxdx!Gv-s^mwCu3M(?4S7W{}}rm|HHu+9PIYO=?r3c zw3;!PVVEF>^1hZFY{kLW9Bi|a;WHtOfT0j=%fXHujOSp258RCcXTj9cJg4uNSTSf= zRZ&qdS4D-hz~joQ98=~RP*IiZuBf2U^ZSdD(wfW2IM|M)T>;-P3P#C9Fe)Hq)ZjW3 z$!HiYqhoYP4QDa>`aPb}QIOHIXL%*1Go4F|s)|d}s)}=6zx`+amP3~nD_!*Oe{mM zr~XSgTQDt|R!jzsZ4w8QIoO4RT^njC)6N&4_8jc=-{KSh@Az=AbHE$D)x|ksLsLrX zm*FnSE2_$OU{qj;?$uTCi{-XigOgfW6OPK%Kh3Uh=o`H2_)68S~GX0qT9HbeQ&e8L9!}W_+ z$P8kJyjts-9A+p7dvP$0Zd1n%CJ*Cc3SSd&Gs8L9n}Zo7{`HfNDRCBgb!%Wsnex|0 zR`{mQ^iN%XY|5^57r86lv=;jhN|Ln9o~|*j{CeW4`sDP2) zkhi2q*#z_>y>NYwgK;BzxvHg?t1PzSD0;mbgFEBlbjO&Ar_*~=TGsJ=dULu6*Wp!| z7FxU>e}p&EyVI@o-t;bh!U&morZ+Q!UUQCNCNPAV$V_8qFteCB%ns%u^S3962?`G4 z2MK~21<8WKgOow4AZt);P`jWGLGeL}K@)f6Pb-W=XM-V$CdZ#{1(?-1`W?z4! 
z?=tTS?;7tZU&oK(XYmK~UHl^c82)(v1U|=~#Gk@n$X~>No4=GOld@A@_ z@SWhE;77rI!E+&BC>8322BA@C7FvaQ!Xn`q;RGQOP83cP&JfNL&JivWzAM}$+$}sT z{8spbNFXwZ>>^LBsJ*CzsG}%BlqyOWWr(sweMS96!$f7GYSCQLe9=PDBGKETC8Bpk z?}?U+Hi|Zjwu-ikJ{Ii~?Gf!09S|K7ofKUZJqQgBjSOuX+C9`6S{%AAbYtj_(1W3; zLeGa@480tBCG=+Kx1o1J?}pwJYs9U^y~P8>`Ql;XLh*2qxJX`BuSQZmGqLNOEM&xl5EKk$xz8KNrhyrWV~d8 zgh-}IW=Lj9=1AsAmPp=}td(q(Y?f@5Y?thn?3Wyrd?Gm_IVCwS`C4*Enj-Bf&64Iy z-O@6tr&>BwI$JtdI$yd_x=8xAv`)H8x<Gt8BY$zwEf|gzTj3l#u4t#|pomu_Dmp2;D7q_p zC{h(^ih+t^#W=-!#eT(oB~YrAamqwxrZQXEPdPw2NI6)UqjW0slrH6HymEq) zC?_f>E2k=_D`zTaD;Fy_DfcTcC~qi#i%>;0j!2Bih!_?@BHoQy7V&;WO+;P9-iV_S zpGRDdc&6g1LR3Oks7j)0q_V2os=BKNsYa>BsK%+PRUQ?m+N#>CI;uLZI-xqLI;A?T zx~96Ty03bmdZc=+4pmFk5gv7GbqDnzb)kB=x=3B3u2PRuPgYM=Pgl=WFH+a2>(r~% zYt)<7`_%{4pQw+hPpQ9Ef3JQX$%||r**Y>ea$IC}q$iS#oD?}ba$e+uNN?nOk;fuW zM*bT4Tw~VQHR+l+G&vflCSNm5Q>dA*S*BT`snx8~tkJB~Y|!k{cs|h_(Hzr!syVGW zqdBL!pt+>Q+DL7jwu3f7o22cm?XAtxI<fcqH2n4H7qu~ zXIO4nVW>51G;A~MFzht!GaN7+G8{IXG2AxXH~hw8md^@U5!;BBvEi(eW!Y%9HQSEu zz$UOsY-hF~o6ow~VQe8=#g1mjvem4I<=D5_|gA2W0+B8v>NS3hcVXJ%ou0vU`#S58@m}(jD3xRjDw9i z#(ZOeak#O_SZo|=oMfDCoN1hGTwq*eTx?uwtTk>l?lA5$?lm4T9y6XWer`N%JY#%f z;+vvOElr(FT}<6gJx!i8Q-&$a)&*PC~m_n7ya51Egc zkD1Sz&zmopFPU$f@0x!w-#0%r|6=~f0xZyiEmljMrK`u1Vo9~6Tl!eCE&VNnEJG|# zOQmI`WsGIK#be5M{AaKuyu&lX&r7IWu0K1 zY@K7BYh7S{$GXhA!dh!xZ{1?uZry3!W8G){%zD~-)_TEu#d^*9jrFGWw)L*{p7l5D zGwXAk2ik&dAvTdsY;)T3Y-P48+i2T38?jBYO|?z8y=7Z&tFf)Lt+9Pz+iKfk+hyBp zJ77CxyI{L%`_A^z_J{2++dok-Dmp4Ds&CZrsOqTsQLCbMMeU8+A9X0|uw8D~+FRS> z?fvZ~_J#H>_C5Cf_Cxl=_Al*U*{|8ZvETC8@7RB~Ke9iuKehj9|0_BunjbBU4vlUV z-8Q;IbbR!@=wBQHhs+^&C>#+E)?sv59I=ijj^>U8M<++Jqno3LBh``Zc*Bw7$aT0J z1&&I`7{@qAwPT`VietKCrem37wPUMer(?Hcuj7#8h~v29gyWLqhU32D7snGvv)>%g g9M5B5431I6M8!0YiStt|fKdG}Gp_zO@Qi8sfB6%$UH||9 diff --git a/LFLiveKit.xcworkspace/contents.xcworkspacedata b/LFLiveKit.xcworkspace/contents.xcworkspacedata deleted file mode 100644 index 7e1121d5..00000000 --- a/LFLiveKit.xcworkspace/contents.xcworkspacedata +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - diff 
--git a/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate deleted file mode 100644 index 6867e013df3a8be188e8fa300666d512aefbbc8f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 16189 zcmd6O33yY*_V~=5N!OI5X|pzM(x!Wxv`M-mN_Rv`DYUed7E0QNHgrizQg&pn$}TQ| zD~q&%2qNOXF9-;Tin1soh{!4~hzKI`^glPZX&b0M|L=R>_kQ#v$(=iM=A1KUpP9P$ z7Ms(RntBjGzyJg=5C9*6NFW{)J;my9+U#v(qV08)to1HuRkX!X-(;iB<d!yRxZ+yg&=d*P?>AUq7efJfjNcov?6-@^0o z3j7sbg}=dT@OStJgBbx6z(|-7CX`V#;a~w1!9+7LOaha_q%nh-TbbLKB4!A4J5$1x zG1W{B)4*66JJZfgW*p32%-zgA%v@$3GoM+&+{fI{Jiu0HS|0B6Z>Kz7Go(6$B{S+$Kp7w#|CV| zSvVUH#Dj4mF2Z-<5x5kO#FcmquEMRj4cl=$o{SyXiCwq@Pr+01G&~*8!1v&}cpjdQ z@5hhf$MFih60gFm@e_C>-h`jVoAJx|75plG4{ygi@NWDeK8lawukdmFH9m#U;&b?0 z`~&_K|BkN_SsUtvGaOL1Crc(d=xu)LRSe^&PF&HkW5&8HfO}8-N@rfD))c7*K<7A|O7*m-rEX z5>aaHS z%vx@#%eS|=9QKwLYBFlYxAjHI>t$_vZeDJdInAig%r+M2({jyu`n-%Rv)-JWo}W>W zSCnebG}elf`vTyn@10Bp&b<~PAw0my7i(01sQK}UyIe>05%I)@+ zI*Vhdv%==I)wNKH6us{(b$MQ|lrv|hr)MW?jj0*4leIIAX{o8n+U)eK*|Yb%*EJQG zI)Re%YQTPw2GT(W$Q;WrX&YtM82+c#NmoAC;jm2aq8+lvl$4Ed+f&aP4Fm-sb|bh2 z3<9@;+rVIu19Cwg$R}bFOe7?Pgc2!{5jjz81ch{shJf3_P;dwRRSbp`C0(aP@+^6Y zd`W+uCTB>^I7(1qgUw}k&SJ}S zdxxu5tml~FktVOD8||{S*j&>&kY&_oxGHScsXXi+xcqcpi{}NR!0NO$w)F#n!;Xh< zvpPAEqr-}9E!I+ttErlQEVk5Hd-SOa)N?Ew3#vg47zf6KS}*}vKpjz$Frp^mB!Xy& zmPC@Mji3Qo-Ao$-Y+w>-rXsE*>29ValKaS$WCJJWV_V$xmpiOfAY^n#o-M=e4c6K+ z%BBX3qk&^9hbaGwDqaQeANl99j=EObdu*S9;t)Ebz&g>=(c&tw)^#-U{Lywdm2$(K zE-J>OtnCh~lm4){sK2EO6zuNaXm#}EG&i@Wor81`Oas%w3~F9xf>~fTxU*I)aoFuH zeknOpN;vez@RlZU zE3fyDaC-kJi6;XNfE8dR#dsA}%O}S6&N13OJ4v9rnN(F=R7`EtFq@0F5?x?ztyoQ` z>}3yyZ)`?$oe;ZLgxI*|2qISPz0U=8^I9Rvc1LQSRZU zXC=D8Ru0qtUhyNJ2QSo$<8S0|FXi5X(FI=Q!2RR?-U9kE718-La-0Pg5fhawGXea1 zy}_;5Y%0;+cKRLgZml?uBGt3NUUte`gSx;r4yS+GohN%c*ikFi{ev00z)lYOKk4dm z7`wn85ZeiMlZ;OA0m&pawc;3Fg}bfzUs)CpfRDi^9Dsc!s}p=mvPn((a4PMdw}-$N 
zK(ZEm20jOe$v|=o8MGE0q2hg%+)AZ+F&CO7cuhwQq{~`Hol9Gz`)7fT`eJ*Vh3Yu9 zWm50H))tydSQ~C^A7Zt)T3wFm{Ca)Et=CC%8>e4<1a$`d$OSTI!8!0PI1j!9--92> zV3I>}Ngl~31*C8TxBz|v7s1c;?-y{H6pCG+Xu0<5N`ojPi2!$|+l#mf*9O)o)2;o*o z@3v{?QXU{rYN^9I#b)ntdNt;A!6((We)=1n_6|qAwU|06gOhXjQ{3)8sh_U3d|O@z z^$4X+7N@n_)zC1mpWbEN0^nhW$BDl(GOr(kO8dVDYE5S(togb^^Z3u>U2j3Q%5wFiI>#sWzf zjD|6!jFfl5I2cbxlL}Hfj)LhG0M1QSlyh;@a7$yG&DFtq=g3}0drjCoOqXNL0o*^g zWq~O$wWg%3N5`gj!4#0?!Ndg9y)emunWT!0C6%SRoO$tmpbUbyjxQ-2-r?e$vpYCl z3kPxGDL=_zm_vK5*s1i8TGtZ%V9tqq+1JL%jeLuxOe%$osurceLtS2X*%2{Us>L26c7R$*>o zL1uP#c79r#DK|SiuVBnfnspd6jk7(gTnB0RyJiik@ia_?jouBDU^Cyqi)|Zh?}T>J zLRu?tw_0fOM#Y)tE6@pDRF=A>pv>x8OItcX*65Nl`rrXR6;9{WV;Zq{!WpEUTcbFB ziMnC+GOUMF?nuV!JP7Xufp88aU=d`&0(ckAeLV_KCmv;KYqaLuJK9_{DJr(MHM*L} zWHI7#y9dstz1@H)GF@;U$m;X#L2dzD=uP>(aEW)r1MtDVtXNK63w(%7Ap;J=M`_eV zV<-3+d>pQTE32Gt+eD2vnNFsWsf0$<|3Sp0%YCclkFi(w&hy~72CfCMU9b~&kr`xW z7hDI|lUZaosj1-YPz7(UX!y>vwXgNMA$EkD;WM@3a4&;)qa|Gnx4>t)nBwod^&hnL zi-d=e=iv)HEBnOh@MZW47pKG5$vkqGJFK3|3*DO>r{5xX_XNrCT`ou__jt#|{l~F< z#M(=JtvJ(7w84w?vX1t4yTfJR`RHMUcb^0BL%PHtfmv`L*vQpj>ZpsVrr!>03r!}b zSZScRh!}XG+Yb*=;inF|3-0G^AuZm(LmVBSk$cGin)A)%bTO4H24rP%wE$Da$php;vUD9h0l$GK;VH6=JWke_E^g>tbmhnM?K&{6=?UQApA4i{gaKqlC&Q4Hf8%5sAL?WoU&fEDB5Qh_EEC8K z=EJc@|2Eg_Zp*NA~`&@WPK+X zLK(>g12k!N+FPtNf^=CO+?seMh-Kouj17~>Bzr;$Mo*q1HEvcgsT@T{#zZ!f&Ak+* zGg)5fWitcGCh{~zkIDou(z+bm1TXXkGkIQP@|gni4B5hkeIh!lZ+$?QQ}eFZ*rCj@ zo+T(ITRq5);E*e2Mv~{q3%$sdGnIWOf*DK|jU=8YmAag{3NNtZmbNxCd)QE?lS;a0xtz=l5c?$KVmg>9%v5F?Go8Fn-XL$1x5(S%ohO-@%q*%z zG0Yr>kax-ZRFNJfyT~36FRsd3IIW#lf~*bhc#x~FaFwd=GU6y2y7LKpS^jV?ujp{{ z;zE<6!ZuC=Y0AUZPH9J4`S6&koBMF9=pm3VM{)JE9#Xw<$g@tgJG#Y^&ck*0kh+jr z3?!Y*BC@TMxtF}>kq=(;bDe#=KEN!a?umJjSxUB(9bL?FW;xkO|7v=zZ+WS`!CC5M z{!Cn5G%LeA+Y^;AE1A_jQ$9gJMumGf&91uL?aoJBLz|-6$+!C`2q`vv#^wf<5@U|g~V-P z@+e--?Ob}}bZ=(Rq6)Pcs_tqrPq@2c;ViN{Xo%vh;rBes>YJ?G&P*u(II5$KI%$`! 
z&0Q(26{i+koUVL(E3Gt8fW4(5epnt2h8o-hX>Fg@;;4(Kp2TCG_$?DES_)nez2htn z2M-rwP$G>89s+~MO?Pv60)h`N}+EZd+VT-gTQfrint&9CmV%~kR*TDGBF`KvO8TRTxHjY82# z@>`$01(l=GT;4){C)c=ar91Js_qYd{v1nZ1!~>0|iN|#=@#r_o@3+wdssT~gy&hT7 zMAV3ykd6GwLdZhILIDeX*P~|Cf?81pJ&86it8^0sq>@MWfidu?$kArGaN`ZBXwS@+=j+9aw>Qi6Y z>P)efHrd;(Bgzcz-A#RZl@$!@?T~FS8=MWzDIHEnN{g+I`#;0rs%z+r)15p{xK>=? z!Rg;r)>GO$>ga)sCR%H!A&u42I;wnlX_>vg+3GU1_62=6589_zT=f47nqt~c_v-ZY z+u>?$PvJfcO?`nc;DP(qigW%c@On!-xBo%$&h2Qh+4K2FiZfrJMT^k`oB_BOEkXC8 z`&k&k!ax=ZSs1hqJ&2Zq7_^*)16U{`kFrqAiJ+pC-+l6^V!3Cxf^L1^@Txn=%5Snb zEVO#;a2F27-EM2J=C-u(F&-7E8wPozDDP36lKgrg=FX#hRV5>9cGXOy*-Q&8CQl@T zRwJ6QuR~9uHK-GHu~5Ro5Eh2AP`VDSL+dFm8(1h~p`4>d@xP_zGMRGkj4F49C@n41 zO-yD6m!f(W<~j5-N6ho+1@t0%iG?Z_hOto1!tizI74#}4=5-cEuu#Jhqy66!b7f)n zX(dlgR#w{VF*A)>roOc7Ky>G09omWBN4wB&7Dlm9$HHh9#;ijhpuLoqk60MX!Z?nW z`2Q&_!)h|_`*F9M7LzfBx*KWAOv}s5Ey~juZY=N*Egzc-0m^Axy-_W1o*Mg%)+b#*bn9^rnVr5g+siS3TyeLy0upPK)?Qh zIu?3tppWNthh_BHLF;J^+%8-1Ub4QvdD2vu%WAiDw6!+RsIQ+fxqX_&;hN@jHCs~L zwc`}JE7d^v*Ex?(k4fll?VOLcJ2{`rWuE%p@Y~YIjo}y`&B3+ek^Q3S*>>+mrpNu> z=p%19z~K}sa5$BP`7~O?X1>Y{hTwFZfiwBF>xno^ zEsfTaj@CM>gHQud$ie~^(rt?WbA0Fg=H;tbS8w8l^cFmbFL(41a-;68#kbBOU1ID*e&xs4?L$VYFD#WQ3qt_PB*a5b*MoWcHyPhd17%yib*Qt_MvLNBZcjM0?+cIhp&0_-?A|bU(UKR9 z2zwfNAVro|TMONtllBF`)l6IX#O#I{X{=uFpowX3Q5H)Lg4vaFfZIb>#;QnO9j#?*;sV`g?*L%rEF+Tv)JQ?jco_>DSZHP8#I^V#{4jomg^etvp326d9(&VdUYOD-+3ujpeyup^ z?>pvM>YK}~^n4e8WH90Hd-HR+J)SPd&s~_IhxD{D4A9;z<7+Uj2;NM~di*4}49zTT zaW8`|hlc}3YDc>rg=ZO`!O!(K{(1ZY#|xT+w!3+u%MtScMh(<@hQ5YrnegVR-^TCY zcUkCQp_7GlA6hr~CodZIMU;B(8Me|Bd68DBq< zSQVy@j!816WpYKWgYePRx7PT z9|)ENgvgZroGb1Ml@d$@U2+9I>g!JeD@w|W=m9<2lNK56j)UP5ngDHNl#aHS*=AVj ztP_h`ER9b3%$X)#PApG+aFCcgDNJ3Kfy=e6Wz*?-#n#b7IbeDAmWKVYaq$WLop|k@ zuq!^X_qc1aKERNYN^z)cvbn63GB++|+_B}nR3JtqfPyobJxF$$66te#2JP5;{GcnH zYs{is{E|MLY2bO*H88Q?c3RLXDWlu=w9wUkf8oot!2~kt z`PyOhodI#YY zI0Mc>2z}_S0~L&<2SM~O4GyA5A&Tjlhf+A2-Yl3v?-V#-2b@7~5iF&*2VR43!fia4 z>CPDcZ*cc__ZQsG!Vccu?!h07>)EL1;;XfI58ey1D4)3}k8pp457O!rwGsR9CwLj& 
zj}N#l#Z(qfqsD@T(^)v6YY4YBEA%Q=&9mK?g|?|4r(hGtGlF{x8x?As+Ad22oN~c0F7*(Qi zXcBUu+4TO(LR!mOf|jD?^bX6T^oGi3^d`#B^zO+Obd}yexsLw80E=)i&BUZMl~Up` zdJjcI@1KmK8uuPPMQ?$G2{Hvaf_y=tV2GepP%UT>vtQM>hbP3i8 zo)kPK*d*92ct@~JuwAfIuuHHeK1-jL+*n zANlO_`PAot&mo`BeZKJd(&w1Zai5=ke(|~DbJgdX&vl`_1*6@3+uzvELHE zF29%jcKChncggRv->-hZ`Tg$qFaH$(EdN{lZ}lJSU+iD$KhD3_-{N2I-|9cb{~`a! z{a^Ec&wro)m;R^y&-#Ds|6PDKAR%CIKtaIRfck*@0+t259q@U;7Xe=e91A!ea4z7x zfFA-b1Y8Y_3XBUJ88|kO1kMe7I`GxNHv-=Zd^hmDz#W0#2mTg#J@5}96e6KO7$B4j zBZOLElrUPDB-9I2ghruRSSy?=ykGdJaD{M{@Co5&;TyuYgzpHq3AYP(3U>+j2oDR7 z2#*TC5`HcGMtDkiMtDwmIS2-+gHnTvgC+${33?!CRnVrO%|Tm&wgx>P^kUG5_Sp`y~%bmPuAhHb^#0UXkpO?3BDO z*)2IF`AqV;Uqm5*{)tq&lQ2q&37IGC5>Q z$h43dA+tj64p|iPM99XF=R@{~d=c_ns3#!C~W z$-dZ+YZX{YpQ>8sLh((Tfn(p}O$(*4qd z($A!arAMSkrPpL~*+5yDtXwuuHeNPC)+%e4Ib|NP=vc0l{vJ-`51Y%e4M;i-Xw38+vStxPC1d!mCu(il;0bSI z`Y8RB0ZO@2sSHzwD>ceUrA`^69HbnkoTR)@xkkB9c~p5^c|v(gc}96oc}?Z35~>EM z#44#uu2QPPR57YVRgx-CH9|E?Rj#U3RjH~~O{!*9tEye)P`OmB>K@fR)k4)`)e_a? zsuil0s@1A(s*hDCR3}xZRcBS-s=ibGpt_*CsJay96Xq8d5GD*85GD?jgoTF5!W3bu zu=KE@VdKJD!tM!sH0;H&Jz-ymeH(T@?EA3GVOPSgs-c=uV|9>vfLg3ps#R*WIzgSR zHmHqivpQXUo4QavL_JhJOg&sZR$Z&MsO!~D>PhMrb(@-1FH%3EUa4NKUZY;G-k{#7 zep>yS`aShN^+EM#>ci?|>f`DY>XYg#;kxkb@S^b1;g#W4;nm^e!fV4V;q~E;a98-0 z@af^R!smqF6+Sn7e)#I}H^O&^ACHhkL`MvcD2^B%Q5jJcQ5`WZVq!#7#H5JUi1r9a z#O8=Y5r1mJH1V1wjb4+gF=^5?xte@Up=OAtTw~GHYpj|^jZM?6Y1Pco%+lPcA)321 z_h{y67HGOOPiwYlwrZZ&Y}f46?9%Mf?A099oY0)noYS1wT+m$9T+&?D252?fWNoH4 zTRTWQSevIU)RtoKaJvrbR7_dOd1) z)K5{r=@^|*r_@F1Vs!DkB%MK*sT-)fRX13drz_Ca>gMT|>)zDu)a}!Kp*y3yp!->O zS$9?Udo+&rjSh$wMvJ0@qm!e{qAk%2qnAdnj9wr8eDoX9Z%1#7-Vwbk`jhAb(Vs;h zj{Y+GSoGBxO^iNfbj7Pi$2=9YDQ0KP_c7OE zqhga{vtsjOOJmDoD`Kl+YhuU8HpjNc+G8ED8)9FKeI@qw*tcTejeRfntJt%#=VO0} zy%2}vGPx`Nyn1DPCA)%I_YfE#iZ-W(&Xgi+~kVnw&eNA%aZpcA4)!&d?xu) z^0nlD>46^U1N1?9u|7mE)!(kK*U!?=)jyzLrhi!fn0}@H34N!2i~c43tNJ(eZ|isJ zckB1+Kh}Su|IH9*C^OU;>J6=i83xvHk72%Hk>NqZa>FBr#|kS(W&lp}eY%}aI z>@s{{_|$OF@VViL;i%!X;hf>J;kx0^6edNGB1{pbNK&LJ@)T`KTuN$6mT{GFjd7## z7315+oyPsf1ET~rd_5F 
zOb1LyOvlZP8JmO5Vza~?Y7RFin+;~8InA7D9%#PRoMX;2k1&rkmzgWfcbXTQmzeK2 zFExK*K4U&-K5zcPe9e5_{6`v0W7F Date: Mon, 1 Aug 2016 12:16:37 +0800 Subject: [PATCH 14/39] update.. --- LFLiveKit.xcodeproj/project.pbxproj | 35 ------------------ .../UserInterfaceState.xcuserstate | Bin 13708 -> 15204 bytes 2 files changed, 35 deletions(-) diff --git a/LFLiveKit.xcodeproj/project.pbxproj b/LFLiveKit.xcodeproj/project.pbxproj index ef57f710..e2bd4684 100644 --- a/LFLiveKit.xcodeproj/project.pbxproj +++ b/LFLiveKit.xcodeproj/project.pbxproj @@ -384,12 +384,10 @@ isa = PBXNativeTarget; buildConfigurationList = 84001F9E1D0015D10026C63F /* Build configuration list for PBXNativeTarget "LFLiveKit" */; buildPhases = ( - 5ED199EAC89EE599F1E56B19 /* 📦 Check Pods Manifest.lock */, 84001F851D0015D10026C63F /* Sources */, 84001F861D0015D10026C63F /* Frameworks */, 84001F871D0015D10026C63F /* Headers */, 84001F881D0015D10026C63F /* Resources */, - 36D0848EAED7999C442A99BD /* 📦 Copy Pods Resources */, ); buildRules = ( ); @@ -470,39 +468,6 @@ }; /* End PBXResourcesBuildPhase section */ -/* Begin PBXShellScriptBuildPhase section */ - 36D0848EAED7999C442A99BD /* 📦 Copy Pods Resources */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputPaths = ( - ); - name = "📦 Copy Pods Resources"; - outputPaths = ( - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = /bin/sh; - shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-LFLiveKit/Pods-LFLiveKit-resources.sh\"\n"; - showEnvVarsInLog = 0; - }; - 5ED199EAC89EE599F1E56B19 /* 📦 Check Pods Manifest.lock */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputPaths = ( - ); - name = "📦 Check Pods Manifest.lock"; - outputPaths = ( - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = /bin/sh; - shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? 
!= 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n"; - showEnvVarsInLog = 0; - }; -/* End PBXShellScriptBuildPhase section */ - /* Begin PBXSourcesBuildPhase section */ 84001F851D0015D10026C63F /* Sources */ = { isa = PBXSourcesBuildPhase; diff --git a/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index f879a919dac67a9780764071951a5e11ce9d1e26..1610ac68371257e9693697d75aea1bca79c5dca2 100644 GIT binary patch delta 8394 zcmbtYcYIXEx4&m*vYYH?TXwS>HrZ^=_7)&OP$57<4Iz-wO9;sVfi#lP!JUhUA_&ML zh)PL92PvX-DT06~Dn&%;{fBfB5dnF3H!S7(eD9B!&F7Z6XXbq8JEzS|9eQ!Z3?+U* z{(7&T4aUCL>Mj3O<_2?<`Hi{DJOKs(5MYP`0Z2dvaZnG`5Dywi0y9{^4i4}@3N(Vo z&>YgB1+;`#&=xYG2lRwq&>Q-|04RXLP!7YO0xDrFjDwjFfLZW9%!WDe0nCLD;Uic8 zwXhUcz-m8y3L9VxY=HXdYUF)}nRjQ?wp^hCW9d&_=WseUE-X zhtOek1RX^`qGRYMbR6A8x6o~L2mOZbqTkUy^ar|+9;3f8!4X)3rC5ezuo7Fb4coB; zJFyG9u?Ktct2oV%GjMC1jXUEmxEt<;dt)CSh=<^EJPcRh5qKmXi^pM#C*n8pB>WDZ zhNt6?@W*%_o{tycm3S5Y1h2+x@TYha-i&wPJ@{LE5Pye{;h*qvd=j6>SMV)-8{feX z@L%{L!9+ylL`C9B0?`mX@tcXAxJiA|guF&FNNbWsI+32F7wJvss>pCMf{Y}i z$Y?T#j3wiUpG+WclDEkeGL?Kx=8^ei0a-|@$x^b6tR`#7MzV=)COgP(vWFZXKagYO zB)Lc~k;~*JxkYZ1Kf9VrhZh$gVWJoT6L^g3Z6(?Oe;pnq%#>zYvy`ju`omN{QVAPsDif6lhzNc;#&fz zgmTO8%mc=}h`Go7!Q5y5q-|+C+Mae;#QeoPWF9e(X(sJSd(pyxDtfCF2=EwlHAK*k z)xf7&fisSHF9<nkR?jV zOsHW-Gg)EzM9{w^ZvZ3hM!VD8Y-8G_{!H-iFw>st09LT|%gX6EyefY{v9Co%MgHho zum-*tX9ErJ}#g|5(z4xyEF5`B;3&f|3SC@3rRHOQ_g8^Qs}C@iWf ztH`TsYdO5AxG;Ngexv%5=^6rCyw1f_O z&U+CQ2YDO8=ZBu8Fd-_DV;#ypcCnv zRFfw)5B2}cq;m=?%8HA#%Zf^?I`~HCN#jy7(vwqDGg`E2lA4i`lA78&IU}V}%j8t< zQm)3yRAseT7;1CB8~d zd40F{`ACibIZ{|j%D;a~{U1IxXq4`4($L=|qeVt~Ubd6I^`e*UhCR$3Xh6axU*Q^DhZ}GcZozH1Lub=D^aDDVen>x}AJciu;4Y(OYT*yK4}Zb~ 
zMoZ_@XzHd5=t5deYq;m!^~)qY&>;a5a|9tFibf*3h%Tl}YLNsC=S&_Y7~zW=rX#Reo8;5Tb?;T(jsGU7#-3h16@v6(3Q2wgp#;nR?$zmVP2QL zA9jD_M)m(8+W<90DRd28OV?42F)b8z8K$$YYshDwQi5JV&HvFe4YlC3tf!yR0l}Wq zx~^TC>cXAcpbnu<^36~PdzPco+o2r2FR+}S8;RZxy?h9M6~DO!e> zGh$R7G*|i)Jx+h-;2&P!EY1Y}O`QLrYEHb)e>hgQp|2S8GV}%7j&`7(Xczht?M8d( zFZ2XGNl($!^b9>q&n-iH87X#xc5N+IAuVlE(&~6FWxVI z=I`!N!5mx8rP%P~e?}L$42FI|C(ucB3Y|u0&{=d2ou?P+C3=}&q5q**>96z}y}q2O zMVEpox!+avE4qfRbDnx51d_waf|>;n3tkqQ1oN0Q2r}I_VE7>ZAYVlgSzwobwxL&X z{^+vdRk;;DUl_ul=r1mJqX+b6HF`*I1(MClEzuJUTq9=a?P`qZ9jeLgm|s--v=>z%~eMleC$4jve>#+eFxnB}C)4%Bx7NS^)4gD)wPz8oqX8KhHWhL(X z^73L|9jWdht6bp{hM0^SF){yh>4Q^o(_rbt0>eTi%?&vLel3W#84K`2-oPzz%V6GM z0keQ!$Q+0K^TUeT;9%ScrViXbm^oM=wKx;L$5|{_hfZUG7hI12N4ltkmlK4S%L2cS z%U8Mb4tK{r{;w?YBkqH_G86a1dAL8$#{+NyE)3)vyZMDIC|D4%AkC9{LYd0V8E&0f zbA@ZWDql%@SqZo3?(St3Lo3Vk3w$|M6~havhFAFNAPova8q7j82MG_w#hjWF&IrnS zwJY?MRuv5_@>O(gS6b*B#eyhg7Lw(%0j)sDx2Ov1HaFe;rKl~69@1t7UEe*U_rw|;vzg7&q0NFE(=-~ zlIRW=EL77Y(>JiHTaj;M&n`uS28W({WrqB=BNsmMq_G@bIpxJgp)VmH5Bhi5AQxh; z1Y3lwaSg7;i&)TcfkDrLfd%6tE`}V$%eXx64lRf&;ImU5Ud!}c_}oR;I72f}1^^U*B2UxJP z;0RVIj(UFlJwE)Bz!7|u1s4l$n)`g-#^r7V5K*@*LNH@U@^?MR)^(uj3mmG+?>vFgM_Ks@vk<@V%FS{ekbZ(1?Y` zG&fHw;kXY`{xqr@)B5ZT0X&Ky;m7!I&Iz6{83YIlj5TB$(kK^WQdwwqj1UsR2nmny zNhFhjpAaE4h(zOIfvbj2{xlX^ve5JeZzWxJ2#zKoA_?DFOR4&>OOHd`Z%SxpQA)g@* z;>?p;I6gwzZf%lSSVLSvSAC&PE+8JlZB~rW#RRy9BnNIMdHkdSNeSB&;Rw!nx^^U$ zTRYN}aFcG$LfdC+N1BlqFRl!XBCTiz3vFnwG40K`7wv?!A?^R0Xl78fT~M@iVMx^T z)PzVj$>l;g=}fwi92PpTkjX;FTGEwtBi&ix;&?w6^0>-tNxV*xCza)dw(7y%*wYdq zP!?|-7#^%I8N_9D(vRek{v@9aAO)n5_{cyOI zKj=PT{$40TNDW!U?P5qR3j?dkVipF4eXU!5@o-;di_j=xoXg2d&TPmE76w<7RV)-y z&GWb%7FtWzGv*qyj(p0(5Eh08AJ=i7#61t8n(Tje*h02FhrmU+5}F$xYbV)N7jyy+ zO`@NCMfN?H+|NS!bIETAw~-7(TNhFl9^?=?^32$|NLLXy_MbppOOBJD$uBHavM_>$ z(a$aZ6gm5%8IyD5d?X81EDYzO)jplkPp*((Uy`^+uCp+bg;6xOZ*VC>2RXs~U`(4r zUviN<g>{+QS8<3uB>qQTbE@*I zd=!{SrXFKp983z+lxfb@Sj}-J&gIIEfn04+g3E9fcN{tjkKxkbCN4P~<&HRy<6pQV 
z&eQlTcg%SQ-^KUveeRg^As0rUZ~-)qq>z@}S>{ykgmN?4LiUmGxYNkPe-XVt&Mmh*c4*Bi2S-intl^fQNXIJP}XHGxE$l zE6>4m@jSc^yj)&)UQb?cUO8_J?_J(pUM+7iZz*pXZv}4^Z!_;8?=0^;?;`Iq?-uVN zpW#RIWBG1=3O|cq$S>g!=a1pP!Jo@t$oGH3-@@O`|BAnl|26*`{!#uh{&D^<{FD6C z{A>Ih{9F7xkxe7pM7E3U5ZN(uO5_KTA4Yy0IY07jZ_<5QGZ1J9rZ-O6UYS#f<%E%U=WxDE`djoENCD| z@e3LYx(G%K-V&@4Y!d7e>=*nfI4w9UI4`&;xGcCOxFfhLxF@(TcpyZ=Xko0-Aan>_ zLXR+6*g*KIu$eGT*ix7-Y%Rcv<)#;XUDf;RE5r z=qAw_(QTsJMR$m%(eFmjj{YF}!|0Er{qv)jMlX+E8U0E0n&@@WCq+V0Q&EAaTvRC< zE*d2oBN``~A(|_iFIp(75iJs}6KxP}5^WLf5bYA}7JVf;BDyRVh~?r0v0m&JHxQ?Y z8;f5RHxs9cTZ&tWGsRuSh2o)NDxNH!DxM~uA+8p$6n`RKBVH$7FTO2)AmRBXLWxKs zktifeNt{G2(Mzn7*CcHu*^+X}NXZ<@Cz36at&(k$?UHXKM`qK{j3uUJOnJhMRJKePF_zQFE`1Psu@_zFE@)EziR9+?@CVyS-ms9x! z`4ssy`3(6?d98f0e5ri7e5L#|`7Zf>`2qR2^6%t7%72poEI%PXCBG=YrVuNviu#II z6wMTAik6B_iflz^MUJ9GF-9?7;a5yhOjJx#%u>u(tW<1MY**}5e5u%@*sIvDIH34e zaY}JUaZYhT;lHG~qPVKKrnsTFrFalaVl}Z1W81~{iyaX=J@%v6pJGqNUW~mN`%sCL zM9EV|DkVypQm%|us+8@OKIQAmx0ExLvy`)yA1FUmeyp6YT&et2`I&Nqa+7k0a+h+q z@+;*&s=X>l)k`%kis#Yyitx&B}tyX=m z+Nj#B+N%0OwO94E>X_=3>Wu1~>VoQ5)pgZP)os;ps)unXP8cUwk5rFVPf*WL&r#1; zuT*bRZ&7bkZ&&YBf2rQ1KBPXP{!#st`e*eC^(pn4coJ`m&xs!&KPi4%{G9l?@ipA>&EH)x(T{Bb#Lq5(aqCs(CyKk&|TI&()+dg z2Kr|D4*D#8wmwJSRo`9j*H6_?*T1KqrJt?;KtE5vLcdPGUjMm%qkg-7r+&A7uYSM& zcSE$nWN;b0hWdtvh8BiwLl;A?p_`$Hp_ieLp`W3|P-Ylrs4|Q+j4_NiP{Rbnbi+r6 z4Tb}T%SK|98tul$#x!F~W4hni+Su0E*_dPOYV2X`ZR~3tXe=@o8%vFT;{@ZI#5A!B(+$%t)1xGQk|;@5>ddjwFBkq)thtNh6acCcTyP zPSVt*T}cO$&L&+-dT1tQkGY{a&s=1#HLo>)YW~c;(Y({V+q~Dj-+ab=-F(mdr}?4z zZwpui7Li42iLu05R2H4ZZb`9pwDhtRSO!{(EX9^G%P7kj%Xka5Ot4I})L3>{&RZT@ zi8aE?w<`Qrl~rxkSanu|wUM=zwY9aawSzUwnr-cA?Ptxm7Fq{dE3KofW31z>taYOG zE$d|KOzTJ1W!6uuYpm<68?2kH+pOEI`>ltpr>$qKXRYV0x2(Tef4APZ{$+h+i?9i7 zBAe6}W7FD9HnYuYbJ@JM2DTJib6Z# zyJ)*?yK1{`kFbmFv39*Z-JWglV()72Vef4(w^!On*hkyP+5Psn?NjX2>@)21>V+P|}3vHxnnZog%}WB<$k*uglEL+D6xG<6g@N*n>l2afH2$2X4e z96vaYIF31vJI*;SJFYseJ8n82IvFQ&5@(cC=oCAp&U#L>)8TYEJb&8+?Y!&!!}-AZ$oa&DToJA~m)fOq z>0CxvlFRCHxLhu;%U|Es(bdOQ?waTdxK_G$xK6lkyZLUZTjrL#6WkiN)@^m$+zxj` 
zcZ$2QJI&p~-O4@CUCEWqZ@4G9C%dP*r@Lpm7r1NOi``4z%iXKoYuxMH``!0F@t&rh zOixcwA5WfVfXC+<Y47D>6zu3?^)?t>-VhpZ18OLZ1?Q)?Dp*OeCIjg zIqf;;x#+p#x$61DbKmo)=P$3!o9NYhO-tOL>-ag(u zZ@zbkcbK=zJJLJGJI*`VJIy=8JJUPI`=NK9cY*g)?^f@A??LbP-b3DF-k-fEy{Em` uz4wwEqhx+^RI)Hxnk-LNCdVZ^l3z=1ncOy1Xfg=Z-O1y+d-$5%{=WckIPow5 delta 7017 zcmZu#349a9_un_ONt(1dA!&1LvT54fl>1gFP>`dv^yDZ|dW1Gm8t4HOkj$!xAaVr@ zq8x33Qce+61VIr|1W^PsL$-+1?Cs#3Uiye!~DhEW&Q>P7$QIgYS2InXbCzng9Ui7K^!hMn*Z?16po1ssFp@Fi0OU%@wU8oq-w@ICwlSKvC_ zfSYg&{(`&k0I87%wLmSA7U@tlia~nBp|+?UYL7agj;IsrjJlw%s2l2zdZB(O1*M`P zC=I2f5hw@w&{z~KLd9q*szTGybTk7!jb@_f&@A*KnunI4mr*rZidLW+RF5{IP3Qx( zAAN`pppVeU=pgz89YTlEXXqIE3Vnmlp!4Vgx`-~J8|W_j8{I?q(F4q2j9DzfGOWTn zY{nMMV<&cDH*Sa9;{kYJ5D&u1crZ@EsdxxZ!|8YgF2tocfXnd|JQY{rr}0eu41ONJ zh!@}xUW!-Xm3R$ai?`y}@izPhehVMOpWs9IF#Z%D!JpyJ@lkvNpT=kLIs7v|k1yk2 z@D+R=-@rHVEf%quWmyp`W;JXJwk7M)vN~4JTG%+&%eG<@*iLL`whP;X?aB6G2e1R# zRCXvkj2+JAvH78{xCgEw>7i+OhO~f;B^6{Dd1dC5zEE)#~dBuTldZlRip)}dBW@>WttM!T;vzcKbT%&*Kf<~nnO^dnD^{$#)! z=6B`~<|cEC3?xIzFp?kI8ns)+++!Yur?^iB)c`}1Lwi)F)+|VBK?Gtlm}HRb<4ogY zK?+ffrWRxnNm58^EyzJZhLAMscZ9lcNTyIHYipS*CZ*Xv8Vrxw8^J^}Nft>TVoQ8} z3={qxV+JyVzzVUWQ!)luR^;WE_D%UJLD@JsCwt*Fs0=L~_ZPP>ix8 zeiC|whA3l2Jt2`vp(Q?)BGH95D z4@iNbjAlKg!VpM; z6M{fMAXS8r=g14>rAK@89J~;I%q(~wW|L`TI+;-ib6_sKNS-D$Nsw^4DtpKpZ4O=3 zm9-t7+%MTb**C~v(Y)3ruUxDTDDiO#tAg zVv3P(UK4Zn!h540GAAjoBCihi!uw20c1lLL*1W6_;1kBaj-v7*9Dv#IF&w1W%qI)T z46=|cT1OE&44=XgdOb{WT1-OZWqMs4`p!_304M0=U&G1pgPbRj_JY7fD3RDF2QA@Ar`Wn zyhc`%)#1H-7~^oQF~(obRi)4vZxh;Vaz(%&jcqGJD{EsBlUL8 z_K&$Z$T83$M6^as&5txA2V-B4EQm)|6pL&q4#guo*+e#zEo3Wsoopj-knQV{ld&K- z@*pp2MN6&84odhp$y?+-a+8)G5H9q$#mK7$B=yZG4opeOo9r*js|b|k(1y&s{N%iR zUrDa2RcN9|9|~Fg!BMomZ=s)B^eXe^shC`qu6L!sq>!)3D=YF<@c&C^^aSeBtZk?# zd7E&J@T1;T-cTQuM0S!r4?_n%g$7c&LjBPI@($Tmiw2=&vYWh1(nlp_XAUUzm4;Mm z{V1wkvtk~QpOd5E!hBlzoNz-P*;1FM zPY*q4tsjJzq2&)Jf0Z03>CMksiB>%(=BO5}d1$$oeD%n3J!%MBh9-Gqhc=JijNWGK z4QLBR^L4Zhy@9r)9q3K;7CA{V{f3+(-;&ehJ9376-#{^a2kk<;>3t8{ORw)!RL_zh 
z$WQcsj{Huqe}vk!;zA=^y&goLq9d!xkB#zyK1WAKK9sNwUqu~l`GQHwO39#=CcYm3 z$DB$DIzgobeNBG;M@rDCFqgkYr^$J8`4N}DM?Z!+c^3UZE|810Fd3aAOUWgYp6)M9 z4^~vA6*aH%GP?S2OY|$cMt&i`Jp$%8bSq5S-_alFCb>ecl3(l4pXfHaL#~nQiDclnnNjm**AvhL>07`x|LVX`4~(D=FbK3d;NwEBFDWlLG~L!SJEY_bn?6 zl;x_%WfuGR^1RZ*{J>P+U(S~XDtLb>UtU#Oz)$g46!R6de@LKktiQyUP!cE@pTMVA zmhz5?fkJ9mUXfQ);@|^ieC5Q#yb2#*6{sxZ`veLCd2~#9vqfHb#)L*NF+fm4MK;af zR24nK7DqIe|3L;vVtKQ8UfafKq~N{rvkl$>=2-f0D}cc35&#tl01rUMPvN0;H%^4y284hwoIZ#xd6qu0b zFKyb?iS)$6lLSzNkF1A>DV`iY7zI#G`uLxkM!LJHn3}so{K807|nsD=K0ZcWxRsfC$=3oj)GdI@Z4UDD^*W>lL zK>)J=76JG=ypitvW&y0>o$f@rchH)jyFKhFt;aq*@bTODJ(@${o%kKR3-8A7;yrk; z05$>Q1c(>FE`UP-rvR>c{65}?KfwF(ex@z{NC3A09s#@pw5NYWAwY)^iCr9wD+o+T z$eTEk26bUY_*6@1Jjv-s_6d|$_)04>si-vj{slfxMVm<@x*8^fM3eM?GvsT0vXLR* z3eZ-7R?Q4)(}enW;kBF*AmMQ)MEul5h1UN+6)rSUpTc#Z?H(lE>9sbS7#^g?bj?{_H z@-Rn!3w!&!0G&f0*{#7p@!#|{4&TOi@L%|@0F+YQ1n4fn6Kn82d>;<8lwUNs`;f%~ z3?kgHWZ&3|Y`PJpC; zh5(K)NEI_J9UGlI3?F0C)p9=ot%wLWP3f92CzwNzpxo~DYb@uN`N8s zwbvMA2ZfzYW~tt#36S~7*&%F3^Sq7IX0llVqzjNi(uY%|n{rZHqJPX|-i}~LJvM4I zn=1evnBANl{aZL~i6cYAC8pV*G9_b>ugX{0tE`}yzMYkQ!TQ*-Ss4}dcq70>FnT7I z@h}}Jsd~~+B7f4)AW`(_F=HE!#}4ep3AhdY2r>Xqq+dAZ;rUc&7E>vy4hsivz#H*q z`h{Z~-j3g-QT-16rg0Vj#i}VH+3X~?EXW4g=h+w7x$HbP#J2wikza3 zqAsFtq9;W?MZH8RqHIyVs8BRkR4iH{S|{2j+9i5lbU^f(=v&bl(OJi6D7aGEXvJvQV;EvP4oXsgtag)Jqy9zeyfM zKm?8uMf8an5|JK}8Ic{aEMjd$eMCdV#)!YAQmIDTQmT{2NDWe(G+yeEx}+XyD`|ge zK>D6g+I(v#9t($mtPr5B`^q`ye7O0UUevbM6GvLe|;S(S{) zUXoSImdTdOUX!hq)yo=W8)chiTV>m1yJR2Aj>t~Qev@V5h zvip(x$i&E$$jOnx$a#?qA{Rw2iL8!X7P&UEKC&TlW8}rin~}l$QA`wyVxz=S$|!YI zizsbWbd)|SDXKiGHtKNH$*6CmzKc2=^<&hzsM~Uoi{%k=nLJ9amz(9hJXY?IyW}2u zD|vT$io8rdUH+VWj=Wm_s(gifm3)nSoqWA~gM6cWhkTFxQ~7cEMfnZ+AM#uB+X|_o zg+i-{R_KEYqhgpMPf@BUS5zvdC}t>TDv074#T>;##ahKy#ZJXZ#rKK_O06Q?695R@N%l zDAy_1E8kG=QJzp^3kllBYkEgi2LsLRmh=n8dXbphRE-E`eb-D=$i-FDqux}CaR zy8XJNx?{R8btiNub*FTvb-^>bUvyV>*K{|cdq<~6r$uK(XGJfKUK718dVTbU=!?-e zqyLP)6MZ)(A|^UUA7hN+Vk|M%7+XwyOoy0GFxS)yHw`-tyA1Cd4j2v@J~e!1IBNLL@T1|J 
z;k@A&!&SpI!wtg&qsG|6XfnEt3C1?YcE--euEy@hCyo7#8O9ODk;c)+F~%aJ-#Fel z!5AYYhHqJZC&_ylA{^ykfj#ylcE?d|(0-Hi=9UliAeD)YCND^pa_t zX|HLYX}{?s(?QcA(`nNs(-qUNrW>Z;O*c&sxF}A|wcxazfirPtj_0hLhwIIya_L+q zH3GO6!iaX7n;m&eDa_6}7 z+(qs#caM8u1~WE`%o4NI9BG!Dm1eiOhdJGxXRb28WNt9;HGgG3WB%U!gZZ-gPxE~X zYmr+N7L`SBFP{7!xs|1Q6e-_IZ5Kju&IKk?W3oBW^r z9sa(Ru_7yL)da0ZtJB)b+S=OI+R56*+RgfeHPt%O8nBjGE3H$l)2%bDg7rD;Z0lU> z3hQcXopr6X-r8W@WZi0g!@9%zmUW-?l=Y(ZL98s6kL?pXG58)6T{9*jK{ zdnEQ~?4{V>V(-{QHm9wFt&^>*?Fm~?Tc)kZHrY1E7F=svZ`)|wV%uihVf)T@-ge1$ z#rCW1uI--fK^%(f9G4h3IBrN>MqF0h+_+F&L)@0Q595x+-Hy8#?~LyipA_FOen9-7 z_{{jB@gw3##{1$I#4m~88~<_q)%f4+G4^=7)9$e+*xTCM+jH#2_VM;o`y_jXeX@PJ z{RR61`{JPeW&1MwDtoPcjlJI9VE@W~)gf{y9BM~Pht6Shv~zTHba8Zd^l$T7}Q;;3@Wa;$dja2$2~>iElPbtX95I@>!tIlDN!Ia8hK&MfCJ zXO45Uv&cElIl&ol&T!6j66dqdoz8vE&z!+8oL@RmxFTJdt~^(P%jYU~t#GY#HMlmp zwzv+sj=8>dopJr(I_J9Jy6*bjb<1_zb=P&zEq0sTE_Zu(qI-Zl*`4Z6cW1dryT`cm z-9C4b+wY$3UgQ4Qeb#-+ec64*eaHQ``@RP}tVirodRls-JqC}- Date: Mon, 1 Aug 2016 12:28:52 +0800 Subject: [PATCH 15/39] =?UTF-8?q?update=E3=80=82=E3=80=82=E3=80=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- LFLiveKit.xcodeproj/project.pbxproj | 14 -------------- .../UserInterfaceState.xcuserstate | Bin 15204 -> 17663 bytes 2 files changed, 14 deletions(-) diff --git a/LFLiveKit.xcodeproj/project.pbxproj b/LFLiveKit.xcodeproj/project.pbxproj index e2bd4684..a0fdc7ed 100644 --- a/LFLiveKit.xcodeproj/project.pbxproj +++ b/LFLiveKit.xcodeproj/project.pbxproj @@ -120,7 +120,6 @@ 84001FFC1D0017680026C63F /* AudioToolbox.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AudioToolbox.framework; path = System/Library/Frameworks/AudioToolbox.framework; sourceTree = SDKROOT; }; 84001FFE1D00176C0026C63F /* VideoToolbox.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = VideoToolbox.framework; path = System/Library/Frameworks/VideoToolbox.framework; sourceTree = SDKROOT; }; 
840020001D0017850026C63F /* libz.tbd */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.text-based-dylib-definition"; name = libz.tbd; path = usr/lib/libz.tbd; sourceTree = SDKROOT; }; - A17586B27CD6843997425CCF /* Pods-LFLiveKit.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-LFLiveKit.debug.xcconfig"; path = "Pods/Target Support Files/Pods-LFLiveKit/Pods-LFLiveKit.debug.xcconfig"; sourceTree = ""; }; B289F1D41D3DE77F00D9C7A5 /* LFStreamingBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LFStreamingBuffer.h; path = LFLiveKit/publish/LFStreamingBuffer.h; sourceTree = SOURCE_ROOT; }; B289F1D51D3DE77F00D9C7A5 /* LFStreamingBuffer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = LFStreamingBuffer.m; path = LFLiveKit/publish/LFStreamingBuffer.m; sourceTree = SOURCE_ROOT; }; B289F1D61D3DE77F00D9C7A5 /* LFStreamRtmpSocket.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LFStreamRtmpSocket.h; path = LFLiveKit/publish/LFStreamRtmpSocket.h; sourceTree = SOURCE_ROOT; }; @@ -139,7 +138,6 @@ B2CD146A1D45F18B008082E8 /* LFVideoEncoder.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = LFVideoEncoder.m; sourceTree = ""; }; B2CD146B1D45F18B008082E8 /* LFH264VideoEncoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LFH264VideoEncoder.h; sourceTree = ""; }; B2CD146C1D45F18B008082E8 /* LFH264VideoEncoder.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = LFH264VideoEncoder.mm; sourceTree = ""; }; - B75B965E6B94DE4CBCC82EA7 /* Pods-LFLiveKit.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-LFLiveKit.release.xcconfig"; path = "Pods/Target Support 
Files/Pods-LFLiveKit/Pods-LFLiveKit.release.xcconfig"; sourceTree = ""; }; B8CB02D2A92EA1F5A262F154 /* libPods-LFLiveKit.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-LFLiveKit.a"; sourceTree = BUILT_PRODUCTS_DIR; }; /* End PBXFileReference section */ @@ -189,7 +187,6 @@ 84001F8C1D0015D10026C63F /* LFLiveKit */, 84001F981D0015D10026C63F /* LFLiveKitTests */, 84001F8B1D0015D10026C63F /* Products */, - EDD4B76A07A6817C79BB4E5C /* Pods */, 0C07D14560B9E91EA1B59306 /* Frameworks */, ); sourceTree = ""; @@ -326,15 +323,6 @@ path = H264; sourceTree = ""; }; - EDD4B76A07A6817C79BB4E5C /* Pods */ = { - isa = PBXGroup; - children = ( - A17586B27CD6843997425CCF /* Pods-LFLiveKit.debug.xcconfig */, - B75B965E6B94DE4CBCC82EA7 /* Pods-LFLiveKit.release.xcconfig */, - ); - name = Pods; - sourceTree = ""; - }; /* End PBXGroup section */ /* Begin PBXHeadersBuildPhase section */ @@ -611,7 +599,6 @@ }; 84001F9F1D0015D10026C63F /* Debug */ = { isa = XCBuildConfiguration; - baseConfigurationReference = A17586B27CD6843997425CCF /* Pods-LFLiveKit.debug.xcconfig */; buildSettings = { DEFINES_MODULE = YES; DYLIB_COMPATIBILITY_VERSION = 1; @@ -644,7 +631,6 @@ }; 84001FA01D0015D10026C63F /* Release */ = { isa = XCBuildConfiguration; - baseConfigurationReference = B75B965E6B94DE4CBCC82EA7 /* Pods-LFLiveKit.release.xcconfig */; buildSettings = { DEFINES_MODULE = YES; DYLIB_COMPATIBILITY_VERSION = 1; diff --git a/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index 1610ac68371257e9693697d75aea1bca79c5dca2..c5c1602f4019e4389d5d91691d1052ad1a07caa3 100644 GIT binary patch delta 9644 zcmZ{I2V9fa_x^jwO9Fu`LP7!|350||HY5bC1NT52C~grYDwUxOx8}a?yLn3K#Y<}~vybB4LXTxEV@erA4Qer2vPzcGI?_nE($2h2m}5eNYSAOxWx z42VEDhyZG!0WlyJBmoOZ25#U5=>gCfyaw8V_Mijk2(myokOO*wfgm3Y0)s&T7y?Q_ 
zDHs9DK?N8CCV>DTU?!LaW`j9kE?5MXf+|oAYCtVm54M1vQPr0PzL2N5-MOcjD=d50F5va zX23?UF?5unX)8yTKgT7y984I1~pTd9OzlcEqu}FVP_; zEYt}NMg_=^hM=Kn7%D`sqv5Cs6{8WT9F0a3(KIw2%|J6zU?EzB7Na-N2DA}vLYvWB z=xy{adJi2&AEJ-YXXqq4gU+Gz=mNTkE~6jOPv}>44Lv{)(IfO2JwZ<~!Wf5P5st(P z9EG(w4jXY2wqOVL;Kn!;x5piDN1TJZ;~uy_9)K%wB_4%G<1u(F9v8sl@dP{(Pr+01 zY&-`q#mn$=yaK<8-^SbV4!jfZ!n^Svych4oAK}mN5&ShigU{mc@CAGs|A=qk-|=mH zAODRX<0mY`Vphn8vSF;4m9Q~vEURVXSRI?fT3H)wXC17Y^|2XjGqx3*#dcz|Sz9jK zlkLU!V+XNA*-~}{JCYsCj$_BOQ`mVd&n{!g?;hbDw> zCR@q7*jW)w@%@o{7R<<5RsER=sm#gRW1Q zFN3;1Wj_9TKl=NgL0vxI#N6?|m(`qQ zE(Fzl$9&J6W6qOiq&dkXEozyI%q6PkGHFR#Q8lfpnu%>X3x*kHhSnLCn&EL%RaE5* zi>@=bf~syXzcV+PKS*2BjvnLVq@*;{9V%a!JxNMUikYy=R4$p2HB0z^?|D$;}G zQe{1fHX-w1XLf7vuAlX)13Aq}b-B}C(WM3PL0vCx*TA~{0tvua2TpI&CkRfRGDg{^ z+u6)IWof?DhHU~Vz|NSeffd+DKQf>iIDnH3B$HmDF7VWGrhlHa5#@}7w}mT3piy04 zUY^uV>1w8Z5w?nXycEgZxnN`YZCoK@lk>BS>+Vgyz!+GQq!-Ob4bT7zxS-Wao6M ztjHf!2Qt1;@Z~vVbfk zi^yWalO<&7MsNb01gF4h@GWy0oF&W1aTCPe?<+r z2Cg&NJ+gD?L&Nyr!Od53zYXrZ+Hnv3^=iih@Q@i$w^#LZJ_)krDOvLpTOb1gXapvb zb!08cBNKuyEf_A@6!&ae7y`o>QwOo3KtgLblw zyiK-~9b_ljMRt=to1l}?F$Z8OOoJZiWprdO38&#=AK6b1kauXic=s)Z0O;DnjzKHi z!S=8Nd5^qL4%Wad*ohMQ5IILF9i=R7L!EX#>;ZFOPuL6gCLfSb$(Q77a^gAPU_Ur8 zsHHy~0Q1OU@*(-C2Ij*-RLjTY6EZ%y>n7fyisP>-$LWT{(*FpLfFofU`HXx{zTi1k zO!qNx!hd8Y!bw!&SL8@gVM23h9WR+~jr0xBO@%Z6GiVl^4d;-fv= zUH=t2pt}g3t$XWlyNi-WQy>s}7=C=<1&ZiHH(mZ%j6LO3Ac zfUpL&L2Xex4uo>R%Yk%$q;+5(%0|5y(`M8ebwOQGHwTK0To z{lMjD1qbvTNF_9buiuJQXdONF(VJ*BT7%Ydz`%ic4kU2ESc|GqHL5|i97yDVg#*bP zND0zTR!};)ve;izkySc4KbV=6_Z?DMRMg2|UY;Kq>hE4Lc7(rodF7zO^72Z5c|B{k zP**@(Ibb5O=a~LzJKBMEqFofk-ArG!2kk}s(0=+cFhkJ+`rVqAr~C7yit>v5vWmiz zp)E^`DvL|9D~kvD%krdhe@VfAI%R{0<(K8tRLNh~Ex%-_Kg(Z|CzTdGGqH6EuZ?i<<5G2FVvL|=73o$ra+$5U~b!`NBiRZq5fxO*D$kwNMVs*Us773A5vObQjjOrX8Fey z4$d#qmzL?flm&frFh!U?v=8E159`N4+I_+S>kDpuj=ui?Zs<{T3>`;Zzu|z515OUO zIN)yZ>r=rUI?Vz5|8IxR{&$Bs;CPOQhH|l2`;z))6qXDwsx0sq=!cY*7VCq=Y*F0E z)hJIIl{sPrU1nBRZYO=Z-KEbittcG)Y%!NW&vj&E*TVXxU3tm7>a#3$!EHGK$mT$2niH;d 
z8e_6>=T`)};I15?|J~^OOHN%N7x#YUeIKd-bgvsvos)vzqoS~=u%eLSPEV-@5b|&# zjqP|K&c}oBU|fLxcnBVfhjAd612n?-;y`Z>^x;5X4)o(de+~?wu^kVmr>7K2aVZ{w zN8&OX$Q2-uo~2L8Qx44Hz#0zFn75IisXr*klQ0QF72v?YD$H>ppYLICwZ+r$j9~Y4 z4h*WoGdVDrXuEgHFDz*=Z7yEO$ZPOCJRdLMKmiB*92ioA7tv{X4h#*>G=r}F3q$@u zyplQ@euD$Uo~HzOHC|8C5WEJj#p`etuEsUEmIH+xc%1{oIZ(ubVh)sWpmZbNfH&ez zcr*QM2|Dly4vgdgJvFA){ZA(YcoM%efcN9~8TtQR$>9(1;b0}lfiez^BQ%83gA9Kh zw2DTN@|P+K{5k$2SW$3*o@x~@)f9KWYgqVEd?J|P(wYK)6RasXFscTh42R*EhLO=^ zJqO0{?h3}zJQ)ag+mJcWj)%L6!^z*AP|I| zyl~9ASdAaxhkWNm9jeBU_)du~C1`~IVOo`TZQ4lLuN%*|3*J)0ODV_@Ui1lGs_%ASQBSj2(FwKQ@`*(6#hr3Kf)^TbSa ztdr@z?m2>ND(j)NWYajXv4X};a*BX49ZNh=&&wQF~PO~;PlcfQMW&o?6 zyEEIGZTGS_vhCRp99YTG3xMKYS^gmvxrP4G3C+iomvb_udr?S)N%jxV4b|w`FuydH9>|A8v+2qcF?Ht(2fz2;5mYvTo2r`yk z%z?K#@Kys|x7Dw0NsuB-Ik4q9UD*{ux^iIa|FAPM@NfO!so>QQ)Ato#KC>#L-YmA7 z8aAgsMdHAYpy@k<+@*fZZVXPy(Y27+I=-`8*sXa|Gc5oT_<<={Sj}z=<{}Lpp_tvy z?%<14%$VIt$zGfiOid)&IUW6D{RNq2gNM=DpzH*@huzyFry{??PXHri)J!~+!g!cQ zOf!1vzn$hkpU|A*487>Th<>1#{J+pk{_7afi+m4mMURJe^b$V{XX7ro8x7urY2Y4; z3-NGVOoQ@BJQJ^{_w--V8+b8mVUq)FI@^?O&bFY}>>cSfduO&Qn?tv31ACdh$=+t~ zvVXCEvkyZ;Lc}4Okl2v85Pe9GkO3iuA!Q+BLMDbx58*?Wg{%l!6|y>HZOD5epM-oK z@@2@8kjo+0L!Jsk1(AX%fl3f9&3gj$-)d_V__3vGhwE%rLeWItuR;EOV~%)PdGq0P&i0fARHnr z7gh>K3&#rI6MiB5O8B+#Sg0=48fp)9hPp#1gw6_`6FM(+LFjj(KZpJr`djFYu(Ytu zu$E!1!`g-|3R@GlF04ANHV}3t>{i%=ut#A}!u}C4qEL}Y6d{s`WTHrsUDQJ~STskp zM6^b zPYItEJ|lc#_~P&-;mg8TgufBKH~d2Q^$2-HOhjUYH6kOTMMSHJHW7h#5gj6OB6>vh zjOZQFH==*Un25Cz+akV=xD;_C;$Fo4hzAjmBA$p@u|OOu7KtOo5^=J)t2j?QL%dM@ zhPXz&L;Q~TJ@G;D2jUOKUx~jK9}|BgJ}Evez9{}hd`tX9!b${^P>DzqAyG=AB^pVr zBu=82SR`&qGf68+n}DR9q?4qxq^l%HGEg!|QXm;3sgdlE?2_z}?33J(JdgrAa zG*l{=Dx^wjv{WOFm9~|RlCGB?lb)Ael3tcxmHsUKRr*+lWuY>WEJ7xc#mWq_1X-di zS!R{lWlmXBSyx%5Y?5q-Y_4pjY>jN4tXj5FwpkX~BHJc=Tee?zSaw`?Ms`DXPxiO$ zq3p3dT&|Qy%Qf;?d7Qkjyg)uuK1x1DK29ExbMh(jY4W*pUcO1bOa7kxy!=WejEs&< zj5I}>BQ25M$Yzn5ku4)zN4AaZ64@=Xdt`28ugE@;(;|0AUQx&taSDSXL19uPDUuaU z1B$kaEJe1Wi=vw%Pf?&4q8O$qR+K76D#{g{Vu|8_;zPwT#VN(NinEFfic5;iiYtn% 
zir*Cvql8gmQQ=YIC~1^DN)e@uN{li`S)x*+Y*CIVS5#`0C#p--q^Olqd!mjUTIX?ly;?4nW1c>?4-<51_mngm4lQ8%8|-4Wx29aIaxVHIaN7b$t#yAmnxSl z*C}h1>y;ao2bAwB-&Y<|9#(#-Jfpm*{6YDn@~ZN>@^|GQ%G=7j%7-dOm7r>(YOCs^ z%2D-D^;8w8{Hh_UVXDch1**lWC937Bm8w;$&8pq1!>SXiQ>t%OXI0+^ROeL}RX?bH zRNYnGQ{7iRP(4yTQT-FmM8jwtEsj=4yQAAj4~Q<0o)cXgy(9Yf=)2Jm)krN>tJG?B zj9RNUs!i%7b+X#3&Qp(5^XfO%8`YcDTh!as+toYOyVZx)pR2!AA5kAwpH`nyf2TgD zzMx?>shT{^Fioi@FhMg#Gfgu?GfT5jvskl4vrMx>Q>CfVY}V}49MpWE`B3w*<}1zD znq!)8G$%FZHJ3CuG_d1xAw61 zsP?$_g!Yv7TkTox_u5~y*R4uqx<%X4pRfg4uwT3D~jbXiEqhX)nTzpJ?hxnrSnejF8 zAH;tVe;nY(iXuAt52* zwS>+IeG}#+tVuYUa5>?RgxkgtW28}Oj5fv?}C{NTS zx)VK#zQo3fO%pQ{TP3zl?2yi6aur6Dt$PB#uj5npm57Ao1(O z^NE2!O#)Mb$!F?p>SgL}>T4=6`AtJjBTVI{QKm5_&NS0B$28xx$i$o0nbw;&nYNgA zn0A@=n)aLCH61j4VEWp0!gShn)^x%2gXxOtC$q!sGB-81G`BIgH)or>n!B5G&4bNF z=2CN+xzaqw%$cW}XP9T3=b0CnYs}jN<~`;^=FiPvnU9)}n;#@el8i~dq>f1glFE~2 zB+X8mo3tQlQPR1jt4U8S*b-++v-Glzv2d2DmKl~=mSvW;mTJp-%O=Yf%O1;q%e$6? zmcy2hEJrNIEGI0dEKe-|Cc|W$+$nie^5Nt!lfO#-I{8@g+2rq%&m~_?{wevFlt5^T zG9@9!lwwJ-rZ`gEDV~%TDXml5rF2NiO6i=^Ev0+P@RT=F-buNbax3LY%D+}#x|Y8`2vWSwH2Zk=VFZ(U?vVqIok zZGFqS&AP+7+q%zs!1}56GwbI8>zCHc)|=Mb)<3QHtq-k_Z9<#KCbr3J3Y*fFU^Cg0 zY$-Oo&1uW9HMh02wXwCgb+q-e^|$5O@@+$Gg|;GFiH)<(wk@~4XHYuDQo>?XU#ZnXy-cDLPQZ)fjd?_}?6 z?`qGn=h}PQ``PpC`SuF?Jo_s9F8e|IDf@j#m?O#&??`kcIZ_;Uhttu^;dcyk40n_` zMmowJV;s{Qa~%sDiyccHZ#vdGsvNbB4UUtJ>rUttImJ$yGt#MdI-G8&$LVu6b~bfp zI$Jq&0?u4#Z)ZPeo^z1X?;PqZbdGXPcCL2rbRKoyaQ^L*x#C=Am&KLhvbh{CpR1AU zHCHoN3s);wCs!9&j;n`jh^x?5Tcui;U4A=Om)w2&vwsqFLd+nPu<7dC)}sqXWUoaf4J|t z|8_rgKS>Qqm89BJQ&YR8_Dvm@`g&?{YH8}2)cL7=>gLp4sTWhPq(!8u(z>SgN$a0B zFl|uU*tF?s3(^*+ElpdIwkoYQZDZP7X+O<7vO9-AjAwVLZqa;t36S zIceZz~cY$}YcZqkkx7r(6@7?5m%e&wEzW0##u=i8%7v3Y@qu#UL%if#byWV@= z``*Xif6_rZN{>p9OLwHVN*|CuHhn_+q;xKQYWnK*4e8s`-$}oael7ia`pxv)>3{l! 
zK9NuCllc@rl~3+j3+4e}NECi;kPatq%y-wfYEAMab{ rTj8tn9r1nRdz67PEE)EU?ioWehG&edD|i?P>wnpO^}h|@8Rh>Emq2M7 delta 8013 zcmZu#2V7Iv_rK@fO9Ba5BqSOLnS|^F0R>wX6x`wf_dpc1xE0*m+~;boR;|x!ZR=pv zy=|?lj%r&+wRKc$-TUX(w$@s;{$D^G|KE@BdGEfHlXK7g-tQUjKA#F#M$C$)^fCR1 zuNPydYgI7zqecyaQ_u%!8k&h_p*iSdG#`D6mY}6*8Cs53pp_^etww9mR3OnFi_yP98Q8)(2;TJdoC*d@lhYN5GuET%e zDLjLJ;W6@P$d;F*}mbMPm4K3;&=;&pgE-hema zFY#A+6aE@+!$0Al@ezD9h>zjp_!oQvpTxi7yZ9cyk00RQ@gMk4{1E?zAK_>CUqVS3 zQ4l3jktm`i4&o#(;wB#AB|hRO0rECUCt0L1X+zqQcBBL8Lb{Sd(uedTLvjEo>7 z$yhRuFfxfuCR4}|<8W_OkEz!So_D z+}Jkk1g^`Fy#kFx$qi@-6;Icz{ngYo&;4PMAZ{D$yo@h6pHJAXJl#~Xnp zg3Zs+;*blUqXlRoTEra8$z04`fl5&sDn}K}!@SJL{Jcs!KC3mE$C*FqI5yxhrKXM# z?=UviV+xEvhN>*BMPG(2twZb42DFhSvRW*OC0C%Y&?dpsW|qQg3zq5#mV&{??M`NN z3fW0cO&Z@JGJHJ8v;WG_RP(G}K+ zWtE|8=sIh{3g6s{+m&!;@^Ml1w6AZ`gGzbhJY_8beGRWLsrd_vCn)&` ztxJWs1ehTW>asl6vkcybdMuyy;wMI>kW9$pw?$b5vzYW(BL~FM*M7XpIuU999WtwZ?dO z580Q)yAT8h987?TFbO8Z6js89vk`108^uPmF>EXww;XMTsqg_zgX!=g%z&9Ni`8M{ z*}E*r7@NQ*vPo<*Gvq1l{rO|jj--^V%*3>`tc*sfX<1n*X=#lUvr=j|OiasD)<{hb z1QHvk)=tb6?kSmxfk3U=jcO+~=HHDr@a>{&^X1Vokiu6)dxJHSYG(#glT))YvNG$W zrKL4WPOg=amew${dI6uqf;TZ=45e=hD&UJMK@|s(%hjI!_%3&p} zM%nGNbA*S`48ag<*220sEp7xMk-WaoU&H1%1>0cz>vQf5@opD;?{(hofxYMfB(tgP zeb$)u7IH;v%GaHyuU^=GIE3u_ayS47;YaoXo5rS>!(sRder6xC8EhthU)xO!zlJQI zVzXFm_3zHWS^nQdQ_bqW>@jHn#`f=BQ-JVr*gkVOi~Z4q0{N?DmO&fQ3Bh`}hq za%3;ZQXGk8tb%>PmXu=!RtmwgleYn+s;<9Z`h6{0tf}T5cd(T1p*X1*)I02aNQXK#8L@J zbX?FoSeSnZ9@;TG$5&D`urS0ZGAFIeBDTBAmttH}HEm%TF6MW~S=uC`TFQQvVSz`1 zycEe9VV$a;$ANq)x`OKCAZD1u6YxZq#iH2vY#%$oeq@J3oA$Eg+9vJfvp_N90cUW`j|87{{a_zU(k zJHn2#W9&Hlg`HT6m*Qo3Ig;bj5K-Al_A5Ijtm0^hrW?O769`YKRmWpWOibq2Su{bs zSqKYyyo>D11<-9X^WOn|DqQ3b; z8B%o@BvlnDV_0E+p3=*gT5IwntwlBSl-i8q;-cb`{6Q~jR&mSx(SwJV@b|5T4)_$l zEF^0D8$ONC;IsG~K94Wpi}(_|!mhGw>^l1oyTNXNNS^~|6N^48mSwqt{fl^!dY%5r%An#Gxa%u*YYsQAPqx#m;=HAek~IpSy;W; zCM1*!LW!6(4<%v_XgO&qho49`C$v8^I1n+zY+b$Bs#$VEv*dChezoR~q?3@7%^K2; z2rUQcLGnmXl23Y(-lTv}s+A*CaUkVD!-0eYCC}HY)0FfLnR$nUNWl#0PX-9T8z>+= 
zxJ$Ew!a*fPeToW)wQn}4pl}oivP#Sqom)0;Su~=sMNx^esz)Nlq=Z)_nJ^j7CnhDT z$S5)zWpNP2ftsN=n?GqpWs)G{$-Ddxv6hsPApcR4ff9~P;5Q_BFqz0-NE&K=E$Ydr zP_zo>s@6R%XmfsuoF zwv&Sder<9C4_QovQe8nxh1ttV1qUV$%p6!auvQ49K17xY1;|&qSQ}q6g_+1Y)OqnM zm^YHILY)1Q1A)$m1U-IpilHUhN`zuu)u@thIB+mSmBYJ)&Wh|N-*VvO!1J;Sk?+X9 z*SjXNpB&)8#erKOwYhf9Ao+PsWZP<}z3*6q?cXvjg`UtUq0 z%BdnmZ7R^L0S9kY^SXW|>Qp1pgbJbj_Df#V8X;bDP_K%{e7#hCi#MtIh6~gpcqt`A z1y_c0kP&jTArlBMv}!aVWL)TD)0hy9!dn7$Qx9L7>Za6-vW0=GisjT#1H3QIPReK^ z?@4o1H_SANrc_gf3bwv>3+K3I~pb5*hrcbW% z`u5W%wE6#2Z5dK+7E(ccK|Tk)IOtvJV!;ynF`X;CI+xS=^ivKBIT#>R zEMfjZ91P~$xtsIb+&2Dmw~;^Z)@f)Ntq?j|TFybAQu+l4eFgeUJo?~rx>DFYx`Kmu zO6e*NikRV5N_-K8bRFG@>}7O4-M~RV4*G|N>nn%-n4!&o>u;soUri=tkAW<=+SD$( zyDFgbl$x<)1N={(n!)er53khsaWLeS`avo*^wsW#rhNIHNRQBCFKH*Fl3~@fI|=e~ z`YSy}f8(H-gAp8ze#O1B^up_eq8I5UP7Ea+3>WYiA;mDX1=LAk^Omq$SA}3SA$ecnu_jNr`ZVHj0c93c_X~!yYFmg2JiT zZ^F6QIdVZb8+$nS;cV=Qkc^%SX{ZKGp$&x-u<63F))u;z{y+~4M_5PcaeA7b zrRRn9UJjFm)eajHHa?7nO$eJD_FmZ3u(@Fi!&Zc?3R@GlF6?U9-LS_ZED9INL|Tzm zWEVL^Zjo2y7qt-Oih>KM4OLA~T|GM3;ze z5j`S$M)Zp4A2BduaKzAv;)vl9vm(BZ_%7m3#9tBrMm(2@Bx*^lBu-+ISR^)ySK^l> zN|Gcgk~)%hlF^d)Bx@y`CA%g2B)>?`NiIk(Nv=q)N$yD=NdAyKlsu9=mSSn7G+Jtr zx}{#JUz#XQk_O+F)|aMB8%i^!jive0_oZ{CJET8Ik4sNUPfO2AFG;URuSx$SeJFh- zeJp(vnHrfD*(9=AWQ#}^`C;VT$WJ0ajr=@vVdT=t<&i5RS4XamTpxKxCY9Bd^_C5h z70ZUpM#;v=#>r;M=F1k!7R$|#IvTJgQTrH24o8>-v zk~~FTNB*|FzC2ytP~J%1Qr=!(Anz|{@~QIa@)`13@>2Oq`D*!E`FiH%DsvN0YqTHz5q};6Bru;^^Q+Z5zMtMIU-g-4fohSeRQ085 zlWMbSt7^MyhiaGVTh(6G_o{uW2T_qx3Bjm_QA47pMJ&x?KH*dZ~K3dZqeH^=|b(^#S#d>ci?^)F;)a)Th;F)mPNFG(ow> zp{b>LOH*Hyu4$-gt!blatI5#})Qr)L*90{aG?O$_G;=fyH7hmSH9IuBG~a6WYQER( z(;U$Js5z@SueqqXthuVWuDPMPrMaWIr+FMrqYcr?(aoZJM30D`8U0!G$>_7uSEBDm zKha_>)rz#?T7_1nRcoWQI&Jfywov=7_C4)v?Huh~?I+q#wV!JjYFBDEXus5M(r(u7 z)b7^q(SE1>L3>{pr)#du(RI=F)eX`O(GAm;=*H^C>w-E?H&HiTH$yi^SE^g4TcKN} zTci6*_qA?|Zkz5K-S@ivx)Zvyy7Rh=y34woy4$+Dy8F7{bx&&G8qyj;wSJ_2w0?qq zmVTaop?;-)vwo|7yMBj$m;PJ*Ui}gMG5s(4lloKo)B3ae^D#8W8Iuz;K4wbHjF@>b 
z^JB_lmdC7#ITCX$=1k1_n2Rx&V{XShiFp?DJQiX}Y*?%~HX>FUTO(E<8yg!J+c&l( zc0}x`*fFsOV}FhPE%r?8xuC&h@EZJvL_?BcsNr1$GfXf{GJIrMY$!8S7?v288CDop z8P*uSG3+$#HtaEcXZXRe-*C`y$Z#eO#bv||iJKj_CT@S+)wsKH590oaduYT)wK3YL zGuAZ57!5|F(QLFDlZ~~Fsm8j-dd75PBV(4asj<1SgR#gs(imK7+-kgGd}@j|xlO62 z7N%CF)~2?m98-H!M^k50S5tS>5YsSIiD`ssv}v3vXqsS}YEzB~( zGSl*zWs~KA<(idRl~$Lvjy2ud(3)v&Y;9_7Yt6B?w|262wRX4mu@+eeSO-~y)(O_h z*7vMat+TC*tV^uRtShXmtzTNdwr;U*x9+fBv<9Eps7-8>*dlE;ZF*ab&0wozYiY}~ z4YF~z1-8Yua@!Kya@#7~IooyHP1_yYJ=@cGal9;E8Ly7l#+%|T@$UHM@vY+r#gB}i z6#riQ2l3P6cgG)yzYu>l{)wI1{q|&gp1sIkZeM5LVE@wowSAX;kNtc5KKpt5ZTrKZ z{crmd`@ad0AW4uVC=;R*q7!rprUX|)N){R9Pgas zoabES{N8!ddBu6ndBb_z73Pw=qFrWJrmKysovXd8ldG$1h^yE&BIp|J8s`eS-gixN z&2Y_fEpRP%mAjU>*15L0zIW|+{pdREy6(E^y6w8>df@uU^~{aj*e!LZxa+zL+ymXb z`xEyL_d)kz_s{NQ?i21`-51^0+&A2}-FMwjJjjDR)Dz*6dgLCZr>4j5aeKTTzbDyK z+mq&b%aiE|w)J%KboKP`--!2oBZ4SJN&!- zd;EL-hyADh=lmD_SNzxgH~fG3ANl|G{}WIJ;sWM?Eszj!1w4VYz}tZafrf#xS%D^j z+(5@b=RmhWULZfvFEBJv5*Qg66Brkm8kiB76__2E7x*-=Ah0N~A+Rm5FK{UEQ{YJ8 zMBr55OyFGLcHm(mP826bBuW#NiRwgcVvR(1V!gzMiA^itW)Q|zclxaAUVTk${(k`2 BW}*N9 From 33070a53c792d7a714a37015b33f9227420501b1 Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 13:25:03 +0800 Subject: [PATCH 16/39] update... --- .../UserInterfaceState.xcuserstate | Bin 17663 -> 17319 bytes LFLiveKit/filter/LFGPUImageBeautyFilter.h | 4 +++- LFLiveKit/filter/LFGPUImageEmptyFilter.h | 4 +++- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index c5c1602f4019e4389d5d91691d1052ad1a07caa3..50fa540c5669436aca65e753fd31eb2c250c95c6 100644 GIT binary patch delta 9296 zcmZ`-2YgdixW8xIHf@^Sq)BN;)9hxaNy;u$W*IG;Qc9baVxiE1RKT3e2HBA%v$X6f zpn#y@Mg<4R5?P|4s3_t9RD35bWjx=5{%-C)<2&E^f9rqFzq7%nnP8y;y~gg3v~Fg? 
zyCh)8q7F;L00S1p04b1xL?8#LKo8PDI&c9u$N+xO4zvdyKu6FCbO${^A20|E21CG5 zkO%TX2^a}VK^Yhg#)0R+1W*GwFa=Bn)4+5v2h0aAgOy+vSPj;Idawa(1$)3=un)Wn zUITA}Bj6}F2~L6V7vMBF1I~hT;5@hpu7GdAci>0x3-}G(1^<8tkO$*nJmkX!D1Z_u zhYFYi)ldVy&<8W19|m9$X2RyM1#Agh!Pc-1Yzw=>Zm>J-0dwInm=A};0yqK|!Xh{Z zmcy}d9GnOz!O8FiI1A1W!#Qw1TnyL4I#>@kFePv!+zNNVop2Z23y;Eg;d}5y_!0aB zehN>)Z{c-#1AYg;hd1F5@JIL)yp3WJ55=K)#77B8fP_ed#7Kh7$bziMhSHH8Igk^% zkQ-&94yY%}M*YwbG!*5b0u(~wVpM{jMeR`;8jHrEYE*+JqbXL@o9VkU&S}@clZ|m z72jdw*m#!DCa?ll$ck7oD`BOqoK>8zdY>R|nBfDN*3+3su)wkMm-_F{9` ze0De+VoTX-wuYU|zQA(q6m}{*jh)WUU}v*)*~RQ~b{$*Gu4lKh+t}^wKK9kf7Ssk^ zCdHAPXj0sIvVrU+XUN$|Ilc;V_x53hi0wvtl0JKjczZC*OGusYXQY`x7e2?FXTD@E zkhUa?v?J~7nO~UO%mL;Od4_Z$8Kf&wr?q>dSK?xo>HNTnZoAIo@lPH&kv{D@_vBMd z923v*nS0Cw##GPzN7lc9p|Vbp?xY8m)%u~VRhuW1L5;F9!ht3sAQmM2UqXNY2$=&w zOtMKYDx~*AAHbEFngH2%s%2IF5)JhdXNItRG%xP zFUg^zI#E%PRFTSV0A|Lt9vFd%^dke-0}HT{LF752KE-sPyX(kwVmbpSVMrqBAJIr# zE1qokHM9@R7keY0^AaN>oh))iBr^v=1Y5HghH)q_@`HE09cl40ao zQbxu{lEsHbok17IR12OV`L&=c86L4Drnq{7-s?d&=tT-h2^sYeXwVn*W2Ear4#*`V zNa1?W9}FNxBt)(5mh=w~ThG)m*-c}IgTklA7J(2MNlHm^@3gEL!$_G1>WI>A=GBqL44QprRyH)v2hos=VT4)Z$%`mHN>PFf(#Usz!`S zhCELu)`KNrDOd)UlSyPISxRatoUX`B*<#UJP#cAG9hqDU){_?+Al(>+bQ9Q2!erWG zNVkEVQJA)a9fS~WJ=g_ylPP3s1EkMHUQZN-_k-7;8gU35Cez7`Mj$N>KsKrQ7Wj}c zZ31tDcfh;gJ@7vG030K;$ZRr)yh!Gfd1U@3@DcbJ90w=BC(IS_8CgIUlEoxKUZU=} zgsAhy%FGP6+tWES;OR(z{T)4#F7i|1FTsUIKpNq{1TL>5iyF;$6?{GPk@<2$RdwJh zxW;7n%g&)6O^SUBu0IXv_u%H!4L^ZfPdD5Kzdl;BKQ-^~QIr2cmOW|md*D9sf$?Mo zSxyF$>S(Iq;V!n4re*(yh%wbc1_B7l%VZ^4RR=L-VGLPK){wQ4Ule(AA(ab7WF1L) zI15UlEb_pkQ#E}jK@}sdg-TLa3zJDbHO=;$G9c&Lf zkbUG;@>(721Uo~P>?fzmnutwR-kApF2AB`M-iBjg?O0XhE2n_xdUC@Q5t z8~_KBgXDE`s16Q>L*P(yn7l#Wj2hS!X{XUd{!vwg2leROqjY?zdufxIUKqU!Ub?) 
z-Xmx1SWs0^N1GNg+5NL~qF+tgM&QegX(NSc30w*X!sT!U1?vR)gd8HDlFv3$$X3DC za1DK~qQHGlPLeO^^YbWt`Ud#qns9g%U4Jv&G9Ww0SyehZ)KI6ETseD%oNCl%8{FQw zY-l~)#$=a^MNBSJ&J-~PG)?O0vx?DW^K-f|9;OB3WirG!8-fe&2EA(;AwgtRk~A`aNXa?!CHab6in_w1q5V~ElhyzoKaco`z@OS$Gbf zhhM@A?JGnu=Bj3|4e7(0eH4TK}t!R*)rQ;t3 z*{|?##$j{^#a-00R1^&Zm;lGGM06|8J zFk;Ca8aTgkAeRG!IhtS-IIx`q2}C`lYsXHx1tWSCj0lzGi^cRSXJq+v$=Lx(g&jiAatVC2jY98g4cZ~O~zpgRYmOhrAotYNL(icp9u@<`{Nv>4{+P?d@bA}>0_ zkpibo@&)oBFY+;bl)-dD0Thh5f<02D)PVnTKz;@_M=fYyOVkRrru`Wxizz|vQ2WU0 zU@wi71Bo19#LQ{b5p|-&I-_S$7t|GXL){S@@p*FuA`UG=T2>XR5XUxkkEB4-ZhixN zF;C>Ohf0iY>=*v0KS~xTWvD}yOebRDD79-OB4|8%E?+E*ycH`$>(PYBBA*(g=h4K- z9-keeNs%i)N2{msL@!W8vrv2bY|jB14N;I7l|=~(no1oW1<1c7rE!ll(X2>qW;#Z* znQZ!Ei)(Ct5zUP_o15r*^CI@t<3o-@M`d|+MPcY+Zb8S;2}b%~ z?v6e|pGLVm2do^h(U@pWiW@TQs?BdsZDQ|%$xk}&8FaSMaXH}SfIaH9;Rd&Dw8&Rc zx4noiq08tB2OJ!5a=^s__y2O+M_$_GXHUCfQ|r^-*VOuRqtAJod@kj2f&H)3MJBUW zQKN46qNcdd0e@t8hOPyAfGKUMNB?3515Cp&$bn1_H0MBzdWV^$qGv&6}f~l=KaG>)e zK7#Go^(1%2ZcMT1$bnAO2~z{AE;xXjKQ*8Qru%w^16@e*W98GbCXQ_={!P2c!tJRV zxE%+&)gqcw5+Vou>OA}mrU#iPXgltXdq&lb(r}#3fu2q3_KB+77t{1bTl+j#w?7{A zls1F$5DxU>KyRw<gqVCz`D)1TyDry_FBWgrXc99d> zWLpDo!QbN_9&7U>2dW-x^E3XXF^H)BZTa{&Oz&2<;NPiz|HOCkJ$xVkh5yF?aNxNH zE7P)G!-3~HFp&e3I53$5FKl5BunY@WNPjWQ(r0Yc)?p414sg-eDIASS`db^BU{puy zjl09F?BS7;7Ef03#Jh^JVK$iqQyNqfs1G6~r;-iRR(^wOY0@lD9 zIY7OC#(K!I7P5u|Gf8pZ(xSdqHGP^6hpdzJJQ}NR7|eQE9|vZ0U`{lgyF5A`Mf${h zBUv^<#AS{PvzctmNAnt|wPIUyU@iyNK0=YrVmr}1%eG_NvmIEP@aA!V#?}H3EUafc zv(KC%F60cWo}i?*wB#T z>awzSJwuh11tp<|yvz2cg@f(GfyE@{OD2!aW&5%HSxUj!fy@wg5IdM1!VYDJQi@W- z<~0%(c4*|Tc~MM6%;UgIkp-6CPPTv@K`B|s{<*oK2~{2cmlAbsAzKu^i+MO8a$9N+ zH!fZrHBAWzmOjRR6kGoPZhI^{j;&xTIY8+PO%f|PuoR#n9FC;v8w#QGOZHjVZ6v6_;0!De6<&xW}1KxZ<3K_lHr-&SMwQAZsXv zEaho+O?!z%{qrSu3A>a7^&Hs5fvu16S&_`HdNNJ3t6A!t8#u6$rm8WHv=p|E-T2gi zP3&e4(7!Eozz&DT%kE%zKQ-W$s05nwsD#E7Prs_tveK&3P-Wv0r|m=UO$gINjVL>( zd#EN<)UKj%BxPe2pR)(pgZ=1{qAElH!(&v8o=Io?^g!B{lF2&C0}s&4?vK!?=yQ72 zeGYwzzM>53cY2j=#hJJTZiU-mT8wc=dWYQ)55R-)5PFN9j|*@i4$=GT74)|H4g3oW 
zSv4Eh&?{#zn?diJGwFSEI~o|BXe5kdUuM5xud?5;*V*scAK0JR`!O&^7$c66#w5mc zjp-Yc7gHQl7Bep9`Iwn8b7JPkEQq208?!6sK+K_-H)4*&d=Yag=61|Kv9YmnvHaMC zSYfO*rM36;nXeLSDkCgvx~Kgy#}!5+){0OPG-`D`8H;+=TfF zTM|A?xFm=Xhy*HuPT&v(1et;sVL>ZF8^JSzu7d7@o`PP2K7zr5LP43JMleM%O)x_+ zOE5Dl#2V2B}BtmHMQ9X?tmRX-{b{X&-5hbf|Qgbhvbcv`AVkT_imsy(Lr2 zY%+(;CG*NMWC2+>S&nR=Y_M#oY?$m>S$M3hLRKZKkxi6MmW5^WWb0(_%1+3>lwFox zm0gp4C%Y;8QFcrAv+Qmno0yoWNK_^!C#EE75_O6CL{FkG(VrMhY@XOMv2|kG#CC~8 z6XzsuNIaT&L5}5Gxn1s%JLPV9b9oDSD|sh*U-=+;zPwC6T0TZTRz6w&f;=3SbMpD} z1@eXR#qzcCb@EzyoqW4|m;4p^UirK7_vOdrAIXo)Ps_iSe=q+*{*(M?`5*GT^850? zT8_d&LimpA-+0n4~aF!bvenyrlS~gd|~-I4L>FnA9ezU(%?gaMH4*-ARX& z?j}7@vPzLMMVY2FD$Po((yjC=Gn4^krt(?k4CPwoX60VxtIGY#gUUn7H+URQpnlBn9Mo>f(;CaR{Z7OPgMR;pI3)~Ys!Rhv~?RohiNRr^&3 zRYz3ERi{*^RcBS_RaaDBtG-cPSAD0tt@=&%Aeo;mO-@W!BrB7($*IX{$;M=JvO77L zoR!=qWqZobl>I61r5sN=m2xHJr<9*lZl~Ny`90;&lzZwJHBTL{PEZTgVzpGAsBW$v zrkTEWNF%KI%+y= zx@fv-251IphG_CMJ2i(iZ)lEa-qIqiSS!^gY8BdCZGpB>8`73&tF%+K)3r0Tv$Zd3 z=V=#c7irgN*K6yw8?~FYTeaJ@JGHyDN3_>;23<~ASFKy5+pasN`&{>h?u_o7?pxhm z-F@BPx(BIDDon+xF{!-N_*8YOHZ?UhE!C82O|_>wQ$48}se#n?sr^z5QzNMxQ%|RU zm-?q(thecd`fmCj`fPn~eP4a9zQ2B;ez1P1zFa>}U!@@+~X|}Z1Y1wIoX_M2Ir>#s|owhb@ecGY4chf#jJDYaj5M$sO_y(at zVvrduhL(nIhCD-!VUi(im|~c2m}Pj~@Q&fQ;Zws&!)e1g!5I_^R=M@sRON<1yoD<5}aE#;=Uu z8vimeCWT3B>SpR?dfpT$tua=; zHOtz=I?`HgeZfksQ>@dhGpq}&i>*tn%d9J`E3Ng`oz{1(?^{2#9=CpKJ!!pQ{lvrT#gJ! 
zz>(=_?Z|R;2s=7C1~`U0MmxqjDje0036AF-QysG$FFNKs7CKft);sDQ8y(vmI~=?o$hwJ-R{=zEO!TYXLnb3clSW|5ce>5fxF0E?5=c=ch|Tlxx;SGJ>R{^{gQiG z*!{A5m3zPYUH1p>kK8BRpSe%EFT1a}uez^!VmvaB$z%1{JuZ*e*LGy4e$;2<@t(zrM@y>xv#=k<(uVO>|5rm z^KJL-^6l~M^Ii1)mJyes&TwaR$QYPWmQkKDE~6@Ae8!QCV;Ns(eD6nLzsj%o8~ql) z%^&n<`8)VK`@8yk`1|<>`iJ<3`3wAo{xW~LzrtVTU+rJ#uk&y4-}3+Izw5srkOoEv z<^}czUJD!v91Xl3_&9Jp@JZl8;8Ng9;9B5%;Jd)>z}>)Kfd@elL_u*-9!v_Vf|_7z z&=52Qn+Mwmdj-4a2KxsG2J?c$gN4CRurfF)I6F8$xG=alxGeZ`aCLBPa8K~J%$Q7m VrljG%hJmp0mwmnQx9OQF`yUO|y0!oS delta 9272 zcmaJ_2S8KT*ME0B5(t@ughWCTvIt~DLex5N55xiDL<9*+1yP1ubDw+G5m##+fP2(E zT5D@}tySx&cBr*>J6g5YI@|9C#PR=s-&>Xx3I)Toh z3+M_mK`)RE`hvlr02G2Dpa>L$5uhB51eKr)j0KZH5D+j6%m#D7TrdwT0!u*+s0DRk z9as;xf^A?IcoVz@4uRm?;4t_Id<;GTUxRPJx8MS}2rhx|z-90w_yt@8e}F&1-{2wm z7h=dlF_b_ll))G%hjB0-YM=ocp$RsH&0ur*5^Mom!d9>~%z$lRTi6b^haF%K*c0}G z*>C_1z+yNQ4ud7|WmpBP;b=G}2stO6pu70 z0cnvA=}{swAS-epA4*59P;1l$bw!yd*bNOqMJRxZ(NHuDm7tf=a8!!Q&`4B?#-K@P zI+}rIqFHDmT7(v(*U$#E5p6=7(H8UudJ7#wN6>rdee^Lpjn1KO(6{IUx`-~L@6nIw z7xXK-hwh^X=plN99%FN8>SgEFOo);|X{oo`k33X?PBvivZEt|sH zSUc-roverTvrXAnY&$lS?Z#%YxojV{FFTMeWQVfl>_~PLJB}UCPGG09^Vtx)oL$Mj z&aPr>*bVF!_Knag)Dm4KWuZ%GV&rDBjl4y^B^N?N@b?h|h$dfR2@Ow<-&4%qfNUno zA$|As8>%?nH$XSq$BA>I+HHzn48Qo<{)#NbR{p5G}0n8FM4ts zTF&X`oM9rFC`Q0M1Po(d$2?*lGygFEl5Ql6bSFL50RSL#5Ma`i^r9WJ!yU`n^UCaf z{U*3vN0R5xwACOsJoh(OH$I+h&7(y^4JH_4@A^&uKV#^LVlwthW7?i(J= zljfph(d+YLX+UD=`zU!dFaRT6!DZ5q^beUuI%f)SFy>ld19mcy45|fA;39*`WTH94 zw5985$FyfU054&PoaE(8nuSL4ouTU@q5Rp)X7w-g^CceoQXXm%8AAnPY%M?=sy%24 zT7lLegA|ZLGK3VZ18qS&&>nOk0a8IK$)vCwl0vs+F`<^?b>eQIJ7cZ^S){lI^dLh+ zx5V+z9MGp0^aitIaB0Y!tE@aHts zh3U$S1A$s*9Fx`9btriGg|5RvDJdr-Nm&m=>lx2=1fxL3pseg})l~(BrGbo!ih^-< zU{vUmMC+<%`qhBZq>99!1>=|jOdcZuMZd3akce$W-zQSw%KIZ&YO{BwH-r05*jUyOB(* z0h`J6dc$rH8}<#bgUldvpBZ*H*cZ0W9oCRMp zm%(|mj4UUwlGn&e(w@8?Y9;?Q_&vDNV1UzMbb+71&uhtwdar_C!EXbfIww0&RY#v( zWwLr_Wz(BR9j=2LFB8uUURqb2)DG-ZkXU&aewP zMBXNc>tH7AM#J$P@(m40UtD}ZG#k!=bIEaXf}EtCTHS2uG=$Na zAfr1C7cv642rllEmF*}hs4D0#g@X%6mX_@4q0WOZe^DQ4a0y(R_cYSlQK>rmXc?20 
zo0T2DYn*)rT*H_*P+PnPSHgU_3a+LWIZe)x_sLoEolI7_M8f1S&-WGYiI*S6Agr zB%Rx}&rTRtJ|?SRbji>H`f31ul2cHaSx^`#&6n8eT_|6t2nF>e!904?&_AOhP#|IQ zB{7*DGD}7Wx|UQq#ttbj3XH5MfBA`3kP(?1Qzo)-fFYWC|Dt5d?kEM>H~=_6z1}GA zM4lHJ9Ho(Y9Dt^+vg<4+o++Am9KsyO0AS4v0A*;eeC_vdzpv)E^B%1L^M|luxe( zB#i?x9FTJ$Hr!NjAT9js4VAhz;z88bxh0h)G{N_xSvxe_y)%eLJV`S&<)Bf|wW}zE zP&G$OuLcaEv7k>K8i&TC2^@&yKs*Pu&kQ{oanyJyhzJK%98lMS8E7h@l1)fiuacr( zRpWXNZCvSWH1Dabv|bp^N3U=|!+`|K|3^JJK@>vEp2{?cEJrIipyNO)DQg^PC0a`h z1oS#ug;t|A9ME$hkpl(}7}ud1REz4+Iu4jPVC6tE2U5a8D=jJ?Qe74pQI%Ofq##_k zR1PSvE-mdAsH`j)8px>{H!{$#vO1%%q_VO)P}vY%TPcvxHV&9c{5MP<+JSbWU1&Eo z*B)j7+Kcv~{pbMw>Y1VFAicJy`ZtF773pNNra(v&^h#O8f!prqr>?UWxL#* zoWR(scK@MD9eM}78~P>9W6YQ6EggIG?p#K7>QGWz6{v6wv*?OTN&~tP~0oyZwe1bmzKTUHSoj@ni7aXv2z{LSK2Rw~9Iul;%Sq?b4Gh^F6D_IlD8M`OymwOEKLcH3~E zZMaF%OPlS8rpJHFE3hgouf!DY?WtT?o;Im`8=}djK|sTyVQixSYq9>BW;FOaK1ByM zVakTgWEq(qqqa^$niiH2f9W%;vyq z4p7e77>+GTHqCbJ0)^E>hXyLbQ`7ADG_Q4|IV?Y$=EgU^oX#IZ(!d5gaJrh&SMkcoW`Czgxr1F_HtLI6xVGdi~!FGAQ(R(&C_U zNO_r~AdLTp?Cl7rZ<=fx^*w;!W@P{SrVhW0kAydM9H`*Hcv4nR5%`0!(v;jPpWBn* zPw=PVJqZVBf~tCMS8~h#RpUg*@u_fqOt&TYi}1FD1EcHkX&8+uy^SI3IWU$c=x9ZY z_=ah|4o~w92gW_K%|(2PZc%ixU*aqH2S$K@#6RJm@h|vS{2RU+N-|~(r*L5%rg2~* z2e{BIV|sghBRtOU9GFbU!ME^j+V4;5i@*AJE}~hdq_`wd(YrI{ps^eXhMhy65kP&> zi|^t4p)MvZs>KgNolJUjOz_`^&zbPXn;8I_;eVL+B%h0^Y>v=tY%_8Fidg~Gl%-Y0tkBwIU67Tqu{2|_QdY*suyPK} z;lNxD%;UiPb*zF_!Wx$5Vd|SDGx z-`c0HVN+QzMFmR{xwHmJC?vzFy)Dh=<)u#&W>|?Qp};m{Uuxtrwgm^4KjAU9HLV)i z43;u0EiG0)r7*TV+v$0lVLP+IE*yB3qlXY>PnT_IJ>dm%V!N}wUg(w0=5XM34y+<& z&rC#njeMTc*nVvP7q;YV9-ALlHhg%&4(0$o525bzF#>i-Slc3&;=6_e>z-*lj2-^M zyrnFKEDiTM3VX3#-HEMWt6vZp&5q#!1;7SU){yd^XD&nQfk)~UJj+gGC$W?1#6hM# z%dt~JS%%ES9faY)CJyXA$4+CX)0Z>Ynd~eo5M<{vL)m#qA39{{9^Ao!T^!i_9CFxK z*aczau!}kH1_!n@!e)CzFfIuP<5CW6eF__PMHn_5*!CYN`G4{7M5~t?)M9I?Vsjfx zG7jtvtG+9Y8CvSH8^aTFbS;Du06jQkx3b&l87U3g`mIU6B}$w{L#USB&Sbs#JS4D0 zMzK5Co%s^ObK(?!yFwY3q#(6vR(99GxIj@x#gJiiCsT2X-OKLlO;bQsfB=TisF*}1 zh4IoYYb$!Fy@OVTAJT&C96iXsh`ytT*gw-l?5h~yBzi{OjwaAf^o%+aXW<^W7tLWq 
zX!;t8OYm@9MpM%$JPWU<=g>##3A2c`vdKX|gBP?EQ#{2vLMOB0eG^LKo3HVo*d$L`B5dh)EGMB0>?%B34AKj93-1CgM=U zhY_Dd9E~^@aXI2@#A9A0FNPP(Q}W_?YF<3g%4@;v!Ry7#;pOu3dBb@lc@ufT>Acmv z^}Kg@XL%QSKk=^e{^UpUC43D(nctM(oZo`qil4!6%Wu!`$j{~X<@e_gp#e;of1|5N^F{LlF(BDImWNJpeA(i1r`a(3k0$oY{ABEO3KDe{-d-y*L? zrA1{#wT)^Y)iG*O)at0UQMFO)f>BqZ{)oC4^&sj|)IS175GfD{gaWZZDu@v{1ib}A z1ak#T1giyW1e*l=1n&rr2;LWbDEL@#QgBLeM)0NJyx?oWo#-af*65zm{i6$`OQJ_d z2cx;@snOG;XGSlKUL3t7dRg>}=+~n6MPG=%DwGME2u(tpu&J<(u${1jFxW}hMVKw@ zE$k!gCmbNm6OI+G5pEZLDZC`SCcGoOE4(LsAbccZMLbcYNFWl5#G+(TPf@;Trf8w) zHBp^tr|3=5A<<#cyQ23*pNT#foe+H?IxRXYx+wZt^oQt?m=*KHkz#>ZC{~E$#A%=?7yTyCO`^DG9_auN8HLQdu ziIm7Ba*09`Cs9k{B^@QBCF><8B;QIdNiIu%ko+Y1MeT`gTJt(9(+Zk7hOO1DejkRFg8k)D*ElU|eFk^U{c zFMTMBmMLU$GPNvTmLMA-E0T?pjh2m-jh6*woNTIWx@?{-B-B;# zV$3m?7;B6#rd3QvOxu|DF&$%i#Po{EiOG%W8`D2#dd!}fD{`qkL9Uk@oTzDb^BejZKNQ$2w!(v8l1%*dDQyV_%Kk8+$VLo+3t} zQ|J|m3ZufVa41}griu=VZi;M0aIm64QK%?Vj8ar6Dizg=DT=9zX^I(&kYb5qsbaZe zt)fn`Ua?VeQ1O=HZN)o^BZ{MnbBc?K?-buFeo$OhTvzJZ2D3>c=RjyQSR_;+AQJzwsQGTgBulzcw{8o8U`JM86 zPzZt>f7o&>bvTD>VKPnCa4M9gx5sUMA<~wBuO(*^NMD8G z=D6mh=9K1)=1a|a&DWZrHNR@EYOZU3*WA?H*4#-@CA3T!7ED;2uri@8VOzo*2?rA1 zN%$b)=Y(GqZYA7FxSMcK3$zMtoK~%k*CuFn+C;5UYt|-dleIQ&Q*Co?3vDayEbUV5 za_y_ymD-=Qx3zb)ceVF)8M-W84_z-^j&8kfw{EX)zwV&!fu5(2)C=@NeOG;7eSiHx z{UH5t{dj$FqJFZT=%?tX>1XI?>6hzY)vwgA(y!6i=pd;vb2(68}v6EAj8d`-u+|9~(pli9u$N8x#hWA>NQ+&>IYf zmkiwv0}OKws}08umkqxgZW<$uF-Cz#xBN8V^3qY zG1u7FSYRAstTa{|#~3FXCmT8ARO3A3tH!m)I^zc8X5&ud9^-!Fo5n-NZ;gMLWG1c2 zWAd8(rsk%WrVLX%Q%6%5Q#aFKQ=utf8fF@9Dl?5VRhp_zV@=~tOHJ!c2Th-wzBL8^ zH1o^`v)|m^+}GUCJiuIJ4w#3UN17|mqs?Q@oOzacuK5-7B6G;R*1X=l$-LFP)4bcf z&wRlAmie&xUGwMWQ|7bg^X3cY@61=sKU$m?x22_}t)+vdvn9*Y(~@J!wG6S8TFNaI zmTJpb3ul>TnQ570nQvKOsk7_|TJ~Drv3z3r%yQgv(sD0JoMcS$Cv{C4lvJ5CGigrJ zyrcz5i;})c`XTAD6(g!Pp5jP;TA-(;AKle;BvNmlww%xGZwEbzjYrAiIXy@Anc9C6bm)jL~gWYUTvZvS` zc9*@Wy|ulqy@S28y{o;iJK!{!J&9Uh0*(aF)p(aq7_(bJLb z$aVB{40Pl>3LI6A`Hq#2-HyYKGmg8?C}*rQ(P?rfIa8btr_0&b8E_7B4tI`lj&fEy 
z$2zAw=Q$TR7dw|aUw5u?);QNWH#ko_uezX1;1ao{t{9ij<#c&mUYFn1+||;R;cDl~ z4!Uw({agcG`L04&z%|rW;u`Im;#%d}K@@9=^p2v;hyha;9lfj;$G=q?OyAybFX)Q>b~N>=KkG% z%l)T^@c<9>U{8W4)ziV#+cV4)oaUM7nd6!7S?CFQKJuLKobsIYob&wP`Q3BJ^S9@| z=TT}zsyNk=nwr`xbwKK{)R$AsQp;1vroNIIO5L2gJN07fl{8_RGOcG?|Fpcc!D)qQ zwzm+>NRgf}wim3ie} zg;(Wm;?;PYd;5APc$aurc~^VadbfLbdiQwudk=cw@}BU1>%HW??ES&}v-el;E$?0L zeeWahKR$s^;*?`q&_Koq4^^Nz< z^v&_j^DXc#_AT+P^40o+>wTMiTYLw6Z~NZy9r1nS`_y;Lciea0ciDHtciVTzch~pO z_fI-VN9nQY3F*%CcIkuC$E8n9pPbI6PfK5wz9D^k`kUz&(tl0AntmhwX8NCgzF*)M z`K5ljU+LHR4Susf$?x_1{muL>{2l#0{CWO-f1$s~Kgm!0Q`-2a`)B$W`a}L@{uTZj o|1tj;{s&D_Q)^R4)10QoO@}ufRlmPsAZ++$_c#1DUYl0_52JwXl>h($ diff --git a/LFLiveKit/filter/LFGPUImageBeautyFilter.h b/LFLiveKit/filter/LFGPUImageBeautyFilter.h index d698db18..34ee2e14 100755 --- a/LFLiveKit/filter/LFGPUImageBeautyFilter.h +++ b/LFLiveKit/filter/LFGPUImageBeautyFilter.h @@ -1,4 +1,6 @@ -#import "GPUImageFilter.h" +#import + +@class GPUImageFilter; @interface LFGPUImageBeautyFilter : GPUImageFilter { } diff --git a/LFLiveKit/filter/LFGPUImageEmptyFilter.h b/LFLiveKit/filter/LFGPUImageEmptyFilter.h index fc6ac327..d25a9032 100755 --- a/LFLiveKit/filter/LFGPUImageEmptyFilter.h +++ b/LFLiveKit/filter/LFGPUImageEmptyFilter.h @@ -1,4 +1,6 @@ -#import "GPUImageFilter.h" +#import + +@class GPUImageFilter; @interface LFGPUImageEmptyFilter : GPUImageFilter { From 6c4fc9a98b6ecf9a3c78bcced413e7161d10f0e4 Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 13:27:41 +0800 Subject: [PATCH 17/39] update.... 
--- LFLiveKit/filter/LFGPUImageBeautyFilter.h | 2 +- LFLiveKit/filter/LFGPUImageEmptyFilter.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LFLiveKit/filter/LFGPUImageBeautyFilter.h b/LFLiveKit/filter/LFGPUImageBeautyFilter.h index 34ee2e14..c83493bf 100755 --- a/LFLiveKit/filter/LFGPUImageBeautyFilter.h +++ b/LFLiveKit/filter/LFGPUImageBeautyFilter.h @@ -1,4 +1,4 @@ -#import +#import "GPUImage.h" @class GPUImageFilter; diff --git a/LFLiveKit/filter/LFGPUImageEmptyFilter.h b/LFLiveKit/filter/LFGPUImageEmptyFilter.h index d25a9032..a77974dc 100755 --- a/LFLiveKit/filter/LFGPUImageEmptyFilter.h +++ b/LFLiveKit/filter/LFGPUImageEmptyFilter.h @@ -1,4 +1,4 @@ -#import +#import "GPUImage.h" @class GPUImageFilter; From c89eece287cc7db6a77fef87a4480b3021ba0d0e Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 13:49:59 +0800 Subject: [PATCH 18/39] update.... --- LFLiveKit.xcodeproj/project.pbxproj | 51 +++++++++++++++++++++++ LFLiveKit/filter/LFGPUImageBeautyFilter.h | 4 +- LFLiveKit/filter/LFGPUImageEmptyFilter.h | 4 +- 3 files changed, 53 insertions(+), 6 deletions(-) diff --git a/LFLiveKit.xcodeproj/project.pbxproj b/LFLiveKit.xcodeproj/project.pbxproj index a0fdc7ed..1e119a87 100644 --- a/LFLiveKit.xcodeproj/project.pbxproj +++ b/LFLiveKit.xcodeproj/project.pbxproj @@ -78,6 +78,7 @@ /* End PBXContainerItemProxy section */ /* Begin PBXFileReference section */ + 0BA5A0CE1F07E1D707F69735 /* Pods-LFLiveKit.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-LFLiveKit.release.xcconfig"; path = "Pods/Target Support Files/Pods-LFLiveKit/Pods-LFLiveKit.release.xcconfig"; sourceTree = ""; }; 84001F8A1D0015D10026C63F /* LFLiveKit.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = LFLiveKit.framework; sourceTree = BUILT_PRODUCTS_DIR; }; 84001F8D1D0015D10026C63F /* LFLiveKit.h */ = {isa = PBXFileReference; lastKnownFileType = 
sourcecode.c.h; path = LFLiveKit.h; sourceTree = ""; }; 84001F8F1D0015D10026C63F /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; @@ -139,6 +140,7 @@ B2CD146B1D45F18B008082E8 /* LFH264VideoEncoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LFH264VideoEncoder.h; sourceTree = ""; }; B2CD146C1D45F18B008082E8 /* LFH264VideoEncoder.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = LFH264VideoEncoder.mm; sourceTree = ""; }; B8CB02D2A92EA1F5A262F154 /* libPods-LFLiveKit.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-LFLiveKit.a"; sourceTree = BUILT_PRODUCTS_DIR; }; + D0BB7E7CE5403C4911E026B9 /* Pods-LFLiveKit.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-LFLiveKit.debug.xcconfig"; path = "Pods/Target Support Files/Pods-LFLiveKit/Pods-LFLiveKit.debug.xcconfig"; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ @@ -181,6 +183,15 @@ name = Frameworks; sourceTree = ""; }; + 4FDA0F424950EEA14E09E312 /* Pods */ = { + isa = PBXGroup; + children = ( + D0BB7E7CE5403C4911E026B9 /* Pods-LFLiveKit.debug.xcconfig */, + 0BA5A0CE1F07E1D707F69735 /* Pods-LFLiveKit.release.xcconfig */, + ); + name = Pods; + sourceTree = ""; + }; 84001F801D0015D10026C63F = { isa = PBXGroup; children = ( @@ -188,6 +199,7 @@ 84001F981D0015D10026C63F /* LFLiveKitTests */, 84001F8B1D0015D10026C63F /* Products */, 0C07D14560B9E91EA1B59306 /* Frameworks */, + 4FDA0F424950EEA14E09E312 /* Pods */, ); sourceTree = ""; }; @@ -372,10 +384,12 @@ isa = PBXNativeTarget; buildConfigurationList = 84001F9E1D0015D10026C63F /* Build configuration list for PBXNativeTarget "LFLiveKit" */; buildPhases = ( + 59B15CB863CEC87780FC058E /* 📦 Check Pods Manifest.lock */, 84001F851D0015D10026C63F /* 
Sources */, 84001F861D0015D10026C63F /* Frameworks */, 84001F871D0015D10026C63F /* Headers */, 84001F881D0015D10026C63F /* Resources */, + 49F3EAFC1828A54738134F66 /* 📦 Copy Pods Resources */, ); buildRules = ( ); @@ -456,6 +470,39 @@ }; /* End PBXResourcesBuildPhase section */ +/* Begin PBXShellScriptBuildPhase section */ + 49F3EAFC1828A54738134F66 /* 📦 Copy Pods Resources */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "📦 Copy Pods Resources"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-LFLiveKit/Pods-LFLiveKit-resources.sh\"\n"; + showEnvVarsInLog = 0; + }; + 59B15CB863CEC87780FC058E /* 📦 Check Pods Manifest.lock */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "📦 Check Pods Manifest.lock"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. 
Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n"; + showEnvVarsInLog = 0; + }; +/* End PBXShellScriptBuildPhase section */ + /* Begin PBXSourcesBuildPhase section */ 84001F851D0015D10026C63F /* Sources */ = { isa = PBXSourcesBuildPhase; @@ -599,6 +646,7 @@ }; 84001F9F1D0015D10026C63F /* Debug */ = { isa = XCBuildConfiguration; + baseConfigurationReference = D0BB7E7CE5403C4911E026B9 /* Pods-LFLiveKit.debug.xcconfig */; buildSettings = { DEFINES_MODULE = YES; DYLIB_COMPATIBILITY_VERSION = 1; @@ -626,11 +674,13 @@ PRODUCT_BUNDLE_IDENTIFIER = com.youku.LFLiveKit.LFLiveKit; PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; + USER_HEADER_SEARCH_PATHS = "$(BUILT_PRODUCTS_DIR)/**"; }; name = Debug; }; 84001FA01D0015D10026C63F /* Release */ = { isa = XCBuildConfiguration; + baseConfigurationReference = 0BA5A0CE1F07E1D707F69735 /* Pods-LFLiveKit.release.xcconfig */; buildSettings = { DEFINES_MODULE = YES; DYLIB_COMPATIBILITY_VERSION = 1; @@ -658,6 +708,7 @@ PRODUCT_BUNDLE_IDENTIFIER = com.youku.LFLiveKit.LFLiveKit; PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; + USER_HEADER_SEARCH_PATHS = "$(BUILT_PRODUCTS_DIR)/**"; }; name = Release; }; diff --git a/LFLiveKit/filter/LFGPUImageBeautyFilter.h b/LFLiveKit/filter/LFGPUImageBeautyFilter.h index c83493bf..d698db18 100755 --- a/LFLiveKit/filter/LFGPUImageBeautyFilter.h +++ b/LFLiveKit/filter/LFGPUImageBeautyFilter.h @@ -1,6 +1,4 @@ -#import "GPUImage.h" - -@class GPUImageFilter; +#import "GPUImageFilter.h" @interface LFGPUImageBeautyFilter : GPUImageFilter { } diff --git a/LFLiveKit/filter/LFGPUImageEmptyFilter.h b/LFLiveKit/filter/LFGPUImageEmptyFilter.h index a77974dc..fc6ac327 100755 --- a/LFLiveKit/filter/LFGPUImageEmptyFilter.h +++ b/LFLiveKit/filter/LFGPUImageEmptyFilter.h @@ -1,6 +1,4 @@ -#import "GPUImage.h" - -@class GPUImageFilter; +#import "GPUImageFilter.h" @interface LFGPUImageEmptyFilter : GPUImageFilter { From d460a1105ef50fd1d3c67d76e54ae3d4e39be897 Mon Sep 17 00:00:00 2001 
From: chenliming Date: Mon, 1 Aug 2016 13:52:51 +0800 Subject: [PATCH 19/39] update... --- LFLiveKit.xcodeproj/project.pbxproj | 35 ------------------ .../UserInterfaceState.xcuserstate | Bin 17319 -> 17882 bytes 2 files changed, 35 deletions(-) diff --git a/LFLiveKit.xcodeproj/project.pbxproj b/LFLiveKit.xcodeproj/project.pbxproj index 1e119a87..d2f61a0b 100644 --- a/LFLiveKit.xcodeproj/project.pbxproj +++ b/LFLiveKit.xcodeproj/project.pbxproj @@ -384,12 +384,10 @@ isa = PBXNativeTarget; buildConfigurationList = 84001F9E1D0015D10026C63F /* Build configuration list for PBXNativeTarget "LFLiveKit" */; buildPhases = ( - 59B15CB863CEC87780FC058E /* 📦 Check Pods Manifest.lock */, 84001F851D0015D10026C63F /* Sources */, 84001F861D0015D10026C63F /* Frameworks */, 84001F871D0015D10026C63F /* Headers */, 84001F881D0015D10026C63F /* Resources */, - 49F3EAFC1828A54738134F66 /* 📦 Copy Pods Resources */, ); buildRules = ( ); @@ -470,39 +468,6 @@ }; /* End PBXResourcesBuildPhase section */ -/* Begin PBXShellScriptBuildPhase section */ - 49F3EAFC1828A54738134F66 /* 📦 Copy Pods Resources */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputPaths = ( - ); - name = "📦 Copy Pods Resources"; - outputPaths = ( - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = /bin/sh; - shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-LFLiveKit/Pods-LFLiveKit-resources.sh\"\n"; - showEnvVarsInLog = 0; - }; - 59B15CB863CEC87780FC058E /* 📦 Check Pods Manifest.lock */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputPaths = ( - ); - name = "📦 Check Pods Manifest.lock"; - outputPaths = ( - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = /bin/sh; - shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. 
Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n"; - showEnvVarsInLog = 0; - }; -/* End PBXShellScriptBuildPhase section */ - /* Begin PBXSourcesBuildPhase section */ 84001F851D0015D10026C63F /* Sources */ = { isa = PBXSourcesBuildPhase; diff --git a/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index 50fa540c5669436aca65e753fd31eb2c250c95c6..52415b0a0f60f1ad2a3811bffd6536fcd5df2908 100644 GIT binary patch delta 9192 zcmaKR30zZ0*Zw^-b4eg1Bq1au7(yTmOEwY+iFLz$L0rIHM3AW9f&wnMWv1n@ao%`)Qy+VW>Q005lkl zLZeXyszg<24Dut2I5ZW_L35G+4YUL;Ma$4iREu_^U1&FYml=Zgp!d)R=tFc69YH72 zx9B@`2AxG0&_#3wT}3~mU(j#p7P^B8W^ou6U?C325mo{0na zExa7B!0YgOya8{)JMd2YKK=lIgg?c{@Mri-d;))iPvIZ%Is79&k1yk&@HKn~|B3J7 zd-y&<1QSA75=O#F6p<1oi6cqGK#asnY{X9dF5)H4NL$j5v?pCjH`1N-B_*Vcj3gDL zl2noLWHRB%bTXUFBMZrLvVyE3Ysq%9gX|=`$YR*hTEyYz@1P-NtTb zcd+}}_t*pM5%wtiabN{*hJU4HfeUz2_(r;g9-u$c^MOI+a#(MwEtHxA!%`dV8qEHV za5vgxS8rhwoA8i%#5`vHp`BKqkJpBkv7 zqiLgWVJy>+Dd5d94#q<@Oz2bT8}1uaRa!ohR?$jYL8}{J>o04hngU!9Za+{uhK{X; zsW6R>qvHefWSRIics+1SmcZ7*9GDxpDQnSm9=yqzR;>yequ%5JSP}$vAuNK$@D`m! zC(|j^zZ#apGI$%7Q%YZ_E9r(8K~)6;@&!?KuqFuXYRc8XS~@iZ?8YFln_x4YMrS<- zb{p&rqO%=#&{yg7TG$1<=?pqE1Z+?L0r>Ex-h*(6&Ze)`W0xEPy+PNH;VZ_p0X~6G z;TU`dpTlwZ0=|S3bPk^{!#8jWzJ>4Ld-#F(&qZ`GeTy!o z%jny5IbA{91osphXsyr$<|yI<0~B^wrnhB!W~-JSx2si~R-UXZkE3lXN2aF@FPGtM z>uQtTwpF^v>B-K_$Z~noT^ViN?!X#_5i$eY75V-KHLt=?FJt^O{8Inon-I%x!f&hS zl8`6C?{K^SGl$6YRn_rt{$O%@hG7O|42)DJ%TiDpBdN9xdUG=iVyDE*j@4|Nx*2H1oe?W}(^q#K-9u!HJVvNJBo)bZ?x{ zF!4MT_^&|=(IT{%o}gdRll-9Ox9bNDViJsqN%#i6&4|!)w4zsTo~@*)s;G+$4J;lt zymVJrO#v$VI^=uk9ki<8neVmcrRw-sHB4^L+`QmZ!|HWtBV$_2W3dLUMP+C`+Q1`n zihfIv(C_H?Yk6EYqb+DF|Gk+<=LdS4p5?!P2x62N!YEVgx9;Wj*@yNA^{FZy;S0K! 
zV$r1^F3~gfx*S07*Dvd<(4Nn-3OW_aDi;f-?f%UwPQ}m{`%j1ZF9$A$ zw-0QI7e|~AJzse)-%N=96MFYkpdh|La6Qz%9{4CeH{xcf{Y#n{u{!js zq5;J}(IdvR4&6of(0%k5dVu~$59v+%8@)w;r?=@J^v*i;82y8uFcNe>=<4)OdY9hg zH-2l^MqLv4u{cPq`@DPuv9Jus@$nGLaWq!o7_7u99E;UhL+{hS=mYvUeMleC$Mhfi zWCL!*=x{vN;RLMbTS?eJpVCMUQaKpN!C;P$+5FQU4)#)QL5J3D+LV-5l~-g}_=*ap zQvNwFZD{$p+@i6iLyGwKg;Hg9Wo2n)Rnf?*ddbS1qU!Q7RfSS3FB~XLPzL<^@&11N zLw-?lPEoOM_`g742W}ixIvqQ)3uoX=?8aHxgS{Lu8~_K11Iz*8faM^J0|5uZ4a`2= zB&2^lZh^CLOWcZPa1j3Q2KYU=Iq1Ye4hQ)`x0B}a%C`0uhmRRD#24B`U}557L*L;c z9rG)Ez6NEw;BI^p!(BOusKMPi5Cx{1o$Y$!-odxMIEbvleK-(PZGPvX(uPU_7vLgB zUWW(ZLd=6F;UJ0wX&o-+0WRS{7D6?H4{wjm#RJQDtKnfB$e){SBp%DV0xrj+@Mv6t zD{&Pb!$C9$3Jzj8P;#K+AeIC5Iy?@K$JKZO|2GLw=0L+i90x`Y(nJ589Jm7SCok}; z29=Mn6^$A-+-DmTTi{9uyjpb-ZKTF*MXFsSt+ z4zw?%2fPF?4W z4on3ZG=!aA4!WPMMcW zQ&D&*h~aPXcMD8Pev0n{PmJ;SG(Hp1nXLFMlgt06MLb_Lu#9aI|12`$uY#e1pSm8D z{{(k)C9opZ;J=P<^2Hhc8UKQB;9oh&;=scJPnE{2@o)GR+Kg{=@Cpan^aqYFfVI7I ze1ofcmior^>0UZyXsFeh{m_F=<3}FBXX; zfi%-|(3XRCwCvfYlUhuqp)}aAiixDWG%A&(anON-jyyL@Eph!FB>kltiDVFWP_1B1 zOL(k1H>lM(sMaf_2?x0xbbGE=bJFsq1zM5T9CYEJE5AULHLhI;lJio3XTp!?!%QA6 zE0jj@#t7Nq-x!tDqW__g`rnc~l23XtBGQv-Lwb`wf!w5=#Qv0zk3Bdj{GRk9{rS!U zGJq8F0zFAFGlUGn#{vhEy7>EZFo1)eFVKhh$lxG-NGS*XIOx^Doxb&u598g94CnY$ zPXl>Kd5}CD@R5%XtuJup|K>zUuVxL3k!oJGVtkBggO71g5LA8u<+;WmGs%?TlD+xe zP(BXu=P5FkOydu;4Vk0R91)evJ4-EjmC1eieL!Rmk03M1%tC3>3*xwz%nD?i?SAqa zne)uC=5kQfV8J(o7MxEOa8S%a$#V-XB1>Mh5F8}S=xh!K(Xyl#(_>%qrj?}T{}ZhX ziu!`0BUgn)?G2u{j%?!NAX!f~kc}J+;b15SrFCR8*+RB*P{zSq94z5OVU8_9#q%UD zbinhKypUiM0$psXfZY~8xM9xSAjl6T1-vX|^5`^kIc0C}H$z`-yMcms~$U?d0S z9E{>%GzS$NRPu?F93mf)!~E}2@-hGYX^_BG9PkNeY_K_wgGIsL#ew1J+Nc3Mraen5 zOZmfTcRqatW~XoWlW+e$t^_Uj!wc=Rd}T(?aWMYh(u`by{5o=xTq2h_sOEsrSM)j1 zSIKoAJ@ON|#syQz#9Ej}exZCso7JI`cgAz|_-xy!*64*Wc` zK6B>#$=~GhGnsmkf5;OKI1U!lvVSLHfeqs$8jD!W5|-s)DhJazc$I_ct62dnWW(7A z4rXxh8V9d)FeenVO3DX~8NsKGobo|M!83PdzrkaM5AW=&tSlPh%de^)ox~QxD)*K>!k3Av#A`+eFiIQ zWu5=)4qa>po5}K}(Hk7h=YaS9z`t~0y+Mr|b1?6J)41tN8jE=nvsw<`e6FqBUC=xx zJ6LigY*Z-EE 
z#QKBsli5lR*79;edA`ZZ*HLZH!J%{+n%l@b(R6m^bJb>Xu>QG!zs~Z}Zq=&5V|#+1 zeUn}ATrR-Drsr~tS^kozVH3fHp56bj%h{C;0h4`)gDnjnQp=N>tz%cSYdGLT)((!p zv}q`S+4bzE7egVtncc!W@-_~(^RSt+3~@WzcV80N!|vr^CkMOueqWX@?tS*)OZ|t~ zEgZaCKmMJpUjFE3^;vyE8?0yfjG3F)$ye+x(sKT_Pe*(wyY-7scKAG~1I;uzg8? zGK`EMAyC_@eN)!k32sC;a#DKf?bE zzvqu=7SSo9b3~VjZV_)stdH0ju{mOE#LbAmMMy+MVIrX@LKH2E5vfFKQJhFC@`wsW zBSectD@E%?8$>%r2Svw3pNqZ_oe-TAoe`ZAoflmcT^3ytJ&80%x+42UmP8JZtc;|Q zvm;-RoEteWa(?8p$mNkMBUeS%My`%L80o(jc~{&>Y!utYZgDGdjyPA`Roq>iFD?)l zii^a9#6Iy5@hEY%c&d1wc!_wKc)56`c$IjAc$0XGc$;{Kc$fHu_?`qy(j*y@W|B^l zd`VA9Z%JQCe~C{rL{cglCK(|qmyDNilG&2Ql6NFEk~+y6$vVk)$xg{`$sWICpX5Es z5y=;lGm;CEOOpRcu1kKA{3`iP@=)?v@+69hY8ursDkmy8s%zBrs0C4PN3Do@C#oi@ zE^2es)~M}KJEL|-?TNZ9l}ejShf2pt$4e(jCrkZOPP$0CQd%cnBV8xmAl)N9ApJmk zP3a-_cJLi~=bHig1NUAy&jGY>MuReu|-r5sGrfXvJ8?c*O+8 zB*kRK3`Ib(Rx0FkjwaPWhb;=FO_muA|KU5x49#$S# zo>pE|URGXF{-nIAyrsOYyraCUe567uqpG>8gQ~l#hhNo8)kig0HAFR3Ri=7XHD9$* zwOF-O^|oq-YLn_+)nU~s)px2NRA*G@ROeL}RhLy)RCiVPRS#4TRgYCqVwqSJOJc)f zrLl3b8L^#W2gZ(xog2G0c4zFZ*t@Zh)T~;rR;%OGTD4AXQYWiZ)fTl)U8J6*UZh^B z-k|nxQg2ajQ}0mkQopM{to}@WT>Yi`EA{v4)9SP8AJrE$0!^l-NK>X6t(l^ku9>Ns zt$AIuK(kP@ShGa4OjD;>quHd{r#YzkNOMHzf&j$`8lap7_DICWfNT&ngB?VH-A+I4>IR_(jm!`iR3Uu#ckztjGpJ)=FR{aJfM zdsBN$ds}-)dslluJ}$mRd|CX$_!aSM;`!>47wRMQk$QJ>g!f@3nFvb~8MwiiT^ctHOn;Wx@rN*(wYU3oM-#FFys&Te)v2lgbzsgu=Tx;B7 z+-}@u++*BlylQ-GQkb-+1XGg9XmXmeO|4DsOdU;~O|P0Y>AEzB*=oy?uhUCiChJfXgUv(D z)#gcNznL>nGtV(E^_y3jYt5_8>&#otJIuSxd(8XH=gjxaPm&>-B!?wOCo7U;l2ys+ z$!(MSBo9rVnEY1qvg8%XtCH)I*CwA%zMOnD`Fip%$@fzrMUWDaB2JN}Xi^%b7*krO zbVwPRGCE~S3YYR~%8Zl)DMwSzrd&+9m+~YvCDoSNCv{Njid6r`)Xk||Q+K4km-<2K zq13~vXHu`F{+@a#^|lv?*!J)ApzRn07DiN!n8jSR@viMPX4{G?qpdhoz~dg{7sXjitS%qb1MM z+tSZ6z%tM>!cyV4R9VJaCR(OgILkE49Lpk0t!15MgJqLtyJeSUk7b|bu;sYrwB?ND ztmQ|`FP58@Tb4g8cP;lVPp!l%utr!Tt!it$HNl!_HCt1yR;%6WwYIc&vF2NQT6m5+hsdsJ7W99cFcC!cGY&x_KWRT+g;mVwuiRI zcGhmUXV?eWhuG)Z7ufgNKeivUe{TQMe$xJp{ha-h{fhmX{b&0<`$PL<`x6ItunwUk 
z!taQ7Bsh!?vm?b}b2uC>N2a5hqm!e%qlcrnqo1SL;d2ai40DWd%yrZ_jyk?}Tyv#GO%vz4=rv%9mGv#+zjS?nxv4sn(` zhdaxiqn%Tn)0{J$vz>1^=Q|fV7dtOIuQ+cz?>QehAGsJ8c7?fwt{7LmOYbtc%&t_I z%jI@?T}@ogUD>W4t|Hf9*Ct@z{5B3N>B9Fuq?NNHvo;XhQ7o$iDS|ndy+^MsnH_9o zDA zpahHprJxLq0TaOUU=pYX9GC{CgBf5ZmB1AJ`9G1+RfO z!BKDwoCK%9m*6xw1I~uQIdC3a0$0H|;5+am_yzn1?ty>711NwoFc!wacqoKYsDvt* z1hr5Heb5inVE_hU2xh>RuoY|#pMh;)Ti6bEgWX{d*c0Z!;V=)5fcbDFEP#b@EF1^N z!wK*OI0a6HFTy!+E}RD!!5Y}R4%Wj4xREJ=o8UJ13fu{I!F});d>6h4KZGB_PvK|q z6#N$6fH&cH@OyX*{s4c3KfyaF8VOJgibZiK9tn{MiID_Jc`uZQJtzZpM7>ZJ>W_w^ zVJH{nqasv{O3-tt11dw~(F9b5s?k(54b4LH&?58_T837mHE0vsjJBYy=oNGn9Yb%S zx6wQ3UGyG$AANvMp)b*CbP;`xZlGJ}Hu@FaMSr4u=pXbij=`}w4##65*5Jf2Zh@1q z7AIp1PQ`wljsv(2Zj0OD&bSNij{D&LI2RAcdAJ0R!lif|uEbODR6G;U!b|W{T!VRB zj~nntya{i{TkuxA3-89S;RE<6K8BCu5AkRCG`@(h;hXq7d>j9Y@3Jv$EE~thvqDzH z=80JeD`jP@l2x-wtbui~sjQQAu>m&7hS+v&54I=Ui_Kztv)ODOJAy4@OW7*6nw`qN z$a3s7b~-zQoypE(=dugf8g?bSfvsih*lp}~b_ctkeU;yb+M+9@nEwq;iK!zS$v$$1 zoaL+WHITD!C@Vpz7wNlitiZ_forO~Oe}G{QGM|thq$f!yZHU&`{*B&>HCv{V!7q56 z$zE@IaOx>0hKXh3Ks1Qw+Y1#EAOs@jAdrwO(wk1|!;cY}j4IHAu{1Dyn7zzCW&UNnJyrW zFhoHH)QG!9`x_qz@qNT?tU=J4t^q;2F>cv<2vO*2!)ESLkmYRk$)$d?o1D+8Vs*vs+p|jsUtwaQ&S5;5gA2FNpT-z=By_sg3;i) z;aSX0U~X$&ANXZU;Lffb0OT5JI>*unX)a)5!Ej zEC-A}0A7Ep_b@m@W|CP=aM>DhZZ`2P@F8Q_4BiIsfOo-r;C=7`I8Nq}xnv%hPZp4c zWYK2u5%?IK0H1(QnXBM)vY0F(HH0TGk6u52Jn8~qQifv<->a$|N; zWj$@V&Sdq^%BDY>=YI=sJPr5v;8t@(xXA%Of!ob3O$~RzuTQS|dxR8!kQGmo;xBL? 
z_`yW7imW7qNL3_G1^8JvM`FQCF{s~q8-q=u#~^2 z8mJV}w?i@6K$0HLg)%7TrM6^E^X~+xVPv&XP3mi5B59!RTbq#FO%H91Yzs_=DbN6o z&;-rU0_hXAVLqbip*}X7pqyiKl5|7uikrkiGP|cwdw{ z8t5}&r-;GrVF%ce>?g01*Xm(s*afoW069(8@lMUS_B3KQ!YtSu_JMt2KXQ;9CGU_A z$caacgZ<&)$Xf=$fp8ExL|!L{>){YM6b>Ut$Q$HM{wvM0@Zg?ZdX`Qs>QUP4BUl7W z|7X@{_#7-F$H-gcZJGu%8|la}eUtlls46WhY=)*BPW+#l&%;Tun!HQiBkxCMN}8tj zYn9xzaF-`n4#VmHGk*r031^YxGyunXB$HpjtMg_0t?d?9)?HY z8}LnHC3a#aMlz7d$T@O>d_^utJp8C2z;_y}!1+8$7QZLf!zagT`Ilqk2OS~whd-tlw=dsOjd;6-wkTq9qT>*O2q zExEB7UV@k56-ETlMp#U4lJCg(bO&GWt4lG0FuWa!owIcOquBWs{=-&!sIip6OHj@}m4a ziHqN5)bKNnllVTy79fFNoU96udAMyPLrK&`Bu5IQL@Jbk)JTI8Q48`r`GfpP?vcO9 zeeyT?hy1%0X_1ba#?t==WJD$k!2=GcB8D~n1v$`z0}-*H9a5HGJ+7)Ur@W{rPojEc zP_J=?MS0nk`TQ!=G!wNwVtA*bkyRxnMUBROiM+^%{7f85XF8)G3h_%_JyT@VQ~z;5 zc?Pvat!Q6s^bBf4`_oY-Q-a!~4*Y+v-Z~iv6dYhA%xTmKb*96*pswgy)D3k8Ya0BXt`l5a)n*)dg(Hw~7fROqS4L}3wTLz)QXvmOm zg+*g4ON&d3%KLX4TUa!S1DFFW2cq&Mjey3Fk)TG#qv0rz?`GGdIy8dsYEKC_zh@*W zK-7-{4#d!D1By$_DvQb`(aqhXD5|tOt`WOULKTlW1G}$;Ay}&=PyU-Nc`oNyr?kQl=ixk>S)Pa6> z;DDUQBv3@&No5|IPAQIp;Dnd^jMM zg0hfb;&#i>VziXX2DF3&3ALz(18TmV+1GVBS{Z3w!2wMzTE&4xqRr`*U)mg_Xf3K` zWc6qrT8}nxpaln#IH0YkZ)H&f2Xu|tq|=IY&^$74Tg24u9MC@sN3;{YMnfF!Lc7r( zv={9|`_Zc$NajEa2MioAa=^p^GY9CH1Lz<+M14O99bpIutQ?@wrZ#$=I^IvL{A5c_ zSTnwAWLarJ+PIOUBVue~k*mN}F|Mk-pr|>#j-yW)Sp##9InP{RE|PX6le8xt8qlZc zvxv6lfSm&l8Zd20@k4d}=Jdzv`bj3AL1&wo%mE(xHyoyx4=|M_4d_42V1Q|0hB%PHftDO-)qpW(;TlY%u{8(UlP@?b)U|_p z78O?xC@p%Pcc-d559!&MVj?MJaKF-$QH?zhamOMor?19hEWuJtGs80+Xv2ZF9B9|T z97HTuQSFgV_rX7(IwP!O20s=&I0YLi2^&ubnDU_m(KZ^6tyI-w8>SBH$bl}8R0eip z_mj#MdoTs469+nz;ztG>GhZN$mAl!#Aa40+QfsP)aVtzW+LZ&(lH$j^q0`1cH7yf& zptj-m9Ozz)X!?og6P>zYT`@h+JR#z758NxVg@`D}SsduqyoJ7zE%d`Q)6v$xkGC)Y z4}N4##F!y?CJMTj|17HI8P#`+5O2JZp<8Z>?xy1 zQr-shP^QpSRbMRa|j{|gr!#I#zj~C#Dco7GNbD)Z&5?agGdW?!Zi8Q0SsPYX(6x5}b16_Ii|oHh<9tth33!G1KUG~wH9 z433Q8D?%x}A=I};voYK74l2npO?Jh#_!SP6P`4coX-e=Oye~qRy&M=-i}!P&lxUx* zEKMgqd=MX|2S$7djpD#)T7l>zexvd6Iil^;yvAGjT{;Sl#D^Md*yzY8`bZ~?=BH#N 
zhw(@F#A739VjD|}o9BLxPd+x|6i1J=&BslA2A`u{k)`8m@p%r6f86yIz8vYIUD1xB z;RjUz;NPhu|HSw3U-&-$8~=m<<-qfe?xdBzngf$L@B#;>a9}D2UfjwYWEmE)kbYy9 zrJvCer-nH|IKV|7r*Sm)=yxse@M`%1-rZqV{_vZZX8(3fn^+6%Yk)#JHM*`;EJk|r4=1S+JB%GhRY(b&+oVO< zVf;4#(kNac;J{0K`}Dqse0C(2U!4Zz@=>RY77-=J;xQ~NA5-H*#fqZ-=FRa zH>Pj4I0AGD2bMoZb2K~d|J>Mkb^=?@R&aod5gOa8Ik4to-eM<4OsDKv`G1){`6<)m zsE%T3u36P&cCXk71;U`u1~Qt@4XSw<%;m)a9e*R;8tz-#WbEuG7&5Q%`C%-v3)#h# z`i<3zrRu4^*(g3jo0r&S>~an?a9}eBwmshHDt672S(07LQp#-Pz@|w4jvOE!COO{I zGNFGxyXmRHo7pWKpnqFQ@zW;@W9EGFWQJv5VRt_@b5CR$nw97>{F;^nmEx@I9!1qf zh3(4=Mp3a;{snuGJ;du;C4|?adMe5e(o5Tq&}ZlidS!bKT|i$^IrBTck+tItS|6XG zH?g$-;ZF1}wm%+-2jijiHZ~9E;{sen?_pQbTh}-6FDzuWG=5Sk+0*H@X$HMEZBKdI znG$sZ`!f3_dyV~uy}^FZ{=ojk-j9M&q9{o?N*1Mv>K4^6DmSV)sw`?k)a0nyQS+h} zL@kb@jEmY8buj91)EiMpqrQx~9Cat^-{|P*nCQ6Z_-IkIBw8EojqViPCHmRu?$O!N zdC`T@viSH_-+y&n5x?CscJVtc_=@Sji^5C7E5d8STf)1-zeNI(R3sNEMF}E}$RIL_EFzo8 zA##cWqPC(QqCTR2q8!m+(NIyYC{I)-8Y>zvDi`e%9TB}LIwpEs^rsk$g<`Q-Dwd0t zVx2fyY!I8o7O_p7EuJRc6Bb_(-xS{x|0w=hd`Em&5-pKPl#&FAM$$rJlei>qiB}So zWJp>`o{@By+r3jO`8@@z zP$*Pkg<6rQNK)t&$qIwQtMDrVijbnEqP3!pqMf3>Vwhr{Vx!`i;-V5O^-8DGrA$+L zlr5F5l+P$TEBh%2EAy0P$}!5Z%JIsn$`_SkC8u1ZT&!H8tWmC4Zcx@L>ys$R8GwORG5>Zt03>XPb;>YD1h>RZ)K)%U6&R6nU6Brpju z0VhNy2ohow;uAy(l7z$rb3)sM{t2TK!U-!9b|)N3xR>xi&8o%fB(+g(4y&zdyV|4n zsngX#b%y#m^(^&z^%nI$^{eUw>O<NnI!)hE=a)Th;F)#ue$)nBW>QQuI1r;%#f zX`a(mYF^OH)YNEJX;y32YSwEuX|`y#X?AFKY7S@)X^v`6XijNPYtCxUYp!a()_kM6 zq4`d8NAsKJL1J8@EK!jdRwb$v^@%Bo#zb?XHPMq8O3X}rHfcxF&ZGlL?8GThlkOzlP5M3Q&!oS!QCfjERvWJsX(d{jR-tXF9j=|D-K#yUeMfsj`>FP<_OkZ6 z4(p)@%hYwyb<%awJ*(@k8yMCN)(zF=>UQc5>)y~E z)xD)hdWl}9SLjvx9DTmNKwqRU(O2rH>u2g`>*wm{>lf-5>zC>`=?O6eb0x;FPEoK}u|j zHbtM3l448=TT<*P&XlwiZ%TSfFr`CE|CE9hK4nwN>6GtM{xnDo4nxS$-O$sJW$0t* zXUH)OFbpybF$^<|GfXg48YUX54O0wZ!!*N8!)(J`!%K!&42KOT4VMgejVhzV*v6P; zEHF+rt~9PTt~IVV))@~Q-!*=0JZrpfiZTgIabc6lBsIxRHdAX;cT=vZ+BC%!rZ-eG zO><1Io8B>XS!gzY5LW4&-AzHff<-vn6+k$xwE;uxzs$-Ty36W4x4A0 z=a`q7SDIIw*O|ANUo{^zA2z>fK5jm3K5M>U{>uEV`ELtjQCaks?v~z`$rj$S+!9`C 
zS#4Qs*<{&j*=~8o@|xwU$W4dH^a7%ZD(!2+N10Sd#1goeU!b*{-T}Or`c!NXW19q zYwXMHE9|T6tL+W;o%VO^@7q7LpRj*sKWV>c|HgjPe#`!o{TKVM_WvBv!8!yEr9Kun0?>oYuIZirGJI*;S zI=*q-blh_M==j-j$8kSZma0v)rlzO1NzF{{nA#<^TWY`5oYaA-LsEyO4o{tux+?WZ z>gm+WsaI03rQS}xlX^GxkJP_X|8`<$j5FRTc1oQ(r_pJ4TAfa(+v#(rJKH$BIJ2FD zoI{+$ocYc|XNj}aSrK+lan5xva4vS%IF~tBIJY|wIuAKtcOG$m=se;4)cJ+;OXnHq z73Vk3o6cL#ADw?WAGjD7bVa*jT|$@GrEwWtZdbZ1=*n=lab>zXx;ncCx<E6 zT~)40uF0Xa96sYcTaXtaW8VOcCT~Sx*Ob^-CN!J+{fJS zxsSU)c7N_Z6?UI-pL3sg-*ErnzU{u_{=@y3`yclMkJ4lCm_0U6swd6k@wD+|dOCW# zc)EFdcm{cfdWL)QJ%yfPPlacqr`j{c6ZUYPMV_Uempm&xFMHN_4tU=6eBk-W^NHtk z&q>b}&sEPg&vkEMJ}A8`eO&s4^vd*!=||I#r+=OPeE*Mm2L-v#dk?*;!3J_vyj3Q0oBP(nx((uGn&rjRAnGSnf|JCqX|5E>N9 z4UGsDgo;8Hp(&xcp+%u3p_AU$e HL;n8&ZMj+G From 77b1776e41288bacb9efb785248ec213aa51d402 Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 14:21:40 +0800 Subject: [PATCH 20/39] update version --- LFLiveKit.podspec | 2 +- LFLiveKit/Info.plist | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LFLiveKit.podspec b/LFLiveKit.podspec index a2603571..d91c4b38 100644 --- a/LFLiveKit.podspec +++ b/LFLiveKit.podspec @@ -2,7 +2,7 @@ Pod::Spec.new do |s| s.name = "LFLiveKit" - s.version = "1.9.4" + s.version = "1.9.5" s.summary = "LaiFeng ios Live. LFLiveKit." s.homepage = "https://github.com/chenliming777" s.license = { :type => "MIT", :file => "LICENSE" } diff --git a/LFLiveKit/Info.plist b/LFLiveKit/Info.plist index c1bb3453..84df70fc 100644 --- a/LFLiveKit/Info.plist +++ b/LFLiveKit/Info.plist @@ -15,7 +15,7 @@ CFBundlePackageType FMWK CFBundleShortVersionString - 1.9.4 + 1.9.5 CFBundleSignature ???? 
CFBundleVersion From c1787ef6c540d7d813fc8bb69c58832cd345529a Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 14:54:46 +0800 Subject: [PATCH 21/39] update version --- LFLiveKit.podspec | 4 +- LFLiveKit.xcodeproj/project.pbxproj | 1493 +++++- .../UserInterfaceState.xcuserstate | Bin 17882 -> 17738 bytes LFLiveKit/Info.plist | 2 +- LFLiveKit/Vendor/GPUImage/GLProgram.h | 42 + LFLiveKit/Vendor/GPUImage/GLProgram.m | 236 + LFLiveKit/Vendor/GPUImage/GPUImage.h | 170 + .../GPUImage/GPUImage3x3ConvolutionFilter.h | 18 + .../GPUImage/GPUImage3x3ConvolutionFilter.m | 128 + .../GPUImage3x3TextureSamplingFilter.h | 18 + .../GPUImage3x3TextureSamplingFilter.m | 121 + .../GPUImageAdaptiveThresholdFilter.h | 9 + .../GPUImageAdaptiveThresholdFilter.m | 100 + .../Vendor/GPUImage/GPUImageAddBlendFilter.h | 5 + .../Vendor/GPUImage/GPUImageAddBlendFilter.m | 100 + .../GPUImage/GPUImageAlphaBlendFilter.h | 11 + .../GPUImage/GPUImageAlphaBlendFilter.m | 72 + .../Vendor/GPUImage/GPUImageAmatorkaFilter.h | 17 + .../Vendor/GPUImage/GPUImageAmatorkaFilter.m | 38 + .../Vendor/GPUImage/GPUImageAverageColor.h | 20 + .../Vendor/GPUImage/GPUImageAverageColor.m | 204 + .../GPUImageAverageLuminanceThresholdFilter.h | 8 + .../GPUImageAverageLuminanceThresholdFilter.m | 47 + .../Vendor/GPUImage/GPUImageBilateralFilter.h | 10 + .../Vendor/GPUImage/GPUImageBilateralFilter.m | 231 + .../Vendor/GPUImage/GPUImageBoxBlurFilter.h | 7 + .../Vendor/GPUImage/GPUImageBoxBlurFilter.m | 178 + .../GPUImage/GPUImageBrightnessFilter.h | 11 + .../GPUImage/GPUImageBrightnessFilter.m | 66 + LFLiveKit/Vendor/GPUImage/GPUImageBuffer.h | 10 + LFLiveKit/Vendor/GPUImage/GPUImageBuffer.m | 112 + .../GPUImage/GPUImageBulgeDistortionFilter.h | 16 + .../GPUImage/GPUImageBulgeDistortionFilter.m | 174 + .../GPUImage/GPUImageCGAColorspaceFilter.h | 5 + .../GPUImage/GPUImageCGAColorspaceFilter.m | 113 + .../GPUImageCannyEdgeDetectionFilter.h | 62 + .../GPUImageCannyEdgeDetectionFilter.m | 125 + 
.../GPUImage/GPUImageChromaKeyBlendFilter.h | 32 + .../GPUImage/GPUImageChromaKeyBlendFilter.m | 117 + .../Vendor/GPUImage/GPUImageChromaKeyFilter.h | 30 + .../Vendor/GPUImage/GPUImageChromaKeyFilter.m | 113 + .../Vendor/GPUImage/GPUImageClosingFilter.h | 19 + .../Vendor/GPUImage/GPUImageClosingFilter.m | 57 + .../GPUImage/GPUImageColorBlendFilter.h | 5 + .../GPUImage/GPUImageColorBlendFilter.m | 113 + .../GPUImage/GPUImageColorBurnBlendFilter.h | 9 + .../GPUImage/GPUImageColorBurnBlendFilter.m | 52 + .../Vendor/GPUImage/GPUImageColorConversion.h | 12 + .../Vendor/GPUImage/GPUImageColorConversion.m | 159 + .../GPUImage/GPUImageColorDodgeBlendFilter.h | 9 + .../GPUImage/GPUImageColorDodgeBlendFilter.m | 75 + .../GPUImage/GPUImageColorInvertFilter.h | 7 + .../GPUImage/GPUImageColorInvertFilter.m | 46 + .../GPUImageColorLocalBinaryPatternFilter.h | 5 + .../GPUImageColorLocalBinaryPatternFilter.m | 159 + .../GPUImage/GPUImageColorMatrixFilter.h | 19 + .../GPUImage/GPUImageColorMatrixFilter.m | 87 + .../GPUImage/GPUImageColorPackingFilter.h | 10 + .../GPUImage/GPUImageColorPackingFilter.m | 139 + .../GPUImageColourFASTFeatureDetector.h | 21 + .../GPUImageColourFASTFeatureDetector.m | 48 + .../GPUImageColourFASTSamplingOperation.h | 22 + .../GPUImageColourFASTSamplingOperation.m | 204 + .../Vendor/GPUImage/GPUImageContrastFilter.h | 14 + .../Vendor/GPUImage/GPUImageContrastFilter.m | 66 + .../Vendor/GPUImage/GPUImageCropFilter.h | 14 + .../Vendor/GPUImage/GPUImageCropFilter.m | 274 ++ .../GPUImage/GPUImageCrosshairGenerator.h | 17 + .../GPUImage/GPUImageCrosshairGenerator.m | 139 + .../GPUImage/GPUImageCrosshatchFilter.h | 13 + .../GPUImage/GPUImageCrosshatchFilter.m | 163 + .../GPUImage/GPUImageDarkenBlendFilter.h | 7 + .../GPUImage/GPUImageDarkenBlendFilter.m | 52 + .../GPUImage/GPUImageDifferenceBlendFilter.h | 7 + .../GPUImage/GPUImageDifferenceBlendFilter.m | 50 + .../Vendor/GPUImage/GPUImageDilationFilter.h | 16 + .../Vendor/GPUImage/GPUImageDilationFilter.m | 431 
++ ...geDirectionalNonMaximumSuppressionFilter.h | 19 + ...geDirectionalNonMaximumSuppressionFilter.m | 141 + ...ImageDirectionalSobelEdgeDetectionFilter.h | 5 + ...ImageDirectionalSobelEdgeDetectionFilter.m | 103 + .../GPUImage/GPUImageDissolveBlendFilter.h | 11 + .../GPUImage/GPUImageDissolveBlendFilter.m | 72 + .../GPUImage/GPUImageDivideBlendFilter.h | 5 + .../GPUImage/GPUImageDivideBlendFilter.m | 96 + .../Vendor/GPUImage/GPUImageEmbossFilter.h | 8 + .../Vendor/GPUImage/GPUImageEmbossFilter.m | 49 + .../Vendor/GPUImage/GPUImageErosionFilter.h | 11 + .../Vendor/GPUImage/GPUImageErosionFilter.m | 312 ++ .../GPUImage/GPUImageExclusionBlendFilter.h | 7 + .../GPUImage/GPUImageExclusionBlendFilter.m | 56 + .../Vendor/GPUImage/GPUImageExposureFilter.h | 11 + .../Vendor/GPUImage/GPUImageExposureFilter.m | 66 + .../GPUImageFASTCornerDetectionFilter.h | 33 + .../GPUImageFASTCornerDetectionFilter.m | 89 + .../GPUImage/GPUImageFalseColorFilter.h | 15 + .../GPUImage/GPUImageFalseColorFilter.m | 101 + LFLiveKit/Vendor/GPUImage/GPUImageFilter.h | 134 + LFLiveKit/Vendor/GPUImage/GPUImageFilter.m | 753 +++ .../Vendor/GPUImage/GPUImageFilterGroup.h | 19 + .../Vendor/GPUImage/GPUImageFilterGroup.m | 208 + .../Vendor/GPUImage/GPUImageFilterPipeline.h | 30 + .../Vendor/GPUImage/GPUImageFilterPipeline.m | 218 + .../Vendor/GPUImage/GPUImageFourInputFilter.h | 21 + .../Vendor/GPUImage/GPUImageFourInputFilter.m | 401 ++ .../Vendor/GPUImage/GPUImageFramebuffer.h | 59 + .../Vendor/GPUImage/GPUImageFramebuffer.m | 457 ++ .../GPUImage/GPUImageFramebufferCache.h | 15 + .../GPUImage/GPUImageFramebufferCache.m | 190 + .../Vendor/GPUImage/GPUImageGammaFilter.h | 11 + .../Vendor/GPUImage/GPUImageGammaFilter.m | 66 + .../GPUImage/GPUImageGaussianBlurFilter.h | 36 + .../GPUImage/GPUImageGaussianBlurFilter.m | 513 ++ .../GPUImageGaussianBlurPositionFilter.h | 22 + .../GPUImageGaussianBlurPositionFilter.m | 232 + .../GPUImageGaussianSelectiveBlurFilter.h | 30 + 
.../GPUImageGaussianSelectiveBlurFilter.m | 147 + .../GPUImage/GPUImageGlassSphereFilter.h | 5 + .../GPUImage/GPUImageGlassSphereFilter.m | 106 + .../Vendor/GPUImage/GPUImageGrayscaleFilter.h | 9 + .../Vendor/GPUImage/GPUImageGrayscaleFilter.m | 141 + LFLiveKit/Vendor/GPUImage/GPUImageHSBFilter.h | 27 + LFLiveKit/Vendor/GPUImage/GPUImageHSBFilter.m | 414 ++ .../Vendor/GPUImage/GPUImageHalftoneFilter.h | 5 + .../Vendor/GPUImage/GPUImageHalftoneFilter.m | 79 + .../GPUImage/GPUImageHardLightBlendFilter.h | 7 + .../GPUImage/GPUImageHardLightBlendFilter.m | 99 + .../GPUImageHarrisCornerDetectionFilter.h | 53 + .../GPUImageHarrisCornerDetectionFilter.m | 292 ++ .../Vendor/GPUImage/GPUImageHazeFilter.h | 29 + .../Vendor/GPUImage/GPUImageHazeFilter.m | 96 + .../Vendor/GPUImage/GPUImageHighPassFilter.h | 14 + .../Vendor/GPUImage/GPUImageHighPassFilter.m | 46 + .../GPUImage/GPUImageHighlightShadowFilter.h | 20 + .../GPUImage/GPUImageHighlightShadowFilter.m | 93 + .../GPUImageHighlightShadowTintFilter.h | 25 + .../GPUImageHighlightShadowTintFilter.m | 136 + .../GPUImageHistogramEqualizationFilter.h | 26 + .../GPUImageHistogramEqualizationFilter.m | 307 ++ .../Vendor/GPUImage/GPUImageHistogramFilter.h | 22 + .../Vendor/GPUImage/GPUImageHistogramFilter.m | 341 ++ .../GPUImage/GPUImageHistogramGenerator.h | 8 + .../GPUImage/GPUImageHistogramGenerator.m | 87 + .../GPUImageHoughTransformLineDetector.h | 49 + .../GPUImageHoughTransformLineDetector.m | 241 + .../Vendor/GPUImage/GPUImageHueBlendFilter.h | 5 + .../Vendor/GPUImage/GPUImageHueBlendFilter.m | 212 + LFLiveKit/Vendor/GPUImage/GPUImageHueFilter.h | 11 + LFLiveKit/Vendor/GPUImage/GPUImageHueFilter.m | 123 + .../GPUImage/GPUImageJFAVoronoiFilter.h | 17 + .../GPUImage/GPUImageJFAVoronoiFilter.m | 446 ++ .../Vendor/GPUImage/GPUImageKuwaharaFilter.h | 13 + .../Vendor/GPUImage/GPUImageKuwaharaFilter.m | 223 + .../GPUImage/GPUImageKuwaharaRadius3Filter.h | 8 + .../GPUImage/GPUImageKuwaharaRadius3Filter.m | 547 +++ 
.../GPUImageLanczosResamplingFilter.h | 7 + .../GPUImageLanczosResamplingFilter.m | 239 + .../Vendor/GPUImage/GPUImageLaplacianFilter.h | 5 + .../Vendor/GPUImage/GPUImageLaplacianFilter.m | 115 + .../Vendor/GPUImage/GPUImageLevelsFilter.h | 45 + .../Vendor/GPUImage/GPUImageLevelsFilter.m | 152 + .../GPUImage/GPUImageLightenBlendFilter.h | 8 + .../GPUImage/GPUImageLightenBlendFilter.m | 52 + .../Vendor/GPUImage/GPUImageLineGenerator.h | 18 + .../Vendor/GPUImage/GPUImageLineGenerator.m | 164 + .../GPUImage/GPUImageLinearBurnBlendFilter.h | 5 + .../GPUImage/GPUImageLinearBurnBlendFilter.m | 51 + .../GPUImageLocalBinaryPatternFilter.h | 5 + .../GPUImageLocalBinaryPatternFilter.m | 123 + .../Vendor/GPUImage/GPUImageLookupFilter.h | 34 + .../Vendor/GPUImage/GPUImageLookupFilter.m | 115 + .../Vendor/GPUImage/GPUImageLowPassFilter.h | 14 + .../Vendor/GPUImage/GPUImageLowPassFilter.m | 61 + .../GPUImage/GPUImageLuminanceRangeFilter.h | 12 + .../GPUImage/GPUImageLuminanceRangeFilter.m | 76 + .../GPUImageLuminanceThresholdFilter.h | 14 + .../GPUImageLuminanceThresholdFilter.m | 74 + .../Vendor/GPUImage/GPUImageLuminosity.h | 17 + .../Vendor/GPUImage/GPUImageLuminosity.m | 329 ++ .../GPUImage/GPUImageLuminosityBlendFilter.h | 5 + .../GPUImage/GPUImageLuminosityBlendFilter.m | 113 + .../Vendor/GPUImage/GPUImageMaskFilter.h | 5 + .../Vendor/GPUImage/GPUImageMaskFilter.m | 76 + .../Vendor/GPUImage/GPUImageMedianFilter.h | 5 + .../Vendor/GPUImage/GPUImageMedianFilter.m | 178 + .../GPUImage/GPUImageMissEtikateFilter.h | 17 + .../GPUImage/GPUImageMissEtikateFilter.m | 38 + .../GPUImage/GPUImageMonochromeFilter.h | 13 + .../GPUImage/GPUImageMonochromeFilter.m | 115 + .../Vendor/GPUImage/GPUImageMosaicFilter.h | 22 + .../Vendor/GPUImage/GPUImageMosaicFilter.m | 188 + .../GPUImage/GPUImageMotionBlurFilter.h | 13 + .../GPUImage/GPUImageMotionBlurFilter.m | 209 + .../Vendor/GPUImage/GPUImageMotionDetector.h | 18 + .../Vendor/GPUImage/GPUImageMotionDetector.m | 112 + 
LFLiveKit/Vendor/GPUImage/GPUImageMovie.h | 61 + LFLiveKit/Vendor/GPUImage/GPUImageMovie.m | 876 ++++ .../GPUImage/GPUImageMovieComposition.h | 21 + .../GPUImage/GPUImageMovieComposition.m | 70 + .../GPUImage/GPUImageMultiplyBlendFilter.h | 7 + .../GPUImage/GPUImageMultiplyBlendFilter.m | 52 + .../GPUImageNobleCornerDetectionFilter.h | 12 + .../GPUImageNobleCornerDetectionFilter.m | 74 + .../GPUImageNonMaximumSuppressionFilter.h | 5 + .../GPUImageNonMaximumSuppressionFilter.m | 107 + .../GPUImage/GPUImageNormalBlendFilter.h | 8 + .../GPUImage/GPUImageNormalBlendFilter.m | 96 + .../Vendor/GPUImage/GPUImageOpacityFilter.h | 11 + .../Vendor/GPUImage/GPUImageOpacityFilter.m | 65 + .../Vendor/GPUImage/GPUImageOpeningFilter.h | 19 + .../Vendor/GPUImage/GPUImageOpeningFilter.m | 57 + LFLiveKit/Vendor/GPUImage/GPUImageOutput.h | 128 + LFLiveKit/Vendor/GPUImage/GPUImageOutput.m | 439 ++ .../GPUImage/GPUImageOverlayBlendFilter.h | 5 + .../GPUImage/GPUImageOverlayBlendFilter.m | 94 + ...ageParallelCoordinateLineTransformFilter.h | 16 + ...ageParallelCoordinateLineTransformFilter.m | 266 ++ .../GPUImage/GPUImagePerlinNoiseFilter.h | 13 + .../GPUImage/GPUImagePerlinNoiseFilter.m | 239 + .../GPUImage/GPUImagePinchDistortionFilter.h | 20 + .../GPUImage/GPUImagePinchDistortionFilter.m | 176 + .../Vendor/GPUImage/GPUImagePixellateFilter.h | 12 + .../Vendor/GPUImage/GPUImagePixellateFilter.m | 151 + .../GPUImagePixellatePositionFilter.h | 17 + .../GPUImagePixellatePositionFilter.m | 194 + .../GPUImage/GPUImagePoissonBlendFilter.h | 18 + .../GPUImage/GPUImagePoissonBlendFilter.m | 175 + .../GPUImage/GPUImagePolarPixellateFilter.h | 13 + .../GPUImage/GPUImagePolarPixellateFilter.m | 128 + .../Vendor/GPUImage/GPUImagePolkaDotFilter.h | 10 + .../Vendor/GPUImage/GPUImagePolkaDotFilter.m | 85 + .../Vendor/GPUImage/GPUImagePosterizeFilter.h | 14 + .../Vendor/GPUImage/GPUImagePosterizeFilter.m | 66 + .../GPUImagePrewittEdgeDetectionFilter.h | 5 + .../GPUImagePrewittEdgeDetectionFilter.m | 
97 + .../GPUImage/GPUImageRGBClosingFilter.h | 18 + .../GPUImage/GPUImageRGBClosingFilter.m | 41 + .../GPUImage/GPUImageRGBDilationFilter.h | 11 + .../GPUImage/GPUImageRGBDilationFilter.m | 306 ++ .../GPUImage/GPUImageRGBErosionFilter.h | 11 + .../GPUImage/GPUImageRGBErosionFilter.m | 304 ++ LFLiveKit/Vendor/GPUImage/GPUImageRGBFilter.h | 15 + LFLiveKit/Vendor/GPUImage/GPUImageRGBFilter.m | 89 + .../GPUImage/GPUImageRGBOpeningFilter.h | 17 + .../GPUImage/GPUImageRGBOpeningFilter.m | 41 + .../Vendor/GPUImage/GPUImageRawDataInput.h | 43 + .../Vendor/GPUImage/GPUImageRawDataInput.m | 139 + .../Vendor/GPUImage/GPUImageRawDataOutput.h | 44 + .../Vendor/GPUImage/GPUImageRawDataOutput.m | 307 ++ .../GPUImage/GPUImageSaturationBlendFilter.h | 5 + .../GPUImage/GPUImageSaturationBlendFilter.m | 213 + .../GPUImage/GPUImageSaturationFilter.h | 14 + .../GPUImage/GPUImageSaturationFilter.m | 78 + .../GPUImage/GPUImageScreenBlendFilter.h | 7 + .../GPUImage/GPUImageScreenBlendFilter.m | 52 + .../Vendor/GPUImage/GPUImageSepiaFilter.h | 6 + .../Vendor/GPUImage/GPUImageSepiaFilter.m | 24 + .../Vendor/GPUImage/GPUImageSharpenFilter.h | 12 + .../Vendor/GPUImage/GPUImageSharpenFilter.m | 147 + .../GPUImageShiTomasiFeatureDetectionFilter.h | 13 + .../GPUImageShiTomasiFeatureDetectionFilter.m | 65 + ...PUImageSingleComponentGaussianBlurFilter.h | 7 + ...PUImageSingleComponentGaussianBlurFilter.m | 189 + .../Vendor/GPUImage/GPUImageSketchFilter.h | 11 + .../Vendor/GPUImage/GPUImageSketchFilter.m | 98 + .../Vendor/GPUImage/GPUImageSkinToneFilter.h | 47 + .../Vendor/GPUImage/GPUImageSkinToneFilter.m | 246 + .../GPUImage/GPUImageSmoothToonFilter.h | 28 + .../GPUImage/GPUImageSmoothToonFilter.m | 94 + .../GPUImageSobelEdgeDetectionFilter.h | 16 + .../GPUImageSobelEdgeDetectionFilter.m | 188 + .../GPUImage/GPUImageSoftEleganceFilter.h | 19 + .../GPUImage/GPUImageSoftEleganceFilter.m | 62 + .../GPUImage/GPUImageSoftLightBlendFilter.h | 7 + .../GPUImage/GPUImageSoftLightBlendFilter.m | 54 + 
.../Vendor/GPUImage/GPUImageSolarizeFilter.h | 14 + .../Vendor/GPUImage/GPUImageSolarizeFilter.m | 76 + .../GPUImage/GPUImageSolidColorGenerator.h | 19 + .../GPUImage/GPUImageSolidColorGenerator.m | 123 + .../GPUImage/GPUImageSourceOverBlendFilter.h | 5 + .../GPUImage/GPUImageSourceOverBlendFilter.m | 51 + .../GPUImage/GPUImageSphereRefractionFilter.h | 15 + .../GPUImage/GPUImageSphereRefractionFilter.m | 179 + .../Vendor/GPUImage/GPUImageStillCamera.h | 24 + .../Vendor/GPUImage/GPUImageStillCamera.m | 338 ++ .../GPUImageStretchDistortionFilter.h | 13 + .../GPUImageStretchDistortionFilter.m | 99 + .../GPUImage/GPUImageSubtractBlendFilter.h | 5 + .../GPUImage/GPUImageSubtractBlendFilter.m | 52 + .../Vendor/GPUImage/GPUImageSwirlFilter.h | 17 + .../Vendor/GPUImage/GPUImageSwirlFilter.m | 123 + .../Vendor/GPUImage/GPUImageTextureInput.h | 14 + .../Vendor/GPUImage/GPUImageTextureInput.m | 46 + .../Vendor/GPUImage/GPUImageTextureOutput.h | 21 + .../Vendor/GPUImage/GPUImageTextureOutput.m | 83 + .../GPUImage/GPUImageThreeInputFilter.h | 21 + .../GPUImage/GPUImageThreeInputFilter.m | 328 ++ .../GPUImageThresholdEdgeDetectionFilter.h | 12 + .../GPUImageThresholdEdgeDetectionFilter.m | 145 + .../GPUImage/GPUImageThresholdSketchFilter.h | 5 + .../GPUImage/GPUImageThresholdSketchFilter.m | 103 + ...geThresholdedNonMaximumSuppressionFilter.h | 14 + ...geThresholdedNonMaximumSuppressionFilter.m | 297 ++ .../Vendor/GPUImage/GPUImageTiltShiftFilter.h | 24 + .../Vendor/GPUImage/GPUImageTiltShiftFilter.m | 126 + .../Vendor/GPUImage/GPUImageToneCurveFilter.h | 30 + .../Vendor/GPUImage/GPUImageToneCurveFilter.m | 621 +++ .../Vendor/GPUImage/GPUImageToonFilter.h | 19 + .../Vendor/GPUImage/GPUImageToonFilter.m | 149 + .../Vendor/GPUImage/GPUImageTransformFilter.h | 19 + .../Vendor/GPUImage/GPUImageTransformFilter.m | 260 ++ ...UImageTwoInputCrossTextureSamplingFilter.h | 15 + ...UImageTwoInputCrossTextureSamplingFilter.m | 108 + .../Vendor/GPUImage/GPUImageTwoInputFilter.h | 21 + 
.../Vendor/GPUImage/GPUImageTwoInputFilter.m | 264 ++ .../Vendor/GPUImage/GPUImageTwoPassFilter.h | 19 + .../Vendor/GPUImage/GPUImageTwoPassFilter.m | 201 + .../GPUImageTwoPassTextureSamplingFilter.h | 13 + .../GPUImageTwoPassTextureSamplingFilter.m | 85 + LFLiveKit/Vendor/GPUImage/GPUImageUIElement.h | 15 + LFLiveKit/Vendor/GPUImage/GPUImageUIElement.m | 123 + .../GPUImage/GPUImageUnsharpMaskFilter.h | 16 + .../GPUImage/GPUImageUnsharpMaskFilter.m | 101 + .../Vendor/GPUImage/GPUImageVideoCamera.h | 156 + .../Vendor/GPUImage/GPUImageVideoCamera.m | 1062 +++++ .../Vendor/GPUImage/GPUImageVignetteFilter.h | 22 + .../Vendor/GPUImage/GPUImageVignetteFilter.m | 104 + .../GPUImage/GPUImageVoronoiConsumerFilter.h | 10 + .../GPUImage/GPUImageVoronoiConsumerFilter.m | 94 + .../GPUImageWeakPixelInclusionFilter.h | 5 + .../GPUImageWeakPixelInclusionFilter.m | 94 + .../GPUImage/GPUImageWhiteBalanceFilter.h | 17 + .../GPUImage/GPUImageWhiteBalanceFilter.m | 107 + .../GPUImage/GPUImageXYDerivativeFilter.h | 5 + .../GPUImage/GPUImageXYDerivativeFilter.m | 106 + .../Vendor/GPUImage/GPUImageZoomBlurFilter.h | 13 + .../Vendor/GPUImage/GPUImageZoomBlurFilter.m | 115 + .../Vendor/GPUImage/GPUImageiOSBlurFilter.h | 31 + .../Vendor/GPUImage/GPUImageiOSBlurFilter.m | 114 + .../iOS/Framework/GPUImageFramework.h | 177 + .../Vendor/GPUImage/iOS/GPUImageContext.h | 64 + .../Vendor/GPUImage/iOS/GPUImageContext.m | 320 ++ .../Vendor/GPUImage/iOS/GPUImageMovieWriter.h | 67 + .../Vendor/GPUImage/iOS/GPUImageMovieWriter.m | 1016 ++++ .../iOS/GPUImagePicture+TextureSubimage.h | 19 + .../iOS/GPUImagePicture+TextureSubimage.m | 103 + .../Vendor/GPUImage/iOS/GPUImagePicture.h | 38 + .../Vendor/GPUImage/iOS/GPUImagePicture.m | 371 ++ LFLiveKit/Vendor/GPUImage/iOS/GPUImageView.h | 41 + LFLiveKit/Vendor/GPUImage/iOS/GPUImageView.m | 484 ++ LFLiveKit/Vendor/pili-librtmp/amf.c | 1037 +++++ LFLiveKit/Vendor/pili-librtmp/amf.h | 180 + LFLiveKit/Vendor/pili-librtmp/bytes.h | 91 + 
LFLiveKit/Vendor/pili-librtmp/dh.h | 345 ++ LFLiveKit/Vendor/pili-librtmp/dhgroups.h | 198 + LFLiveKit/Vendor/pili-librtmp/error.c | 20 + LFLiveKit/Vendor/pili-librtmp/error.h | 45 + LFLiveKit/Vendor/pili-librtmp/handshake.h | 1034 +++++ LFLiveKit/Vendor/pili-librtmp/hashswf.c | 626 +++ LFLiveKit/Vendor/pili-librtmp/http.h | 49 + LFLiveKit/Vendor/pili-librtmp/log.c | 209 + LFLiveKit/Vendor/pili-librtmp/log.h | 68 + LFLiveKit/Vendor/pili-librtmp/parseurl.c | 312 ++ LFLiveKit/Vendor/pili-librtmp/rtmp.c | 4136 +++++++++++++++++ LFLiveKit/Vendor/pili-librtmp/rtmp.h | 356 ++ LFLiveKit/Vendor/pili-librtmp/rtmp_sys.h | 123 + .../UserInterfaceState.xcuserstate | Bin 10986 -> 10986 bytes .../UserInterfaceState.xcuserstate | Bin 118716 -> 123847 bytes .../project.pbxproj | 50 +- LFLiveKitSwiftDemo/Podfile | 4 +- Podfile | 10 - 371 files changed, 43924 insertions(+), 36 deletions(-) create mode 100755 LFLiveKit/Vendor/GPUImage/GLProgram.h create mode 100755 LFLiveKit/Vendor/GPUImage/GLProgram.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImage.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImage3x3ConvolutionFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImage3x3ConvolutionFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImage3x3TextureSamplingFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImage3x3TextureSamplingFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageAdaptiveThresholdFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageAdaptiveThresholdFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageAddBlendFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageAddBlendFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageAlphaBlendFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageAlphaBlendFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageAmatorkaFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageAmatorkaFilter.m create mode 100644 
LFLiveKit/Vendor/GPUImage/GPUImageAverageColor.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageAverageColor.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageAverageLuminanceThresholdFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageAverageLuminanceThresholdFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageBilateralFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageBilateralFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageBoxBlurFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageBoxBlurFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageBrightnessFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageBrightnessFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageBuffer.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageBuffer.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageBulgeDistortionFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageBulgeDistortionFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageCGAColorspaceFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageCGAColorspaceFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageCannyEdgeDetectionFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageCannyEdgeDetectionFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyBlendFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyBlendFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageClosingFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageClosingFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageColorBlendFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageColorBlendFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageColorBurnBlendFilter.h create mode 100755 
LFLiveKit/Vendor/GPUImage/GPUImageColorBurnBlendFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageColorConversion.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageColorConversion.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageColorDodgeBlendFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageColorDodgeBlendFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageColorInvertFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageColorInvertFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageColorLocalBinaryPatternFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageColorLocalBinaryPatternFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageColorMatrixFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageColorMatrixFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageColorPackingFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageColorPackingFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageColourFASTFeatureDetector.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageColourFASTFeatureDetector.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageColourFASTSamplingOperation.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageColourFASTSamplingOperation.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageContrastFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageContrastFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageCropFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageCropFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageCrosshairGenerator.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageCrosshairGenerator.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageCrosshatchFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageCrosshatchFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageDarkenBlendFilter.h create mode 100644 
LFLiveKit/Vendor/GPUImage/GPUImageDarkenBlendFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageDifferenceBlendFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageDifferenceBlendFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageDilationFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageDilationFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageDirectionalNonMaximumSuppressionFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageDirectionalNonMaximumSuppressionFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageDirectionalSobelEdgeDetectionFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageDirectionalSobelEdgeDetectionFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageDissolveBlendFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageDissolveBlendFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageDivideBlendFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageDivideBlendFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageEmbossFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageEmbossFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageErosionFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageErosionFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageExclusionBlendFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageExclusionBlendFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageExposureFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageExposureFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageFASTCornerDetectionFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageFASTCornerDetectionFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageFalseColorFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageFalseColorFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageFilter.h create mode 100755 
LFLiveKit/Vendor/GPUImage/GPUImageFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageFilterGroup.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageFilterGroup.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageFilterPipeline.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageFilterPipeline.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageFourInputFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageFourInputFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageFramebuffer.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageFramebuffer.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageFramebufferCache.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageFramebufferCache.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageGammaFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageGammaFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurPositionFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurPositionFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageGaussianSelectiveBlurFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageGaussianSelectiveBlurFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageGlassSphereFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageGlassSphereFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageGrayscaleFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageGrayscaleFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHSBFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHSBFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHalftoneFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHalftoneFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageHardLightBlendFilter.h create mode 
100755 LFLiveKit/Vendor/GPUImage/GPUImageHardLightBlendFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageHarrisCornerDetectionFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageHarrisCornerDetectionFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageHazeFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageHazeFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHighPassFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHighPassFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowTintFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowTintFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHistogramEqualizationFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHistogramEqualizationFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageHistogramFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageHistogramFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageHistogramGenerator.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageHistogramGenerator.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHoughTransformLineDetector.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHoughTransformLineDetector.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHueBlendFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHueBlendFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHueFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageHueFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageJFAVoronoiFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageJFAVoronoiFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaFilter.m create 
mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaRadius3Filter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaRadius3Filter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLanczosResamplingFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLanczosResamplingFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLaplacianFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLaplacianFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLevelsFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLevelsFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageLightenBlendFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageLightenBlendFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLineGenerator.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLineGenerator.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLinearBurnBlendFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLinearBurnBlendFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLocalBinaryPatternFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLocalBinaryPatternFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLookupFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLookupFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLowPassFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLowPassFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLuminanceRangeFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLuminanceRangeFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageLuminanceThresholdFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageLuminanceThresholdFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLuminosity.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLuminosity.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageLuminosityBlendFilter.h create mode 100644 
LFLiveKit/Vendor/GPUImage/GPUImageLuminosityBlendFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageMaskFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageMaskFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageMedianFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageMedianFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageMissEtikateFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageMissEtikateFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageMonochromeFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageMonochromeFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageMosaicFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageMosaicFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageMotionBlurFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageMotionBlurFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageMotionDetector.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageMotionDetector.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageMovie.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageMovie.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageMovieComposition.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageMovieComposition.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageMultiplyBlendFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageMultiplyBlendFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageNobleCornerDetectionFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageNobleCornerDetectionFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageNonMaximumSuppressionFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageNonMaximumSuppressionFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageNormalBlendFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageNormalBlendFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageOpacityFilter.h 
create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageOpacityFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageOpeningFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageOpeningFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageOutput.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageOutput.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageOverlayBlendFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageOverlayBlendFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageParallelCoordinateLineTransformFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageParallelCoordinateLineTransformFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImagePerlinNoiseFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImagePerlinNoiseFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImagePinchDistortionFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImagePinchDistortionFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImagePixellateFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImagePixellateFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImagePixellatePositionFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImagePixellatePositionFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImagePoissonBlendFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImagePoissonBlendFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImagePolarPixellateFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImagePolarPixellateFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImagePolkaDotFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImagePolkaDotFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImagePosterizeFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImagePosterizeFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImagePrewittEdgeDetectionFilter.h create mode 100755 
LFLiveKit/Vendor/GPUImage/GPUImagePrewittEdgeDetectionFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageRGBClosingFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageRGBClosingFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageRGBDilationFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageRGBDilationFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageRGBErosionFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageRGBErosionFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageRGBFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageRGBFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageRGBOpeningFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageRGBOpeningFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageRawDataInput.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageRawDataInput.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageRawDataOutput.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageRawDataOutput.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageSaturationBlendFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageSaturationBlendFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSaturationFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSaturationFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageScreenBlendFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageScreenBlendFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSepiaFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSepiaFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSharpenFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSharpenFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageShiTomasiFeatureDetectionFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageShiTomasiFeatureDetectionFilter.m create mode 100644 
LFLiveKit/Vendor/GPUImage/GPUImageSingleComponentGaussianBlurFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageSingleComponentGaussianBlurFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSketchFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSketchFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageSkinToneFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageSkinToneFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSmoothToonFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSmoothToonFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSobelEdgeDetectionFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSobelEdgeDetectionFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSoftEleganceFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSoftEleganceFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSoftLightBlendFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSoftLightBlendFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageSolarizeFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageSolarizeFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageSolidColorGenerator.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageSolidColorGenerator.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageSourceOverBlendFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageSourceOverBlendFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageSphereRefractionFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageSphereRefractionFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageStillCamera.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageStillCamera.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageStretchDistortionFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageStretchDistortionFilter.m create mode 100755 
LFLiveKit/Vendor/GPUImage/GPUImageSubtractBlendFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSubtractBlendFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSwirlFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageSwirlFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageTextureInput.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageTextureInput.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageTextureOutput.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageTextureOutput.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageThreeInputFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageThreeInputFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageThresholdEdgeDetectionFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageThresholdEdgeDetectionFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageThresholdSketchFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageThresholdSketchFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageThresholdedNonMaximumSuppressionFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageThresholdedNonMaximumSuppressionFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageTiltShiftFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageTiltShiftFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageToneCurveFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageToneCurveFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageToonFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageToonFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageTransformFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageTransformFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageTwoInputCrossTextureSamplingFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageTwoInputCrossTextureSamplingFilter.m create mode 100644 
LFLiveKit/Vendor/GPUImage/GPUImageTwoInputFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageTwoInputFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageTwoPassFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageTwoPassFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageTwoPassTextureSamplingFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageTwoPassTextureSamplingFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageUIElement.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageUIElement.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageUnsharpMaskFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageUnsharpMaskFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageVideoCamera.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageVideoCamera.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageVignetteFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageVignetteFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageVoronoiConsumerFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageVoronoiConsumerFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageWeakPixelInclusionFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageWeakPixelInclusionFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageWhiteBalanceFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageWhiteBalanceFilter.m create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageXYDerivativeFilter.h create mode 100755 LFLiveKit/Vendor/GPUImage/GPUImageXYDerivativeFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageZoomBlurFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageZoomBlurFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageiOSBlurFilter.h create mode 100644 LFLiveKit/Vendor/GPUImage/GPUImageiOSBlurFilter.m create mode 100644 LFLiveKit/Vendor/GPUImage/iOS/Framework/GPUImageFramework.h create mode 100755 
LFLiveKit/Vendor/GPUImage/iOS/GPUImageContext.h create mode 100755 LFLiveKit/Vendor/GPUImage/iOS/GPUImageContext.m create mode 100755 LFLiveKit/Vendor/GPUImage/iOS/GPUImageMovieWriter.h create mode 100755 LFLiveKit/Vendor/GPUImage/iOS/GPUImageMovieWriter.m create mode 100644 LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture+TextureSubimage.h create mode 100644 LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture+TextureSubimage.m create mode 100755 LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture.h create mode 100755 LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture.m create mode 100755 LFLiveKit/Vendor/GPUImage/iOS/GPUImageView.h create mode 100755 LFLiveKit/Vendor/GPUImage/iOS/GPUImageView.m create mode 100644 LFLiveKit/Vendor/pili-librtmp/amf.c create mode 100644 LFLiveKit/Vendor/pili-librtmp/amf.h create mode 100644 LFLiveKit/Vendor/pili-librtmp/bytes.h create mode 100644 LFLiveKit/Vendor/pili-librtmp/dh.h create mode 100644 LFLiveKit/Vendor/pili-librtmp/dhgroups.h create mode 100644 LFLiveKit/Vendor/pili-librtmp/error.c create mode 100644 LFLiveKit/Vendor/pili-librtmp/error.h create mode 100644 LFLiveKit/Vendor/pili-librtmp/handshake.h create mode 100644 LFLiveKit/Vendor/pili-librtmp/hashswf.c create mode 100644 LFLiveKit/Vendor/pili-librtmp/http.h create mode 100644 LFLiveKit/Vendor/pili-librtmp/log.c create mode 100644 LFLiveKit/Vendor/pili-librtmp/log.h create mode 100644 LFLiveKit/Vendor/pili-librtmp/parseurl.c create mode 100644 LFLiveKit/Vendor/pili-librtmp/rtmp.c create mode 100644 LFLiveKit/Vendor/pili-librtmp/rtmp.h create mode 100644 LFLiveKit/Vendor/pili-librtmp/rtmp_sys.h delete mode 100755 Podfile diff --git a/LFLiveKit.podspec b/LFLiveKit.podspec index d91c4b38..e17a0a04 100644 --- a/LFLiveKit.podspec +++ b/LFLiveKit.podspec @@ -2,7 +2,7 @@ Pod::Spec.new do |s| s.name = "LFLiveKit" - s.version = "1.9.5" + s.version = "1.9.6" s.summary = "LaiFeng ios Live. LFLiveKit." 
s.homepage = "https://github.com/chenliming777" s.license = { :type => "MIT", :file => "LICENSE" } @@ -18,6 +18,4 @@ Pod::Spec.new do |s| s.requires_arc = true - s.dependency 'LMGPUImage', '~> 0.1.9' - s.dependency 'pili-librtmp', '~> 1.0.3.1' end diff --git a/LFLiveKit.xcodeproj/project.pbxproj b/LFLiveKit.xcodeproj/project.pbxproj index d2f61a0b..71df2c50 100644 --- a/LFLiveKit.xcodeproj/project.pbxproj +++ b/LFLiveKit.xcodeproj/project.pbxproj @@ -46,7 +46,368 @@ 84001FFD1D0017680026C63F /* AudioToolbox.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 84001FFC1D0017680026C63F /* AudioToolbox.framework */; }; 84001FFF1D00176C0026C63F /* VideoToolbox.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 84001FFE1D00176C0026C63F /* VideoToolbox.framework */; }; 840020011D0017850026C63F /* libz.tbd in Frameworks */ = {isa = PBXBuildFile; fileRef = 840020001D0017850026C63F /* libz.tbd */; }; - AD7F89B4621A7EFEBEA72D49 /* libPods-LFLiveKit.a in Frameworks */ = {isa = PBXBuildFile; fileRef = B8CB02D2A92EA1F5A262F154 /* libPods-LFLiveKit.a */; }; + 8437239A1D4F260A002B398B /* GLProgram.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437222D1D4F260A002B398B /* GLProgram.h */; }; + 8437239B1D4F260A002B398B /* GLProgram.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437222E1D4F260A002B398B /* GLProgram.m */; }; + 8437239C1D4F260A002B398B /* GPUImage.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437222F1D4F260A002B398B /* GPUImage.h */; }; + 8437239D1D4F260A002B398B /* GPUImage3x3ConvolutionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722301D4F260A002B398B /* GPUImage3x3ConvolutionFilter.h */; }; + 8437239E1D4F260A002B398B /* GPUImage3x3ConvolutionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722311D4F260A002B398B /* GPUImage3x3ConvolutionFilter.m */; }; + 8437239F1D4F260A002B398B /* GPUImage3x3TextureSamplingFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722321D4F260A002B398B /* 
GPUImage3x3TextureSamplingFilter.h */; }; + 843723A01D4F260A002B398B /* GPUImage3x3TextureSamplingFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722331D4F260A002B398B /* GPUImage3x3TextureSamplingFilter.m */; }; + 843723A11D4F260A002B398B /* GPUImageAdaptiveThresholdFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722341D4F260A002B398B /* GPUImageAdaptiveThresholdFilter.h */; }; + 843723A21D4F260A002B398B /* GPUImageAdaptiveThresholdFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722351D4F260A002B398B /* GPUImageAdaptiveThresholdFilter.m */; }; + 843723A31D4F260A002B398B /* GPUImageAddBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722361D4F260A002B398B /* GPUImageAddBlendFilter.h */; }; + 843723A41D4F260A002B398B /* GPUImageAddBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722371D4F260A002B398B /* GPUImageAddBlendFilter.m */; }; + 843723A51D4F260A002B398B /* GPUImageAlphaBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722381D4F260A002B398B /* GPUImageAlphaBlendFilter.h */; }; + 843723A61D4F260A002B398B /* GPUImageAlphaBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722391D4F260A002B398B /* GPUImageAlphaBlendFilter.m */; }; + 843723A71D4F260A002B398B /* GPUImageAmatorkaFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437223A1D4F260A002B398B /* GPUImageAmatorkaFilter.h */; }; + 843723A81D4F260A002B398B /* GPUImageAmatorkaFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437223B1D4F260A002B398B /* GPUImageAmatorkaFilter.m */; }; + 843723A91D4F260A002B398B /* GPUImageAverageColor.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437223C1D4F260A002B398B /* GPUImageAverageColor.h */; }; + 843723AA1D4F260A002B398B /* GPUImageAverageColor.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437223D1D4F260A002B398B /* GPUImageAverageColor.m */; }; + 843723AB1D4F260A002B398B /* GPUImageAverageLuminanceThresholdFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 
8437223E1D4F260A002B398B /* GPUImageAverageLuminanceThresholdFilter.h */; }; + 843723AC1D4F260A002B398B /* GPUImageAverageLuminanceThresholdFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437223F1D4F260A002B398B /* GPUImageAverageLuminanceThresholdFilter.m */; }; + 843723AD1D4F260A002B398B /* GPUImageBilateralFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722401D4F260A002B398B /* GPUImageBilateralFilter.h */; }; + 843723AE1D4F260A002B398B /* GPUImageBilateralFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722411D4F260A002B398B /* GPUImageBilateralFilter.m */; }; + 843723AF1D4F260A002B398B /* GPUImageBoxBlurFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722421D4F260A002B398B /* GPUImageBoxBlurFilter.h */; }; + 843723B01D4F260A002B398B /* GPUImageBoxBlurFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722431D4F260A002B398B /* GPUImageBoxBlurFilter.m */; }; + 843723B11D4F260A002B398B /* GPUImageBrightnessFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722441D4F260A002B398B /* GPUImageBrightnessFilter.h */; }; + 843723B21D4F260A002B398B /* GPUImageBrightnessFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722451D4F260A002B398B /* GPUImageBrightnessFilter.m */; }; + 843723B31D4F260A002B398B /* GPUImageBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722461D4F260A002B398B /* GPUImageBuffer.h */; }; + 843723B41D4F260A002B398B /* GPUImageBuffer.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722471D4F260A002B398B /* GPUImageBuffer.m */; }; + 843723B51D4F260A002B398B /* GPUImageBulgeDistortionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722481D4F260A002B398B /* GPUImageBulgeDistortionFilter.h */; }; + 843723B61D4F260A002B398B /* GPUImageBulgeDistortionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722491D4F260A002B398B /* GPUImageBulgeDistortionFilter.m */; }; + 843723B71D4F260A002B398B /* GPUImageCannyEdgeDetectionFilter.h in Headers */ = {isa = PBXBuildFile; 
fileRef = 8437224A1D4F260A002B398B /* GPUImageCannyEdgeDetectionFilter.h */; }; + 843723B81D4F260A002B398B /* GPUImageCannyEdgeDetectionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437224B1D4F260A002B398B /* GPUImageCannyEdgeDetectionFilter.m */; }; + 843723B91D4F260A002B398B /* GPUImageCGAColorspaceFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437224C1D4F260A002B398B /* GPUImageCGAColorspaceFilter.h */; }; + 843723BA1D4F260A002B398B /* GPUImageCGAColorspaceFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437224D1D4F260A002B398B /* GPUImageCGAColorspaceFilter.m */; }; + 843723BB1D4F260A002B398B /* GPUImageChromaKeyBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437224E1D4F260A002B398B /* GPUImageChromaKeyBlendFilter.h */; }; + 843723BC1D4F260A002B398B /* GPUImageChromaKeyBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437224F1D4F260A002B398B /* GPUImageChromaKeyBlendFilter.m */; }; + 843723BD1D4F260A002B398B /* GPUImageChromaKeyFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722501D4F260A002B398B /* GPUImageChromaKeyFilter.h */; }; + 843723BE1D4F260A002B398B /* GPUImageChromaKeyFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722511D4F260A002B398B /* GPUImageChromaKeyFilter.m */; }; + 843723BF1D4F260A002B398B /* GPUImageClosingFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722521D4F260A002B398B /* GPUImageClosingFilter.h */; }; + 843723C01D4F260A002B398B /* GPUImageClosingFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722531D4F260A002B398B /* GPUImageClosingFilter.m */; }; + 843723C11D4F260A002B398B /* GPUImageColorBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722541D4F260A002B398B /* GPUImageColorBlendFilter.h */; }; + 843723C21D4F260A002B398B /* GPUImageColorBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722551D4F260A002B398B /* GPUImageColorBlendFilter.m */; }; + 843723C31D4F260A002B398B /* GPUImageColorBurnBlendFilter.h in 
Headers */ = {isa = PBXBuildFile; fileRef = 843722561D4F260A002B398B /* GPUImageColorBurnBlendFilter.h */; }; + 843723C41D4F260A002B398B /* GPUImageColorBurnBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722571D4F260A002B398B /* GPUImageColorBurnBlendFilter.m */; }; + 843723C51D4F260A002B398B /* GPUImageColorConversion.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722581D4F260A002B398B /* GPUImageColorConversion.h */; }; + 843723C61D4F260A002B398B /* GPUImageColorConversion.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722591D4F260A002B398B /* GPUImageColorConversion.m */; }; + 843723C71D4F260A002B398B /* GPUImageColorDodgeBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437225A1D4F260A002B398B /* GPUImageColorDodgeBlendFilter.h */; }; + 843723C81D4F260A002B398B /* GPUImageColorDodgeBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437225B1D4F260A002B398B /* GPUImageColorDodgeBlendFilter.m */; }; + 843723C91D4F260A002B398B /* GPUImageColorInvertFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437225C1D4F260A002B398B /* GPUImageColorInvertFilter.h */; }; + 843723CA1D4F260A002B398B /* GPUImageColorInvertFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437225D1D4F260A002B398B /* GPUImageColorInvertFilter.m */; }; + 843723CB1D4F260A002B398B /* GPUImageColorLocalBinaryPatternFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437225E1D4F260A002B398B /* GPUImageColorLocalBinaryPatternFilter.h */; }; + 843723CC1D4F260A002B398B /* GPUImageColorLocalBinaryPatternFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437225F1D4F260A002B398B /* GPUImageColorLocalBinaryPatternFilter.m */; }; + 843723CD1D4F260A002B398B /* GPUImageColorMatrixFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722601D4F260A002B398B /* GPUImageColorMatrixFilter.h */; }; + 843723CE1D4F260A002B398B /* GPUImageColorMatrixFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722611D4F260A002B398B /* 
GPUImageColorMatrixFilter.m */; }; + 843723CF1D4F260A002B398B /* GPUImageColorPackingFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722621D4F260A002B398B /* GPUImageColorPackingFilter.h */; }; + 843723D01D4F260A002B398B /* GPUImageColorPackingFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722631D4F260A002B398B /* GPUImageColorPackingFilter.m */; }; + 843723D11D4F260A002B398B /* GPUImageColourFASTFeatureDetector.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722641D4F260A002B398B /* GPUImageColourFASTFeatureDetector.h */; }; + 843723D21D4F260A002B398B /* GPUImageColourFASTFeatureDetector.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722651D4F260A002B398B /* GPUImageColourFASTFeatureDetector.m */; }; + 843723D31D4F260A002B398B /* GPUImageColourFASTSamplingOperation.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722661D4F260A002B398B /* GPUImageColourFASTSamplingOperation.h */; }; + 843723D41D4F260A002B398B /* GPUImageColourFASTSamplingOperation.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722671D4F260A002B398B /* GPUImageColourFASTSamplingOperation.m */; }; + 843723D51D4F260A002B398B /* GPUImageContrastFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722681D4F260A002B398B /* GPUImageContrastFilter.h */; }; + 843723D61D4F260A002B398B /* GPUImageContrastFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722691D4F260A002B398B /* GPUImageContrastFilter.m */; }; + 843723D71D4F260A002B398B /* GPUImageCropFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437226A1D4F260A002B398B /* GPUImageCropFilter.h */; }; + 843723D81D4F260A002B398B /* GPUImageCropFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437226B1D4F260A002B398B /* GPUImageCropFilter.m */; }; + 843723D91D4F260A002B398B /* GPUImageCrosshairGenerator.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437226C1D4F260A002B398B /* GPUImageCrosshairGenerator.h */; }; + 843723DA1D4F260A002B398B /* GPUImageCrosshairGenerator.m in Sources */ = 
{isa = PBXBuildFile; fileRef = 8437226D1D4F260A002B398B /* GPUImageCrosshairGenerator.m */; }; + 843723DB1D4F260A002B398B /* GPUImageCrosshatchFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437226E1D4F260A002B398B /* GPUImageCrosshatchFilter.h */; }; + 843723DC1D4F260A002B398B /* GPUImageCrosshatchFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437226F1D4F260A002B398B /* GPUImageCrosshatchFilter.m */; }; + 843723DD1D4F260A002B398B /* GPUImageDarkenBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722701D4F260A002B398B /* GPUImageDarkenBlendFilter.h */; }; + 843723DE1D4F260A002B398B /* GPUImageDarkenBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722711D4F260A002B398B /* GPUImageDarkenBlendFilter.m */; }; + 843723DF1D4F260A002B398B /* GPUImageDifferenceBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722721D4F260A002B398B /* GPUImageDifferenceBlendFilter.h */; }; + 843723E01D4F260A002B398B /* GPUImageDifferenceBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722731D4F260A002B398B /* GPUImageDifferenceBlendFilter.m */; }; + 843723E11D4F260A002B398B /* GPUImageDilationFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722741D4F260A002B398B /* GPUImageDilationFilter.h */; }; + 843723E21D4F260A002B398B /* GPUImageDilationFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722751D4F260A002B398B /* GPUImageDilationFilter.m */; }; + 843723E31D4F260A002B398B /* GPUImageDirectionalNonMaximumSuppressionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722761D4F260A002B398B /* GPUImageDirectionalNonMaximumSuppressionFilter.h */; }; + 843723E41D4F260A002B398B /* GPUImageDirectionalNonMaximumSuppressionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722771D4F260A002B398B /* GPUImageDirectionalNonMaximumSuppressionFilter.m */; }; + 843723E51D4F260A002B398B /* GPUImageDirectionalSobelEdgeDetectionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 
843722781D4F260A002B398B /* GPUImageDirectionalSobelEdgeDetectionFilter.h */; }; + 843723E61D4F260A002B398B /* GPUImageDirectionalSobelEdgeDetectionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722791D4F260A002B398B /* GPUImageDirectionalSobelEdgeDetectionFilter.m */; }; + 843723E71D4F260A002B398B /* GPUImageDissolveBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437227A1D4F260A002B398B /* GPUImageDissolveBlendFilter.h */; }; + 843723E81D4F260A002B398B /* GPUImageDissolveBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437227B1D4F260A002B398B /* GPUImageDissolveBlendFilter.m */; }; + 843723E91D4F260A002B398B /* GPUImageDivideBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437227C1D4F260A002B398B /* GPUImageDivideBlendFilter.h */; }; + 843723EA1D4F260A002B398B /* GPUImageDivideBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437227D1D4F260A002B398B /* GPUImageDivideBlendFilter.m */; }; + 843723EB1D4F260A002B398B /* GPUImageEmbossFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437227E1D4F260A002B398B /* GPUImageEmbossFilter.h */; }; + 843723EC1D4F260A002B398B /* GPUImageEmbossFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437227F1D4F260A002B398B /* GPUImageEmbossFilter.m */; }; + 843723ED1D4F260A002B398B /* GPUImageErosionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722801D4F260A002B398B /* GPUImageErosionFilter.h */; }; + 843723EE1D4F260A002B398B /* GPUImageErosionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722811D4F260A002B398B /* GPUImageErosionFilter.m */; }; + 843723EF1D4F260A002B398B /* GPUImageExclusionBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722821D4F260A002B398B /* GPUImageExclusionBlendFilter.h */; }; + 843723F01D4F260A002B398B /* GPUImageExclusionBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722831D4F260A002B398B /* GPUImageExclusionBlendFilter.m */; }; + 843723F11D4F260A002B398B /* 
GPUImageExposureFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722841D4F260A002B398B /* GPUImageExposureFilter.h */; }; + 843723F21D4F260A002B398B /* GPUImageExposureFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722851D4F260A002B398B /* GPUImageExposureFilter.m */; }; + 843723F31D4F260A002B398B /* GPUImageFalseColorFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722861D4F260A002B398B /* GPUImageFalseColorFilter.h */; }; + 843723F41D4F260A002B398B /* GPUImageFalseColorFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722871D4F260A002B398B /* GPUImageFalseColorFilter.m */; }; + 843723F51D4F260A002B398B /* GPUImageFASTCornerDetectionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722881D4F260A002B398B /* GPUImageFASTCornerDetectionFilter.h */; }; + 843723F61D4F260A002B398B /* GPUImageFASTCornerDetectionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722891D4F260A002B398B /* GPUImageFASTCornerDetectionFilter.m */; }; + 843723F71D4F260A002B398B /* GPUImageFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437228A1D4F260A002B398B /* GPUImageFilter.h */; }; + 843723F81D4F260A002B398B /* GPUImageFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437228B1D4F260A002B398B /* GPUImageFilter.m */; }; + 843723F91D4F260A002B398B /* GPUImageFilterGroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437228C1D4F260A002B398B /* GPUImageFilterGroup.h */; }; + 843723FA1D4F260A002B398B /* GPUImageFilterGroup.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437228D1D4F260A002B398B /* GPUImageFilterGroup.m */; }; + 843723FB1D4F260A002B398B /* GPUImageFilterPipeline.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437228E1D4F260A002B398B /* GPUImageFilterPipeline.h */; }; + 843723FC1D4F260A002B398B /* GPUImageFilterPipeline.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437228F1D4F260A002B398B /* GPUImageFilterPipeline.m */; }; + 843723FD1D4F260A002B398B /* GPUImageFourInputFilter.h in Headers */ = {isa 
= PBXBuildFile; fileRef = 843722901D4F260A002B398B /* GPUImageFourInputFilter.h */; }; + 843723FE1D4F260A002B398B /* GPUImageFourInputFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722911D4F260A002B398B /* GPUImageFourInputFilter.m */; }; + 843723FF1D4F260A002B398B /* GPUImageFramebuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722921D4F260A002B398B /* GPUImageFramebuffer.h */; }; + 843724001D4F260A002B398B /* GPUImageFramebuffer.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722931D4F260A002B398B /* GPUImageFramebuffer.m */; }; + 843724011D4F260A002B398B /* GPUImageFramebufferCache.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722941D4F260A002B398B /* GPUImageFramebufferCache.h */; }; + 843724021D4F260A002B398B /* GPUImageFramebufferCache.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722951D4F260A002B398B /* GPUImageFramebufferCache.m */; }; + 843724031D4F260A002B398B /* GPUImageGammaFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722961D4F260A002B398B /* GPUImageGammaFilter.h */; }; + 843724041D4F260A002B398B /* GPUImageGammaFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722971D4F260A002B398B /* GPUImageGammaFilter.m */; }; + 843724051D4F260A002B398B /* GPUImageGaussianBlurFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722981D4F260A002B398B /* GPUImageGaussianBlurFilter.h */; }; + 843724061D4F260A002B398B /* GPUImageGaussianBlurFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722991D4F260A002B398B /* GPUImageGaussianBlurFilter.m */; }; + 843724071D4F260A002B398B /* GPUImageGaussianBlurPositionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437229A1D4F260A002B398B /* GPUImageGaussianBlurPositionFilter.h */; }; + 843724081D4F260A002B398B /* GPUImageGaussianBlurPositionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437229B1D4F260A002B398B /* GPUImageGaussianBlurPositionFilter.m */; }; + 843724091D4F260A002B398B /* GPUImageGaussianSelectiveBlurFilter.h in Headers 
*/ = {isa = PBXBuildFile; fileRef = 8437229C1D4F260A002B398B /* GPUImageGaussianSelectiveBlurFilter.h */; }; + 8437240A1D4F260A002B398B /* GPUImageGaussianSelectiveBlurFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437229D1D4F260A002B398B /* GPUImageGaussianSelectiveBlurFilter.m */; }; + 8437240B1D4F260A002B398B /* GPUImageGlassSphereFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437229E1D4F260A002B398B /* GPUImageGlassSphereFilter.h */; }; + 8437240C1D4F260A002B398B /* GPUImageGlassSphereFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437229F1D4F260A002B398B /* GPUImageGlassSphereFilter.m */; }; + 8437240D1D4F260A002B398B /* GPUImageGrayscaleFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722A01D4F260A002B398B /* GPUImageGrayscaleFilter.h */; }; + 8437240E1D4F260A002B398B /* GPUImageGrayscaleFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722A11D4F260A002B398B /* GPUImageGrayscaleFilter.m */; }; + 8437240F1D4F260A002B398B /* GPUImageHalftoneFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722A21D4F260A002B398B /* GPUImageHalftoneFilter.h */; }; + 843724101D4F260A002B398B /* GPUImageHalftoneFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722A31D4F260A002B398B /* GPUImageHalftoneFilter.m */; }; + 843724111D4F260A002B398B /* GPUImageHardLightBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722A41D4F260A002B398B /* GPUImageHardLightBlendFilter.h */; }; + 843724121D4F260A002B398B /* GPUImageHardLightBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722A51D4F260A002B398B /* GPUImageHardLightBlendFilter.m */; }; + 843724131D4F260A002B398B /* GPUImageHarrisCornerDetectionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722A61D4F260A002B398B /* GPUImageHarrisCornerDetectionFilter.h */; }; + 843724141D4F260A002B398B /* GPUImageHarrisCornerDetectionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722A71D4F260A002B398B /* 
GPUImageHarrisCornerDetectionFilter.m */; }; + 843724151D4F260A002B398B /* GPUImageHazeFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722A81D4F260A002B398B /* GPUImageHazeFilter.h */; }; + 843724161D4F260A002B398B /* GPUImageHazeFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722A91D4F260A002B398B /* GPUImageHazeFilter.m */; }; + 843724171D4F260A002B398B /* GPUImageHighlightShadowFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722AA1D4F260A002B398B /* GPUImageHighlightShadowFilter.h */; }; + 843724181D4F260A002B398B /* GPUImageHighlightShadowFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722AB1D4F260A002B398B /* GPUImageHighlightShadowFilter.m */; }; + 843724191D4F260A002B398B /* GPUImageHighlightShadowTintFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722AC1D4F260A002B398B /* GPUImageHighlightShadowTintFilter.h */; }; + 8437241A1D4F260A002B398B /* GPUImageHighlightShadowTintFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722AD1D4F260A002B398B /* GPUImageHighlightShadowTintFilter.m */; }; + 8437241B1D4F260A002B398B /* GPUImageHighPassFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722AE1D4F260A002B398B /* GPUImageHighPassFilter.h */; }; + 8437241C1D4F260A002B398B /* GPUImageHighPassFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722AF1D4F260A002B398B /* GPUImageHighPassFilter.m */; }; + 8437241D1D4F260A002B398B /* GPUImageHistogramEqualizationFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722B01D4F260A002B398B /* GPUImageHistogramEqualizationFilter.h */; }; + 8437241E1D4F260A002B398B /* GPUImageHistogramEqualizationFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722B11D4F260A002B398B /* GPUImageHistogramEqualizationFilter.m */; }; + 8437241F1D4F260A002B398B /* GPUImageHistogramFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722B21D4F260A002B398B /* GPUImageHistogramFilter.h */; }; + 843724201D4F260A002B398B /* GPUImageHistogramFilter.m in 
Sources */ = {isa = PBXBuildFile; fileRef = 843722B31D4F260A002B398B /* GPUImageHistogramFilter.m */; }; + 843724211D4F260A002B398B /* GPUImageHistogramGenerator.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722B41D4F260A002B398B /* GPUImageHistogramGenerator.h */; }; + 843724221D4F260A002B398B /* GPUImageHistogramGenerator.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722B51D4F260A002B398B /* GPUImageHistogramGenerator.m */; }; + 843724231D4F260A002B398B /* GPUImageHoughTransformLineDetector.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722B61D4F260A002B398B /* GPUImageHoughTransformLineDetector.h */; }; + 843724241D4F260A002B398B /* GPUImageHoughTransformLineDetector.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722B71D4F260A002B398B /* GPUImageHoughTransformLineDetector.m */; }; + 843724251D4F260A002B398B /* GPUImageHSBFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722B81D4F260A002B398B /* GPUImageHSBFilter.h */; }; + 843724261D4F260A002B398B /* GPUImageHSBFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722B91D4F260A002B398B /* GPUImageHSBFilter.m */; }; + 843724271D4F260A002B398B /* GPUImageHueBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722BA1D4F260A002B398B /* GPUImageHueBlendFilter.h */; }; + 843724281D4F260A002B398B /* GPUImageHueBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722BB1D4F260A002B398B /* GPUImageHueBlendFilter.m */; }; + 843724291D4F260A002B398B /* GPUImageHueFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722BC1D4F260A002B398B /* GPUImageHueFilter.h */; }; + 8437242A1D4F260A002B398B /* GPUImageHueFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722BD1D4F260A002B398B /* GPUImageHueFilter.m */; }; + 8437242B1D4F260A002B398B /* GPUImageiOSBlurFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722BE1D4F260A002B398B /* GPUImageiOSBlurFilter.h */; }; + 8437242C1D4F260A002B398B /* GPUImageiOSBlurFilter.m in Sources */ = {isa = PBXBuildFile; 
fileRef = 843722BF1D4F260A002B398B /* GPUImageiOSBlurFilter.m */; }; + 8437242D1D4F260A002B398B /* GPUImageJFAVoronoiFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722C01D4F260A002B398B /* GPUImageJFAVoronoiFilter.h */; }; + 8437242E1D4F260A002B398B /* GPUImageJFAVoronoiFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722C11D4F260A002B398B /* GPUImageJFAVoronoiFilter.m */; }; + 8437242F1D4F260A002B398B /* GPUImageKuwaharaFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722C21D4F260A002B398B /* GPUImageKuwaharaFilter.h */; }; + 843724301D4F260A002B398B /* GPUImageKuwaharaFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722C31D4F260A002B398B /* GPUImageKuwaharaFilter.m */; }; + 843724311D4F260A002B398B /* GPUImageKuwaharaRadius3Filter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722C41D4F260A002B398B /* GPUImageKuwaharaRadius3Filter.h */; }; + 843724321D4F260A002B398B /* GPUImageKuwaharaRadius3Filter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722C51D4F260A002B398B /* GPUImageKuwaharaRadius3Filter.m */; }; + 843724331D4F260A002B398B /* GPUImageLanczosResamplingFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722C61D4F260A002B398B /* GPUImageLanczosResamplingFilter.h */; }; + 843724341D4F260A002B398B /* GPUImageLanczosResamplingFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722C71D4F260A002B398B /* GPUImageLanczosResamplingFilter.m */; }; + 843724351D4F260A002B398B /* GPUImageLaplacianFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722C81D4F260A002B398B /* GPUImageLaplacianFilter.h */; }; + 843724361D4F260A002B398B /* GPUImageLaplacianFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722C91D4F260A002B398B /* GPUImageLaplacianFilter.m */; }; + 843724371D4F260A002B398B /* GPUImageLevelsFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722CA1D4F260A002B398B /* GPUImageLevelsFilter.h */; }; + 843724381D4F260A002B398B /* GPUImageLevelsFilter.m in Sources */ = {isa = 
PBXBuildFile; fileRef = 843722CB1D4F260A002B398B /* GPUImageLevelsFilter.m */; }; + 843724391D4F260A002B398B /* GPUImageLightenBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722CC1D4F260A002B398B /* GPUImageLightenBlendFilter.h */; }; + 8437243A1D4F260A002B398B /* GPUImageLightenBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722CD1D4F260A002B398B /* GPUImageLightenBlendFilter.m */; }; + 8437243B1D4F260A002B398B /* GPUImageLinearBurnBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722CE1D4F260A002B398B /* GPUImageLinearBurnBlendFilter.h */; }; + 8437243C1D4F260A002B398B /* GPUImageLinearBurnBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722CF1D4F260A002B398B /* GPUImageLinearBurnBlendFilter.m */; }; + 8437243D1D4F260A002B398B /* GPUImageLineGenerator.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722D01D4F260A002B398B /* GPUImageLineGenerator.h */; }; + 8437243E1D4F260A002B398B /* GPUImageLineGenerator.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722D11D4F260A002B398B /* GPUImageLineGenerator.m */; }; + 8437243F1D4F260A002B398B /* GPUImageLocalBinaryPatternFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722D21D4F260A002B398B /* GPUImageLocalBinaryPatternFilter.h */; }; + 843724401D4F260A002B398B /* GPUImageLocalBinaryPatternFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722D31D4F260A002B398B /* GPUImageLocalBinaryPatternFilter.m */; }; + 843724411D4F260A002B398B /* GPUImageLookupFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722D41D4F260A002B398B /* GPUImageLookupFilter.h */; }; + 843724421D4F260A002B398B /* GPUImageLookupFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722D51D4F260A002B398B /* GPUImageLookupFilter.m */; }; + 843724431D4F260A002B398B /* GPUImageLowPassFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722D61D4F260A002B398B /* GPUImageLowPassFilter.h */; }; + 843724441D4F260A002B398B /* GPUImageLowPassFilter.m in Sources 
*/ = {isa = PBXBuildFile; fileRef = 843722D71D4F260A002B398B /* GPUImageLowPassFilter.m */; }; + 843724451D4F260A002B398B /* GPUImageLuminanceRangeFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722D81D4F260A002B398B /* GPUImageLuminanceRangeFilter.h */; }; + 843724461D4F260A002B398B /* GPUImageLuminanceRangeFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722D91D4F260A002B398B /* GPUImageLuminanceRangeFilter.m */; }; + 843724471D4F260A002B398B /* GPUImageLuminanceThresholdFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722DA1D4F260A002B398B /* GPUImageLuminanceThresholdFilter.h */; }; + 843724481D4F260A002B398B /* GPUImageLuminanceThresholdFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722DB1D4F260A002B398B /* GPUImageLuminanceThresholdFilter.m */; }; + 843724491D4F260A002B398B /* GPUImageLuminosity.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722DC1D4F260A002B398B /* GPUImageLuminosity.h */; }; + 8437244A1D4F260A002B398B /* GPUImageLuminosity.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722DD1D4F260A002B398B /* GPUImageLuminosity.m */; }; + 8437244B1D4F260A002B398B /* GPUImageLuminosityBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722DE1D4F260A002B398B /* GPUImageLuminosityBlendFilter.h */; }; + 8437244C1D4F260A002B398B /* GPUImageLuminosityBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722DF1D4F260A002B398B /* GPUImageLuminosityBlendFilter.m */; }; + 8437244D1D4F260A002B398B /* GPUImageMaskFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722E01D4F260A002B398B /* GPUImageMaskFilter.h */; }; + 8437244E1D4F260A002B398B /* GPUImageMaskFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722E11D4F260A002B398B /* GPUImageMaskFilter.m */; }; + 8437244F1D4F260A002B398B /* GPUImageMedianFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722E21D4F260A002B398B /* GPUImageMedianFilter.h */; }; + 843724501D4F260A002B398B /* GPUImageMedianFilter.m in Sources */ 
= {isa = PBXBuildFile; fileRef = 843722E31D4F260A002B398B /* GPUImageMedianFilter.m */; }; + 843724511D4F260A002B398B /* GPUImageMissEtikateFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722E41D4F260A002B398B /* GPUImageMissEtikateFilter.h */; }; + 843724521D4F260A002B398B /* GPUImageMissEtikateFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722E51D4F260A002B398B /* GPUImageMissEtikateFilter.m */; }; + 843724531D4F260A002B398B /* GPUImageMonochromeFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722E61D4F260A002B398B /* GPUImageMonochromeFilter.h */; }; + 843724541D4F260A002B398B /* GPUImageMonochromeFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722E71D4F260A002B398B /* GPUImageMonochromeFilter.m */; }; + 843724551D4F260A002B398B /* GPUImageMosaicFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722E81D4F260A002B398B /* GPUImageMosaicFilter.h */; }; + 843724561D4F260A002B398B /* GPUImageMosaicFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722E91D4F260A002B398B /* GPUImageMosaicFilter.m */; }; + 843724571D4F260A002B398B /* GPUImageMotionBlurFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722EA1D4F260A002B398B /* GPUImageMotionBlurFilter.h */; }; + 843724581D4F260A002B398B /* GPUImageMotionBlurFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722EB1D4F260A002B398B /* GPUImageMotionBlurFilter.m */; }; + 843724591D4F260A002B398B /* GPUImageMotionDetector.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722EC1D4F260A002B398B /* GPUImageMotionDetector.h */; }; + 8437245A1D4F260A002B398B /* GPUImageMotionDetector.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722ED1D4F260A002B398B /* GPUImageMotionDetector.m */; }; + 8437245B1D4F260A002B398B /* GPUImageMovie.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722EE1D4F260A002B398B /* GPUImageMovie.h */; }; + 8437245C1D4F260A002B398B /* GPUImageMovie.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722EF1D4F260A002B398B /* 
GPUImageMovie.m */; }; + 8437245D1D4F260A002B398B /* GPUImageMovieComposition.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722F01D4F260A002B398B /* GPUImageMovieComposition.h */; }; + 8437245E1D4F260A002B398B /* GPUImageMovieComposition.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722F11D4F260A002B398B /* GPUImageMovieComposition.m */; }; + 8437245F1D4F260A002B398B /* GPUImageMultiplyBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722F21D4F260A002B398B /* GPUImageMultiplyBlendFilter.h */; }; + 843724601D4F260A002B398B /* GPUImageMultiplyBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722F31D4F260A002B398B /* GPUImageMultiplyBlendFilter.m */; }; + 843724611D4F260A002B398B /* GPUImageNobleCornerDetectionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722F41D4F260A002B398B /* GPUImageNobleCornerDetectionFilter.h */; }; + 843724621D4F260A002B398B /* GPUImageNobleCornerDetectionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722F51D4F260A002B398B /* GPUImageNobleCornerDetectionFilter.m */; }; + 843724631D4F260A002B398B /* GPUImageNonMaximumSuppressionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722F61D4F260A002B398B /* GPUImageNonMaximumSuppressionFilter.h */; }; + 843724641D4F260A002B398B /* GPUImageNonMaximumSuppressionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722F71D4F260A002B398B /* GPUImageNonMaximumSuppressionFilter.m */; }; + 843724651D4F260A002B398B /* GPUImageNormalBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722F81D4F260A002B398B /* GPUImageNormalBlendFilter.h */; }; + 843724661D4F260A002B398B /* GPUImageNormalBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722F91D4F260A002B398B /* GPUImageNormalBlendFilter.m */; }; + 843724671D4F260A002B398B /* GPUImageOpacityFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722FA1D4F260A002B398B /* GPUImageOpacityFilter.h */; }; + 843724681D4F260A002B398B /* GPUImageOpacityFilter.m in 
Sources */ = {isa = PBXBuildFile; fileRef = 843722FB1D4F260A002B398B /* GPUImageOpacityFilter.m */; }; + 843724691D4F260A002B398B /* GPUImageOpeningFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722FC1D4F260A002B398B /* GPUImageOpeningFilter.h */; }; + 8437246A1D4F260A002B398B /* GPUImageOpeningFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722FD1D4F260A002B398B /* GPUImageOpeningFilter.m */; }; + 8437246B1D4F260A002B398B /* GPUImageOutput.h in Headers */ = {isa = PBXBuildFile; fileRef = 843722FE1D4F260A002B398B /* GPUImageOutput.h */; }; + 8437246C1D4F260A002B398B /* GPUImageOutput.m in Sources */ = {isa = PBXBuildFile; fileRef = 843722FF1D4F260A002B398B /* GPUImageOutput.m */; }; + 8437246D1D4F260A002B398B /* GPUImageOverlayBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723001D4F260A002B398B /* GPUImageOverlayBlendFilter.h */; }; + 8437246E1D4F260A002B398B /* GPUImageOverlayBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723011D4F260A002B398B /* GPUImageOverlayBlendFilter.m */; }; + 8437246F1D4F260A002B398B /* GPUImageParallelCoordinateLineTransformFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723021D4F260A002B398B /* GPUImageParallelCoordinateLineTransformFilter.h */; }; + 843724701D4F260A002B398B /* GPUImageParallelCoordinateLineTransformFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723031D4F260A002B398B /* GPUImageParallelCoordinateLineTransformFilter.m */; }; + 843724711D4F260A002B398B /* GPUImagePerlinNoiseFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723041D4F260A002B398B /* GPUImagePerlinNoiseFilter.h */; }; + 843724721D4F260A002B398B /* GPUImagePerlinNoiseFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723051D4F260A002B398B /* GPUImagePerlinNoiseFilter.m */; }; + 843724731D4F260A002B398B /* GPUImagePinchDistortionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723061D4F260A002B398B /* GPUImagePinchDistortionFilter.h */; }; + 
843724741D4F260A002B398B /* GPUImagePinchDistortionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723071D4F260A002B398B /* GPUImagePinchDistortionFilter.m */; }; + 843724751D4F260A002B398B /* GPUImagePixellateFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723081D4F260A002B398B /* GPUImagePixellateFilter.h */; }; + 843724761D4F260A002B398B /* GPUImagePixellateFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723091D4F260A002B398B /* GPUImagePixellateFilter.m */; }; + 843724771D4F260A002B398B /* GPUImagePixellatePositionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437230A1D4F260A002B398B /* GPUImagePixellatePositionFilter.h */; }; + 843724781D4F260A002B398B /* GPUImagePixellatePositionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437230B1D4F260A002B398B /* GPUImagePixellatePositionFilter.m */; }; + 843724791D4F260A002B398B /* GPUImagePoissonBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437230C1D4F260A002B398B /* GPUImagePoissonBlendFilter.h */; }; + 8437247A1D4F260A002B398B /* GPUImagePoissonBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437230D1D4F260A002B398B /* GPUImagePoissonBlendFilter.m */; }; + 8437247B1D4F260A002B398B /* GPUImagePolarPixellateFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437230E1D4F260A002B398B /* GPUImagePolarPixellateFilter.h */; }; + 8437247C1D4F260A002B398B /* GPUImagePolarPixellateFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437230F1D4F260A002B398B /* GPUImagePolarPixellateFilter.m */; }; + 8437247D1D4F260A002B398B /* GPUImagePolkaDotFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723101D4F260A002B398B /* GPUImagePolkaDotFilter.h */; }; + 8437247E1D4F260A002B398B /* GPUImagePolkaDotFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723111D4F260A002B398B /* GPUImagePolkaDotFilter.m */; }; + 8437247F1D4F260A002B398B /* GPUImagePosterizeFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 
843723121D4F260A002B398B /* GPUImagePosterizeFilter.h */; }; + 843724801D4F260A002B398B /* GPUImagePosterizeFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723131D4F260A002B398B /* GPUImagePosterizeFilter.m */; }; + 843724811D4F260A002B398B /* GPUImagePrewittEdgeDetectionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723141D4F260A002B398B /* GPUImagePrewittEdgeDetectionFilter.h */; }; + 843724821D4F260A002B398B /* GPUImagePrewittEdgeDetectionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723151D4F260A002B398B /* GPUImagePrewittEdgeDetectionFilter.m */; }; + 843724831D4F260A002B398B /* GPUImageRawDataInput.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723161D4F260A002B398B /* GPUImageRawDataInput.h */; }; + 843724841D4F260A002B398B /* GPUImageRawDataInput.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723171D4F260A002B398B /* GPUImageRawDataInput.m */; }; + 843724851D4F260A002B398B /* GPUImageRawDataOutput.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723181D4F260A002B398B /* GPUImageRawDataOutput.h */; }; + 843724861D4F260A002B398B /* GPUImageRawDataOutput.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723191D4F260A002B398B /* GPUImageRawDataOutput.m */; }; + 843724871D4F260A002B398B /* GPUImageRGBClosingFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437231A1D4F260A002B398B /* GPUImageRGBClosingFilter.h */; }; + 843724881D4F260A002B398B /* GPUImageRGBClosingFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437231B1D4F260A002B398B /* GPUImageRGBClosingFilter.m */; }; + 843724891D4F260A002B398B /* GPUImageRGBDilationFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437231C1D4F260A002B398B /* GPUImageRGBDilationFilter.h */; }; + 8437248A1D4F260A002B398B /* GPUImageRGBDilationFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437231D1D4F260A002B398B /* GPUImageRGBDilationFilter.m */; }; + 8437248B1D4F260A002B398B /* GPUImageRGBErosionFilter.h in Headers */ = {isa = PBXBuildFile; 
fileRef = 8437231E1D4F260A002B398B /* GPUImageRGBErosionFilter.h */; }; + 8437248C1D4F260A002B398B /* GPUImageRGBErosionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437231F1D4F260A002B398B /* GPUImageRGBErosionFilter.m */; }; + 8437248D1D4F260A002B398B /* GPUImageRGBFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723201D4F260A002B398B /* GPUImageRGBFilter.h */; }; + 8437248E1D4F260A002B398B /* GPUImageRGBFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723211D4F260A002B398B /* GPUImageRGBFilter.m */; }; + 8437248F1D4F260A002B398B /* GPUImageRGBOpeningFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723221D4F260A002B398B /* GPUImageRGBOpeningFilter.h */; }; + 843724901D4F260A002B398B /* GPUImageRGBOpeningFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723231D4F260A002B398B /* GPUImageRGBOpeningFilter.m */; }; + 843724911D4F260A002B398B /* GPUImageSaturationBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723241D4F260A002B398B /* GPUImageSaturationBlendFilter.h */; }; + 843724921D4F260A002B398B /* GPUImageSaturationBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723251D4F260A002B398B /* GPUImageSaturationBlendFilter.m */; }; + 843724931D4F260A002B398B /* GPUImageSaturationFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723261D4F260A002B398B /* GPUImageSaturationFilter.h */; }; + 843724941D4F260A002B398B /* GPUImageSaturationFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723271D4F260A002B398B /* GPUImageSaturationFilter.m */; }; + 843724951D4F260A002B398B /* GPUImageScreenBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723281D4F260A002B398B /* GPUImageScreenBlendFilter.h */; }; + 843724961D4F260A002B398B /* GPUImageScreenBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723291D4F260A002B398B /* GPUImageScreenBlendFilter.m */; }; + 843724971D4F260A002B398B /* GPUImageSepiaFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 
8437232A1D4F260A002B398B /* GPUImageSepiaFilter.h */; }; + 843724981D4F260A002B398B /* GPUImageSepiaFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437232B1D4F260A002B398B /* GPUImageSepiaFilter.m */; }; + 843724991D4F260A002B398B /* GPUImageSharpenFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437232C1D4F260A002B398B /* GPUImageSharpenFilter.h */; }; + 8437249A1D4F260A002B398B /* GPUImageSharpenFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437232D1D4F260A002B398B /* GPUImageSharpenFilter.m */; }; + 8437249B1D4F260A002B398B /* GPUImageShiTomasiFeatureDetectionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437232E1D4F260A002B398B /* GPUImageShiTomasiFeatureDetectionFilter.h */; }; + 8437249C1D4F260A002B398B /* GPUImageShiTomasiFeatureDetectionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437232F1D4F260A002B398B /* GPUImageShiTomasiFeatureDetectionFilter.m */; }; + 8437249D1D4F260A002B398B /* GPUImageSingleComponentGaussianBlurFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723301D4F260A002B398B /* GPUImageSingleComponentGaussianBlurFilter.h */; }; + 8437249E1D4F260A002B398B /* GPUImageSingleComponentGaussianBlurFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723311D4F260A002B398B /* GPUImageSingleComponentGaussianBlurFilter.m */; }; + 8437249F1D4F260A002B398B /* GPUImageSketchFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723321D4F260A002B398B /* GPUImageSketchFilter.h */; }; + 843724A01D4F260A002B398B /* GPUImageSketchFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723331D4F260A002B398B /* GPUImageSketchFilter.m */; }; + 843724A11D4F260A002B398B /* GPUImageSkinToneFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723341D4F260A002B398B /* GPUImageSkinToneFilter.h */; }; + 843724A21D4F260A002B398B /* GPUImageSkinToneFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723351D4F260A002B398B /* GPUImageSkinToneFilter.m */; }; + 843724A31D4F260A002B398B /* 
GPUImageSmoothToonFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723361D4F260A002B398B /* GPUImageSmoothToonFilter.h */; }; + 843724A41D4F260A002B398B /* GPUImageSmoothToonFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723371D4F260A002B398B /* GPUImageSmoothToonFilter.m */; }; + 843724A51D4F260A002B398B /* GPUImageSobelEdgeDetectionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723381D4F260A002B398B /* GPUImageSobelEdgeDetectionFilter.h */; }; + 843724A61D4F260A002B398B /* GPUImageSobelEdgeDetectionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723391D4F260A002B398B /* GPUImageSobelEdgeDetectionFilter.m */; }; + 843724A71D4F260A002B398B /* GPUImageSoftEleganceFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437233A1D4F260A002B398B /* GPUImageSoftEleganceFilter.h */; }; + 843724A81D4F260A002B398B /* GPUImageSoftEleganceFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437233B1D4F260A002B398B /* GPUImageSoftEleganceFilter.m */; }; + 843724A91D4F260A002B398B /* GPUImageSoftLightBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437233C1D4F260A002B398B /* GPUImageSoftLightBlendFilter.h */; }; + 843724AA1D4F260A002B398B /* GPUImageSoftLightBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437233D1D4F260A002B398B /* GPUImageSoftLightBlendFilter.m */; }; + 843724AB1D4F260A002B398B /* GPUImageSolarizeFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437233E1D4F260A002B398B /* GPUImageSolarizeFilter.h */; }; + 843724AC1D4F260A002B398B /* GPUImageSolarizeFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437233F1D4F260A002B398B /* GPUImageSolarizeFilter.m */; }; + 843724AD1D4F260A002B398B /* GPUImageSolidColorGenerator.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723401D4F260A002B398B /* GPUImageSolidColorGenerator.h */; }; + 843724AE1D4F260A002B398B /* GPUImageSolidColorGenerator.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723411D4F260A002B398B /* 
GPUImageSolidColorGenerator.m */; }; + 843724AF1D4F260A002B398B /* GPUImageSourceOverBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723421D4F260A002B398B /* GPUImageSourceOverBlendFilter.h */; }; + 843724B01D4F260A002B398B /* GPUImageSourceOverBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723431D4F260A002B398B /* GPUImageSourceOverBlendFilter.m */; }; + 843724B11D4F260A002B398B /* GPUImageSphereRefractionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723441D4F260A002B398B /* GPUImageSphereRefractionFilter.h */; }; + 843724B21D4F260A002B398B /* GPUImageSphereRefractionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723451D4F260A002B398B /* GPUImageSphereRefractionFilter.m */; }; + 843724B31D4F260A002B398B /* GPUImageStillCamera.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723461D4F260A002B398B /* GPUImageStillCamera.h */; }; + 843724B41D4F260A002B398B /* GPUImageStillCamera.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723471D4F260A002B398B /* GPUImageStillCamera.m */; }; + 843724B51D4F260A002B398B /* GPUImageStretchDistortionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723481D4F260A002B398B /* GPUImageStretchDistortionFilter.h */; }; + 843724B61D4F260A002B398B /* GPUImageStretchDistortionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723491D4F260A002B398B /* GPUImageStretchDistortionFilter.m */; }; + 843724B71D4F260A002B398B /* GPUImageSubtractBlendFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437234A1D4F260A002B398B /* GPUImageSubtractBlendFilter.h */; }; + 843724B81D4F260A002B398B /* GPUImageSubtractBlendFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437234B1D4F260A002B398B /* GPUImageSubtractBlendFilter.m */; }; + 843724B91D4F260A002B398B /* GPUImageSwirlFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437234C1D4F260A002B398B /* GPUImageSwirlFilter.h */; }; + 843724BA1D4F260A002B398B /* GPUImageSwirlFilter.m in Sources */ = {isa = 
PBXBuildFile; fileRef = 8437234D1D4F260A002B398B /* GPUImageSwirlFilter.m */; }; + 843724BB1D4F260A002B398B /* GPUImageTextureInput.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437234E1D4F260A002B398B /* GPUImageTextureInput.h */; }; + 843724BC1D4F260A002B398B /* GPUImageTextureInput.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437234F1D4F260A002B398B /* GPUImageTextureInput.m */; }; + 843724BD1D4F260A002B398B /* GPUImageTextureOutput.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723501D4F260A002B398B /* GPUImageTextureOutput.h */; }; + 843724BE1D4F260A002B398B /* GPUImageTextureOutput.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723511D4F260A002B398B /* GPUImageTextureOutput.m */; }; + 843724BF1D4F260A002B398B /* GPUImageThreeInputFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723521D4F260A002B398B /* GPUImageThreeInputFilter.h */; }; + 843724C01D4F260A002B398B /* GPUImageThreeInputFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723531D4F260A002B398B /* GPUImageThreeInputFilter.m */; }; + 843724C11D4F260A002B398B /* GPUImageThresholdEdgeDetectionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723541D4F260A002B398B /* GPUImageThresholdEdgeDetectionFilter.h */; }; + 843724C21D4F260A002B398B /* GPUImageThresholdEdgeDetectionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723551D4F260A002B398B /* GPUImageThresholdEdgeDetectionFilter.m */; }; + 843724C31D4F260A002B398B /* GPUImageThresholdedNonMaximumSuppressionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723561D4F260A002B398B /* GPUImageThresholdedNonMaximumSuppressionFilter.h */; }; + 843724C41D4F260A002B398B /* GPUImageThresholdedNonMaximumSuppressionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723571D4F260A002B398B /* GPUImageThresholdedNonMaximumSuppressionFilter.m */; }; + 843724C51D4F260A002B398B /* GPUImageThresholdSketchFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723581D4F260A002B398B /* 
GPUImageThresholdSketchFilter.h */; }; + 843724C61D4F260A002B398B /* GPUImageThresholdSketchFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723591D4F260A002B398B /* GPUImageThresholdSketchFilter.m */; }; + 843724C71D4F260A002B398B /* GPUImageTiltShiftFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437235A1D4F260A002B398B /* GPUImageTiltShiftFilter.h */; }; + 843724C81D4F260A002B398B /* GPUImageTiltShiftFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437235B1D4F260A002B398B /* GPUImageTiltShiftFilter.m */; }; + 843724C91D4F260A002B398B /* GPUImageToneCurveFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437235C1D4F260A002B398B /* GPUImageToneCurveFilter.h */; }; + 843724CA1D4F260A002B398B /* GPUImageToneCurveFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437235D1D4F260A002B398B /* GPUImageToneCurveFilter.m */; }; + 843724CB1D4F260A002B398B /* GPUImageToonFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437235E1D4F260A002B398B /* GPUImageToonFilter.h */; }; + 843724CC1D4F260A002B398B /* GPUImageToonFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437235F1D4F260A002B398B /* GPUImageToonFilter.m */; }; + 843724CD1D4F260A002B398B /* GPUImageTransformFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723601D4F260A002B398B /* GPUImageTransformFilter.h */; }; + 843724CE1D4F260A002B398B /* GPUImageTransformFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723611D4F260A002B398B /* GPUImageTransformFilter.m */; }; + 843724CF1D4F260A002B398B /* GPUImageTwoInputCrossTextureSamplingFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723621D4F260A002B398B /* GPUImageTwoInputCrossTextureSamplingFilter.h */; }; + 843724D01D4F260A002B398B /* GPUImageTwoInputCrossTextureSamplingFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723631D4F260A002B398B /* GPUImageTwoInputCrossTextureSamplingFilter.m */; }; + 843724D11D4F260A002B398B /* GPUImageTwoInputFilter.h in Headers */ = {isa = 
PBXBuildFile; fileRef = 843723641D4F260A002B398B /* GPUImageTwoInputFilter.h */; }; + 843724D21D4F260A002B398B /* GPUImageTwoInputFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723651D4F260A002B398B /* GPUImageTwoInputFilter.m */; }; + 843724D31D4F260A002B398B /* GPUImageTwoPassFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723661D4F260A002B398B /* GPUImageTwoPassFilter.h */; }; + 843724D41D4F260A002B398B /* GPUImageTwoPassFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723671D4F260A002B398B /* GPUImageTwoPassFilter.m */; }; + 843724D51D4F260A002B398B /* GPUImageTwoPassTextureSamplingFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723681D4F260A002B398B /* GPUImageTwoPassTextureSamplingFilter.h */; }; + 843724D61D4F260A002B398B /* GPUImageTwoPassTextureSamplingFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723691D4F260A002B398B /* GPUImageTwoPassTextureSamplingFilter.m */; }; + 843724D71D4F260A002B398B /* GPUImageUIElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437236A1D4F260A002B398B /* GPUImageUIElement.h */; }; + 843724D81D4F260A002B398B /* GPUImageUIElement.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437236B1D4F260A002B398B /* GPUImageUIElement.m */; }; + 843724D91D4F260A002B398B /* GPUImageUnsharpMaskFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437236C1D4F260A002B398B /* GPUImageUnsharpMaskFilter.h */; }; + 843724DA1D4F260A002B398B /* GPUImageUnsharpMaskFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437236D1D4F260A002B398B /* GPUImageUnsharpMaskFilter.m */; }; + 843724DB1D4F260A002B398B /* GPUImageVideoCamera.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437236E1D4F260A002B398B /* GPUImageVideoCamera.h */; }; + 843724DC1D4F260A002B398B /* GPUImageVideoCamera.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437236F1D4F260A002B398B /* GPUImageVideoCamera.m */; }; + 843724DD1D4F260A002B398B /* GPUImageVignetteFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 
843723701D4F260A002B398B /* GPUImageVignetteFilter.h */; }; + 843724DE1D4F260A002B398B /* GPUImageVignetteFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723711D4F260A002B398B /* GPUImageVignetteFilter.m */; }; + 843724DF1D4F260A002B398B /* GPUImageVoronoiConsumerFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723721D4F260A002B398B /* GPUImageVoronoiConsumerFilter.h */; }; + 843724E01D4F260A002B398B /* GPUImageVoronoiConsumerFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723731D4F260A002B398B /* GPUImageVoronoiConsumerFilter.m */; }; + 843724E11D4F260A002B398B /* GPUImageWeakPixelInclusionFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723741D4F260A002B398B /* GPUImageWeakPixelInclusionFilter.h */; }; + 843724E21D4F260A002B398B /* GPUImageWeakPixelInclusionFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723751D4F260A002B398B /* GPUImageWeakPixelInclusionFilter.m */; }; + 843724E31D4F260A002B398B /* GPUImageWhiteBalanceFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723761D4F260A002B398B /* GPUImageWhiteBalanceFilter.h */; }; + 843724E41D4F260A002B398B /* GPUImageWhiteBalanceFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723771D4F260A002B398B /* GPUImageWhiteBalanceFilter.m */; }; + 843724E51D4F260A002B398B /* GPUImageXYDerivativeFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723781D4F260A002B398B /* GPUImageXYDerivativeFilter.h */; }; + 843724E61D4F260A002B398B /* GPUImageXYDerivativeFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723791D4F260A002B398B /* GPUImageXYDerivativeFilter.m */; }; + 843724E71D4F260A002B398B /* GPUImageZoomBlurFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437237A1D4F260A002B398B /* GPUImageZoomBlurFilter.h */; }; + 843724E81D4F260A002B398B /* GPUImageZoomBlurFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 8437237B1D4F260A002B398B /* GPUImageZoomBlurFilter.m */; }; + 843724E91D4F260A002B398B /* GPUImageFramework.h in 
Headers */ = {isa = PBXBuildFile; fileRef = 8437237E1D4F260A002B398B /* GPUImageFramework.h */; }; + 843724EA1D4F260A002B398B /* GPUImageContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437237F1D4F260A002B398B /* GPUImageContext.h */; }; + 843724EB1D4F260A002B398B /* GPUImageContext.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723801D4F260A002B398B /* GPUImageContext.m */; }; + 843724EC1D4F260A002B398B /* GPUImageMovieWriter.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723811D4F260A002B398B /* GPUImageMovieWriter.h */; }; + 843724ED1D4F260A002B398B /* GPUImageMovieWriter.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723821D4F260A002B398B /* GPUImageMovieWriter.m */; }; + 843724EE1D4F260A002B398B /* GPUImagePicture+TextureSubimage.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723831D4F260A002B398B /* GPUImagePicture+TextureSubimage.h */; }; + 843724EF1D4F260A002B398B /* GPUImagePicture+TextureSubimage.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723841D4F260A002B398B /* GPUImagePicture+TextureSubimage.m */; }; + 843724F01D4F260A002B398B /* GPUImagePicture.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723851D4F260A002B398B /* GPUImagePicture.h */; }; + 843724F11D4F260A002B398B /* GPUImagePicture.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723861D4F260A002B398B /* GPUImagePicture.m */; }; + 843724F21D4F260A002B398B /* GPUImageView.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723871D4F260A002B398B /* GPUImageView.h */; }; + 843724F31D4F260A002B398B /* GPUImageView.m in Sources */ = {isa = PBXBuildFile; fileRef = 843723881D4F260A002B398B /* GPUImageView.m */; }; + 843724F41D4F260A002B398B /* amf.c in Sources */ = {isa = PBXBuildFile; fileRef = 8437238A1D4F260A002B398B /* amf.c */; }; + 843724F51D4F260A002B398B /* amf.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437238B1D4F260A002B398B /* amf.h */; }; + 843724F61D4F260A002B398B /* bytes.h in Headers */ = {isa = PBXBuildFile; fileRef = 
8437238C1D4F260A002B398B /* bytes.h */; }; + 843724F71D4F260A002B398B /* dh.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437238D1D4F260A002B398B /* dh.h */; }; + 843724F81D4F260A002B398B /* dhgroups.h in Headers */ = {isa = PBXBuildFile; fileRef = 8437238E1D4F260A002B398B /* dhgroups.h */; }; + 843724F91D4F260A002B398B /* error.c in Sources */ = {isa = PBXBuildFile; fileRef = 8437238F1D4F260A002B398B /* error.c */; }; + 843724FA1D4F260A002B398B /* error.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723901D4F260A002B398B /* error.h */; }; + 843724FB1D4F260A002B398B /* handshake.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723911D4F260A002B398B /* handshake.h */; }; + 843724FC1D4F260A002B398B /* hashswf.c in Sources */ = {isa = PBXBuildFile; fileRef = 843723921D4F260A002B398B /* hashswf.c */; }; + 843724FD1D4F260A002B398B /* http.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723931D4F260A002B398B /* http.h */; }; + 843724FE1D4F260A002B398B /* log.c in Sources */ = {isa = PBXBuildFile; fileRef = 843723941D4F260A002B398B /* log.c */; }; + 843724FF1D4F260A002B398B /* log.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723951D4F260A002B398B /* log.h */; }; + 843725001D4F260A002B398B /* parseurl.c in Sources */ = {isa = PBXBuildFile; fileRef = 843723961D4F260A002B398B /* parseurl.c */; }; + 843725011D4F260A002B398B /* rtmp.c in Sources */ = {isa = PBXBuildFile; fileRef = 843723971D4F260A002B398B /* rtmp.c */; }; + 843725021D4F260A002B398B /* rtmp.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723981D4F260A002B398B /* rtmp.h */; }; + 843725031D4F260A002B398B /* rtmp_sys.h in Headers */ = {isa = PBXBuildFile; fileRef = 843723991D4F260A002B398B /* rtmp_sys.h */; }; B289F1DB1D3DE77F00D9C7A5 /* LFStreamingBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = B289F1D41D3DE77F00D9C7A5 /* LFStreamingBuffer.h */; }; B289F1DC1D3DE77F00D9C7A5 /* LFStreamingBuffer.m in Sources */ = {isa = PBXBuildFile; fileRef = B289F1D51D3DE77F00D9C7A5 /* 
LFStreamingBuffer.m */; }; B289F1DD1D3DE77F00D9C7A5 /* LFStreamRtmpSocket.h in Headers */ = {isa = PBXBuildFile; fileRef = B289F1D61D3DE77F00D9C7A5 /* LFStreamRtmpSocket.h */; }; @@ -121,6 +482,368 @@ 84001FFC1D0017680026C63F /* AudioToolbox.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AudioToolbox.framework; path = System/Library/Frameworks/AudioToolbox.framework; sourceTree = SDKROOT; }; 84001FFE1D00176C0026C63F /* VideoToolbox.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = VideoToolbox.framework; path = System/Library/Frameworks/VideoToolbox.framework; sourceTree = SDKROOT; }; 840020001D0017850026C63F /* libz.tbd */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.text-based-dylib-definition"; name = libz.tbd; path = usr/lib/libz.tbd; sourceTree = SDKROOT; }; + 8437222D1D4F260A002B398B /* GLProgram.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GLProgram.h; sourceTree = ""; }; + 8437222E1D4F260A002B398B /* GLProgram.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GLProgram.m; sourceTree = ""; }; + 8437222F1D4F260A002B398B /* GPUImage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImage.h; sourceTree = ""; }; + 843722301D4F260A002B398B /* GPUImage3x3ConvolutionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImage3x3ConvolutionFilter.h; sourceTree = ""; }; + 843722311D4F260A002B398B /* GPUImage3x3ConvolutionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImage3x3ConvolutionFilter.m; sourceTree = ""; }; + 843722321D4F260A002B398B /* GPUImage3x3TextureSamplingFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImage3x3TextureSamplingFilter.h; sourceTree = ""; }; + 
843722331D4F260A002B398B /* GPUImage3x3TextureSamplingFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImage3x3TextureSamplingFilter.m; sourceTree = ""; }; + 843722341D4F260A002B398B /* GPUImageAdaptiveThresholdFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageAdaptiveThresholdFilter.h; sourceTree = ""; }; + 843722351D4F260A002B398B /* GPUImageAdaptiveThresholdFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageAdaptiveThresholdFilter.m; sourceTree = ""; }; + 843722361D4F260A002B398B /* GPUImageAddBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageAddBlendFilter.h; sourceTree = ""; }; + 843722371D4F260A002B398B /* GPUImageAddBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageAddBlendFilter.m; sourceTree = ""; }; + 843722381D4F260A002B398B /* GPUImageAlphaBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageAlphaBlendFilter.h; sourceTree = ""; }; + 843722391D4F260A002B398B /* GPUImageAlphaBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageAlphaBlendFilter.m; sourceTree = ""; }; + 8437223A1D4F260A002B398B /* GPUImageAmatorkaFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageAmatorkaFilter.h; sourceTree = ""; }; + 8437223B1D4F260A002B398B /* GPUImageAmatorkaFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageAmatorkaFilter.m; sourceTree = ""; }; + 8437223C1D4F260A002B398B /* GPUImageAverageColor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageAverageColor.h; sourceTree = ""; }; + 
8437223D1D4F260A002B398B /* GPUImageAverageColor.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageAverageColor.m; sourceTree = ""; }; + 8437223E1D4F260A002B398B /* GPUImageAverageLuminanceThresholdFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageAverageLuminanceThresholdFilter.h; sourceTree = ""; }; + 8437223F1D4F260A002B398B /* GPUImageAverageLuminanceThresholdFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageAverageLuminanceThresholdFilter.m; sourceTree = ""; }; + 843722401D4F260A002B398B /* GPUImageBilateralFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageBilateralFilter.h; sourceTree = ""; }; + 843722411D4F260A002B398B /* GPUImageBilateralFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageBilateralFilter.m; sourceTree = ""; }; + 843722421D4F260A002B398B /* GPUImageBoxBlurFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageBoxBlurFilter.h; sourceTree = ""; }; + 843722431D4F260A002B398B /* GPUImageBoxBlurFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageBoxBlurFilter.m; sourceTree = ""; }; + 843722441D4F260A002B398B /* GPUImageBrightnessFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageBrightnessFilter.h; sourceTree = ""; }; + 843722451D4F260A002B398B /* GPUImageBrightnessFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageBrightnessFilter.m; sourceTree = ""; }; + 843722461D4F260A002B398B /* GPUImageBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageBuffer.h; sourceTree = ""; }; + 
843722471D4F260A002B398B /* GPUImageBuffer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageBuffer.m; sourceTree = ""; }; + 843722481D4F260A002B398B /* GPUImageBulgeDistortionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageBulgeDistortionFilter.h; sourceTree = ""; }; + 843722491D4F260A002B398B /* GPUImageBulgeDistortionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageBulgeDistortionFilter.m; sourceTree = ""; }; + 8437224A1D4F260A002B398B /* GPUImageCannyEdgeDetectionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageCannyEdgeDetectionFilter.h; sourceTree = ""; }; + 8437224B1D4F260A002B398B /* GPUImageCannyEdgeDetectionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageCannyEdgeDetectionFilter.m; sourceTree = ""; }; + 8437224C1D4F260A002B398B /* GPUImageCGAColorspaceFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageCGAColorspaceFilter.h; sourceTree = ""; }; + 8437224D1D4F260A002B398B /* GPUImageCGAColorspaceFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageCGAColorspaceFilter.m; sourceTree = ""; }; + 8437224E1D4F260A002B398B /* GPUImageChromaKeyBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageChromaKeyBlendFilter.h; sourceTree = ""; }; + 8437224F1D4F260A002B398B /* GPUImageChromaKeyBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageChromaKeyBlendFilter.m; sourceTree = ""; }; + 843722501D4F260A002B398B /* GPUImageChromaKeyFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
GPUImageChromaKeyFilter.h; sourceTree = ""; }; + 843722511D4F260A002B398B /* GPUImageChromaKeyFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageChromaKeyFilter.m; sourceTree = ""; }; + 843722521D4F260A002B398B /* GPUImageClosingFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageClosingFilter.h; sourceTree = ""; }; + 843722531D4F260A002B398B /* GPUImageClosingFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageClosingFilter.m; sourceTree = ""; }; + 843722541D4F260A002B398B /* GPUImageColorBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageColorBlendFilter.h; sourceTree = ""; }; + 843722551D4F260A002B398B /* GPUImageColorBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageColorBlendFilter.m; sourceTree = ""; }; + 843722561D4F260A002B398B /* GPUImageColorBurnBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageColorBurnBlendFilter.h; sourceTree = ""; }; + 843722571D4F260A002B398B /* GPUImageColorBurnBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageColorBurnBlendFilter.m; sourceTree = ""; }; + 843722581D4F260A002B398B /* GPUImageColorConversion.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageColorConversion.h; sourceTree = ""; }; + 843722591D4F260A002B398B /* GPUImageColorConversion.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageColorConversion.m; sourceTree = ""; }; + 8437225A1D4F260A002B398B /* GPUImageColorDodgeBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
GPUImageColorDodgeBlendFilter.h; sourceTree = ""; }; + 8437225B1D4F260A002B398B /* GPUImageColorDodgeBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageColorDodgeBlendFilter.m; sourceTree = ""; }; + 8437225C1D4F260A002B398B /* GPUImageColorInvertFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageColorInvertFilter.h; sourceTree = ""; }; + 8437225D1D4F260A002B398B /* GPUImageColorInvertFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageColorInvertFilter.m; sourceTree = ""; }; + 8437225E1D4F260A002B398B /* GPUImageColorLocalBinaryPatternFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageColorLocalBinaryPatternFilter.h; sourceTree = ""; }; + 8437225F1D4F260A002B398B /* GPUImageColorLocalBinaryPatternFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageColorLocalBinaryPatternFilter.m; sourceTree = ""; }; + 843722601D4F260A002B398B /* GPUImageColorMatrixFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageColorMatrixFilter.h; sourceTree = ""; }; + 843722611D4F260A002B398B /* GPUImageColorMatrixFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageColorMatrixFilter.m; sourceTree = ""; }; + 843722621D4F260A002B398B /* GPUImageColorPackingFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageColorPackingFilter.h; sourceTree = ""; }; + 843722631D4F260A002B398B /* GPUImageColorPackingFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageColorPackingFilter.m; sourceTree = ""; }; + 843722641D4F260A002B398B /* GPUImageColourFASTFeatureDetector.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageColourFASTFeatureDetector.h; sourceTree = ""; }; + 843722651D4F260A002B398B /* GPUImageColourFASTFeatureDetector.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageColourFASTFeatureDetector.m; sourceTree = ""; }; + 843722661D4F260A002B398B /* GPUImageColourFASTSamplingOperation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageColourFASTSamplingOperation.h; sourceTree = ""; }; + 843722671D4F260A002B398B /* GPUImageColourFASTSamplingOperation.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageColourFASTSamplingOperation.m; sourceTree = ""; }; + 843722681D4F260A002B398B /* GPUImageContrastFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageContrastFilter.h; sourceTree = ""; }; + 843722691D4F260A002B398B /* GPUImageContrastFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageContrastFilter.m; sourceTree = ""; }; + 8437226A1D4F260A002B398B /* GPUImageCropFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageCropFilter.h; sourceTree = ""; }; + 8437226B1D4F260A002B398B /* GPUImageCropFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageCropFilter.m; sourceTree = ""; }; + 8437226C1D4F260A002B398B /* GPUImageCrosshairGenerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageCrosshairGenerator.h; sourceTree = ""; }; + 8437226D1D4F260A002B398B /* GPUImageCrosshairGenerator.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageCrosshairGenerator.m; sourceTree = ""; }; + 8437226E1D4F260A002B398B /* 
GPUImageCrosshatchFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageCrosshatchFilter.h; sourceTree = ""; }; + 8437226F1D4F260A002B398B /* GPUImageCrosshatchFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageCrosshatchFilter.m; sourceTree = ""; }; + 843722701D4F260A002B398B /* GPUImageDarkenBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageDarkenBlendFilter.h; sourceTree = ""; }; + 843722711D4F260A002B398B /* GPUImageDarkenBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageDarkenBlendFilter.m; sourceTree = ""; }; + 843722721D4F260A002B398B /* GPUImageDifferenceBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageDifferenceBlendFilter.h; sourceTree = ""; }; + 843722731D4F260A002B398B /* GPUImageDifferenceBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageDifferenceBlendFilter.m; sourceTree = ""; }; + 843722741D4F260A002B398B /* GPUImageDilationFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageDilationFilter.h; sourceTree = ""; }; + 843722751D4F260A002B398B /* GPUImageDilationFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageDilationFilter.m; sourceTree = ""; }; + 843722761D4F260A002B398B /* GPUImageDirectionalNonMaximumSuppressionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageDirectionalNonMaximumSuppressionFilter.h; sourceTree = ""; }; + 843722771D4F260A002B398B /* GPUImageDirectionalNonMaximumSuppressionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = 
GPUImageDirectionalNonMaximumSuppressionFilter.m; sourceTree = ""; }; + 843722781D4F260A002B398B /* GPUImageDirectionalSobelEdgeDetectionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageDirectionalSobelEdgeDetectionFilter.h; sourceTree = ""; }; + 843722791D4F260A002B398B /* GPUImageDirectionalSobelEdgeDetectionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageDirectionalSobelEdgeDetectionFilter.m; sourceTree = ""; }; + 8437227A1D4F260A002B398B /* GPUImageDissolveBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageDissolveBlendFilter.h; sourceTree = ""; }; + 8437227B1D4F260A002B398B /* GPUImageDissolveBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageDissolveBlendFilter.m; sourceTree = ""; }; + 8437227C1D4F260A002B398B /* GPUImageDivideBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageDivideBlendFilter.h; sourceTree = ""; }; + 8437227D1D4F260A002B398B /* GPUImageDivideBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageDivideBlendFilter.m; sourceTree = ""; }; + 8437227E1D4F260A002B398B /* GPUImageEmbossFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageEmbossFilter.h; sourceTree = ""; }; + 8437227F1D4F260A002B398B /* GPUImageEmbossFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageEmbossFilter.m; sourceTree = ""; }; + 843722801D4F260A002B398B /* GPUImageErosionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageErosionFilter.h; sourceTree = ""; }; + 843722811D4F260A002B398B /* GPUImageErosionFilter.m */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageErosionFilter.m; sourceTree = ""; }; + 843722821D4F260A002B398B /* GPUImageExclusionBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageExclusionBlendFilter.h; sourceTree = ""; }; + 843722831D4F260A002B398B /* GPUImageExclusionBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageExclusionBlendFilter.m; sourceTree = ""; }; + 843722841D4F260A002B398B /* GPUImageExposureFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageExposureFilter.h; sourceTree = ""; }; + 843722851D4F260A002B398B /* GPUImageExposureFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageExposureFilter.m; sourceTree = ""; }; + 843722861D4F260A002B398B /* GPUImageFalseColorFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageFalseColorFilter.h; sourceTree = ""; }; + 843722871D4F260A002B398B /* GPUImageFalseColorFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageFalseColorFilter.m; sourceTree = ""; }; + 843722881D4F260A002B398B /* GPUImageFASTCornerDetectionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageFASTCornerDetectionFilter.h; sourceTree = ""; }; + 843722891D4F260A002B398B /* GPUImageFASTCornerDetectionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageFASTCornerDetectionFilter.m; sourceTree = ""; }; + 8437228A1D4F260A002B398B /* GPUImageFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageFilter.h; sourceTree = ""; }; + 8437228B1D4F260A002B398B /* GPUImageFilter.m */ = {isa = PBXFileReference; fileEncoding 
= 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageFilter.m; sourceTree = ""; }; + 8437228C1D4F260A002B398B /* GPUImageFilterGroup.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageFilterGroup.h; sourceTree = ""; }; + 8437228D1D4F260A002B398B /* GPUImageFilterGroup.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageFilterGroup.m; sourceTree = ""; }; + 8437228E1D4F260A002B398B /* GPUImageFilterPipeline.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageFilterPipeline.h; sourceTree = ""; }; + 8437228F1D4F260A002B398B /* GPUImageFilterPipeline.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageFilterPipeline.m; sourceTree = ""; }; + 843722901D4F260A002B398B /* GPUImageFourInputFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageFourInputFilter.h; sourceTree = ""; }; + 843722911D4F260A002B398B /* GPUImageFourInputFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageFourInputFilter.m; sourceTree = ""; }; + 843722921D4F260A002B398B /* GPUImageFramebuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageFramebuffer.h; sourceTree = ""; }; + 843722931D4F260A002B398B /* GPUImageFramebuffer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageFramebuffer.m; sourceTree = ""; }; + 843722941D4F260A002B398B /* GPUImageFramebufferCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageFramebufferCache.h; sourceTree = ""; }; + 843722951D4F260A002B398B /* GPUImageFramebufferCache.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageFramebufferCache.m; 
sourceTree = ""; }; + 843722961D4F260A002B398B /* GPUImageGammaFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageGammaFilter.h; sourceTree = ""; }; + 843722971D4F260A002B398B /* GPUImageGammaFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageGammaFilter.m; sourceTree = ""; }; + 843722981D4F260A002B398B /* GPUImageGaussianBlurFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageGaussianBlurFilter.h; sourceTree = ""; }; + 843722991D4F260A002B398B /* GPUImageGaussianBlurFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageGaussianBlurFilter.m; sourceTree = ""; }; + 8437229A1D4F260A002B398B /* GPUImageGaussianBlurPositionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageGaussianBlurPositionFilter.h; sourceTree = ""; }; + 8437229B1D4F260A002B398B /* GPUImageGaussianBlurPositionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageGaussianBlurPositionFilter.m; sourceTree = ""; }; + 8437229C1D4F260A002B398B /* GPUImageGaussianSelectiveBlurFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageGaussianSelectiveBlurFilter.h; sourceTree = ""; }; + 8437229D1D4F260A002B398B /* GPUImageGaussianSelectiveBlurFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageGaussianSelectiveBlurFilter.m; sourceTree = ""; }; + 8437229E1D4F260A002B398B /* GPUImageGlassSphereFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageGlassSphereFilter.h; sourceTree = ""; }; + 8437229F1D4F260A002B398B /* GPUImageGlassSphereFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.objc; path = GPUImageGlassSphereFilter.m; sourceTree = ""; }; + 843722A01D4F260A002B398B /* GPUImageGrayscaleFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageGrayscaleFilter.h; sourceTree = ""; }; + 843722A11D4F260A002B398B /* GPUImageGrayscaleFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageGrayscaleFilter.m; sourceTree = ""; }; + 843722A21D4F260A002B398B /* GPUImageHalftoneFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHalftoneFilter.h; sourceTree = ""; }; + 843722A31D4F260A002B398B /* GPUImageHalftoneFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageHalftoneFilter.m; sourceTree = ""; }; + 843722A41D4F260A002B398B /* GPUImageHardLightBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHardLightBlendFilter.h; sourceTree = ""; }; + 843722A51D4F260A002B398B /* GPUImageHardLightBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageHardLightBlendFilter.m; sourceTree = ""; }; + 843722A61D4F260A002B398B /* GPUImageHarrisCornerDetectionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHarrisCornerDetectionFilter.h; sourceTree = ""; }; + 843722A71D4F260A002B398B /* GPUImageHarrisCornerDetectionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageHarrisCornerDetectionFilter.m; sourceTree = ""; }; + 843722A81D4F260A002B398B /* GPUImageHazeFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHazeFilter.h; sourceTree = ""; }; + 843722A91D4F260A002B398B /* GPUImageHazeFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.objc; path = GPUImageHazeFilter.m; sourceTree = ""; }; + 843722AA1D4F260A002B398B /* GPUImageHighlightShadowFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHighlightShadowFilter.h; sourceTree = ""; }; + 843722AB1D4F260A002B398B /* GPUImageHighlightShadowFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageHighlightShadowFilter.m; sourceTree = ""; }; + 843722AC1D4F260A002B398B /* GPUImageHighlightShadowTintFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHighlightShadowTintFilter.h; sourceTree = ""; }; + 843722AD1D4F260A002B398B /* GPUImageHighlightShadowTintFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageHighlightShadowTintFilter.m; sourceTree = ""; }; + 843722AE1D4F260A002B398B /* GPUImageHighPassFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHighPassFilter.h; sourceTree = ""; }; + 843722AF1D4F260A002B398B /* GPUImageHighPassFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageHighPassFilter.m; sourceTree = ""; }; + 843722B01D4F260A002B398B /* GPUImageHistogramEqualizationFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHistogramEqualizationFilter.h; sourceTree = ""; }; + 843722B11D4F260A002B398B /* GPUImageHistogramEqualizationFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageHistogramEqualizationFilter.m; sourceTree = ""; }; + 843722B21D4F260A002B398B /* GPUImageHistogramFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHistogramFilter.h; sourceTree = ""; }; + 843722B31D4F260A002B398B /* 
GPUImageHistogramFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageHistogramFilter.m; sourceTree = ""; }; + 843722B41D4F260A002B398B /* GPUImageHistogramGenerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHistogramGenerator.h; sourceTree = ""; }; + 843722B51D4F260A002B398B /* GPUImageHistogramGenerator.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageHistogramGenerator.m; sourceTree = ""; }; + 843722B61D4F260A002B398B /* GPUImageHoughTransformLineDetector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHoughTransformLineDetector.h; sourceTree = ""; }; + 843722B71D4F260A002B398B /* GPUImageHoughTransformLineDetector.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageHoughTransformLineDetector.m; sourceTree = ""; }; + 843722B81D4F260A002B398B /* GPUImageHSBFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHSBFilter.h; sourceTree = ""; }; + 843722B91D4F260A002B398B /* GPUImageHSBFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageHSBFilter.m; sourceTree = ""; }; + 843722BA1D4F260A002B398B /* GPUImageHueBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHueBlendFilter.h; sourceTree = ""; }; + 843722BB1D4F260A002B398B /* GPUImageHueBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageHueBlendFilter.m; sourceTree = ""; }; + 843722BC1D4F260A002B398B /* GPUImageHueFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageHueFilter.h; sourceTree = ""; }; + 843722BD1D4F260A002B398B /* GPUImageHueFilter.m */ = 
{isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageHueFilter.m; sourceTree = ""; }; + 843722BE1D4F260A002B398B /* GPUImageiOSBlurFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageiOSBlurFilter.h; sourceTree = ""; }; + 843722BF1D4F260A002B398B /* GPUImageiOSBlurFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageiOSBlurFilter.m; sourceTree = ""; }; + 843722C01D4F260A002B398B /* GPUImageJFAVoronoiFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageJFAVoronoiFilter.h; sourceTree = ""; }; + 843722C11D4F260A002B398B /* GPUImageJFAVoronoiFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageJFAVoronoiFilter.m; sourceTree = ""; }; + 843722C21D4F260A002B398B /* GPUImageKuwaharaFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageKuwaharaFilter.h; sourceTree = ""; }; + 843722C31D4F260A002B398B /* GPUImageKuwaharaFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageKuwaharaFilter.m; sourceTree = ""; }; + 843722C41D4F260A002B398B /* GPUImageKuwaharaRadius3Filter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageKuwaharaRadius3Filter.h; sourceTree = ""; }; + 843722C51D4F260A002B398B /* GPUImageKuwaharaRadius3Filter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageKuwaharaRadius3Filter.m; sourceTree = ""; }; + 843722C61D4F260A002B398B /* GPUImageLanczosResamplingFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageLanczosResamplingFilter.h; sourceTree = ""; }; + 843722C71D4F260A002B398B /* GPUImageLanczosResamplingFilter.m */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageLanczosResamplingFilter.m; sourceTree = ""; }; + 843722C81D4F260A002B398B /* GPUImageLaplacianFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageLaplacianFilter.h; sourceTree = ""; }; + 843722C91D4F260A002B398B /* GPUImageLaplacianFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageLaplacianFilter.m; sourceTree = ""; }; + 843722CA1D4F260A002B398B /* GPUImageLevelsFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageLevelsFilter.h; sourceTree = ""; }; + 843722CB1D4F260A002B398B /* GPUImageLevelsFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageLevelsFilter.m; sourceTree = ""; }; + 843722CC1D4F260A002B398B /* GPUImageLightenBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageLightenBlendFilter.h; sourceTree = ""; }; + 843722CD1D4F260A002B398B /* GPUImageLightenBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageLightenBlendFilter.m; sourceTree = ""; }; + 843722CE1D4F260A002B398B /* GPUImageLinearBurnBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageLinearBurnBlendFilter.h; sourceTree = ""; }; + 843722CF1D4F260A002B398B /* GPUImageLinearBurnBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageLinearBurnBlendFilter.m; sourceTree = ""; }; + 843722D01D4F260A002B398B /* GPUImageLineGenerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageLineGenerator.h; sourceTree = ""; }; + 843722D11D4F260A002B398B /* GPUImageLineGenerator.m */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageLineGenerator.m; sourceTree = ""; }; + 843722D21D4F260A002B398B /* GPUImageLocalBinaryPatternFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageLocalBinaryPatternFilter.h; sourceTree = ""; }; + 843722D31D4F260A002B398B /* GPUImageLocalBinaryPatternFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageLocalBinaryPatternFilter.m; sourceTree = ""; }; + 843722D41D4F260A002B398B /* GPUImageLookupFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageLookupFilter.h; sourceTree = ""; }; + 843722D51D4F260A002B398B /* GPUImageLookupFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageLookupFilter.m; sourceTree = ""; }; + 843722D61D4F260A002B398B /* GPUImageLowPassFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageLowPassFilter.h; sourceTree = ""; }; + 843722D71D4F260A002B398B /* GPUImageLowPassFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageLowPassFilter.m; sourceTree = ""; }; + 843722D81D4F260A002B398B /* GPUImageLuminanceRangeFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageLuminanceRangeFilter.h; sourceTree = ""; }; + 843722D91D4F260A002B398B /* GPUImageLuminanceRangeFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageLuminanceRangeFilter.m; sourceTree = ""; }; + 843722DA1D4F260A002B398B /* GPUImageLuminanceThresholdFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageLuminanceThresholdFilter.h; sourceTree = ""; }; + 843722DB1D4F260A002B398B /* GPUImageLuminanceThresholdFilter.m */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageLuminanceThresholdFilter.m; sourceTree = ""; }; + 843722DC1D4F260A002B398B /* GPUImageLuminosity.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageLuminosity.h; sourceTree = ""; }; + 843722DD1D4F260A002B398B /* GPUImageLuminosity.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageLuminosity.m; sourceTree = ""; }; + 843722DE1D4F260A002B398B /* GPUImageLuminosityBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageLuminosityBlendFilter.h; sourceTree = ""; }; + 843722DF1D4F260A002B398B /* GPUImageLuminosityBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageLuminosityBlendFilter.m; sourceTree = ""; }; + 843722E01D4F260A002B398B /* GPUImageMaskFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageMaskFilter.h; sourceTree = ""; }; + 843722E11D4F260A002B398B /* GPUImageMaskFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageMaskFilter.m; sourceTree = ""; }; + 843722E21D4F260A002B398B /* GPUImageMedianFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageMedianFilter.h; sourceTree = ""; }; + 843722E31D4F260A002B398B /* GPUImageMedianFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageMedianFilter.m; sourceTree = ""; }; + 843722E41D4F260A002B398B /* GPUImageMissEtikateFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageMissEtikateFilter.h; sourceTree = ""; }; + 843722E51D4F260A002B398B /* GPUImageMissEtikateFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.objc; path = GPUImageMissEtikateFilter.m; sourceTree = ""; }; + 843722E61D4F260A002B398B /* GPUImageMonochromeFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageMonochromeFilter.h; sourceTree = ""; }; + 843722E71D4F260A002B398B /* GPUImageMonochromeFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageMonochromeFilter.m; sourceTree = ""; }; + 843722E81D4F260A002B398B /* GPUImageMosaicFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageMosaicFilter.h; sourceTree = ""; }; + 843722E91D4F260A002B398B /* GPUImageMosaicFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageMosaicFilter.m; sourceTree = ""; }; + 843722EA1D4F260A002B398B /* GPUImageMotionBlurFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageMotionBlurFilter.h; sourceTree = ""; }; + 843722EB1D4F260A002B398B /* GPUImageMotionBlurFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageMotionBlurFilter.m; sourceTree = ""; }; + 843722EC1D4F260A002B398B /* GPUImageMotionDetector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageMotionDetector.h; sourceTree = ""; }; + 843722ED1D4F260A002B398B /* GPUImageMotionDetector.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageMotionDetector.m; sourceTree = ""; }; + 843722EE1D4F260A002B398B /* GPUImageMovie.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageMovie.h; sourceTree = ""; }; + 843722EF1D4F260A002B398B /* GPUImageMovie.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageMovie.m; sourceTree = ""; }; + 
843722F01D4F260A002B398B /* GPUImageMovieComposition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageMovieComposition.h; sourceTree = ""; }; + 843722F11D4F260A002B398B /* GPUImageMovieComposition.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageMovieComposition.m; sourceTree = ""; }; + 843722F21D4F260A002B398B /* GPUImageMultiplyBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageMultiplyBlendFilter.h; sourceTree = ""; }; + 843722F31D4F260A002B398B /* GPUImageMultiplyBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageMultiplyBlendFilter.m; sourceTree = ""; }; + 843722F41D4F260A002B398B /* GPUImageNobleCornerDetectionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageNobleCornerDetectionFilter.h; sourceTree = ""; }; + 843722F51D4F260A002B398B /* GPUImageNobleCornerDetectionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageNobleCornerDetectionFilter.m; sourceTree = ""; }; + 843722F61D4F260A002B398B /* GPUImageNonMaximumSuppressionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageNonMaximumSuppressionFilter.h; sourceTree = ""; }; + 843722F71D4F260A002B398B /* GPUImageNonMaximumSuppressionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageNonMaximumSuppressionFilter.m; sourceTree = ""; }; + 843722F81D4F260A002B398B /* GPUImageNormalBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageNormalBlendFilter.h; sourceTree = ""; }; + 843722F91D4F260A002B398B /* GPUImageNormalBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.objc; path = GPUImageNormalBlendFilter.m; sourceTree = ""; }; + 843722FA1D4F260A002B398B /* GPUImageOpacityFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageOpacityFilter.h; sourceTree = ""; }; + 843722FB1D4F260A002B398B /* GPUImageOpacityFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageOpacityFilter.m; sourceTree = ""; }; + 843722FC1D4F260A002B398B /* GPUImageOpeningFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageOpeningFilter.h; sourceTree = ""; }; + 843722FD1D4F260A002B398B /* GPUImageOpeningFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageOpeningFilter.m; sourceTree = ""; }; + 843722FE1D4F260A002B398B /* GPUImageOutput.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageOutput.h; sourceTree = ""; }; + 843722FF1D4F260A002B398B /* GPUImageOutput.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageOutput.m; sourceTree = ""; }; + 843723001D4F260A002B398B /* GPUImageOverlayBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageOverlayBlendFilter.h; sourceTree = ""; }; + 843723011D4F260A002B398B /* GPUImageOverlayBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageOverlayBlendFilter.m; sourceTree = ""; }; + 843723021D4F260A002B398B /* GPUImageParallelCoordinateLineTransformFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageParallelCoordinateLineTransformFilter.h; sourceTree = ""; }; + 843723031D4F260A002B398B /* GPUImageParallelCoordinateLineTransformFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.objc; path = GPUImageParallelCoordinateLineTransformFilter.m; sourceTree = ""; }; + 843723041D4F260A002B398B /* GPUImagePerlinNoiseFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImagePerlinNoiseFilter.h; sourceTree = ""; }; + 843723051D4F260A002B398B /* GPUImagePerlinNoiseFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImagePerlinNoiseFilter.m; sourceTree = ""; }; + 843723061D4F260A002B398B /* GPUImagePinchDistortionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImagePinchDistortionFilter.h; sourceTree = ""; }; + 843723071D4F260A002B398B /* GPUImagePinchDistortionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImagePinchDistortionFilter.m; sourceTree = ""; }; + 843723081D4F260A002B398B /* GPUImagePixellateFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImagePixellateFilter.h; sourceTree = ""; }; + 843723091D4F260A002B398B /* GPUImagePixellateFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImagePixellateFilter.m; sourceTree = ""; }; + 8437230A1D4F260A002B398B /* GPUImagePixellatePositionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImagePixellatePositionFilter.h; sourceTree = ""; }; + 8437230B1D4F260A002B398B /* GPUImagePixellatePositionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImagePixellatePositionFilter.m; sourceTree = ""; }; + 8437230C1D4F260A002B398B /* GPUImagePoissonBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImagePoissonBlendFilter.h; sourceTree = ""; }; + 8437230D1D4F260A002B398B /* GPUImagePoissonBlendFilter.m */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImagePoissonBlendFilter.m; sourceTree = ""; }; + 8437230E1D4F260A002B398B /* GPUImagePolarPixellateFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImagePolarPixellateFilter.h; sourceTree = ""; }; + 8437230F1D4F260A002B398B /* GPUImagePolarPixellateFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImagePolarPixellateFilter.m; sourceTree = ""; }; + 843723101D4F260A002B398B /* GPUImagePolkaDotFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImagePolkaDotFilter.h; sourceTree = ""; }; + 843723111D4F260A002B398B /* GPUImagePolkaDotFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImagePolkaDotFilter.m; sourceTree = ""; }; + 843723121D4F260A002B398B /* GPUImagePosterizeFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImagePosterizeFilter.h; sourceTree = ""; }; + 843723131D4F260A002B398B /* GPUImagePosterizeFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImagePosterizeFilter.m; sourceTree = ""; }; + 843723141D4F260A002B398B /* GPUImagePrewittEdgeDetectionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImagePrewittEdgeDetectionFilter.h; sourceTree = ""; }; + 843723151D4F260A002B398B /* GPUImagePrewittEdgeDetectionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImagePrewittEdgeDetectionFilter.m; sourceTree = ""; }; + 843723161D4F260A002B398B /* GPUImageRawDataInput.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageRawDataInput.h; sourceTree = ""; }; + 843723171D4F260A002B398B /* GPUImageRawDataInput.m */ 
= {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageRawDataInput.m; sourceTree = ""; }; + 843723181D4F260A002B398B /* GPUImageRawDataOutput.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageRawDataOutput.h; sourceTree = ""; }; + 843723191D4F260A002B398B /* GPUImageRawDataOutput.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageRawDataOutput.m; sourceTree = ""; }; + 8437231A1D4F260A002B398B /* GPUImageRGBClosingFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageRGBClosingFilter.h; sourceTree = ""; }; + 8437231B1D4F260A002B398B /* GPUImageRGBClosingFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageRGBClosingFilter.m; sourceTree = ""; }; + 8437231C1D4F260A002B398B /* GPUImageRGBDilationFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageRGBDilationFilter.h; sourceTree = ""; }; + 8437231D1D4F260A002B398B /* GPUImageRGBDilationFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageRGBDilationFilter.m; sourceTree = ""; }; + 8437231E1D4F260A002B398B /* GPUImageRGBErosionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageRGBErosionFilter.h; sourceTree = ""; }; + 8437231F1D4F260A002B398B /* GPUImageRGBErosionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageRGBErosionFilter.m; sourceTree = ""; }; + 843723201D4F260A002B398B /* GPUImageRGBFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageRGBFilter.h; sourceTree = ""; }; + 843723211D4F260A002B398B /* GPUImageRGBFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.objc; path = GPUImageRGBFilter.m; sourceTree = ""; }; + 843723221D4F260A002B398B /* GPUImageRGBOpeningFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageRGBOpeningFilter.h; sourceTree = ""; }; + 843723231D4F260A002B398B /* GPUImageRGBOpeningFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageRGBOpeningFilter.m; sourceTree = ""; }; + 843723241D4F260A002B398B /* GPUImageSaturationBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSaturationBlendFilter.h; sourceTree = ""; }; + 843723251D4F260A002B398B /* GPUImageSaturationBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSaturationBlendFilter.m; sourceTree = ""; }; + 843723261D4F260A002B398B /* GPUImageSaturationFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSaturationFilter.h; sourceTree = ""; }; + 843723271D4F260A002B398B /* GPUImageSaturationFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSaturationFilter.m; sourceTree = ""; }; + 843723281D4F260A002B398B /* GPUImageScreenBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageScreenBlendFilter.h; sourceTree = ""; }; + 843723291D4F260A002B398B /* GPUImageScreenBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageScreenBlendFilter.m; sourceTree = ""; }; + 8437232A1D4F260A002B398B /* GPUImageSepiaFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSepiaFilter.h; sourceTree = ""; }; + 8437232B1D4F260A002B398B /* GPUImageSepiaFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.objc; path = GPUImageSepiaFilter.m; sourceTree = ""; }; + 8437232C1D4F260A002B398B /* GPUImageSharpenFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSharpenFilter.h; sourceTree = ""; }; + 8437232D1D4F260A002B398B /* GPUImageSharpenFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSharpenFilter.m; sourceTree = ""; }; + 8437232E1D4F260A002B398B /* GPUImageShiTomasiFeatureDetectionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageShiTomasiFeatureDetectionFilter.h; sourceTree = ""; }; + 8437232F1D4F260A002B398B /* GPUImageShiTomasiFeatureDetectionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageShiTomasiFeatureDetectionFilter.m; sourceTree = ""; }; + 843723301D4F260A002B398B /* GPUImageSingleComponentGaussianBlurFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSingleComponentGaussianBlurFilter.h; sourceTree = ""; }; + 843723311D4F260A002B398B /* GPUImageSingleComponentGaussianBlurFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSingleComponentGaussianBlurFilter.m; sourceTree = ""; }; + 843723321D4F260A002B398B /* GPUImageSketchFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSketchFilter.h; sourceTree = ""; }; + 843723331D4F260A002B398B /* GPUImageSketchFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSketchFilter.m; sourceTree = ""; }; + 843723341D4F260A002B398B /* GPUImageSkinToneFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSkinToneFilter.h; sourceTree = ""; }; + 843723351D4F260A002B398B /* GPUImageSkinToneFilter.m */ 
= {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSkinToneFilter.m; sourceTree = ""; }; + 843723361D4F260A002B398B /* GPUImageSmoothToonFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSmoothToonFilter.h; sourceTree = ""; }; + 843723371D4F260A002B398B /* GPUImageSmoothToonFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSmoothToonFilter.m; sourceTree = ""; }; + 843723381D4F260A002B398B /* GPUImageSobelEdgeDetectionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSobelEdgeDetectionFilter.h; sourceTree = ""; }; + 843723391D4F260A002B398B /* GPUImageSobelEdgeDetectionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSobelEdgeDetectionFilter.m; sourceTree = ""; }; + 8437233A1D4F260A002B398B /* GPUImageSoftEleganceFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSoftEleganceFilter.h; sourceTree = ""; }; + 8437233B1D4F260A002B398B /* GPUImageSoftEleganceFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSoftEleganceFilter.m; sourceTree = ""; }; + 8437233C1D4F260A002B398B /* GPUImageSoftLightBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSoftLightBlendFilter.h; sourceTree = ""; }; + 8437233D1D4F260A002B398B /* GPUImageSoftLightBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSoftLightBlendFilter.m; sourceTree = ""; }; + 8437233E1D4F260A002B398B /* GPUImageSolarizeFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSolarizeFilter.h; sourceTree = ""; }; + 8437233F1D4F260A002B398B /* 
GPUImageSolarizeFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSolarizeFilter.m; sourceTree = ""; }; + 843723401D4F260A002B398B /* GPUImageSolidColorGenerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSolidColorGenerator.h; sourceTree = ""; }; + 843723411D4F260A002B398B /* GPUImageSolidColorGenerator.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSolidColorGenerator.m; sourceTree = ""; }; + 843723421D4F260A002B398B /* GPUImageSourceOverBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSourceOverBlendFilter.h; sourceTree = ""; }; + 843723431D4F260A002B398B /* GPUImageSourceOverBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSourceOverBlendFilter.m; sourceTree = ""; }; + 843723441D4F260A002B398B /* GPUImageSphereRefractionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSphereRefractionFilter.h; sourceTree = ""; }; + 843723451D4F260A002B398B /* GPUImageSphereRefractionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSphereRefractionFilter.m; sourceTree = ""; }; + 843723461D4F260A002B398B /* GPUImageStillCamera.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageStillCamera.h; sourceTree = ""; }; + 843723471D4F260A002B398B /* GPUImageStillCamera.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageStillCamera.m; sourceTree = ""; }; + 843723481D4F260A002B398B /* GPUImageStretchDistortionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageStretchDistortionFilter.h; sourceTree = ""; }; + 
843723491D4F260A002B398B /* GPUImageStretchDistortionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageStretchDistortionFilter.m; sourceTree = ""; }; + 8437234A1D4F260A002B398B /* GPUImageSubtractBlendFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSubtractBlendFilter.h; sourceTree = ""; }; + 8437234B1D4F260A002B398B /* GPUImageSubtractBlendFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSubtractBlendFilter.m; sourceTree = ""; }; + 8437234C1D4F260A002B398B /* GPUImageSwirlFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageSwirlFilter.h; sourceTree = ""; }; + 8437234D1D4F260A002B398B /* GPUImageSwirlFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageSwirlFilter.m; sourceTree = ""; }; + 8437234E1D4F260A002B398B /* GPUImageTextureInput.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageTextureInput.h; sourceTree = ""; }; + 8437234F1D4F260A002B398B /* GPUImageTextureInput.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageTextureInput.m; sourceTree = ""; }; + 843723501D4F260A002B398B /* GPUImageTextureOutput.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageTextureOutput.h; sourceTree = ""; }; + 843723511D4F260A002B398B /* GPUImageTextureOutput.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageTextureOutput.m; sourceTree = ""; }; + 843723521D4F260A002B398B /* GPUImageThreeInputFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageThreeInputFilter.h; sourceTree = ""; }; + 843723531D4F260A002B398B /* 
GPUImageThreeInputFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageThreeInputFilter.m; sourceTree = ""; }; + 843723541D4F260A002B398B /* GPUImageThresholdEdgeDetectionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageThresholdEdgeDetectionFilter.h; sourceTree = ""; }; + 843723551D4F260A002B398B /* GPUImageThresholdEdgeDetectionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageThresholdEdgeDetectionFilter.m; sourceTree = ""; }; + 843723561D4F260A002B398B /* GPUImageThresholdedNonMaximumSuppressionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageThresholdedNonMaximumSuppressionFilter.h; sourceTree = ""; }; + 843723571D4F260A002B398B /* GPUImageThresholdedNonMaximumSuppressionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageThresholdedNonMaximumSuppressionFilter.m; sourceTree = ""; }; + 843723581D4F260A002B398B /* GPUImageThresholdSketchFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageThresholdSketchFilter.h; sourceTree = ""; }; + 843723591D4F260A002B398B /* GPUImageThresholdSketchFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageThresholdSketchFilter.m; sourceTree = ""; }; + 8437235A1D4F260A002B398B /* GPUImageTiltShiftFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageTiltShiftFilter.h; sourceTree = ""; }; + 8437235B1D4F260A002B398B /* GPUImageTiltShiftFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageTiltShiftFilter.m; sourceTree = ""; }; + 8437235C1D4F260A002B398B /* GPUImageToneCurveFilter.h */ = {isa = PBXFileReference; fileEncoding 
= 4; lastKnownFileType = sourcecode.c.h; path = GPUImageToneCurveFilter.h; sourceTree = ""; }; + 8437235D1D4F260A002B398B /* GPUImageToneCurveFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageToneCurveFilter.m; sourceTree = ""; }; + 8437235E1D4F260A002B398B /* GPUImageToonFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageToonFilter.h; sourceTree = ""; }; + 8437235F1D4F260A002B398B /* GPUImageToonFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageToonFilter.m; sourceTree = ""; }; + 843723601D4F260A002B398B /* GPUImageTransformFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageTransformFilter.h; sourceTree = ""; }; + 843723611D4F260A002B398B /* GPUImageTransformFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageTransformFilter.m; sourceTree = ""; }; + 843723621D4F260A002B398B /* GPUImageTwoInputCrossTextureSamplingFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageTwoInputCrossTextureSamplingFilter.h; sourceTree = ""; }; + 843723631D4F260A002B398B /* GPUImageTwoInputCrossTextureSamplingFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageTwoInputCrossTextureSamplingFilter.m; sourceTree = ""; }; + 843723641D4F260A002B398B /* GPUImageTwoInputFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageTwoInputFilter.h; sourceTree = ""; }; + 843723651D4F260A002B398B /* GPUImageTwoInputFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageTwoInputFilter.m; sourceTree = ""; }; + 843723661D4F260A002B398B /* GPUImageTwoPassFilter.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageTwoPassFilter.h; sourceTree = ""; }; + 843723671D4F260A002B398B /* GPUImageTwoPassFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageTwoPassFilter.m; sourceTree = ""; }; + 843723681D4F260A002B398B /* GPUImageTwoPassTextureSamplingFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageTwoPassTextureSamplingFilter.h; sourceTree = ""; }; + 843723691D4F260A002B398B /* GPUImageTwoPassTextureSamplingFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageTwoPassTextureSamplingFilter.m; sourceTree = ""; }; + 8437236A1D4F260A002B398B /* GPUImageUIElement.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageUIElement.h; sourceTree = ""; }; + 8437236B1D4F260A002B398B /* GPUImageUIElement.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageUIElement.m; sourceTree = ""; }; + 8437236C1D4F260A002B398B /* GPUImageUnsharpMaskFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageUnsharpMaskFilter.h; sourceTree = ""; }; + 8437236D1D4F260A002B398B /* GPUImageUnsharpMaskFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageUnsharpMaskFilter.m; sourceTree = ""; }; + 8437236E1D4F260A002B398B /* GPUImageVideoCamera.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageVideoCamera.h; sourceTree = ""; }; + 8437236F1D4F260A002B398B /* GPUImageVideoCamera.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageVideoCamera.m; sourceTree = ""; }; + 843723701D4F260A002B398B /* GPUImageVignetteFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = GPUImageVignetteFilter.h; sourceTree = ""; }; + 843723711D4F260A002B398B /* GPUImageVignetteFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageVignetteFilter.m; sourceTree = ""; }; + 843723721D4F260A002B398B /* GPUImageVoronoiConsumerFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageVoronoiConsumerFilter.h; sourceTree = ""; }; + 843723731D4F260A002B398B /* GPUImageVoronoiConsumerFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageVoronoiConsumerFilter.m; sourceTree = ""; }; + 843723741D4F260A002B398B /* GPUImageWeakPixelInclusionFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageWeakPixelInclusionFilter.h; sourceTree = ""; }; + 843723751D4F260A002B398B /* GPUImageWeakPixelInclusionFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageWeakPixelInclusionFilter.m; sourceTree = ""; }; + 843723761D4F260A002B398B /* GPUImageWhiteBalanceFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageWhiteBalanceFilter.h; sourceTree = ""; }; + 843723771D4F260A002B398B /* GPUImageWhiteBalanceFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageWhiteBalanceFilter.m; sourceTree = ""; }; + 843723781D4F260A002B398B /* GPUImageXYDerivativeFilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageXYDerivativeFilter.h; sourceTree = ""; }; + 843723791D4F260A002B398B /* GPUImageXYDerivativeFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageXYDerivativeFilter.m; sourceTree = ""; }; + 8437237A1D4F260A002B398B /* GPUImageZoomBlurFilter.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageZoomBlurFilter.h; sourceTree = ""; }; + 8437237B1D4F260A002B398B /* GPUImageZoomBlurFilter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageZoomBlurFilter.m; sourceTree = ""; }; + 8437237E1D4F260A002B398B /* GPUImageFramework.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageFramework.h; sourceTree = ""; }; + 8437237F1D4F260A002B398B /* GPUImageContext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageContext.h; sourceTree = ""; }; + 843723801D4F260A002B398B /* GPUImageContext.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageContext.m; sourceTree = ""; }; + 843723811D4F260A002B398B /* GPUImageMovieWriter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageMovieWriter.h; sourceTree = ""; }; + 843723821D4F260A002B398B /* GPUImageMovieWriter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageMovieWriter.m; sourceTree = ""; }; + 843723831D4F260A002B398B /* GPUImagePicture+TextureSubimage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "GPUImagePicture+TextureSubimage.h"; sourceTree = ""; }; + 843723841D4F260A002B398B /* GPUImagePicture+TextureSubimage.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "GPUImagePicture+TextureSubimage.m"; sourceTree = ""; }; + 843723851D4F260A002B398B /* GPUImagePicture.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImagePicture.h; sourceTree = ""; }; + 843723861D4F260A002B398B /* GPUImagePicture.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = 
GPUImagePicture.m; sourceTree = ""; }; + 843723871D4F260A002B398B /* GPUImageView.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GPUImageView.h; sourceTree = ""; }; + 843723881D4F260A002B398B /* GPUImageView.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GPUImageView.m; sourceTree = ""; }; + 8437238A1D4F260A002B398B /* amf.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = amf.c; sourceTree = ""; }; + 8437238B1D4F260A002B398B /* amf.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = amf.h; sourceTree = ""; }; + 8437238C1D4F260A002B398B /* bytes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bytes.h; sourceTree = ""; }; + 8437238D1D4F260A002B398B /* dh.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dh.h; sourceTree = ""; }; + 8437238E1D4F260A002B398B /* dhgroups.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dhgroups.h; sourceTree = ""; }; + 8437238F1D4F260A002B398B /* error.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = error.c; sourceTree = ""; }; + 843723901D4F260A002B398B /* error.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = ""; }; + 843723911D4F260A002B398B /* handshake.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = handshake.h; sourceTree = ""; }; + 843723921D4F260A002B398B /* hashswf.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = hashswf.c; sourceTree = ""; }; + 843723931D4F260A002B398B /* http.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = http.h; sourceTree = ""; }; + 843723941D4F260A002B398B /* 
log.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = log.c; sourceTree = ""; }; + 843723951D4F260A002B398B /* log.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; + 843723961D4F260A002B398B /* parseurl.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = parseurl.c; sourceTree = ""; }; + 843723971D4F260A002B398B /* rtmp.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = rtmp.c; sourceTree = ""; }; + 843723981D4F260A002B398B /* rtmp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rtmp.h; sourceTree = ""; }; + 843723991D4F260A002B398B /* rtmp_sys.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rtmp_sys.h; sourceTree = ""; }; B289F1D41D3DE77F00D9C7A5 /* LFStreamingBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LFStreamingBuffer.h; path = LFLiveKit/publish/LFStreamingBuffer.h; sourceTree = SOURCE_ROOT; }; B289F1D51D3DE77F00D9C7A5 /* LFStreamingBuffer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = LFStreamingBuffer.m; path = LFLiveKit/publish/LFStreamingBuffer.m; sourceTree = SOURCE_ROOT; }; B289F1D61D3DE77F00D9C7A5 /* LFStreamRtmpSocket.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LFStreamRtmpSocket.h; path = LFLiveKit/publish/LFStreamRtmpSocket.h; sourceTree = SOURCE_ROOT; }; @@ -139,7 +862,6 @@ B2CD146A1D45F18B008082E8 /* LFVideoEncoder.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = LFVideoEncoder.m; sourceTree = ""; }; B2CD146B1D45F18B008082E8 /* LFH264VideoEncoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LFH264VideoEncoder.h; sourceTree 
= ""; }; B2CD146C1D45F18B008082E8 /* LFH264VideoEncoder.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = LFH264VideoEncoder.mm; sourceTree = ""; }; - B8CB02D2A92EA1F5A262F154 /* libPods-LFLiveKit.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-LFLiveKit.a"; sourceTree = BUILT_PRODUCTS_DIR; }; D0BB7E7CE5403C4911E026B9 /* Pods-LFLiveKit.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-LFLiveKit.debug.xcconfig"; path = "Pods/Target Support Files/Pods-LFLiveKit/Pods-LFLiveKit.debug.xcconfig"; sourceTree = ""; }; /* End PBXFileReference section */ @@ -154,7 +876,6 @@ 84001FFB1D0017630026C63F /* UIKit.framework in Frameworks */, 84001FF91D00175D0026C63F /* Foundation.framework in Frameworks */, 84001FF71D0017590026C63F /* AVFoundation.framework in Frameworks */, - AD7F89B4621A7EFEBEA72D49 /* libPods-LFLiveKit.a in Frameworks */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -178,7 +899,6 @@ 84001FFA1D0017630026C63F /* UIKit.framework */, 84001FF81D00175D0026C63F /* Foundation.framework */, 84001FF61D0017590026C63F /* AVFoundation.framework */, - B8CB02D2A92EA1F5A262F154 /* libPods-LFLiveKit.a */, ); name = Frameworks; sourceTree = ""; @@ -224,6 +944,7 @@ 84001FA91D0016380026C63F /* coder */, 84001FB51D0016380026C63F /* filter */, 84001FC91D0016380026C63F /* publish */, + 8437222B1D4F260A002B398B /* Vendor */, 84001F8F1D0015D10026C63F /* Info.plist */, ); path = LFLiveKit; @@ -319,6 +1040,407 @@ path = upload; sourceTree = ""; }; + 8437222B1D4F260A002B398B /* Vendor */ = { + isa = PBXGroup; + children = ( + 8437222C1D4F260A002B398B /* GPUImage */, + 843723891D4F260A002B398B /* pili-librtmp */, + ); + path = Vendor; + sourceTree = ""; + }; + 8437222C1D4F260A002B398B /* GPUImage */ = { + isa = PBXGroup; + children = ( + 8437222D1D4F260A002B398B /* GLProgram.h */, + 8437222E1D4F260A002B398B /* GLProgram.m 
*/, + 8437222F1D4F260A002B398B /* GPUImage.h */, + 843722301D4F260A002B398B /* GPUImage3x3ConvolutionFilter.h */, + 843722311D4F260A002B398B /* GPUImage3x3ConvolutionFilter.m */, + 843722321D4F260A002B398B /* GPUImage3x3TextureSamplingFilter.h */, + 843722331D4F260A002B398B /* GPUImage3x3TextureSamplingFilter.m */, + 843722341D4F260A002B398B /* GPUImageAdaptiveThresholdFilter.h */, + 843722351D4F260A002B398B /* GPUImageAdaptiveThresholdFilter.m */, + 843722361D4F260A002B398B /* GPUImageAddBlendFilter.h */, + 843722371D4F260A002B398B /* GPUImageAddBlendFilter.m */, + 843722381D4F260A002B398B /* GPUImageAlphaBlendFilter.h */, + 843722391D4F260A002B398B /* GPUImageAlphaBlendFilter.m */, + 8437223A1D4F260A002B398B /* GPUImageAmatorkaFilter.h */, + 8437223B1D4F260A002B398B /* GPUImageAmatorkaFilter.m */, + 8437223C1D4F260A002B398B /* GPUImageAverageColor.h */, + 8437223D1D4F260A002B398B /* GPUImageAverageColor.m */, + 8437223E1D4F260A002B398B /* GPUImageAverageLuminanceThresholdFilter.h */, + 8437223F1D4F260A002B398B /* GPUImageAverageLuminanceThresholdFilter.m */, + 843722401D4F260A002B398B /* GPUImageBilateralFilter.h */, + 843722411D4F260A002B398B /* GPUImageBilateralFilter.m */, + 843722421D4F260A002B398B /* GPUImageBoxBlurFilter.h */, + 843722431D4F260A002B398B /* GPUImageBoxBlurFilter.m */, + 843722441D4F260A002B398B /* GPUImageBrightnessFilter.h */, + 843722451D4F260A002B398B /* GPUImageBrightnessFilter.m */, + 843722461D4F260A002B398B /* GPUImageBuffer.h */, + 843722471D4F260A002B398B /* GPUImageBuffer.m */, + 843722481D4F260A002B398B /* GPUImageBulgeDistortionFilter.h */, + 843722491D4F260A002B398B /* GPUImageBulgeDistortionFilter.m */, + 8437224A1D4F260A002B398B /* GPUImageCannyEdgeDetectionFilter.h */, + 8437224B1D4F260A002B398B /* GPUImageCannyEdgeDetectionFilter.m */, + 8437224C1D4F260A002B398B /* GPUImageCGAColorspaceFilter.h */, + 8437224D1D4F260A002B398B /* GPUImageCGAColorspaceFilter.m */, + 8437224E1D4F260A002B398B /* GPUImageChromaKeyBlendFilter.h */, 
+ 8437224F1D4F260A002B398B /* GPUImageChromaKeyBlendFilter.m */, + 843722501D4F260A002B398B /* GPUImageChromaKeyFilter.h */, + 843722511D4F260A002B398B /* GPUImageChromaKeyFilter.m */, + 843722521D4F260A002B398B /* GPUImageClosingFilter.h */, + 843722531D4F260A002B398B /* GPUImageClosingFilter.m */, + 843722541D4F260A002B398B /* GPUImageColorBlendFilter.h */, + 843722551D4F260A002B398B /* GPUImageColorBlendFilter.m */, + 843722561D4F260A002B398B /* GPUImageColorBurnBlendFilter.h */, + 843722571D4F260A002B398B /* GPUImageColorBurnBlendFilter.m */, + 843722581D4F260A002B398B /* GPUImageColorConversion.h */, + 843722591D4F260A002B398B /* GPUImageColorConversion.m */, + 8437225A1D4F260A002B398B /* GPUImageColorDodgeBlendFilter.h */, + 8437225B1D4F260A002B398B /* GPUImageColorDodgeBlendFilter.m */, + 8437225C1D4F260A002B398B /* GPUImageColorInvertFilter.h */, + 8437225D1D4F260A002B398B /* GPUImageColorInvertFilter.m */, + 8437225E1D4F260A002B398B /* GPUImageColorLocalBinaryPatternFilter.h */, + 8437225F1D4F260A002B398B /* GPUImageColorLocalBinaryPatternFilter.m */, + 843722601D4F260A002B398B /* GPUImageColorMatrixFilter.h */, + 843722611D4F260A002B398B /* GPUImageColorMatrixFilter.m */, + 843722621D4F260A002B398B /* GPUImageColorPackingFilter.h */, + 843722631D4F260A002B398B /* GPUImageColorPackingFilter.m */, + 843722641D4F260A002B398B /* GPUImageColourFASTFeatureDetector.h */, + 843722651D4F260A002B398B /* GPUImageColourFASTFeatureDetector.m */, + 843722661D4F260A002B398B /* GPUImageColourFASTSamplingOperation.h */, + 843722671D4F260A002B398B /* GPUImageColourFASTSamplingOperation.m */, + 843722681D4F260A002B398B /* GPUImageContrastFilter.h */, + 843722691D4F260A002B398B /* GPUImageContrastFilter.m */, + 8437226A1D4F260A002B398B /* GPUImageCropFilter.h */, + 8437226B1D4F260A002B398B /* GPUImageCropFilter.m */, + 8437226C1D4F260A002B398B /* GPUImageCrosshairGenerator.h */, + 8437226D1D4F260A002B398B /* GPUImageCrosshairGenerator.m */, + 8437226E1D4F260A002B398B /* 
GPUImageCrosshatchFilter.h */, + 8437226F1D4F260A002B398B /* GPUImageCrosshatchFilter.m */, + 843722701D4F260A002B398B /* GPUImageDarkenBlendFilter.h */, + 843722711D4F260A002B398B /* GPUImageDarkenBlendFilter.m */, + 843722721D4F260A002B398B /* GPUImageDifferenceBlendFilter.h */, + 843722731D4F260A002B398B /* GPUImageDifferenceBlendFilter.m */, + 843722741D4F260A002B398B /* GPUImageDilationFilter.h */, + 843722751D4F260A002B398B /* GPUImageDilationFilter.m */, + 843722761D4F260A002B398B /* GPUImageDirectionalNonMaximumSuppressionFilter.h */, + 843722771D4F260A002B398B /* GPUImageDirectionalNonMaximumSuppressionFilter.m */, + 843722781D4F260A002B398B /* GPUImageDirectionalSobelEdgeDetectionFilter.h */, + 843722791D4F260A002B398B /* GPUImageDirectionalSobelEdgeDetectionFilter.m */, + 8437227A1D4F260A002B398B /* GPUImageDissolveBlendFilter.h */, + 8437227B1D4F260A002B398B /* GPUImageDissolveBlendFilter.m */, + 8437227C1D4F260A002B398B /* GPUImageDivideBlendFilter.h */, + 8437227D1D4F260A002B398B /* GPUImageDivideBlendFilter.m */, + 8437227E1D4F260A002B398B /* GPUImageEmbossFilter.h */, + 8437227F1D4F260A002B398B /* GPUImageEmbossFilter.m */, + 843722801D4F260A002B398B /* GPUImageErosionFilter.h */, + 843722811D4F260A002B398B /* GPUImageErosionFilter.m */, + 843722821D4F260A002B398B /* GPUImageExclusionBlendFilter.h */, + 843722831D4F260A002B398B /* GPUImageExclusionBlendFilter.m */, + 843722841D4F260A002B398B /* GPUImageExposureFilter.h */, + 843722851D4F260A002B398B /* GPUImageExposureFilter.m */, + 843722861D4F260A002B398B /* GPUImageFalseColorFilter.h */, + 843722871D4F260A002B398B /* GPUImageFalseColorFilter.m */, + 843722881D4F260A002B398B /* GPUImageFASTCornerDetectionFilter.h */, + 843722891D4F260A002B398B /* GPUImageFASTCornerDetectionFilter.m */, + 8437228A1D4F260A002B398B /* GPUImageFilter.h */, + 8437228B1D4F260A002B398B /* GPUImageFilter.m */, + 8437228C1D4F260A002B398B /* GPUImageFilterGroup.h */, + 8437228D1D4F260A002B398B /* GPUImageFilterGroup.m */, + 
8437228E1D4F260A002B398B /* GPUImageFilterPipeline.h */, + 8437228F1D4F260A002B398B /* GPUImageFilterPipeline.m */, + 843722901D4F260A002B398B /* GPUImageFourInputFilter.h */, + 843722911D4F260A002B398B /* GPUImageFourInputFilter.m */, + 843722921D4F260A002B398B /* GPUImageFramebuffer.h */, + 843722931D4F260A002B398B /* GPUImageFramebuffer.m */, + 843722941D4F260A002B398B /* GPUImageFramebufferCache.h */, + 843722951D4F260A002B398B /* GPUImageFramebufferCache.m */, + 843722961D4F260A002B398B /* GPUImageGammaFilter.h */, + 843722971D4F260A002B398B /* GPUImageGammaFilter.m */, + 843722981D4F260A002B398B /* GPUImageGaussianBlurFilter.h */, + 843722991D4F260A002B398B /* GPUImageGaussianBlurFilter.m */, + 8437229A1D4F260A002B398B /* GPUImageGaussianBlurPositionFilter.h */, + 8437229B1D4F260A002B398B /* GPUImageGaussianBlurPositionFilter.m */, + 8437229C1D4F260A002B398B /* GPUImageGaussianSelectiveBlurFilter.h */, + 8437229D1D4F260A002B398B /* GPUImageGaussianSelectiveBlurFilter.m */, + 8437229E1D4F260A002B398B /* GPUImageGlassSphereFilter.h */, + 8437229F1D4F260A002B398B /* GPUImageGlassSphereFilter.m */, + 843722A01D4F260A002B398B /* GPUImageGrayscaleFilter.h */, + 843722A11D4F260A002B398B /* GPUImageGrayscaleFilter.m */, + 843722A21D4F260A002B398B /* GPUImageHalftoneFilter.h */, + 843722A31D4F260A002B398B /* GPUImageHalftoneFilter.m */, + 843722A41D4F260A002B398B /* GPUImageHardLightBlendFilter.h */, + 843722A51D4F260A002B398B /* GPUImageHardLightBlendFilter.m */, + 843722A61D4F260A002B398B /* GPUImageHarrisCornerDetectionFilter.h */, + 843722A71D4F260A002B398B /* GPUImageHarrisCornerDetectionFilter.m */, + 843722A81D4F260A002B398B /* GPUImageHazeFilter.h */, + 843722A91D4F260A002B398B /* GPUImageHazeFilter.m */, + 843722AA1D4F260A002B398B /* GPUImageHighlightShadowFilter.h */, + 843722AB1D4F260A002B398B /* GPUImageHighlightShadowFilter.m */, + 843722AC1D4F260A002B398B /* GPUImageHighlightShadowTintFilter.h */, + 843722AD1D4F260A002B398B /* 
GPUImageHighlightShadowTintFilter.m */, + 843722AE1D4F260A002B398B /* GPUImageHighPassFilter.h */, + 843722AF1D4F260A002B398B /* GPUImageHighPassFilter.m */, + 843722B01D4F260A002B398B /* GPUImageHistogramEqualizationFilter.h */, + 843722B11D4F260A002B398B /* GPUImageHistogramEqualizationFilter.m */, + 843722B21D4F260A002B398B /* GPUImageHistogramFilter.h */, + 843722B31D4F260A002B398B /* GPUImageHistogramFilter.m */, + 843722B41D4F260A002B398B /* GPUImageHistogramGenerator.h */, + 843722B51D4F260A002B398B /* GPUImageHistogramGenerator.m */, + 843722B61D4F260A002B398B /* GPUImageHoughTransformLineDetector.h */, + 843722B71D4F260A002B398B /* GPUImageHoughTransformLineDetector.m */, + 843722B81D4F260A002B398B /* GPUImageHSBFilter.h */, + 843722B91D4F260A002B398B /* GPUImageHSBFilter.m */, + 843722BA1D4F260A002B398B /* GPUImageHueBlendFilter.h */, + 843722BB1D4F260A002B398B /* GPUImageHueBlendFilter.m */, + 843722BC1D4F260A002B398B /* GPUImageHueFilter.h */, + 843722BD1D4F260A002B398B /* GPUImageHueFilter.m */, + 843722BE1D4F260A002B398B /* GPUImageiOSBlurFilter.h */, + 843722BF1D4F260A002B398B /* GPUImageiOSBlurFilter.m */, + 843722C01D4F260A002B398B /* GPUImageJFAVoronoiFilter.h */, + 843722C11D4F260A002B398B /* GPUImageJFAVoronoiFilter.m */, + 843722C21D4F260A002B398B /* GPUImageKuwaharaFilter.h */, + 843722C31D4F260A002B398B /* GPUImageKuwaharaFilter.m */, + 843722C41D4F260A002B398B /* GPUImageKuwaharaRadius3Filter.h */, + 843722C51D4F260A002B398B /* GPUImageKuwaharaRadius3Filter.m */, + 843722C61D4F260A002B398B /* GPUImageLanczosResamplingFilter.h */, + 843722C71D4F260A002B398B /* GPUImageLanczosResamplingFilter.m */, + 843722C81D4F260A002B398B /* GPUImageLaplacianFilter.h */, + 843722C91D4F260A002B398B /* GPUImageLaplacianFilter.m */, + 843722CA1D4F260A002B398B /* GPUImageLevelsFilter.h */, + 843722CB1D4F260A002B398B /* GPUImageLevelsFilter.m */, + 843722CC1D4F260A002B398B /* GPUImageLightenBlendFilter.h */, + 843722CD1D4F260A002B398B /* 
GPUImageLightenBlendFilter.m */, + 843722CE1D4F260A002B398B /* GPUImageLinearBurnBlendFilter.h */, + 843722CF1D4F260A002B398B /* GPUImageLinearBurnBlendFilter.m */, + 843722D01D4F260A002B398B /* GPUImageLineGenerator.h */, + 843722D11D4F260A002B398B /* GPUImageLineGenerator.m */, + 843722D21D4F260A002B398B /* GPUImageLocalBinaryPatternFilter.h */, + 843722D31D4F260A002B398B /* GPUImageLocalBinaryPatternFilter.m */, + 843722D41D4F260A002B398B /* GPUImageLookupFilter.h */, + 843722D51D4F260A002B398B /* GPUImageLookupFilter.m */, + 843722D61D4F260A002B398B /* GPUImageLowPassFilter.h */, + 843722D71D4F260A002B398B /* GPUImageLowPassFilter.m */, + 843722D81D4F260A002B398B /* GPUImageLuminanceRangeFilter.h */, + 843722D91D4F260A002B398B /* GPUImageLuminanceRangeFilter.m */, + 843722DA1D4F260A002B398B /* GPUImageLuminanceThresholdFilter.h */, + 843722DB1D4F260A002B398B /* GPUImageLuminanceThresholdFilter.m */, + 843722DC1D4F260A002B398B /* GPUImageLuminosity.h */, + 843722DD1D4F260A002B398B /* GPUImageLuminosity.m */, + 843722DE1D4F260A002B398B /* GPUImageLuminosityBlendFilter.h */, + 843722DF1D4F260A002B398B /* GPUImageLuminosityBlendFilter.m */, + 843722E01D4F260A002B398B /* GPUImageMaskFilter.h */, + 843722E11D4F260A002B398B /* GPUImageMaskFilter.m */, + 843722E21D4F260A002B398B /* GPUImageMedianFilter.h */, + 843722E31D4F260A002B398B /* GPUImageMedianFilter.m */, + 843722E41D4F260A002B398B /* GPUImageMissEtikateFilter.h */, + 843722E51D4F260A002B398B /* GPUImageMissEtikateFilter.m */, + 843722E61D4F260A002B398B /* GPUImageMonochromeFilter.h */, + 843722E71D4F260A002B398B /* GPUImageMonochromeFilter.m */, + 843722E81D4F260A002B398B /* GPUImageMosaicFilter.h */, + 843722E91D4F260A002B398B /* GPUImageMosaicFilter.m */, + 843722EA1D4F260A002B398B /* GPUImageMotionBlurFilter.h */, + 843722EB1D4F260A002B398B /* GPUImageMotionBlurFilter.m */, + 843722EC1D4F260A002B398B /* GPUImageMotionDetector.h */, + 843722ED1D4F260A002B398B /* GPUImageMotionDetector.m */, + 
843722EE1D4F260A002B398B /* GPUImageMovie.h */, + 843722EF1D4F260A002B398B /* GPUImageMovie.m */, + 843722F01D4F260A002B398B /* GPUImageMovieComposition.h */, + 843722F11D4F260A002B398B /* GPUImageMovieComposition.m */, + 843722F21D4F260A002B398B /* GPUImageMultiplyBlendFilter.h */, + 843722F31D4F260A002B398B /* GPUImageMultiplyBlendFilter.m */, + 843722F41D4F260A002B398B /* GPUImageNobleCornerDetectionFilter.h */, + 843722F51D4F260A002B398B /* GPUImageNobleCornerDetectionFilter.m */, + 843722F61D4F260A002B398B /* GPUImageNonMaximumSuppressionFilter.h */, + 843722F71D4F260A002B398B /* GPUImageNonMaximumSuppressionFilter.m */, + 843722F81D4F260A002B398B /* GPUImageNormalBlendFilter.h */, + 843722F91D4F260A002B398B /* GPUImageNormalBlendFilter.m */, + 843722FA1D4F260A002B398B /* GPUImageOpacityFilter.h */, + 843722FB1D4F260A002B398B /* GPUImageOpacityFilter.m */, + 843722FC1D4F260A002B398B /* GPUImageOpeningFilter.h */, + 843722FD1D4F260A002B398B /* GPUImageOpeningFilter.m */, + 843722FE1D4F260A002B398B /* GPUImageOutput.h */, + 843722FF1D4F260A002B398B /* GPUImageOutput.m */, + 843723001D4F260A002B398B /* GPUImageOverlayBlendFilter.h */, + 843723011D4F260A002B398B /* GPUImageOverlayBlendFilter.m */, + 843723021D4F260A002B398B /* GPUImageParallelCoordinateLineTransformFilter.h */, + 843723031D4F260A002B398B /* GPUImageParallelCoordinateLineTransformFilter.m */, + 843723041D4F260A002B398B /* GPUImagePerlinNoiseFilter.h */, + 843723051D4F260A002B398B /* GPUImagePerlinNoiseFilter.m */, + 843723061D4F260A002B398B /* GPUImagePinchDistortionFilter.h */, + 843723071D4F260A002B398B /* GPUImagePinchDistortionFilter.m */, + 843723081D4F260A002B398B /* GPUImagePixellateFilter.h */, + 843723091D4F260A002B398B /* GPUImagePixellateFilter.m */, + 8437230A1D4F260A002B398B /* GPUImagePixellatePositionFilter.h */, + 8437230B1D4F260A002B398B /* GPUImagePixellatePositionFilter.m */, + 8437230C1D4F260A002B398B /* GPUImagePoissonBlendFilter.h */, + 8437230D1D4F260A002B398B /* 
GPUImagePoissonBlendFilter.m */, + 8437230E1D4F260A002B398B /* GPUImagePolarPixellateFilter.h */, + 8437230F1D4F260A002B398B /* GPUImagePolarPixellateFilter.m */, + 843723101D4F260A002B398B /* GPUImagePolkaDotFilter.h */, + 843723111D4F260A002B398B /* GPUImagePolkaDotFilter.m */, + 843723121D4F260A002B398B /* GPUImagePosterizeFilter.h */, + 843723131D4F260A002B398B /* GPUImagePosterizeFilter.m */, + 843723141D4F260A002B398B /* GPUImagePrewittEdgeDetectionFilter.h */, + 843723151D4F260A002B398B /* GPUImagePrewittEdgeDetectionFilter.m */, + 843723161D4F260A002B398B /* GPUImageRawDataInput.h */, + 843723171D4F260A002B398B /* GPUImageRawDataInput.m */, + 843723181D4F260A002B398B /* GPUImageRawDataOutput.h */, + 843723191D4F260A002B398B /* GPUImageRawDataOutput.m */, + 8437231A1D4F260A002B398B /* GPUImageRGBClosingFilter.h */, + 8437231B1D4F260A002B398B /* GPUImageRGBClosingFilter.m */, + 8437231C1D4F260A002B398B /* GPUImageRGBDilationFilter.h */, + 8437231D1D4F260A002B398B /* GPUImageRGBDilationFilter.m */, + 8437231E1D4F260A002B398B /* GPUImageRGBErosionFilter.h */, + 8437231F1D4F260A002B398B /* GPUImageRGBErosionFilter.m */, + 843723201D4F260A002B398B /* GPUImageRGBFilter.h */, + 843723211D4F260A002B398B /* GPUImageRGBFilter.m */, + 843723221D4F260A002B398B /* GPUImageRGBOpeningFilter.h */, + 843723231D4F260A002B398B /* GPUImageRGBOpeningFilter.m */, + 843723241D4F260A002B398B /* GPUImageSaturationBlendFilter.h */, + 843723251D4F260A002B398B /* GPUImageSaturationBlendFilter.m */, + 843723261D4F260A002B398B /* GPUImageSaturationFilter.h */, + 843723271D4F260A002B398B /* GPUImageSaturationFilter.m */, + 843723281D4F260A002B398B /* GPUImageScreenBlendFilter.h */, + 843723291D4F260A002B398B /* GPUImageScreenBlendFilter.m */, + 8437232A1D4F260A002B398B /* GPUImageSepiaFilter.h */, + 8437232B1D4F260A002B398B /* GPUImageSepiaFilter.m */, + 8437232C1D4F260A002B398B /* GPUImageSharpenFilter.h */, + 8437232D1D4F260A002B398B /* GPUImageSharpenFilter.m */, + 
8437232E1D4F260A002B398B /* GPUImageShiTomasiFeatureDetectionFilter.h */, + 8437232F1D4F260A002B398B /* GPUImageShiTomasiFeatureDetectionFilter.m */, + 843723301D4F260A002B398B /* GPUImageSingleComponentGaussianBlurFilter.h */, + 843723311D4F260A002B398B /* GPUImageSingleComponentGaussianBlurFilter.m */, + 843723321D4F260A002B398B /* GPUImageSketchFilter.h */, + 843723331D4F260A002B398B /* GPUImageSketchFilter.m */, + 843723341D4F260A002B398B /* GPUImageSkinToneFilter.h */, + 843723351D4F260A002B398B /* GPUImageSkinToneFilter.m */, + 843723361D4F260A002B398B /* GPUImageSmoothToonFilter.h */, + 843723371D4F260A002B398B /* GPUImageSmoothToonFilter.m */, + 843723381D4F260A002B398B /* GPUImageSobelEdgeDetectionFilter.h */, + 843723391D4F260A002B398B /* GPUImageSobelEdgeDetectionFilter.m */, + 8437233A1D4F260A002B398B /* GPUImageSoftEleganceFilter.h */, + 8437233B1D4F260A002B398B /* GPUImageSoftEleganceFilter.m */, + 8437233C1D4F260A002B398B /* GPUImageSoftLightBlendFilter.h */, + 8437233D1D4F260A002B398B /* GPUImageSoftLightBlendFilter.m */, + 8437233E1D4F260A002B398B /* GPUImageSolarizeFilter.h */, + 8437233F1D4F260A002B398B /* GPUImageSolarizeFilter.m */, + 843723401D4F260A002B398B /* GPUImageSolidColorGenerator.h */, + 843723411D4F260A002B398B /* GPUImageSolidColorGenerator.m */, + 843723421D4F260A002B398B /* GPUImageSourceOverBlendFilter.h */, + 843723431D4F260A002B398B /* GPUImageSourceOverBlendFilter.m */, + 843723441D4F260A002B398B /* GPUImageSphereRefractionFilter.h */, + 843723451D4F260A002B398B /* GPUImageSphereRefractionFilter.m */, + 843723461D4F260A002B398B /* GPUImageStillCamera.h */, + 843723471D4F260A002B398B /* GPUImageStillCamera.m */, + 843723481D4F260A002B398B /* GPUImageStretchDistortionFilter.h */, + 843723491D4F260A002B398B /* GPUImageStretchDistortionFilter.m */, + 8437234A1D4F260A002B398B /* GPUImageSubtractBlendFilter.h */, + 8437234B1D4F260A002B398B /* GPUImageSubtractBlendFilter.m */, + 8437234C1D4F260A002B398B /* GPUImageSwirlFilter.h */, + 
8437234D1D4F260A002B398B /* GPUImageSwirlFilter.m */, + 8437234E1D4F260A002B398B /* GPUImageTextureInput.h */, + 8437234F1D4F260A002B398B /* GPUImageTextureInput.m */, + 843723501D4F260A002B398B /* GPUImageTextureOutput.h */, + 843723511D4F260A002B398B /* GPUImageTextureOutput.m */, + 843723521D4F260A002B398B /* GPUImageThreeInputFilter.h */, + 843723531D4F260A002B398B /* GPUImageThreeInputFilter.m */, + 843723541D4F260A002B398B /* GPUImageThresholdEdgeDetectionFilter.h */, + 843723551D4F260A002B398B /* GPUImageThresholdEdgeDetectionFilter.m */, + 843723561D4F260A002B398B /* GPUImageThresholdedNonMaximumSuppressionFilter.h */, + 843723571D4F260A002B398B /* GPUImageThresholdedNonMaximumSuppressionFilter.m */, + 843723581D4F260A002B398B /* GPUImageThresholdSketchFilter.h */, + 843723591D4F260A002B398B /* GPUImageThresholdSketchFilter.m */, + 8437235A1D4F260A002B398B /* GPUImageTiltShiftFilter.h */, + 8437235B1D4F260A002B398B /* GPUImageTiltShiftFilter.m */, + 8437235C1D4F260A002B398B /* GPUImageToneCurveFilter.h */, + 8437235D1D4F260A002B398B /* GPUImageToneCurveFilter.m */, + 8437235E1D4F260A002B398B /* GPUImageToonFilter.h */, + 8437235F1D4F260A002B398B /* GPUImageToonFilter.m */, + 843723601D4F260A002B398B /* GPUImageTransformFilter.h */, + 843723611D4F260A002B398B /* GPUImageTransformFilter.m */, + 843723621D4F260A002B398B /* GPUImageTwoInputCrossTextureSamplingFilter.h */, + 843723631D4F260A002B398B /* GPUImageTwoInputCrossTextureSamplingFilter.m */, + 843723641D4F260A002B398B /* GPUImageTwoInputFilter.h */, + 843723651D4F260A002B398B /* GPUImageTwoInputFilter.m */, + 843723661D4F260A002B398B /* GPUImageTwoPassFilter.h */, + 843723671D4F260A002B398B /* GPUImageTwoPassFilter.m */, + 843723681D4F260A002B398B /* GPUImageTwoPassTextureSamplingFilter.h */, + 843723691D4F260A002B398B /* GPUImageTwoPassTextureSamplingFilter.m */, + 8437236A1D4F260A002B398B /* GPUImageUIElement.h */, + 8437236B1D4F260A002B398B /* GPUImageUIElement.m */, + 8437236C1D4F260A002B398B /* 
GPUImageUnsharpMaskFilter.h */, + 8437236D1D4F260A002B398B /* GPUImageUnsharpMaskFilter.m */, + 8437236E1D4F260A002B398B /* GPUImageVideoCamera.h */, + 8437236F1D4F260A002B398B /* GPUImageVideoCamera.m */, + 843723701D4F260A002B398B /* GPUImageVignetteFilter.h */, + 843723711D4F260A002B398B /* GPUImageVignetteFilter.m */, + 843723721D4F260A002B398B /* GPUImageVoronoiConsumerFilter.h */, + 843723731D4F260A002B398B /* GPUImageVoronoiConsumerFilter.m */, + 843723741D4F260A002B398B /* GPUImageWeakPixelInclusionFilter.h */, + 843723751D4F260A002B398B /* GPUImageWeakPixelInclusionFilter.m */, + 843723761D4F260A002B398B /* GPUImageWhiteBalanceFilter.h */, + 843723771D4F260A002B398B /* GPUImageWhiteBalanceFilter.m */, + 843723781D4F260A002B398B /* GPUImageXYDerivativeFilter.h */, + 843723791D4F260A002B398B /* GPUImageXYDerivativeFilter.m */, + 8437237A1D4F260A002B398B /* GPUImageZoomBlurFilter.h */, + 8437237B1D4F260A002B398B /* GPUImageZoomBlurFilter.m */, + 8437237C1D4F260A002B398B /* iOS */, + ); + path = GPUImage; + sourceTree = ""; + }; + 8437237C1D4F260A002B398B /* iOS */ = { + isa = PBXGroup; + children = ( + 8437237D1D4F260A002B398B /* Framework */, + 8437237F1D4F260A002B398B /* GPUImageContext.h */, + 843723801D4F260A002B398B /* GPUImageContext.m */, + 843723811D4F260A002B398B /* GPUImageMovieWriter.h */, + 843723821D4F260A002B398B /* GPUImageMovieWriter.m */, + 843723831D4F260A002B398B /* GPUImagePicture+TextureSubimage.h */, + 843723841D4F260A002B398B /* GPUImagePicture+TextureSubimage.m */, + 843723851D4F260A002B398B /* GPUImagePicture.h */, + 843723861D4F260A002B398B /* GPUImagePicture.m */, + 843723871D4F260A002B398B /* GPUImageView.h */, + 843723881D4F260A002B398B /* GPUImageView.m */, + ); + path = iOS; + sourceTree = ""; + }; + 8437237D1D4F260A002B398B /* Framework */ = { + isa = PBXGroup; + children = ( + 8437237E1D4F260A002B398B /* GPUImageFramework.h */, + ); + path = Framework; + sourceTree = ""; + }; + 843723891D4F260A002B398B /* pili-librtmp */ = { + 
isa = PBXGroup; + children = ( + 8437238A1D4F260A002B398B /* amf.c */, + 8437238B1D4F260A002B398B /* amf.h */, + 8437238C1D4F260A002B398B /* bytes.h */, + 8437238D1D4F260A002B398B /* dh.h */, + 8437238E1D4F260A002B398B /* dhgroups.h */, + 8437238F1D4F260A002B398B /* error.c */, + 843723901D4F260A002B398B /* error.h */, + 843723911D4F260A002B398B /* handshake.h */, + 843723921D4F260A002B398B /* hashswf.c */, + 843723931D4F260A002B398B /* http.h */, + 843723941D4F260A002B398B /* log.c */, + 843723951D4F260A002B398B /* log.h */, + 843723961D4F260A002B398B /* parseurl.c */, + 843723971D4F260A002B398B /* rtmp.c */, + 843723981D4F260A002B398B /* rtmp.h */, + 843723991D4F260A002B398B /* rtmp_sys.h */, + ); + path = "pili-librtmp"; + sourceTree = ""; + }; B2CD14611D45F18B008082E8 /* H264 */ = { isa = PBXGroup; children = ( @@ -343,24 +1465,171 @@ buildActionMask = 2147483647; files = ( 84001FE51D0016380026C63F /* LFAudioFrame.h in Headers */, + 843724711D4F260A002B398B /* GPUImagePerlinNoiseFilter.h in Headers */, + 843724F61D4F260A002B398B /* bytes.h in Headers */, 84001FED1D0016380026C63F /* LFVideoFrame.h in Headers */, + 843724A51D4F260A002B398B /* GPUImageSobelEdgeDetectionFilter.h in Headers */, + 843724231D4F260A002B398B /* GPUImageHoughTransformLineDetector.h in Headers */, + 8437246D1D4F260A002B398B /* GPUImageOverlayBlendFilter.h in Headers */, 84001FE71D0016380026C63F /* LFFrame.h in Headers */, + 843724C11D4F260A002B398B /* GPUImageThresholdEdgeDetectionFilter.h in Headers */, + 843724B71D4F260A002B398B /* GPUImageSubtractBlendFilter.h in Headers */, + 8437245B1D4F260A002B398B /* GPUImageMovie.h in Headers */, + 843724191D4F260A002B398B /* GPUImageHighlightShadowTintFilter.h in Headers */, + 843724291D4F260A002B398B /* GPUImageHueFilter.h in Headers */, + 843724FF1D4F260A002B398B /* log.h in Headers */, + 843724AF1D4F260A002B398B /* GPUImageSourceOverBlendFilter.h in Headers */, + 843723CB1D4F260A002B398B /* GPUImageColorLocalBinaryPatternFilter.h in Headers 
*/, + 843723DF1D4F260A002B398B /* GPUImageDifferenceBlendFilter.h in Headers */, + 843724EA1D4F260A002B398B /* GPUImageContext.h in Headers */, + 843723D91D4F260A002B398B /* GPUImageCrosshairGenerator.h in Headers */, 84001FDB1D0016380026C63F /* LFLiveAudioConfiguration.h in Headers */, + 843723ED1D4F260A002B398B /* GPUImageErosionFilter.h in Headers */, B289F1DD1D3DE77F00D9C7A5 /* LFStreamRtmpSocket.h in Headers */, + 843724211D4F260A002B398B /* GPUImageHistogramGenerator.h in Headers */, + 843723C51D4F260A002B398B /* GPUImageColorConversion.h in Headers */, + 8437239D1D4F260A002B398B /* GPUImage3x3ConvolutionFilter.h in Headers */, + 843724971D4F260A002B398B /* GPUImageSepiaFilter.h in Headers */, + 843723AF1D4F260A002B398B /* GPUImageBoxBlurFilter.h in Headers */, + 8437248B1D4F260A002B398B /* GPUImageRGBErosionFilter.h in Headers */, + 843724D11D4F260A002B398B /* GPUImageTwoInputFilter.h in Headers */, + 843723B31D4F260A002B398B /* GPUImageBuffer.h in Headers */, + 843724D31D4F260A002B398B /* GPUImageTwoPassFilter.h in Headers */, + 843724311D4F260A002B398B /* GPUImageKuwaharaRadius3Filter.h in Headers */, + 843724F21D4F260A002B398B /* GPUImageView.h in Headers */, + 843724991D4F260A002B398B /* GPUImageSharpenFilter.h in Headers */, 84001FDD1D0016380026C63F /* LFLiveVideoConfiguration.h in Headers */, B2CD14701D45F18B008082E8 /* LFMP4Atom.h in Headers */, 849005B61D4B5C8400D2A3D5 /* error.h in Headers */, + 843724FB1D4F260A002B398B /* handshake.h in Headers */, + 843723F31D4F260A002B398B /* GPUImageFalseColorFilter.h in Headers */, + 843723B71D4F260A002B398B /* GPUImageCannyEdgeDetectionFilter.h in Headers */, + 843724151D4F260A002B398B /* GPUImageHazeFilter.h in Headers */, + 843723E51D4F260A002B398B /* GPUImageDirectionalSobelEdgeDetectionFilter.h in Headers */, + 843724091D4F260A002B398B /* GPUImageGaussianSelectiveBlurFilter.h in Headers */, + 843724DD1D4F260A002B398B /* GPUImageVignetteFilter.h in Headers */, + 843724AD1D4F260A002B398B /* 
GPUImageSolidColorGenerator.h in Headers */, + 8437244D1D4F260A002B398B /* GPUImageMaskFilter.h in Headers */, + 843723E91D4F260A002B398B /* GPUImageDivideBlendFilter.h in Headers */, 84001FE31D0016380026C63F /* LFLiveSession.h in Headers */, + 843724411D4F260A002B398B /* GPUImageLookupFilter.h in Headers */, + 843724E91D4F260A002B398B /* GPUImageFramework.h in Headers */, + 8437244B1D4F260A002B398B /* GPUImageLuminosityBlendFilter.h in Headers */, + 8437247D1D4F260A002B398B /* GPUImagePolkaDotFilter.h in Headers */, + 843724571D4F260A002B398B /* GPUImageMotionBlurFilter.h in Headers */, + 843724BD1D4F260A002B398B /* GPUImageTextureOutput.h in Headers */, + 843724DB1D4F260A002B398B /* GPUImageVideoCamera.h in Headers */, + 843724791D4F260A002B398B /* GPUImagePoissonBlendFilter.h in Headers */, B289F1DB1D3DE77F00D9C7A5 /* LFStreamingBuffer.h in Headers */, + 843724771D4F260A002B398B /* GPUImagePixellatePositionFilter.h in Headers */, + 843724911D4F260A002B398B /* GPUImageSaturationBlendFilter.h in Headers */, + 8437240D1D4F260A002B398B /* GPUImageGrayscaleFilter.h in Headers */, + 8437249D1D4F260A002B398B /* GPUImageSingleComponentGaussianBlurFilter.h in Headers */, + 843724751D4F260A002B398B /* GPUImagePixellateFilter.h in Headers */, + 843724E71D4F260A002B398B /* GPUImageZoomBlurFilter.h in Headers */, + 843723FF1D4F260A002B398B /* GPUImageFramebuffer.h in Headers */, + 8437244F1D4F260A002B398B /* GPUImageMedianFilter.h in Headers */, + 843724B11D4F260A002B398B /* GPUImageSphereRefractionFilter.h in Headers */, + 843724631D4F260A002B398B /* GPUImageNonMaximumSuppressionFilter.h in Headers */, + 843724591D4F260A002B398B /* GPUImageMotionDetector.h in Headers */, + 8437246F1D4F260A002B398B /* GPUImageParallelCoordinateLineTransformFilter.h in Headers */, + 843724931D4F260A002B398B /* GPUImageSaturationFilter.h in Headers */, + 843723EF1D4F260A002B398B /* GPUImageExclusionBlendFilter.h in Headers */, + 843724811D4F260A002B398B /* GPUImagePrewittEdgeDetectionFilter.h 
in Headers */, + 843724C91D4F260A002B398B /* GPUImageToneCurveFilter.h in Headers */, + 843724CF1D4F260A002B398B /* GPUImageTwoInputCrossTextureSamplingFilter.h in Headers */, + 843723AD1D4F260A002B398B /* GPUImageBilateralFilter.h in Headers */, + 843724F81D4F260A002B398B /* dhgroups.h in Headers */, + 843724731D4F260A002B398B /* GPUImagePinchDistortionFilter.h in Headers */, + 843724D71D4F260A002B398B /* GPUImageUIElement.h in Headers */, + 843724BB1D4F260A002B398B /* GPUImageTextureInput.h in Headers */, + 843724871D4F260A002B398B /* GPUImageRGBClosingFilter.h in Headers */, + 843724371D4F260A002B398B /* GPUImageLevelsFilter.h in Headers */, + 843724951D4F260A002B398B /* GPUImageScreenBlendFilter.h in Headers */, 84001FEB1D0016380026C63F /* LFLiveStreamInfo.h in Headers */, + 843724251D4F260A002B398B /* GPUImageHSBFilter.h in Headers */, + 843724891D4F260A002B398B /* GPUImageRGBDilationFilter.h in Headers */, + 843724C31D4F260A002B398B /* GPUImageThresholdedNonMaximumSuppressionFilter.h in Headers */, + 843723A71D4F260A002B398B /* GPUImageAmatorkaFilter.h in Headers */, + 8437245F1D4F260A002B398B /* GPUImageMultiplyBlendFilter.h in Headers */, 84001FE91D0016380026C63F /* LFLiveDebug.h in Headers */, + 843723BB1D4F260A002B398B /* GPUImageChromaKeyBlendFilter.h in Headers */, + 8437239A1D4F260A002B398B /* GLProgram.h in Headers */, + 843723E71D4F260A002B398B /* GPUImageDissolveBlendFilter.h in Headers */, + 843723B51D4F260A002B398B /* GPUImageBulgeDistortionFilter.h in Headers */, + 843724331D4F260A002B398B /* GPUImageLanczosResamplingFilter.h in Headers */, + 843724131D4F260A002B398B /* GPUImageHarrisCornerDetectionFilter.h in Headers */, + 843724C71D4F260A002B398B /* GPUImageTiltShiftFilter.h in Headers */, + 843723F91D4F260A002B398B /* GPUImageFilterGroup.h in Headers */, + 843723C31D4F260A002B398B /* GPUImageColorBurnBlendFilter.h in Headers */, + 8437243B1D4F260A002B398B /* GPUImageLinearBurnBlendFilter.h in Headers */, + 843724011D4F260A002B398B /* 
GPUImageFramebufferCache.h in Headers */, + 8437247B1D4F260A002B398B /* GPUImagePolarPixellateFilter.h in Headers */, + 843724AB1D4F260A002B398B /* GPUImageSolarizeFilter.h in Headers */, + 843724071D4F260A002B398B /* GPUImageGaussianBlurPositionFilter.h in Headers */, + 843723D71D4F260A002B398B /* GPUImageCropFilter.h in Headers */, + 8437249B1D4F260A002B398B /* GPUImageShiTomasiFeatureDetectionFilter.h in Headers */, + 843724031D4F260A002B398B /* GPUImageGammaFilter.h in Headers */, + 8437242B1D4F260A002B398B /* GPUImageiOSBlurFilter.h in Headers */, + 843724EE1D4F260A002B398B /* GPUImagePicture+TextureSubimage.h in Headers */, + 843724111D4F260A002B398B /* GPUImageHardLightBlendFilter.h in Headers */, + 843723A51D4F260A002B398B /* GPUImageAlphaBlendFilter.h in Headers */, + 843724E11D4F260A002B398B /* GPUImageWeakPixelInclusionFilter.h in Headers */, + 843723EB1D4F260A002B398B /* GPUImageEmbossFilter.h in Headers */, + 843724EC1D4F260A002B398B /* GPUImageMovieWriter.h in Headers */, + 843724451D4F260A002B398B /* GPUImageLuminanceRangeFilter.h in Headers */, + 843723D31D4F260A002B398B /* GPUImageColourFASTSamplingOperation.h in Headers */, + 8437243F1D4F260A002B398B /* GPUImageLocalBinaryPatternFilter.h in Headers */, + 843725021D4F260A002B398B /* rtmp.h in Headers */, B2CD14761D45F18B008082E8 /* LFH264VideoEncoder.h in Headers */, + 843724FA1D4F260A002B398B /* error.h in Headers */, + 843723E31D4F260A002B398B /* GPUImageDirectionalNonMaximumSuppressionFilter.h in Headers */, + 843724351D4F260A002B398B /* GPUImageLaplacianFilter.h in Headers */, + 843724511D4F260A002B398B /* GPUImageMissEtikateFilter.h in Headers */, + 843724D91D4F260A002B398B /* GPUImageUnsharpMaskFilter.h in Headers */, + 843724A71D4F260A002B398B /* GPUImageSoftEleganceFilter.h in Headers */, + 843724CB1D4F260A002B398B /* GPUImageToonFilter.h in Headers */, + 843723DD1D4F260A002B398B /* GPUImageDarkenBlendFilter.h in Headers */, 84001FD61D0016380026C63F /* LFHardwareAudioEncoder.h in Headers 
*/, B289F1E01D3DE77F00D9C7A5 /* NSMutableArray+LFAdd.h in Headers */, + 8437242D1D4F260A002B398B /* GPUImageJFAVoronoiFilter.h in Headers */, + 843723D11D4F260A002B398B /* GPUImageColourFASTFeatureDetector.h in Headers */, + 843723F51D4F260A002B398B /* GPUImageFASTCornerDetectionFilter.h in Headers */, 84001FDF1D0016380026C63F /* LFGPUImageBeautyFilter.h in Headers */, + 843723BF1D4F260A002B398B /* GPUImageClosingFilter.h in Headers */, + 843723FB1D4F260A002B398B /* GPUImageFilterPipeline.h in Headers */, + 843724E51D4F260A002B398B /* GPUImageXYDerivativeFilter.h in Headers */, + 8437241F1D4F260A002B398B /* GPUImageHistogramFilter.h in Headers */, + 8437248D1D4F260A002B398B /* GPUImageRGBFilter.h in Headers */, + 843723E11D4F260A002B398B /* GPUImageDilationFilter.h in Headers */, + 843724531D4F260A002B398B /* GPUImageMonochromeFilter.h in Headers */, + 843724391D4F260A002B398B /* GPUImageLightenBlendFilter.h in Headers */, + 843723CD1D4F260A002B398B /* GPUImageColorMatrixFilter.h in Headers */, + 843724DF1D4F260A002B398B /* GPUImageVoronoiConsumerFilter.h in Headers */, + 8437241B1D4F260A002B398B /* GPUImageHighPassFilter.h in Headers */, + 843724CD1D4F260A002B398B /* GPUImageTransformFilter.h in Headers */, + 843724831D4F260A002B398B /* GPUImageRawDataInput.h in Headers */, + 843724F51D4F260A002B398B /* amf.h in Headers */, + 843723A31D4F260A002B398B /* GPUImageAddBlendFilter.h in Headers */, + 843724B91D4F260A002B398B /* GPUImageSwirlFilter.h in Headers */, + 8437246B1D4F260A002B398B /* GPUImageOutput.h in Headers */, + 843724851D4F260A002B398B /* GPUImageRawDataOutput.h in Headers */, + 843723DB1D4F260A002B398B /* GPUImageCrosshatchFilter.h in Headers */, + 843724491D4F260A002B398B /* GPUImageLuminosity.h in Headers */, 84001FD31D0016380026C63F /* LFVideoCapture.h in Headers */, 84001FD11D0016380026C63F /* LFAudioCapture.h in Headers */, + 843723C71D4F260A002B398B /* GPUImageColorDodgeBlendFilter.h in Headers */, + 8437241D1D4F260A002B398B /* 
GPUImageHistogramEqualizationFilter.h in Headers */, + 843723C91D4F260A002B398B /* GPUImageColorInvertFilter.h in Headers */, + 843724E31D4F260A002B398B /* GPUImageWhiteBalanceFilter.h in Headers */, + 843724C51D4F260A002B398B /* GPUImageThresholdSketchFilter.h in Headers */, + 8437248F1D4F260A002B398B /* GPUImageRGBOpeningFilter.h in Headers */, + 8437240B1D4F260A002B398B /* GPUImageGlassSphereFilter.h in Headers */, + 8437239F1D4F260A002B398B /* GPUImage3x3TextureSamplingFilter.h in Headers */, + 843724651D4F260A002B398B /* GPUImageNormalBlendFilter.h in Headers */, + 8437245D1D4F260A002B398B /* GPUImageMovieComposition.h in Headers */, 84001FE11D0016380026C63F /* LFGPUImageEmptyFilter.h in Headers */, + 843724B51D4F260A002B398B /* GPUImageStretchDistortionFilter.h in Headers */, 84001FDA1D0016380026C63F /* LFVideoEncoding.h in Headers */, 84001FE51D0016380026C63F /* LFAudioFrame.h in Headers */, 84001FED1D0016380026C63F /* LFVideoFrame.h in Headers */, @@ -368,12 +1637,50 @@ B2CD14731D45F18B008082E8 /* LFNALUnit.h in Headers */, 849005BF1D4B5C8400D2A3D5 /* rtmp_sys.h in Headers */, 849005BE1D4B5C8400D2A3D5 /* rtmp.h in Headers */, + 843724BF1D4F260A002B398B /* GPUImageThreeInputFilter.h in Headers */, + 843724F71D4F260A002B398B /* dh.h in Headers */, + 843725031D4F260A002B398B /* rtmp_sys.h in Headers */, + 843724691D4F260A002B398B /* GPUImageOpeningFilter.h in Headers */, + 843723C11D4F260A002B398B /* GPUImageColorBlendFilter.h in Headers */, + 8437249F1D4F260A002B398B /* GPUImageSketchFilter.h in Headers */, 84001FD81D0016380026C63F /* LFHardwareVideoEncoder.h in Headers */, + 843724A91D4F260A002B398B /* GPUImageSoftLightBlendFilter.h in Headers */, + 843723A91D4F260A002B398B /* GPUImageAverageColor.h in Headers */, + 843723FD1D4F260A002B398B /* GPUImageFourInputFilter.h in Headers */, + 843724431D4F260A002B398B /* GPUImageLowPassFilter.h in Headers */, + 8437239C1D4F260A002B398B /* GPUImage.h in Headers */, B289F1DF1D3DE77F00D9C7A5 /* LFStreamSocket.h in 
Headers */, + 8437242F1D4F260A002B398B /* GPUImageKuwaharaFilter.h in Headers */, + 843723B11D4F260A002B398B /* GPUImageBrightnessFilter.h in Headers */, + 843724FD1D4F260A002B398B /* http.h in Headers */, + 843724611D4F260A002B398B /* GPUImageNobleCornerDetectionFilter.h in Headers */, + 8437243D1D4F260A002B398B /* GPUImageLineGenerator.h in Headers */, 84001FD51D0016380026C63F /* LFAudioEncoding.h in Headers */, B2CD146D1D45F18B008082E8 /* LFAVEncoder.h in Headers */, 849005B11D4B5C8400D2A3D5 /* amf.h in Headers */, + 843723AB1D4F260A002B398B /* GPUImageAverageLuminanceThresholdFilter.h in Headers */, + 843723F71D4F260A002B398B /* GPUImageFilter.h in Headers */, + 8437240F1D4F260A002B398B /* GPUImageHalftoneFilter.h in Headers */, + 843723A11D4F260A002B398B /* GPUImageAdaptiveThresholdFilter.h in Headers */, + 843723CF1D4F260A002B398B /* GPUImageColorPackingFilter.h in Headers */, + 843723B91D4F260A002B398B /* GPUImageCGAColorspaceFilter.h in Headers */, + + 843724051D4F260A002B398B /* GPUImageGaussianBlurFilter.h in Headers */, + 843724B31D4F260A002B398B /* GPUImageStillCamera.h in Headers */, + 843724D51D4F260A002B398B /* GPUImageTwoPassTextureSamplingFilter.h in Headers */, + 843723F11D4F260A002B398B /* GPUImageExposureFilter.h in Headers */, + 843723D51D4F260A002B398B /* GPUImageContrastFilter.h in Headers */, + 843724A11D4F260A002B398B /* GPUImageSkinToneFilter.h in Headers */, + 843724271D4F260A002B398B /* GPUImageHueBlendFilter.h in Headers */, + 843724471D4F260A002B398B /* GPUImageLuminanceThresholdFilter.h in Headers */, + 843724551D4F260A002B398B /* GPUImageMosaicFilter.h in Headers */, + 843723BD1D4F260A002B398B /* GPUImageChromaKeyFilter.h in Headers */, + 843724171D4F260A002B398B /* GPUImageHighlightShadowFilter.h in Headers */, 84001F8E1D0015D10026C63F /* LFLiveKit.h in Headers */, + 843724671D4F260A002B398B /* GPUImageOpacityFilter.h in Headers */, + 8437247F1D4F260A002B398B /* GPUImagePosterizeFilter.h in Headers */, + 843724A31D4F260A002B398B /* 
GPUImageSmoothToonFilter.h in Headers */, + 843724F01D4F260A002B398B /* GPUImagePicture.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -474,29 +1781,207 @@ buildActionMask = 2147483647; files = ( 84001FE21D0016380026C63F /* LFGPUImageEmptyFilter.m in Sources */, + 843724C21D4F260A002B398B /* GPUImageThresholdEdgeDetectionFilter.m in Sources */, 84001FE41D0016380026C63F /* LFLiveSession.m in Sources */, B2CD14711D45F18B008082E8 /* LFMP4Atom.m in Sources */, 849005B81D4B5C8400D2A3D5 /* hashswf.c in Sources */, + 843724BA1D4F260A002B398B /* GPUImageSwirlFilter.m in Sources */, + 843723A41D4F260A002B398B /* GPUImageAddBlendFilter.m in Sources */, + 843723B01D4F260A002B398B /* GPUImageBoxBlurFilter.m in Sources */, + 843723DC1D4F260A002B398B /* GPUImageCrosshatchFilter.m in Sources */, + 843724E41D4F260A002B398B /* GPUImageWhiteBalanceFilter.m in Sources */, + 843724101D4F260A002B398B /* GPUImageHalftoneFilter.m in Sources */, + 843724301D4F260A002B398B /* GPUImageKuwaharaFilter.m in Sources */, + 843724201D4F260A002B398B /* GPUImageHistogramFilter.m in Sources */, + 8437246E1D4F260A002B398B /* GPUImageOverlayBlendFilter.m in Sources */, + 843723A81D4F260A002B398B /* GPUImageAmatorkaFilter.m in Sources */, 84001FE61D0016380026C63F /* LFAudioFrame.m in Sources */, + 843724C41D4F260A002B398B /* GPUImageThresholdedNonMaximumSuppressionFilter.m in Sources */, + 843724F91D4F260A002B398B /* error.c in Sources */, + 843724DC1D4F260A002B398B /* GPUImageVideoCamera.m in Sources */, + 8437246C1D4F260A002B398B /* GPUImageOutput.m in Sources */, + 843724B01D4F260A002B398B /* GPUImageSourceOverBlendFilter.m in Sources */, + 843724581D4F260A002B398B /* GPUImageMotionBlurFilter.m in Sources */, 84001FDC1D0016380026C63F /* LFLiveAudioConfiguration.m in Sources */, 84001FD41D0016380026C63F /* LFVideoCapture.m in Sources */, + 843723B81D4F260A002B398B /* GPUImageCannyEdgeDetectionFilter.m in Sources */, 84001FE81D0016380026C63F /* LFFrame.m in Sources */, 
B2CD14721D45F18B008082E8 /* LFNALUnit.cpp in Sources */, + 843724CE1D4F260A002B398B /* GPUImageTransformFilter.m in Sources */, + 843723E81D4F260A002B398B /* GPUImageDissolveBlendFilter.m in Sources */, + 843723D81D4F260A002B398B /* GPUImageCropFilter.m in Sources */, + 843723EC1D4F260A002B398B /* GPUImageEmbossFilter.m in Sources */, + 843724AA1D4F260A002B398B /* GPUImageSoftLightBlendFilter.m in Sources */, + 843723F21D4F260A002B398B /* GPUImageExposureFilter.m in Sources */, + 8437244E1D4F260A002B398B /* GPUImageMaskFilter.m in Sources */, + 843724841D4F260A002B398B /* GPUImageRawDataInput.m in Sources */, B289F1DC1D3DE77F00D9C7A5 /* LFStreamingBuffer.m in Sources */, + 843723F01D4F260A002B398B /* GPUImageExclusionBlendFilter.m in Sources */, + 843724F31D4F260A002B398B /* GPUImageView.m in Sources */, + 843724081D4F260A002B398B /* GPUImageGaussianBlurPositionFilter.m in Sources */, + 843724A41D4F260A002B398B /* GPUImageSmoothToonFilter.m in Sources */, + 8437241A1D4F260A002B398B /* GPUImageHighlightShadowTintFilter.m in Sources */, + 843724821D4F260A002B398B /* GPUImagePrewittEdgeDetectionFilter.m in Sources */, + 843724CC1D4F260A002B398B /* GPUImageToonFilter.m in Sources */, + 843724801D4F260A002B398B /* GPUImagePosterizeFilter.m in Sources */, B289F1E11D3DE77F00D9C7A5 /* NSMutableArray+LFAdd.m in Sources */, + 843724041D4F260A002B398B /* GPUImageGammaFilter.m in Sources */, + 843724B21D4F260A002B398B /* GPUImageSphereRefractionFilter.m in Sources */, + 8437248E1D4F260A002B398B /* GPUImageRGBFilter.m in Sources */, + 843724161D4F260A002B398B /* GPUImageHazeFilter.m in Sources */, + 843723BC1D4F260A002B398B /* GPUImageChromaKeyBlendFilter.m in Sources */, + 8437247A1D4F260A002B398B /* GPUImagePoissonBlendFilter.m in Sources */, + 843723E01D4F260A002B398B /* GPUImageDifferenceBlendFilter.m in Sources */, + 843723EA1D4F260A002B398B /* GPUImageDivideBlendFilter.m in Sources */, + 8437243E1D4F260A002B398B /* GPUImageLineGenerator.m in Sources */, + 
843723B21D4F260A002B398B /* GPUImageBrightnessFilter.m in Sources */, + 843723C21D4F260A002B398B /* GPUImageColorBlendFilter.m in Sources */, + 843724241D4F260A002B398B /* GPUImageHoughTransformLineDetector.m in Sources */, + 843723A21D4F260A002B398B /* GPUImageAdaptiveThresholdFilter.m in Sources */, + 843724BE1D4F260A002B398B /* GPUImageTextureOutput.m in Sources */, + 8437241C1D4F260A002B398B /* GPUImageHighPassFilter.m in Sources */, B2CD14771D45F18B008082E8 /* LFH264VideoEncoder.mm in Sources */, + 843723C01D4F260A002B398B /* GPUImageClosingFilter.m in Sources */, + 843724AE1D4F260A002B398B /* GPUImageSolidColorGenerator.m in Sources */, + 843724ED1D4F260A002B398B /* GPUImageMovieWriter.m in Sources */, + 843723AA1D4F260A002B398B /* GPUImageAverageColor.m in Sources */, 84001FDE1D0016380026C63F /* LFLiveVideoConfiguration.m in Sources */, + 8437247E1D4F260A002B398B /* GPUImagePolkaDotFilter.m in Sources */, + 843723D01D4F260A002B398B /* GPUImageColorPackingFilter.m in Sources */, + 843724F41D4F260A002B398B /* amf.c in Sources */, + 8437242C1D4F260A002B398B /* GPUImageiOSBlurFilter.m in Sources */, + 843724941D4F260A002B398B /* GPUImageSaturationFilter.m in Sources */, + 8437248A1D4F260A002B398B /* GPUImageRGBDilationFilter.m in Sources */, + 843724781D4F260A002B398B /* GPUImagePixellatePositionFilter.m in Sources */, + 8437245A1D4F260A002B398B /* GPUImageMotionDetector.m in Sources */, 84001FD21D0016380026C63F /* LFAudioCapture.m in Sources */, B2CD14751D45F18B008082E8 /* LFVideoEncoder.m in Sources */, + 843724B81D4F260A002B398B /* GPUImageSubtractBlendFilter.m in Sources */, + 843724741D4F260A002B398B /* GPUImagePinchDistortionFilter.m in Sources */, + 843724D61D4F260A002B398B /* GPUImageTwoPassTextureSamplingFilter.m in Sources */, + 843723DA1D4F260A002B398B /* GPUImageCrosshairGenerator.m in Sources */, + 843724A01D4F260A002B398B /* GPUImageSketchFilter.m in Sources */, + 843723A01D4F260A002B398B /* GPUImage3x3TextureSamplingFilter.m in Sources */, + 
8437249A1D4F260A002B398B /* GPUImageSharpenFilter.m in Sources */, + 843723C41D4F260A002B398B /* GPUImageColorBurnBlendFilter.m in Sources */, + 843724121D4F260A002B398B /* GPUImageHardLightBlendFilter.m in Sources */, + 843723D61D4F260A002B398B /* GPUImageContrastFilter.m in Sources */, + 843724BC1D4F260A002B398B /* GPUImageTextureInput.m in Sources */, + 843724521D4F260A002B398B /* GPUImageMissEtikateFilter.m in Sources */, B2CD146F1D45F18B008082E8 /* LICENSE.markdown in Sources */, B289F1DE1D3DE77F00D9C7A5 /* LFStreamRtmpSocket.m in Sources */, + 843724CA1D4F260A002B398B /* GPUImageToneCurveFilter.m in Sources */, + 843724D21D4F260A002B398B /* GPUImageTwoInputFilter.m in Sources */, + 843723F61D4F260A002B398B /* GPUImageFASTCornerDetectionFilter.m in Sources */, + 843724901D4F260A002B398B /* GPUImageRGBOpeningFilter.m in Sources */, + 843724861D4F260A002B398B /* GPUImageRawDataOutput.m in Sources */, + 843724F11D4F260A002B398B /* GPUImagePicture.m in Sources */, + 843723E41D4F260A002B398B /* GPUImageDirectionalNonMaximumSuppressionFilter.m in Sources */, + 843724DE1D4F260A002B398B /* GPUImageVignetteFilter.m in Sources */, + 8437245C1D4F260A002B398B /* GPUImageMovie.m in Sources */, 84001FD91D0016380026C63F /* LFHardwareVideoEncoder.m in Sources */, + 843724A61D4F260A002B398B /* GPUImageSobelEdgeDetectionFilter.m in Sources */, + 843724281D4F260A002B398B /* GPUImageHueBlendFilter.m in Sources */, + 843723B61D4F260A002B398B /* GPUImageBulgeDistortionFilter.m in Sources */, + 8437243A1D4F260A002B398B /* GPUImageLightenBlendFilter.m in Sources */, + 843724AC1D4F260A002B398B /* GPUImageSolarizeFilter.m in Sources */, + 843724481D4F260A002B398B /* GPUImageLuminanceThresholdFilter.m in Sources */, + 843723C61D4F260A002B398B /* GPUImageColorConversion.m in Sources */, + 843724561D4F260A002B398B /* GPUImageMosaicFilter.m in Sources */, + 8437245E1D4F260A002B398B /* GPUImageMovieComposition.m in Sources */, + 843724A81D4F260A002B398B /* GPUImageSoftEleganceFilter.m in 
Sources */, + 843724961D4F260A002B398B /* GPUImageScreenBlendFilter.m in Sources */, + 843724541D4F260A002B398B /* GPUImageMonochromeFilter.m in Sources */, + 843723B41D4F260A002B398B /* GPUImageBuffer.m in Sources */, + 8437239B1D4F260A002B398B /* GLProgram.m in Sources */, + 843724E81D4F260A002B398B /* GPUImageZoomBlurFilter.m in Sources */, 84001FEC1D0016380026C63F /* LFLiveStreamInfo.m in Sources */, + 8437244A1D4F260A002B398B /* GPUImageLuminosity.m in Sources */, + 843724FC1D4F260A002B398B /* hashswf.c in Sources */, + 843724321D4F260A002B398B /* GPUImageKuwaharaRadius3Filter.m in Sources */, + 843724E21D4F260A002B398B /* GPUImageWeakPixelInclusionFilter.m in Sources */, 84001FEA1D0016380026C63F /* LFLiveDebug.m in Sources */, + 8437244C1D4F260A002B398B /* GPUImageLuminosityBlendFilter.m in Sources */, + 843724FE1D4F260A002B398B /* log.c in Sources */, + 8437241E1D4F260A002B398B /* GPUImageHistogramEqualizationFilter.m in Sources */, 84001FEE1D0016380026C63F /* LFVideoFrame.m in Sources */, + 843723DE1D4F260A002B398B /* GPUImageDarkenBlendFilter.m in Sources */, + 843723D41D4F260A002B398B /* GPUImageColourFASTSamplingOperation.m in Sources */, + 843724921D4F260A002B398B /* GPUImageSaturationBlendFilter.m in Sources */, + 843724261D4F260A002B398B /* GPUImageHSBFilter.m in Sources */, + 843724E61D4F260A002B398B /* GPUImageXYDerivativeFilter.m in Sources */, + 843724401D4F260A002B398B /* GPUImageLocalBinaryPatternFilter.m in Sources */, + 8437240E1D4F260A002B398B /* GPUImageGrayscaleFilter.m in Sources */, + 843724361D4F260A002B398B /* GPUImageLaplacianFilter.m in Sources */, + 843723FE1D4F260A002B398B /* GPUImageFourInputFilter.m in Sources */, + 843724061D4F260A002B398B /* GPUImageGaussianBlurFilter.m in Sources */, + 843724A21D4F260A002B398B /* GPUImageSkinToneFilter.m in Sources */, + 8437249C1D4F260A002B398B /* GPUImageShiTomasiFeatureDetectionFilter.m in Sources */, + 843724601D4F260A002B398B /* GPUImageMultiplyBlendFilter.m in Sources */, + 
843723E61D4F260A002B398B /* GPUImageDirectionalSobelEdgeDetectionFilter.m in Sources */, + 8437246A1D4F260A002B398B /* GPUImageOpeningFilter.m in Sources */, + 8437247C1D4F260A002B398B /* GPUImagePolarPixellateFilter.m in Sources */, + 843724381D4F260A002B398B /* GPUImageLevelsFilter.m in Sources */, + 8437248C1D4F260A002B398B /* GPUImageRGBErosionFilter.m in Sources */, + 843723CE1D4F260A002B398B /* GPUImageColorMatrixFilter.m in Sources */, + 8437242E1D4F260A002B398B /* GPUImageJFAVoronoiFilter.m in Sources */, + 843723CC1D4F260A002B398B /* GPUImageColorLocalBinaryPatternFilter.m in Sources */, + 8437239E1D4F260A002B398B /* GPUImage3x3ConvolutionFilter.m in Sources */, + 843724021D4F260A002B398B /* GPUImageFramebufferCache.m in Sources */, + 843724501D4F260A002B398B /* GPUImageMedianFilter.m in Sources */, + 8437240C1D4F260A002B398B /* GPUImageGlassSphereFilter.m in Sources */, + 843724DA1D4F260A002B398B /* GPUImageUnsharpMaskFilter.m in Sources */, + 8437240A1D4F260A002B398B /* GPUImageGaussianSelectiveBlurFilter.m in Sources */, + 843723AE1D4F260A002B398B /* GPUImageBilateralFilter.m in Sources */, + 843724141D4F260A002B398B /* GPUImageHarrisCornerDetectionFilter.m in Sources */, + 843724C01D4F260A002B398B /* GPUImageThreeInputFilter.m in Sources */, + 843724E01D4F260A002B398B /* GPUImageVoronoiConsumerFilter.m in Sources */, + 843724721D4F260A002B398B /* GPUImagePerlinNoiseFilter.m in Sources */, + 843724701D4F260A002B398B /* GPUImageParallelCoordinateLineTransformFilter.m in Sources */, + 843723F81D4F260A002B398B /* GPUImageFilter.m in Sources */, + 843724881D4F260A002B398B /* GPUImageRGBClosingFilter.m in Sources */, + 8437242A1D4F260A002B398B /* GPUImageHueFilter.m in Sources */, + 843724D41D4F260A002B398B /* GPUImageTwoPassFilter.m in Sources */, + 843723CA1D4F260A002B398B /* GPUImageColorInvertFilter.m in Sources */, + 843724D01D4F260A002B398B /* GPUImageTwoInputCrossTextureSamplingFilter.m in Sources */, + 843724421D4F260A002B398B /* 
GPUImageLookupFilter.m in Sources */, + 843723BA1D4F260A002B398B /* GPUImageCGAColorspaceFilter.m in Sources */, + 843724681D4F260A002B398B /* GPUImageOpacityFilter.m in Sources */, + 843724D81D4F260A002B398B /* GPUImageUIElement.m in Sources */, + 843724981D4F260A002B398B /* GPUImageSepiaFilter.m in Sources */, + 843724461D4F260A002B398B /* GPUImageLuminanceRangeFilter.m in Sources */, 84001FD71D0016380026C63F /* LFHardwareAudioEncoder.m in Sources */, + 843723BE1D4F260A002B398B /* GPUImageChromaKeyFilter.m in Sources */, + 843724761D4F260A002B398B /* GPUImagePixellateFilter.m in Sources */, + 843724641D4F260A002B398B /* GPUImageNonMaximumSuppressionFilter.m in Sources */, + 843724181D4F260A002B398B /* GPUImageHighlightShadowFilter.m in Sources */, + 843723EE1D4F260A002B398B /* GPUImageErosionFilter.m in Sources */, + 843723F41D4F260A002B398B /* GPUImageFalseColorFilter.m in Sources */, + 843723A61D4F260A002B398B /* GPUImageAlphaBlendFilter.m in Sources */, + 843723D21D4F260A002B398B /* GPUImageColourFASTFeatureDetector.m in Sources */, + 843724B41D4F260A002B398B /* GPUImageStillCamera.m in Sources */, + 843724B61D4F260A002B398B /* GPUImageStretchDistortionFilter.m in Sources */, + 843724001D4F260A002B398B /* GPUImageFramebuffer.m in Sources */, + 843724C61D4F260A002B398B /* GPUImageThresholdSketchFilter.m in Sources */, + 8437243C1D4F260A002B398B /* GPUImageLinearBurnBlendFilter.m in Sources */, + 843724EB1D4F260A002B398B /* GPUImageContext.m in Sources */, + 843724C81D4F260A002B398B /* GPUImageTiltShiftFilter.m in Sources */, + 843725001D4F260A002B398B /* parseurl.c in Sources */, + 843724EF1D4F260A002B398B /* GPUImagePicture+TextureSubimage.m in Sources */, + 843723C81D4F260A002B398B /* GPUImageColorDodgeBlendFilter.m in Sources */, + 8437249E1D4F260A002B398B /* GPUImageSingleComponentGaussianBlurFilter.m in Sources */, + 843723E21D4F260A002B398B /* GPUImageDilationFilter.m in Sources */, + 843724661D4F260A002B398B /* GPUImageNormalBlendFilter.m in Sources */, 
+ 843723AC1D4F260A002B398B /* GPUImageAverageLuminanceThresholdFilter.m in Sources */, + 843723FA1D4F260A002B398B /* GPUImageFilterGroup.m in Sources */, 84001FE01D0016380026C63F /* LFGPUImageBeautyFilter.m in Sources */, B2CD146E1D45F18B008082E8 /* LFAVEncoder.mm in Sources */, + 843723FC1D4F260A002B398B /* GPUImageFilterPipeline.m in Sources */, + 843724441D4F260A002B398B /* GPUImageLowPassFilter.m in Sources */, + 843725011D4F260A002B398B /* rtmp.c in Sources */, + 843724221D4F260A002B398B /* GPUImageHistogramGenerator.m in Sources */, + 843724621D4F260A002B398B /* GPUImageNobleCornerDetectionFilter.m in Sources */, + 843724341D4F260A002B398B /* GPUImageLanczosResamplingFilter.m in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index 52415b0a0f60f1ad2a3811bffd6536fcd5df2908..1eeb98fc49a942faecdcd4eda90ccebd4d20389a 100644 GIT binary patch literal 17738 zcmd6O2Ygdi+xR*6+}n|kq$_QjmL^S`&ZOB*ncYw*r4!05X&c%|nv^63Dx&9#Aj%R^ zkfpR`i!3LIC@M}A0de5Kh2S1IaPpm-+q4a!ukZJN-{1GA{F2-=p7WgNJkN8UbsO95 zHn&Hkc^yDN0R(8kfB@zt=H>d7*;bd^=5*AjI2*6CHhJ6=Q!K8gRvT%qP4PI}BLQ5r zX-@=v1O$R05Cx(^3`hg%AOmEAERYRUKn-%h5HJ)B1H-`xPy)t)a!?JXfT>^_XaJ4C z3TA-oKpSWW9xxYRa0^%nZUeW2)nE;{2do9_z$VZIwu0SY57-O#f&Jhia0omB4udDb zGvH-#47>tPgEzpN;4SbrcprQOz6IZb@4-dz1NagA27ZSS215Z1fuS%A#y~NQg&8mt zX2EQzf@+upHP8Ta;Se|i7QtdT3YNnvSPdt^$#4ps3a7ya*aSOZC%his2ycQ5;4N@5 zybUgc%i*1H6L`--GYNv+x7>A^Zq_4Znfk!r$Q^@K1P|3ZX)&Fe;XkQVEodl2fUalFFu3l$Oe) zMo>l6D5`=QN7YahsVP(=HG{HKPO5{NP0gW}P)n(0)N*PCbqBSQx|3Q(-9_C^t)|vc zT~s&KLv5yZQIAlMQjbvwsDsqw)KTg=>Urt~>LhiFdYyWoI!k>(eL`KJE>XWwzf!+Z zL#f{pKnT%@LBU9j;*k_3AsI?RsVDY|Q9GK2Txc=64c(5GprvRTT8>trJJ3qB7Hvkm(H`_5I)Hl6L39{BiJn2v zqnFWZ=ymiKdKZ0!K1QFR&(RO)NAwf=8U03w(c!d^7SR!OBppRZ(=oJyPNTDF6>X$V zbS^!NE}%>3QhFpkhAyYa(p7XdJ&kUlr_&a?k#^8dx}BayyXd*}e0m|hh`yCxLEk~I 
zq}R~*&>QG(dKN9Q`Bx6a6!NiT;KD zmHv%^48;U7!Aukr&BQQbCYDKL6if!AV$@7AQ^J%oBbiZ588ez0!;~{)nM$UHnZQh8 zni(rIgK1%$%sl1>WDyYo~h>`RB4)^d)9(vDv876%>@J40={35W+$kN^@fg9C6N4#L4$fJ3%{Bp?HFkPH+c1*GCo z9EK0!C-E`-75*B_2^xhit7T@p)8_EF3z|H(*%ptrxj__PY@K20uzO0HZ62qq#zR`& zrA}8}b-8?!x>JCS-)$Or4EFPk)8bq3Mi`!G=Y-_h$3BXEDhq*mu zhy1ici`&}looMrPR$JShF0yr@j|vlPj%Mc^cY`Rd+S)|s@JRT2IUIt06wm-8P;LfV zpaXhfz~NYkML1$JFo9fP26;FVCu0ZR!tFGzzRB5aRTHw0v3b<~Fx8cu`sLdiM7jJ( z@29@f<>b_EO-DP2<(2dN#x#hOf44@Bv%}S7Eh0$#3uMZjExyhA_L&a~fwBt};HWN8 zgvD5X3X~Gg8wo~%GBBFsy+%UP$>iH*wUZ;5ZN&h`;OGXCZidZn9h{SsQ|Bg!kYj0X zvpI6gOUua+LWcpr${aJCYVS345{v~EU>vBN#51tN(q^sae_Gvj6*c4px=D9ceMQYU z@15ksvIa~5${tV)>cDs$i{r4Q2h@X!U=ohU2{@5Eg+YY=4VC3(HMI>T#bveQsvBxb z%1erB%f?kSj47GeAd+1*V0>9kSz&oeH5tVZxf(`EePuyKO)rd-UZI>0ESs>D-%1l` zCL}6wxh$RCpb1nFuAMhelP%NC&z8+Io5?S;$vl5PKd}YaxH@F_m!wQ_*4d#OzaT+f6=Yj>^6HjRn4ZhZi6XBdm ztc3yR-Nys!xtw6M6F$!w$W@EI@Ra|YgZ$v|nsEuZgA>oCU>R5rR^W7;firQ|X0Q_6 z308r-a5gsMJY2*vM!KIU@uH}9dqt+4$fnv&Skp7xW}VX@%D%?XN|$xE&Dr6uwYXZW zy^>tSY2RxPud&*RP+OaQa#rqa@+vMeoR?vr-1q7Pe#L>neP^>CY~&&}fO69&PMCS2`51f#lv}_j;2iiAoCg`r2eRgfFr zJ;vIpQ)~JG5i|5kK)y9{|GF#h>MrF>p69MwzoyCMwA(8=p^ldo zul|8XVB3UczD_gD^AjXE6b|!m$cF`d!+8Go_suMUr31m15w!+KW9tC52FJn*uo%3A zTks65#DzrjjLTTRCAHNDq6Us9ng`axI^2qF-Eab|$JgPR|EA`-reg9fF&$d`x6=&m z9Mf3g4A=r&p$%RKXJRLwh26LV&%vE|-Zt0<9ncBe;VkHaZag0^!prg9cs;%scl((S zaIW{dZ*LHd_&3dmUX!B6>gK`}YC@4d%M{M{0ws4>#lH)bjQ3bw1op_vy0SJ)i?yV! 
z-P2j>ZS`N{X73ure;9eKHTsnL0L$kp*o9V0M<4LEfg8Bh3!Ip2rT=T-e~I`~FYLMh zDcF8^uk^yc{y#<(zf)c91%5+=XvnpQ@;8=d{~2&c=)LXWT6ixp5#c(x9&Uge@s0Q< zj4`_z-Um0qF4&E4#tZN*c%fIrRl00##8md(q_sZ7g1F6Bbd9&UZNx+`YPGm5#Ljb7 zTO2LcsiSPo)&je|%WCjPrx)=jly(od4k4 z#RG6Zw~O6y58Mm);al-yd>g)fGkg#}M0W8AUV@jBT`c=Q-o>j^=e#|E+lARYfBZc2 zzzsYFk8&G00-uJ@z-RFadN~_;2@Paa!^*(H} zPnn6wXPw(_aWvPuEDm>*%hv9hNFrm_xs?`AE8dSm5Z;FOo}va*LqG_L77wF_6W^1B zL40G%t&SE?D*-d0ZM?E&l~H3jx{byUby4N`;eSH6N{((-_>pVTEs3h?4HG@yxJOO#m`OHwiQU%1 z-G1am_~v2~8)>qZxhL32bdcC-B>pw9!*`Ix`sAAnxCE1j1V}nu1W0cq52V!6X0sDh zJ!T+)u|$OJd|>m66^opXCKrhSPqwwUIvrN0J5Q}|5b2v|dz!sCsfc52tK_;W?IaG~ z>a;goT`J$;!R`Hxbrx5f$x!K|+&$C=PT#Dh)=}&6EBIA>yocII-Anxa*YKB|$4^4g zg!uk3=H7TTF;i0}jZ}9jkuUf$m zJz}glT_jvi+_}FWSZHaQSz~P{mJbQtH;6L+el)*^S1l;=gS<{D324evDd2Ogs6*6K zSK)qydYXC$zlGn%XUGkc&(8x@vO2Gm=R2hrsaLL=`6_js?D}2&9=B^nzKEY%J-BbV z)70BnO*%upL%oa7;t%kLWYUOl{qQ_VeMl7GN7Tn$9K6}$v5bqLEREh!Hp!?$3eil! zkfHx9llnklkWdsaO-M=2(i(Dm^E| z)^s#AS*^|1=2H=of+(@%8d;(4=on06raLx{Bx-_j$oPtyQm4a1Mix2kPS@*+gOUU? zd9s4E*VwMNl2vDv+AS?^^316cMZS{Xd_;L~+)HxQJzxvi26m7f^<&@&i9((r`RETx80}MV zfn=ZqU<3)S$VvP}MY7F9VHwFXSHMbGM>5RQi4W}}8RqNZ3X)OYMl#2T;1Qn66y(22 z=G^ZedKmd4 zKEdbk>-bZAek)N1-%}TMiNcW( ziBJTJq=YCMr*hUN{sw=CFXA8Z&&2xth10uzQ3xdQns?v+^Grrge=ckwoIzeV-~Trp zcBJsa{^36bd!WCL(!H>M`mY27kje}E(tiy2z%T~Vd4d1>Ux*)hgAd4vNQ7`3GND{# zMtNv38iI!6-|-*#Pkfn$fQ68S)HXC6jX?Pz9GN*YjD?7WGz%HRntyujF!?khR(#UB z)J0S@$;*(2aV}!YSeyHD{I!-wVhwq?h$l(JHi(9ElY9jLew__Ey-u53WF-Deks()Q zFq?F$LSw0sI3Gp1np~5vFt50XZ6aR-i)wDY2R9I%) zS~#PKGt2o5M**pRXb`EmjbHO9>O3}*$Lr*V^(EMNfPKJt5PpZ@cbX&~W@~X=0|JK< z58q~Wb1X!rm6BvV=iZT9y@141N!rX;{@^aRbUHgom@}#`q}iWv^%2&M=5vJgpbj(} z%|UZfC%PWZLpQK6f`ySRjACIl3u9O)W??J~<5(!!fo?=M5rWPje+$qpXd%bp@hr^Z zs44gUuV7!8|aA)jRTVo~d|5-#UVVxEkg1JFRKL>$c)S2IUlZg0LEQuKJff{lN! 
z>1b>tqbCiRD2gI8dJ9g9t&JTmJV|A}P5s3qq*BCXbrVzFm#dBN4sWr#1|AN#wy&Lo z^g6l|twMK^E97o43avr+5JQABFnEG;TdCmC7sa+%9k!0%)E6g7*0~X3p3w9>(F|##s;(z-AmTF8+B1bQ8((TC%%-u*=2Q%Cnh<;eR2~A+Tr1^ zZCP_~PLwx)xVi%WT}~!Ozi-FfT4^lIU}1s?oJL#FRok0JJJ1U7kYs3=9R&Q zY5huo=PWX!&0=%(H;{8h2(-KT)}OC zGn7tnv|dBFvbX#Noknl)23_Cj_$erVIp{`ja%ge(-#;17@BD3ara`3m2UEJyI~??X zGSsKj-a}_eum-))!t5^e0Si@}Nu0`yVy{m8D-GcT@d^5r196Up>MnGig*jMWJC=xm zum2154T$VPU!t$j*DTbqP|HGH5BipH==Uttdl|BvE85HOTh?0^S3@opM~nAoU*QhX z=OjYnKeo5vra)0~O&2l^9TrU4CUiYE4lk%cA} z=CaVt!aNoZX5o-+G(!i_fpif06M%Uv9LmCBEG%Z>Xz%|q#88$`;Vu}?E#b7!WFA8= z?&OC5D@Nslir$;ESJXK#l&B1!uDC{@)Yn%smuNaJE!Z#SlYM=p2!!a|CTE-4!nv>f zWv1rJkVxRE_he`>od6>Lx)y>?qGfz71Ph0=u!vMc7`&9EQwSyLR2GgHki@0a=?pH3 z%fbQ{<_}2YIwO4RscG%NG%l@!^(-vp^0?&E;Ir7Z11gNJ**HKk(`Gu4Q_M6ui!v6L z`1I%~9?9Vxk|S7H+NTxiLQX5PaO9Or@NX4Yzpnh>7ccS{Rd5)MW8qjVy}C#)Pp6sh z3;5GDbUn!#(6w|OJ)WMxLNck6g;gx9-b_!VCsE(i#GI>PNgDhD3#VgweYtgpw;+m0 z08tcu7k1x;KZzG+&Xkx~ZEI=u4)&c(6Wz+K)=XRJ8FUK^Ygt&w!tpGeuo-rt8KlZE z3Ex6!MhXo>xwDdvx43NFWlAhq>m1G^a93OAc%xiIG~@kydquOR$;mm=GQUpmbW-?9 ztT=B3-pQHnV!t6?=$t(Hkag1?MBR8x!syv7oY)&*raMVd3Vl6IL~{}gr}o7L=^JSh z1@13Pp>L)auy8U9r(i=L)fD;jur~nVTVyf4#Bb74dKn9+v9N&zgZs!wX0`jxx|6<( zphmA^AyG8Mf|hf{iXaw^+iABF-9S>M+yVF&Sxc|;D>kOx> zmnX>#9wcYn)fA28uB;CCmC$^f{+<4lq+#ekSUA6nCVu>lKDOcIzrfAG>K)7=hU7lG z7@CDQbuj@f#8}?1PJM#}OgK4ACWHxP!dS?%@Mab+=pj$!&-*~0ZzkT6*ZrTvd6Jd& zq;R~Mn7~9`3@A3%8A;iI*`)8&qf8tV--j$i9EnBP;9GSNLyE$DsB!D`)d(^vjIw`L z8VeWq&&p)7c$xhNtmRuNhasg?I~WapfzdH~#=saE6O+pj<6?=IS(dVpXqV+IT*1OS zSh$jfckX~)%n)WMGmLzXVDialA;&UAv)skPySbmMS-6Ay-pTP0msu%sxty*m4~=j) z=_27~yQQ5}qLFUSc;ednxWQYl!SS0{(-wB}o|E?$=~dl42XX5D3UJ&|(#AzLcuu@} z`N)CGk90XZ+R21iZ+*BfPh;jn8vLp04ZS7eOog}Z%zI-pRRez4l8_}s?xi)oF-xYN zs2pY@L)85}EZoqia+s;ibfQO?X-oqP*RpV3H#o&KVq%r7$A)TKbG4_ls>OFUt;|gS zv5cJ|3Tz__xAz}wqEL--D%Rz;nzk%tB@n zb1Mj87K5YAZOrXp6tk3BN?s%CVU{sV`6t5uZjvh`Z-%sxL+)sERCKg8a^=jCR!8$y zjS)?L6+0w@O*~qAZ*g{sgVYL;DWr*?TW)h$`@s2D8Q6FwIA;?EQN11@Q4E`VeLm(6 zW+gdul8dY*KEF>{`RLNa+{vusyq3QjaBZKkK~z9cnQ3*&h>hDuLL;&n7Q)=kvN<+S 
ztIRsr<{=%;R+(0o-r=^&RI_Csr_61&%509=y&KLT?@M@dJF>BsrdHKBSuenJdvk`& zD^2{ltO49B3RO1#Y5f3>yx05N$4XsHkMCw=HnVV7@6E_;1NI(fJF|n?$-)O%xR-?w z_Velk%-(@_9`VotR_HD|KF1$eVbQkXNR% z$je28!3gql+{5T;^b+x--$fsim*LKlm*KudU!!k{x22|s(4)yqZWVMTdC9Go*uwR6 z3vHuk(rx55w^=l?1?f5Ta{7MyG5R=pO)Z+qVKmGz@=Dq$@;X{MQ$b!to5)Ni8li!) zFiV)D%+~>N0jUA$0hs}+fSdqr!0>?5fa-wSfbjwK0m}kb2XqDO4A>j+aKPb!mjg}) zydLmIz*_-l13nD+IN)5s`M{vStibZXy1?;)^?{Z^Tc9IwVc_z>m4T}Q?+&~-@V>yV zz@32y0-p+eG4PGR_X0l-{4$6N3JwwnDT302GJ~>%v_bkHV^D5TaZqW{sG!k7w+7u6 zv^waXpmjmVgWe4~7j!=8v!E}6z6$y&=u*(HLB9w684QB6f+q#v7Q8?B$>67hpACLK z_{HFtgU<$E2>vSgo8a$)FA9Kw5d;c?1>pjbAW{%5NER3c69tWenSyq~e8J6vTLg;) zO9aaVD+DVAcM8@E_6iON4haqmo)Vl8ydyX#_)c(9@T1^o!7m}EkP#u}AypwYA$1{> zLZ*aF3z;6$65^A2LR&*uhaL_6AuKU0GfWkh6Q&C@gqgxdht-7DhfNBb5;iStW>|ZeE6fwt z88$C$e%MW6%fdE=Jsb8)*qN}iVIPKl9Cjh>^RO?&z7G2)>{2)r9vvbWs9od|c|@~C3q%V=w~B5Py)1f5bVl^9=>3SCh@la~Bl05(BbG+2jaVPC zG2*_63lTp?{2cL1#BY(skyVj3k#&(1BD*4YNA8W>ANf$^_mP*Qf}#Xbp;6&cq9{p} zG%7JFDM}uth#DScjdDk=kJ=oyJ8DnVV^K$=UX6M!>SWaGQEx<@jruU^^P(-$*G0RcJEIpxuZUh5y(;?d=rz%sqPwFvM{kYZ9=$XAX!MuSzs4lTWW^X` z3S-8^jExx=Qx#JaGbLtP%=DPXnC6%nF>K8Cm`7sXk2xRnL(CsBm&H(w#EdvxED}eG zqs3xzoLDZ-5}U;Z;v#X0c%-;YTrI8@j~CaACyA$stzw6Gws?Vfsd%~g4)LAhyTlvC z_ldj2J>o6mZQ{M+1LEhzuZWL}Pl!*6&xqd@zc2nk{E_$@@ps~j;vZu(V{>EkVu!>I zi*?1`6wAggh+P>Y_ z<3`3!j&sDd$GPG>ar5JDi(4MIF0Lo;P~0tO=Oo`qevc1}7sf}#N5#j)$Hq(IrSXaJ>Ud4OF5VDtiZ{m( zjvpF7JU%~ubbMXBBYt80y7)cu&&Pid|D`lpnl8a7NZ6C`NWx01fxJjwA|EL)laG;)m5-BG$*0Mu%Nym*^0o4<^6m1S@?G*Dlc{7n zIUqSGxhi>D^7Q1!Q58 zoEn-cOpQ#9NsUXDrVdJ#ry5doQ}a@XrjAH0Of5+rnL0XkZ0fkw$*GRi8&Y?rKAHNp z5-LT?4CM%Axw1josI)3ul{1wN&Ul$(`XmD`mM zDvv48D8Ezwo)(#AOe;&9lGc`XbJ~WqN7D|b9ZGvL?di04(mqN1BJJn&_;h7@dU{s6 zI$fKtPal;&IsLly`RQxZH>BT}-krWBeS7+u^pDd&P5&(Yi}XtwC?hx{G((sXnURo@ zno*olmeHCqE91rtoUtHdVaD=|%^5o~p3Hb5<6_38Ohx9<%n_LdnZ=o)LG-RnzCkO*|O|e&a7Ekv$Gat-JZ29 z>yE5dS?jVkW^Kyq$=Z^2BI~nkm@UW-%NAuvW+!EdG=2#kt$J@uF6zptJEruN~ap88lfss6{|+7 z%2Z=iwWa=yy>Hs@l_j~Yr7s0r4DXhfPQjaU<>k!vzFxtgJx 
z;hKC+iDr~$jApE6f@Zp=P1CM%Yi4UYHS;tpG#fM^()_K@~T?Gf!u z+BdcDYd_R}qCKzuTKk>$2kp<=UvvpNjjmERRd=1PP1mk->t^dZbvNj4(=FAl(A}xK zOSeY1R<~ZaUw1@zO82u~sV~u2>+AIO`pNof`ssR?{wDp+`i1(%`X%~h`ZfB^`up{} z_51V>=^xi0)*sP7tA9>^LVr^Kj{bZ7Wdmhk3_*qvLzp4nkZ6z@6b7Xs-C#7B4MPmW z4F!fGL#3h0P;IC+%rV?zSYudc*l5^f=rL?H>@e&y>@n;&95p;|c**dJ;Wfia!)e1? zhBJou3}+3$8pDmrMy;{TIMvu`TyETM+-KZxe8~8O@v!kJmnJdlJ=E>$M=4s}cW{0`m>^9Ff&o#5=#pWgE<>r;< zRpw3RE#__J9p>HUeddSEkC9By(rZoH=vOnKS=$ zZgoqe!|l=O-Ukp+009~>Ab@$5d2N(pirwXQIGaZ)oYfQUH6Hh9h0Rsd;2_Oq3Xiiz z4B-4#+eB~=2n0bO5=cNVpaQ8N4Wxq%pavSC1)0DK`heR&Uyut1fx%!HCAF8n6~@1e?HCunjy9UH~tGm%u)-AK1VFa1gu$ z-UaW0Bj6}_AAA5l1fPR5;0y32_zIi_UxRPJMQ{oH3NAw$GB5xJ!XOw7L!cDOU=)3x5C}0iM~QVqhHVkbP-*mDH_o<&CmgKFdab) zX(=5;C({Z#h1SqoI+HfgR=OWuNDrh3(ZlHxbO}9%uB9i^&9sYl(;j*nJ&R`PdGrGM zL3$CroL)h%rPtBh=^gY=dKdjV{RaIe{TBT;eVBfSewTiaK0+U*KctV-C+RcvxAb@P z_w;%C68$TEnE^}~6V3>j2u8?Am;@$~Q8Fqfl~FSmrY|#q$zgJtJf@Ht$lSpUWr~^c zOcgVMu`$(54dY-YGA+zxrj41-%wXm+cQN-e4=@ihiQKQevDb!>eOa+?UY?Dq? zXg23rt=U;RbAjGBu@uAs<$53kQ9ur&K@8{(VljgQa3BuC!8inmt_SfT0VIMXkPH+c z1&85qya_*rpU0ozlQ@>Jk?XSCCbc*n%^r77jmI&?=CRjS38VAvb+*<k^22g%ZhNhFpu^KvVsCM}$kHQxQW)uIu60gzR|%s^>@{Qz&xG$O$3w7B0y>Zl zlpP=o=z#$Xa0C{103$E~GZx`EJc?VPYE+H0)~+Su9^&w5{b^~7ob^7sRYK)o^(=L^ zx@zosgjAK#%y;zuH4Jw-Ip18`+QKd4`U(8{__)3u^ask-pdS{m1_N*}9D5Yx5k1NW z1)vZNB@91co2E=OSLYEX)0oGj&~_C~T+d#z7sMa~-UhP#h=Eny@Wy&8olV4pVJ z=4rq&7zE)+EI$hDppFo&2MxecB@`8x4)*mavNzXz8VJcAZ56i0R=W@iX&lW9U=nB~ z0__^p4w^u5&j!Ai6A;#U@~xl;#)qi5=Q1%lwB_wo~{S9l5Nd#xomCi zzypf;)~R3`8B|<4wAEv)ZnWn+hygmAZLYQsFcq|c;<3f0-EF>YnE~c15Xg!|z_dGIhK0oP*e#A-fWJWF?<8!lr?D1iVja#}19pRF!5**|>#+f6V+>LloCNs&Kd+ z#E$YBY%Uw|QmztPbG?1sAV;k|r?GLc+udp>hf2zIgM2Q{e^heLi$GdybI%_=L#IyA z8%)MoL8E3EOjdP*ZkBHqFN4>)RlEXT1+Rfa*o-aMiud6wwQo8rx9Qfg;OZXA|M3(Ri9*75#B@D)~WJiZd9+4}LkKzbq zWf^CUnnC{dr1cxP#?iV0eg{{gO% zHu-cBhC<=ZIm4$tsAe4yUzQN~mW z`}~IL~IBEhcKP44xcDGvM8Dxvx&U5C5uQyk7# zcbUyqZ|{t8^SJxM&Aa>Dl+O+Nq7u@bk6V3#D9=b0KVwhgon8yx0gFgn2Z!LA)o>`T 
z<(48MflDdJZUtd?u*crS$8&H5ENh1)uoTw(19Cq6P`FcdBFT017S0Cj_(SsypdN2Z02IGDxvsB;dtX$=pykebmPgm z_xo@PiC@QoP&gG%gKcp7Xty`?CBY8%U^jN*w*N|RIn(KNc9Y~K-@0Z)ma~{S5aU)n zr5)Z0=i;e&8jh{tH?)G^%9}Djx=XjcGK2TR`>KSoemZx(+H}DC;X+Opf8FioF|Cj1 z2jN3NxoQ<>)x7mQ3LodJXAxWsm%zvHOgszE#&g!drEnR10xriG--lP?jsKzbWcaPb zPrq|p9gVfa8^~?R-PwLqTl)*+`HxJ~x6Tf@mNTI>m|YFm;XD7zgf?*|v>9%}bMZah zCbSLi<|$#1%Sa6evv@8yjq)vxO- z@ExGs2w#P-!9(zM_y&9vz6IZghw=S*A$|Zqh#$fa<45qLc+p1qE_@Fjfk)x{@B{cE z{D=e$i}4ct7+#8(;V1BNyrMI7uw?6WS%nr;RzCSO=4a`2dQ+asP)LHi&PN3Dxc*V+ zuHUf3`3`qYqm$qV#pELE9%66PYjuQ>(VClO%*(Zy40#247PHx+E6mdwEd}IJwzbeu zkW-kKWzk!5jM-*`CCiXqXfkzAZ@zJQA7{o}KeFEVNI8ku)f>1jt#LUU8;5f^3t`!} zFJHhf{SD}A_zmB{`=jsR_p9*ZyixxEeokq&|gkvliR-FiOoiHV#J zd~*($Ch!obrqxA=bT;xt3T;h}Msj%d>PcWI!IT<#aO=7m^PJ5!E`s4zI$9c>&332T zsx?#zt+i7;wO*bye0v|GIoo74TXoqwYpum(v)IkqW~d;54U{Dx=Er3;0F+QU^7X8buDnefR`-91>`eY>7X1 z*oiZev$?djiMuQEyH#qh=Rj2=tf(7?3~S+_7V?M#d;F=DIXsPaUiYhD*;;A>WuvO8 z8mboW$FJfy@Z0!Emm{a@sYx8a4U~hLi2sET;Da4hBh^IseHp*P^Q*jJ20!-5b=2u1 zFld!9{VH}PBCs*?u@`Jp9!x~6-Odh~`t zi>Sra5_}lHgWn~C`v2(1=Mm}&V$aK|6&xa3YxCHKiKq$HzHAw!5X}VK*7xsa$({-f ziV#M}^j4%~WM!K>@%!MA(6Dd;0X{p0#iOFc%jS59xmULm(3{})VHi~u+6X*WdsHM2 ziImE30`<2`dSMzf?XoC>z6aybisI5jXS0X&%yTw6UGK-n#fKy$CMA>hQpa>VnX<01 z(N^y!zd7%cJV42>Tof+kzyd>F1HiNWXLeqPQLxzT(7_3!-b z{D24Wd*0)gdXic-uIsqXC(74BnpT5iB53ZH&k{PQO+dMh*#BB;9W{~KKy4&m;3$3{ zAH*Nv57!Z2u!VYx+DiUzAs*o)d<=g~{(i*yv82B?;Ig(B;=2D$C&CKpr21=0T&Ge` z8+V-io1PrD=IwcedX|{p9%?VA>l|wDMNbx=`sfrs&dcX{>IGgD_IB!dQ0x>&fHJa! 
zwFK#J0}14>2P71SmJTAhnLcEnvxO6Rm%1O8ttRO(gq!)_FT=7;Fdbm(HR=%cI_O8e zNxenAO&!LSScN~ur|{=kjK9EN;ji&I&h2(3N~ojW>;7l|Cuk>i%uD?AKLykFL{h1f zUfO5=F~)mhz|`kn;$Qw>g74H>?;L0U(X0XWotNl0|1G}Ur5~`(OVH)B)L*FIN#2CI zKwYFRQNL1`so$t8_&fYPK97IEKjNS8&+DnH)F0F}AfPUB(Hi~*U%(fMrk*E9KsJsY zPpphjO%}R{&n8#~X&C0JwY%)KU76f6TQ!N8JY29(@+eipe%v5mn%D1PmENGwGUpkM zn!LPhvnJbO(ra>!g+`54pJ&#YP5NAGejdSsdJ^EL@1CH%*)h4*-i@793FAq3Z#ZMW z#jHX(N6?#LEps{>t8K2q?oQmCkCQr|*{XB>9?56Suokt>}#!D_V3 zn&q`)6wZa+mx!!6BKek#6!=dZ#=>+KPGDgjORmr4*KQU*i(|QnoWJYmkkmB+naJNLGsE8GWf^cag2l*-BE(8gA`3-HmdOgTP;H9LX+Ril^O0m!HmWKm0b7 zq(B_?%{QUIamUkl*xj5Kkzs`-p5XGEmHf{lTeaO+Pk?l&4=1=Rq(=snjf}{I%*cYQ zECeisETmY7SV*&wVPOCZ16dff5$r*Iy)ukL15ggiMR_=zg~6RdByu#da0m;FSV$mX zvM`RPe0z0keZAeqFN_O>dAV|zfhv;oZ?3huYB_Opoqh0%q=K(W;(wR6RyUE}V|ok} zMv@Vo8jWUQIF2nFYIFGTO*9VKfVcyVM^%Vu zpMZrCEEIO2YNDaFEEIXwGMlU6Nb{T4TP0CSw6M9}`?V`|OLm=HxBUBd=FzWjA82^$K7TYCUhcXBkfU2c_J3|hUo1HZ(VxFzmmVJcZ7I)DxmzF#JW z^y--InaaI9urSSQSsC3Ri;&L&pf}K)Rl?q6;%=4XuOFS|91d4IdW!?I|FTcyoIm-0 zM>$*{RHAp#yP$xDS{7zv@W09$c|&3JKKkJ1IC(qzkmKr>Wg+MoI!^LP*Gsf}mHvj; zjy~qd{q6qUg8XkvmbwBe^bQwL5qa?&`#HC~-Eu)aVjN zeS`Pg7~ZvePbOh6BCxI_qU(t9neMNM0Gw;kW%L`mf_`UVUl!iZ!hS65zlI!=ap+Hi zzLBGm9Ek%+Tt}=pro!fOaBhyoLH4QKS?4aXP4yxt#BWFYKkf9}r8Q3Oc9!7x&^w&e zfRRAJThZj?0zj!>7cX_r*Lr106Kr)=m+8|XbQn38=uj5su10Z$MUFcWiT2#iMmurT zq@sZnK<7ef5gkdod-HfS*_M19%M+E6i~}7-%UMX2dr%itMfaxTdnN(s1Uiw0g)AJ1 zCw56ZdB69AHG@(jj zgi`LXjv(WF0R|V6Bo7$7-#hw!x|A-X%Rwkz0Sf3*^k{-;a7YEOa$M|D%mqY3S-rj4 z(b}13;$m?7G!OqIs)a96;o0i(h=pTGeHJWdVbzCpB|VnRF^(QjlY%(9g02SjbPYN* ziXgR(wJv*e1%dWRG*nr`k#6?z_y2_2PN#k=r%*}2LthNV%{7*V<5^hIvT&r&rH|p&WfHMtx{-yWx*Rm^|9xyM;PoY=RtFP7IJYe z@y1+Rfw%C~Udu0-8|L-Oxoy0~c~6&4H{TWLbPnwz$8{xb++CWlQNErV;zN2)KhWiD zZ6O1CoRfMZsU@=*vO6!(^!uH;KF&fu>hb+!0+`V!SU9Z{F{3|)WgYZq^eOr@3)@&o zLVMh;Twl;(^xnYPb_iNmU!Am)cZE;2l{9KzVt8j1s2X` z;T+Ck4(h5Z@GA=KZteb7_g8hBO(W`jGyX7-o=xZ!wS-1#K?CiuqCV(8^48X|s+ zjWI;~ByN&5BUSG8L=#$@nu}YTs<}cJvAwzWhDK41UmYT;QYI02V`ss1K{F}iCPPRQ 
zKeouxZ0{oHo26&t_2is2EWAe;#KHw!@cPeGsy2DOC6mM?ljDvQ@05`v&UZ8LiME4L zFe#jmyj6#r2fXCf29>X>H`Dh;+n`Dyx?@jhc6t)(oUP5|)q~g~`!q+5tuevrN+2~- ziqICf3Av=WtoxEBkc!M(@ns6if4la#%;J`^Lvyu;@Dnd!SEDEmKG8 z0~k9CpIpt5Lw1!fvgEG|IbMc+j%*UsL;?eb9DA!*GvwH7$Fbd4GG8Yb(+ZUBjGOVW zkiZOUxUb~EBVX6z*x@(!pUKSO`p;tAEL=zWasA0x(!Yb0Rq&43M|&P~cQ?29uy8}S zlf922K?_&9b(1s6WazrPF%L11bdz|Lg`2xcEMZ8omv4C-3te?!%yMR>FC=1~WZ_dj zYilP*7t_J4Vb-#cL?t^|QugG7-Ixu`=AOY0vxRwzh1*!Tos%(#@^+~JAHKH?=otVp zJDF$vhVN#cW#LX1?&6C$_}H8uc^~#0`2w@gZ{&V%;%9nH+y&lp!%5cTzcWh$#6)%- zo!upTeAIXb( z7wAx0Pxq&b$?J9{_;!x@mAS%PW&R9+0VqHi5EY;ZPzIz1qz6<5Gz7Q< zW(M3DaCg8%0V@Jl1+)jO4OkzrG2oSe!vXIF91Zv&;9S6kz`(%Bz}Ud}z{J3$Kt-T3 z&=5EnQpva);AXQLyP+m|$ z(7>Rp(6QjO zU{kOqxKD83;Q7Ii1}_eNEO=?~=fOV&{}lX7@WqgRAwxojh71cC5%NUHhLBAmTSB&m zd>8UtC>2VF280HMhJ;3j_6n7S%0pv9V?!;WRiRCxi$hn2ZV25Nx-)cN=%LUzLf;BK z9QtnP@z4{YCqq9AJso-`^jcU-m?5k(tTwDM%pHcq7KGgwwlM6$u!qBzg)I+T8MZ2{ zJ#0v>2N-#z+R$v#@3mk$; zf+m4eFipS;76_IIo)oMWbO_c8)(f@^b_$*r>=x`1JSR9PcuR0x@TuUG;B&!Q!8d|$ z1>Xy<2(AjQMSzIgBkqVOiYSg49x*TCk%%WERzy4*u{xq7VoSuvB=fyvYe->YejE@`|IWux|d>$rQ;n$#ls~$t=ly$)l2Wk`0ngk}Z<0 zlI@b6lBXrRC3_@qO5T>dBY979RPur3Bgt{e3CT&xXOb(udiBcdHNIC{uSLDK^?IY% zFH#_dQc6loMN+XeQrb%zCykdTO4FrgX+LScbcD1-S}HA_PyAnu^+^K6ni}OMC{kG=VHH$Js7`G^HMcmrBt#RAq_QvgxdnN9xxUb`Win|bZDeiJSjF-ko#Ye~Yj*p8^ zh);@F#4F=dEe;NOC{Dt^S@s|^F6NV*>NGMGx zPgs|*E8&@hXA|}&{GJ$;7?K#4C`c?y9GzI1I4-d&u`zK*;;h6uiEQHB#CeH#CoV{Q zJaJj#^2C*is}kE2*Ceh>+>rQe;)$f*NkfvFk`^SbN!p+EdeU1-hm+n-I+^ri($7g3 zk}f4(PP&qGHR)P1NT!l|CCifK$-R@~larEDl2ys+$(rQMnCp(gtCT~eTk$gV+ z4@Iy-sz_6)6`2aX!lrV3FhR9UJ%s=lgzssXB8)ezNC)i70ws$4ZvH9=LQs#7(n z+Eg=Db5yKquIhf(Qq?L|yK0SUy=tp!hiaE8hEY4g*bPkTA-iUy;5keM|b*^d0HXrN5ZI zKm9=Z@$|3Ke@Op1{bKs%^xre+jKGYLjIfM|3{ggOh9V;~V_-&E#<+|L88sPo8IBBB zh9_fc#`KJt8M8B%XY9@RB;#VnwTwU2P%Ti4)DpE!9j)%I)~Rn-4^Zc-3)F+ucc@F% zqtun^@#+ccCbdiLQBP6NP|sGg>bdIs)r-~b>hdory>RsyH>OJZM>NnNL)W_8y zt3OeHqyA2PUj38$g8GvBPYtaJ)P!ilG;&RxCP9;=QEAdO8cn9gs>#(1)0Al{G@~@* zG!ry68oOqy25TPHtkdk*oX~u#Ii)$H`AYMrHb@(xjnVeg-k~kh4%3!s%e51=P1+W% 
zTRTPDroB@;PkWE{UhQMrW!e?mRoYG3UE2NHgW6ZMhqR})UueJ5exvs&#f3_& zv$}J-?{z=we$idh{ieIB`!h?NCCQRyMQ6ok#b+gDrDUnH(zDcA1G6f!CT7jfT9~yq zYj4&ES?BanAEFP{hwG(!nO?3>(x>QC_35P2qmTY}{Q!NQzCd51AE_UsAE&R@*Xrx_ z4t=BEsh_N$t)Hu(uV0{lK>x6Qk$#E(wEm3#NBu?pW&Q64U_gcdLy)1DA9d^<``JRCd0oB9~#aX{>T<)r(|2R`)2pc&dJWp zzCU|$_O|TZ+3#k5WehiJj3%Si*w@(4IK)_H9BCY59A}(hbQl|rPNU1%YMg4s#<|A% z#(RuAj87Y%HSRT0rg5fL(`?fm6Kk4ly5F?W^q}c+(^AtDrk6}_nvR)Hm_9X~Hhp0_ zYdUB8)pW)5hv`o){H*yo^NZ$v=J(8>nopa*FrPJ_ zGk<5kWI>i7OQ=O)iL^*9a!ZV*w?%0&SS*&?Ed4BbmO{&5%MeS6rOHxmv0EA}lPpb^ z>6RImnU>j>#g=x6Rg$N7HgYzzV$xqLhFOp pMb^ix%dE?-+pItJ3G5^96W2%Ky+r_u6wq`3m^b)O-`_sU{{gR_L>K@7 diff --git a/LFLiveKit/Info.plist b/LFLiveKit/Info.plist index 84df70fc..f80232a8 100644 --- a/LFLiveKit/Info.plist +++ b/LFLiveKit/Info.plist @@ -15,7 +15,7 @@ CFBundlePackageType FMWK CFBundleShortVersionString - 1.9.5 + 1.9.6 CFBundleSignature ???? CFBundleVersion diff --git a/LFLiveKit/Vendor/GPUImage/GLProgram.h b/LFLiveKit/Vendor/GPUImage/GLProgram.h new file mode 100755 index 00000000..cd455e71 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GLProgram.h @@ -0,0 +1,42 @@ +// This is Jeff LaMarche's GLProgram OpenGL shader wrapper class from his OpenGL ES 2.0 book. 
+// A description of this can be found at his page on the topic: +// http://iphonedevelopment.blogspot.com/2010/11/opengl-es-20-for-ios-chapter-4.html +// I've extended this to be able to take programs as NSStrings in addition to files, for baked-in shaders + +#import + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +#import +#import +#else +#import +#import +#endif + +@interface GLProgram : NSObject +{ + NSMutableArray *attributes; + NSMutableArray *uniforms; + GLuint program, + vertShader, + fragShader; +} + +@property(readwrite, nonatomic) BOOL initialized; +@property(readwrite, copy, nonatomic) NSString *vertexShaderLog; +@property(readwrite, copy, nonatomic) NSString *fragmentShaderLog; +@property(readwrite, copy, nonatomic) NSString *programLog; + +- (id)initWithVertexShaderString:(NSString *)vShaderString + fragmentShaderString:(NSString *)fShaderString; +- (id)initWithVertexShaderString:(NSString *)vShaderString + fragmentShaderFilename:(NSString *)fShaderFilename; +- (id)initWithVertexShaderFilename:(NSString *)vShaderFilename + fragmentShaderFilename:(NSString *)fShaderFilename; +- (void)addAttribute:(NSString *)attributeName; +- (GLuint)attributeIndex:(NSString *)attributeName; +- (GLuint)uniformIndex:(NSString *)uniformName; +- (BOOL)link; +- (void)use; +- (void)validate; +@end diff --git a/LFLiveKit/Vendor/GPUImage/GLProgram.m b/LFLiveKit/Vendor/GPUImage/GLProgram.m new file mode 100755 index 00000000..105d75fe --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GLProgram.m @@ -0,0 +1,236 @@ +// This is Jeff LaMarche's GLProgram OpenGL shader wrapper class from his OpenGL ES 2.0 book. 
+// A description of this can be found at his page on the topic: +// http://iphonedevelopment.blogspot.com/2010/11/opengl-es-20-for-ios-chapter-4.html + + +#import "GLProgram.h" +// START:typedefs +#pragma mark Function Pointer Definitions +typedef void (*GLInfoFunction)(GLuint program, GLenum pname, GLint* params); +typedef void (*GLLogFunction) (GLuint program, GLsizei bufsize, GLsizei* length, GLchar* infolog); +// END:typedefs +#pragma mark - +#pragma mark Private Extension Method Declaration +// START:extension +@interface GLProgram() + +- (BOOL)compileShader:(GLuint *)shader + type:(GLenum)type + string:(NSString *)shaderString; +@end +// END:extension +#pragma mark - + +@implementation GLProgram +// START:init + +@synthesize initialized = _initialized; + +- (id)initWithVertexShaderString:(NSString *)vShaderString + fragmentShaderString:(NSString *)fShaderString; +{ + if ((self = [super init])) + { + _initialized = NO; + + attributes = [[NSMutableArray alloc] init]; + uniforms = [[NSMutableArray alloc] init]; + program = glCreateProgram(); + + if (![self compileShader:&vertShader + type:GL_VERTEX_SHADER + string:vShaderString]) + { + NSLog(@"Failed to compile vertex shader"); + } + + // Create and compile fragment shader + if (![self compileShader:&fragShader + type:GL_FRAGMENT_SHADER + string:fShaderString]) + { + NSLog(@"Failed to compile fragment shader"); + } + + glAttachShader(program, vertShader); + glAttachShader(program, fragShader); + } + + return self; +} + +- (id)initWithVertexShaderString:(NSString *)vShaderString + fragmentShaderFilename:(NSString *)fShaderFilename; +{ + NSString *fragShaderPathname = [[NSBundle mainBundle] pathForResource:fShaderFilename ofType:@"fsh"]; + NSString *fragmentShaderString = [NSString stringWithContentsOfFile:fragShaderPathname encoding:NSUTF8StringEncoding error:nil]; + + if ((self = [self initWithVertexShaderString:vShaderString fragmentShaderString:fragmentShaderString])) + { + } + + return self; +} + +- 
(id)initWithVertexShaderFilename:(NSString *)vShaderFilename + fragmentShaderFilename:(NSString *)fShaderFilename; +{ + NSString *vertShaderPathname = [[NSBundle mainBundle] pathForResource:vShaderFilename ofType:@"vsh"]; + NSString *vertexShaderString = [NSString stringWithContentsOfFile:vertShaderPathname encoding:NSUTF8StringEncoding error:nil]; + + NSString *fragShaderPathname = [[NSBundle mainBundle] pathForResource:fShaderFilename ofType:@"fsh"]; + NSString *fragmentShaderString = [NSString stringWithContentsOfFile:fragShaderPathname encoding:NSUTF8StringEncoding error:nil]; + + if ((self = [self initWithVertexShaderString:vertexShaderString fragmentShaderString:fragmentShaderString])) + { + } + + return self; +} +// END:init +// START:compile +- (BOOL)compileShader:(GLuint *)shader + type:(GLenum)type + string:(NSString *)shaderString +{ +// CFAbsoluteTime startTime = CFAbsoluteTimeGetCurrent(); + + GLint status; + const GLchar *source; + + source = + (GLchar *)[shaderString UTF8String]; + if (!source) + { + NSLog(@"Failed to load vertex shader"); + return NO; + } + + *shader = glCreateShader(type); + glShaderSource(*shader, 1, &source, NULL); + glCompileShader(*shader); + + glGetShaderiv(*shader, GL_COMPILE_STATUS, &status); + + if (status != GL_TRUE) + { + GLint logLength; + glGetShaderiv(*shader, GL_INFO_LOG_LENGTH, &logLength); + if (logLength > 0) + { + GLchar *log = (GLchar *)malloc(logLength); + glGetShaderInfoLog(*shader, logLength, &logLength, log); + if (shader == &vertShader) + { + self.vertexShaderLog = [NSString stringWithFormat:@"%s", log]; + } + else + { + self.fragmentShaderLog = [NSString stringWithFormat:@"%s", log]; + } + + free(log); + } + } + +// CFAbsoluteTime linkTime = (CFAbsoluteTimeGetCurrent() - startTime); +// NSLog(@"Compiled in %f ms", linkTime * 1000.0); + + return status == GL_TRUE; +} +// END:compile +#pragma mark - +// START:addattribute +- (void)addAttribute:(NSString *)attributeName +{ + if (![attributes 
containsObject:attributeName]) + { + [attributes addObject:attributeName]; + glBindAttribLocation(program, + (GLuint)[attributes indexOfObject:attributeName], + [attributeName UTF8String]); + } +} +// END:addattribute +// START:indexmethods +- (GLuint)attributeIndex:(NSString *)attributeName +{ + return (GLuint)[attributes indexOfObject:attributeName]; +} +- (GLuint)uniformIndex:(NSString *)uniformName +{ + return glGetUniformLocation(program, [uniformName UTF8String]); +} +// END:indexmethods +#pragma mark - +// START:link +- (BOOL)link +{ +// CFAbsoluteTime startTime = CFAbsoluteTimeGetCurrent(); + + GLint status; + + glLinkProgram(program); + + glGetProgramiv(program, GL_LINK_STATUS, &status); + if (status == GL_FALSE) + return NO; + + if (vertShader) + { + glDeleteShader(vertShader); + vertShader = 0; + } + if (fragShader) + { + glDeleteShader(fragShader); + fragShader = 0; + } + + self.initialized = YES; + +// CFAbsoluteTime linkTime = (CFAbsoluteTimeGetCurrent() - startTime); +// NSLog(@"Linked in %f ms", linkTime * 1000.0); + + return YES; +} +// END:link +// START:use +- (void)use +{ + glUseProgram(program); +} +// END:use +#pragma mark - + +- (void)validate; +{ + GLint logLength; + + glValidateProgram(program); + glGetProgramiv(program, GL_INFO_LOG_LENGTH, &logLength); + if (logLength > 0) + { + GLchar *log = (GLchar *)malloc(logLength); + glGetProgramInfoLog(program, logLength, &logLength, log); + self.programLog = [NSString stringWithFormat:@"%s", log]; + free(log); + } +} + +#pragma mark - +// START:dealloc +- (void)dealloc +{ + if (vertShader) + glDeleteShader(vertShader); + + if (fragShader) + glDeleteShader(fragShader); + + if (program) + glDeleteProgram(program); + +} +// END:dealloc +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImage.h b/LFLiveKit/Vendor/GPUImage/GPUImage.h new file mode 100755 index 00000000..3e91c29c --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImage.h @@ -0,0 +1,170 @@ +#import "GLProgram.h" + +// Base classes +#import 
"GPUImageContext.h" +#import "GPUImageOutput.h" +#import "GPUImageView.h" +#import "GPUImageVideoCamera.h" +#import "GPUImageStillCamera.h" +#import "GPUImageMovie.h" +#import "GPUImagePicture.h" +#import "GPUImageRawDataInput.h" +#import "GPUImageRawDataOutput.h" +#import "GPUImageMovieWriter.h" +#import "GPUImageFilterPipeline.h" +#import "GPUImageTextureOutput.h" +#import "GPUImageFilterGroup.h" +#import "GPUImageTextureInput.h" +#import "GPUImageUIElement.h" +#import "GPUImageBuffer.h" +#import "GPUImageFramebuffer.h" +#import "GPUImageFramebufferCache.h" + +// Filters +#import "GPUImageFilter.h" +#import "GPUImageTwoInputFilter.h" +#import "GPUImagePixellateFilter.h" +#import "GPUImagePixellatePositionFilter.h" +#import "GPUImageSepiaFilter.h" +#import "GPUImageColorInvertFilter.h" +#import "GPUImageSaturationFilter.h" +#import "GPUImageContrastFilter.h" +#import "GPUImageExposureFilter.h" +#import "GPUImageBrightnessFilter.h" +#import "GPUImageLevelsFilter.h" +#import "GPUImageSharpenFilter.h" +#import "GPUImageGammaFilter.h" +#import "GPUImageSobelEdgeDetectionFilter.h" +#import "GPUImageSketchFilter.h" +#import "GPUImageToonFilter.h" +#import "GPUImageSmoothToonFilter.h" +#import "GPUImageMultiplyBlendFilter.h" +#import "GPUImageDissolveBlendFilter.h" +#import "GPUImageKuwaharaFilter.h" +#import "GPUImageKuwaharaRadius3Filter.h" +#import "GPUImageVignetteFilter.h" +#import "GPUImageGaussianBlurFilter.h" +#import "GPUImageGaussianBlurPositionFilter.h" +#import "GPUImageGaussianSelectiveBlurFilter.h" +#import "GPUImageOverlayBlendFilter.h" +#import "GPUImageDarkenBlendFilter.h" +#import "GPUImageLightenBlendFilter.h" +#import "GPUImageSwirlFilter.h" +#import "GPUImageSourceOverBlendFilter.h" +#import "GPUImageColorBurnBlendFilter.h" +#import "GPUImageColorDodgeBlendFilter.h" +#import "GPUImageScreenBlendFilter.h" +#import "GPUImageExclusionBlendFilter.h" +#import "GPUImageDifferenceBlendFilter.h" +#import "GPUImageSubtractBlendFilter.h" +#import 
"GPUImageHardLightBlendFilter.h" +#import "GPUImageSoftLightBlendFilter.h" +#import "GPUImageColorBlendFilter.h" +#import "GPUImageHueBlendFilter.h" +#import "GPUImageSaturationBlendFilter.h" +#import "GPUImageLuminosityBlendFilter.h" +#import "GPUImageCropFilter.h" +#import "GPUImageGrayscaleFilter.h" +#import "GPUImageTransformFilter.h" +#import "GPUImageChromaKeyBlendFilter.h" +#import "GPUImageHazeFilter.h" +#import "GPUImageLuminanceThresholdFilter.h" +#import "GPUImagePosterizeFilter.h" +#import "GPUImageBoxBlurFilter.h" +#import "GPUImageAdaptiveThresholdFilter.h" +#import "GPUImageSolarizeFilter.h" +#import "GPUImageUnsharpMaskFilter.h" +#import "GPUImageBulgeDistortionFilter.h" +#import "GPUImagePinchDistortionFilter.h" +#import "GPUImageCrosshatchFilter.h" +#import "GPUImageCGAColorspaceFilter.h" +#import "GPUImagePolarPixellateFilter.h" +#import "GPUImageStretchDistortionFilter.h" +#import "GPUImagePerlinNoiseFilter.h" +#import "GPUImageJFAVoronoiFilter.h" +#import "GPUImageVoronoiConsumerFilter.h" +#import "GPUImageMosaicFilter.h" +#import "GPUImageTiltShiftFilter.h" +#import "GPUImage3x3ConvolutionFilter.h" +#import "GPUImageEmbossFilter.h" +#import "GPUImageCannyEdgeDetectionFilter.h" +#import "GPUImageThresholdEdgeDetectionFilter.h" +#import "GPUImageMaskFilter.h" +#import "GPUImageHistogramFilter.h" +#import "GPUImageHistogramGenerator.h" +#import "GPUImageHistogramEqualizationFilter.h" +#import "GPUImagePrewittEdgeDetectionFilter.h" +#import "GPUImageXYDerivativeFilter.h" +#import "GPUImageHarrisCornerDetectionFilter.h" +#import "GPUImageAlphaBlendFilter.h" +#import "GPUImageNormalBlendFilter.h" +#import "GPUImageNonMaximumSuppressionFilter.h" +#import "GPUImageRGBFilter.h" +#import "GPUImageMedianFilter.h" +#import "GPUImageBilateralFilter.h" +#import "GPUImageCrosshairGenerator.h" +#import "GPUImageToneCurveFilter.h" +#import "GPUImageNobleCornerDetectionFilter.h" +#import "GPUImageShiTomasiFeatureDetectionFilter.h" +#import 
"GPUImageErosionFilter.h" +#import "GPUImageRGBErosionFilter.h" +#import "GPUImageDilationFilter.h" +#import "GPUImageRGBDilationFilter.h" +#import "GPUImageOpeningFilter.h" +#import "GPUImageRGBOpeningFilter.h" +#import "GPUImageClosingFilter.h" +#import "GPUImageRGBClosingFilter.h" +#import "GPUImageColorPackingFilter.h" +#import "GPUImageSphereRefractionFilter.h" +#import "GPUImageMonochromeFilter.h" +#import "GPUImageOpacityFilter.h" +#import "GPUImageHighlightShadowFilter.h" +#import "GPUImageFalseColorFilter.h" +#import "GPUImageHSBFilter.h" +#import "GPUImageHueFilter.h" +#import "GPUImageGlassSphereFilter.h" +#import "GPUImageLookupFilter.h" +#import "GPUImageAmatorkaFilter.h" +#import "GPUImageMissEtikateFilter.h" +#import "GPUImageSoftEleganceFilter.h" +#import "GPUImageAddBlendFilter.h" +#import "GPUImageDivideBlendFilter.h" +#import "GPUImagePolkaDotFilter.h" +#import "GPUImageLocalBinaryPatternFilter.h" +#import "GPUImageColorLocalBinaryPatternFilter.h" +#import "GPUImageLanczosResamplingFilter.h" +#import "GPUImageAverageColor.h" +#import "GPUImageSolidColorGenerator.h" +#import "GPUImageLuminosity.h" +#import "GPUImageAverageLuminanceThresholdFilter.h" +#import "GPUImageWhiteBalanceFilter.h" +#import "GPUImageChromaKeyFilter.h" +#import "GPUImageLowPassFilter.h" +#import "GPUImageHighPassFilter.h" +#import "GPUImageMotionDetector.h" +#import "GPUImageHalftoneFilter.h" +#import "GPUImageThresholdedNonMaximumSuppressionFilter.h" +#import "GPUImageHoughTransformLineDetector.h" +#import "GPUImageParallelCoordinateLineTransformFilter.h" +#import "GPUImageThresholdSketchFilter.h" +#import "GPUImageLineGenerator.h" +#import "GPUImageLinearBurnBlendFilter.h" +#import "GPUImageGaussianBlurPositionFilter.h" +#import "GPUImagePixellatePositionFilter.h" +#import "GPUImageTwoInputCrossTextureSamplingFilter.h" +#import "GPUImagePoissonBlendFilter.h" +#import "GPUImageMotionBlurFilter.h" +#import "GPUImageZoomBlurFilter.h" +#import "GPUImageLaplacianFilter.h" 
+#import "GPUImageiOSBlurFilter.h" +#import "GPUImageLuminanceRangeFilter.h" +#import "GPUImageDirectionalNonMaximumSuppressionFilter.h" +#import "GPUImageDirectionalSobelEdgeDetectionFilter.h" +#import "GPUImageSingleComponentGaussianBlurFilter.h" +#import "GPUImageThreeInputFilter.h" +#import "GPUImageFourInputFilter.h" +#import "GPUImageWeakPixelInclusionFilter.h" +#import "GPUImageColorConversion.h" +#import "GPUImageColourFASTFeatureDetector.h" +#import "GPUImageColourFASTSamplingOperation.h" + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImage3x3ConvolutionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImage3x3ConvolutionFilter.h new file mode 100755 index 00000000..67e68def --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImage3x3ConvolutionFilter.h @@ -0,0 +1,18 @@ +#import "GPUImage3x3TextureSamplingFilter.h" + +/** Runs a 3x3 convolution kernel against the image + */ +@interface GPUImage3x3ConvolutionFilter : GPUImage3x3TextureSamplingFilter +{ + GLint convolutionMatrixUniform; +} + +/** Convolution kernel to run against the image + + The convolution kernel is a 3x3 matrix of values to apply to the pixel and its 8 surrounding pixels. + The matrix is specified in row-major order, with the top left pixel being one.one and the bottom right three.three + If the values in the matrix don't add up to 1.0, the image could be brightened or darkened. 
+ */ +@property(readwrite, nonatomic) GPUMatrix3x3 convolutionKernel; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImage3x3ConvolutionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImage3x3ConvolutionFilter.m new file mode 100755 index 00000000..c623ac67 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImage3x3ConvolutionFilter.m @@ -0,0 +1,128 @@ +#import "GPUImage3x3ConvolutionFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImage3x3ConvolutionFragmentShaderString = SHADER_STRING +( + precision highp float; + + uniform sampler2D inputImageTexture; + + uniform mediump mat3 convolutionMatrix; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + void main() + { + mediump vec3 bottomColor = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; + mediump vec3 bottomLeftColor = texture2D(inputImageTexture, bottomLeftTextureCoordinate).rgb; + mediump vec3 bottomRightColor = texture2D(inputImageTexture, bottomRightTextureCoordinate).rgb; + mediump vec4 centerColor = texture2D(inputImageTexture, textureCoordinate); + mediump vec3 leftColor = texture2D(inputImageTexture, leftTextureCoordinate).rgb; + mediump vec3 rightColor = texture2D(inputImageTexture, rightTextureCoordinate).rgb; + mediump vec3 topColor = texture2D(inputImageTexture, topTextureCoordinate).rgb; + mediump vec3 topRightColor = texture2D(inputImageTexture, topRightTextureCoordinate).rgb; + mediump vec3 topLeftColor = texture2D(inputImageTexture, topLeftTextureCoordinate).rgb; + + mediump vec3 resultColor = topLeftColor * convolutionMatrix[0][0] + topColor * convolutionMatrix[0][1] + topRightColor * convolutionMatrix[0][2]; + resultColor += leftColor * 
convolutionMatrix[1][0] + centerColor.rgb * convolutionMatrix[1][1] + rightColor * convolutionMatrix[1][2]; + resultColor += bottomLeftColor * convolutionMatrix[2][0] + bottomColor * convolutionMatrix[2][1] + bottomRightColor * convolutionMatrix[2][2]; + + gl_FragColor = vec4(resultColor, centerColor.a); + } +); +#else +NSString *const kGPUImage3x3ConvolutionFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + uniform mat3 convolutionMatrix; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + void main() + { + vec3 bottomColor = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; + vec3 bottomLeftColor = texture2D(inputImageTexture, bottomLeftTextureCoordinate).rgb; + vec3 bottomRightColor = texture2D(inputImageTexture, bottomRightTextureCoordinate).rgb; + vec4 centerColor = texture2D(inputImageTexture, textureCoordinate); + vec3 leftColor = texture2D(inputImageTexture, leftTextureCoordinate).rgb; + vec3 rightColor = texture2D(inputImageTexture, rightTextureCoordinate).rgb; + vec3 topColor = texture2D(inputImageTexture, topTextureCoordinate).rgb; + vec3 topRightColor = texture2D(inputImageTexture, topRightTextureCoordinate).rgb; + vec3 topLeftColor = texture2D(inputImageTexture, topLeftTextureCoordinate).rgb; + + vec3 resultColor = topLeftColor * convolutionMatrix[0][0] + topColor * convolutionMatrix[0][1] + topRightColor * convolutionMatrix[0][2]; + resultColor += leftColor * convolutionMatrix[1][0] + centerColor.rgb * convolutionMatrix[1][1] + rightColor * convolutionMatrix[1][2]; + resultColor += bottomLeftColor * convolutionMatrix[2][0] + bottomColor * convolutionMatrix[2][1] + bottomRightColor * 
convolutionMatrix[2][2]; + + gl_FragColor = vec4(resultColor, centerColor.a); + } +); +#endif + +@implementation GPUImage3x3ConvolutionFilter + +@synthesize convolutionKernel = _convolutionKernel; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithFragmentShaderFromString:kGPUImage3x3ConvolutionFragmentShaderString])) + { + return nil; + } + + self.convolutionKernel = (GPUMatrix3x3){ + {0.f, 0.f, 0.f}, + {0.f, 1.f, 0.f}, + {0.f, 0.f, 0.f} + }; + + return self; +} + +- (id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [super initWithFragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + convolutionMatrixUniform = [filterProgram uniformIndex:@"convolutionMatrix"]; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setConvolutionKernel:(GPUMatrix3x3)newValue; +{ + _convolutionKernel = newValue; + + [self setMatrix3f:_convolutionKernel forUniform:convolutionMatrixUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImage3x3TextureSamplingFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImage3x3TextureSamplingFilter.h new file mode 100644 index 00000000..5599e156 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImage3x3TextureSamplingFilter.h @@ -0,0 +1,18 @@ +#import "GPUImageFilter.h" + +extern NSString *const kGPUImageNearbyTexelSamplingVertexShaderString; + +@interface GPUImage3x3TextureSamplingFilter : GPUImageFilter +{ + GLint texelWidthUniform, texelHeightUniform; + + CGFloat texelWidth, texelHeight; + BOOL hasOverriddenImageSizeFactor; +} + +// The texel width and height determines how far out to sample from this texel. By default, this is the normalized width of a pixel, but this can be overridden for different effects. 
+@property(readwrite, nonatomic) CGFloat texelWidth; +@property(readwrite, nonatomic) CGFloat texelHeight; + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImage3x3TextureSamplingFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImage3x3TextureSamplingFilter.m new file mode 100644 index 00000000..05c4d50c --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImage3x3TextureSamplingFilter.m @@ -0,0 +1,121 @@ +#import "GPUImage3x3TextureSamplingFilter.h" + +// Override vertex shader to remove dependent texture reads +NSString *const kGPUImageNearbyTexelSamplingVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + + uniform float texelWidth; + uniform float texelHeight; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + void main() + { + gl_Position = position; + + vec2 widthStep = vec2(texelWidth, 0.0); + vec2 heightStep = vec2(0.0, texelHeight); + vec2 widthHeightStep = vec2(texelWidth, texelHeight); + vec2 widthNegativeHeightStep = vec2(texelWidth, -texelHeight); + + textureCoordinate = inputTextureCoordinate.xy; + leftTextureCoordinate = inputTextureCoordinate.xy - widthStep; + rightTextureCoordinate = inputTextureCoordinate.xy + widthStep; + + topTextureCoordinate = inputTextureCoordinate.xy - heightStep; + topLeftTextureCoordinate = inputTextureCoordinate.xy - widthHeightStep; + topRightTextureCoordinate = inputTextureCoordinate.xy + widthNegativeHeightStep; + + bottomTextureCoordinate = inputTextureCoordinate.xy + heightStep; + bottomLeftTextureCoordinate = inputTextureCoordinate.xy - widthNegativeHeightStep; + bottomRightTextureCoordinate = inputTextureCoordinate.xy + widthHeightStep; + } +); + + 
+@implementation GPUImage3x3TextureSamplingFilter + +@synthesize texelWidth = _texelWidth; +@synthesize texelHeight = _texelHeight; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithVertexShaderFromString:(NSString *)vertexShaderString fragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [super initWithVertexShaderFromString:vertexShaderString fragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + texelWidthUniform = [filterProgram uniformIndex:@"texelWidth"]; + texelHeightUniform = [filterProgram uniformIndex:@"texelHeight"]; + + return self; +} + +- (id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [self initWithVertexShaderFromString:kGPUImageNearbyTexelSamplingVertexShaderString fragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + return self; +} + +- (void)setupFilterForSize:(CGSize)filterFrameSize; +{ + if (!hasOverriddenImageSizeFactor) + { + _texelWidth = 1.0 / filterFrameSize.width; + _texelHeight = 1.0 / filterFrameSize.height; + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:filterProgram]; + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + glUniform1f(texelWidthUniform, _texelHeight); + glUniform1f(texelHeightUniform, _texelWidth); + } + else + { + glUniform1f(texelWidthUniform, _texelWidth); + glUniform1f(texelHeightUniform, _texelHeight); + } + }); + } +} + +#pragma mark - +#pragma mark Accessors + +- (void)setTexelWidth:(CGFloat)newValue; +{ + hasOverriddenImageSizeFactor = YES; + _texelWidth = newValue; + + [self setFloat:_texelWidth forUniform:texelWidthUniform program:filterProgram]; +} + +- (void)setTexelHeight:(CGFloat)newValue; +{ + hasOverriddenImageSizeFactor = YES; + _texelHeight = newValue; + + [self setFloat:_texelHeight forUniform:texelHeightUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageAdaptiveThresholdFilter.h 
b/LFLiveKit/Vendor/GPUImage/GPUImageAdaptiveThresholdFilter.h new file mode 100755 index 00000000..32785560 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageAdaptiveThresholdFilter.h @@ -0,0 +1,9 @@ +#import "GPUImageFilterGroup.h" + +@interface GPUImageAdaptiveThresholdFilter : GPUImageFilterGroup + +/** A multiplier for the background averaging blur radius in pixels, with a default of 4 + */ +@property(readwrite, nonatomic) CGFloat blurRadiusInPixels; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageAdaptiveThresholdFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageAdaptiveThresholdFilter.m new file mode 100755 index 00000000..71fa6abf --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageAdaptiveThresholdFilter.m @@ -0,0 +1,100 @@ +#import "GPUImageAdaptiveThresholdFilter.h" +#import "GPUImageFilter.h" +#import "GPUImageTwoInputFilter.h" +#import "GPUImageGrayscaleFilter.h" +#import "GPUImageBoxBlurFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageAdaptiveThresholdFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + highp float blurredInput = texture2D(inputImageTexture, textureCoordinate).r; + highp float localLuminance = texture2D(inputImageTexture2, textureCoordinate2).r; + highp float thresholdResult = step(blurredInput - 0.05, localLuminance); + + gl_FragColor = vec4(vec3(thresholdResult), 1.0); + } +); +#else +NSString *const kGPUImageAdaptiveThresholdFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + float blurredInput = texture2D(inputImageTexture, textureCoordinate).r; + float localLuminance = texture2D(inputImageTexture2, textureCoordinate2).r; + float thresholdResult = 
step(blurredInput - 0.05, localLuminance); + + gl_FragColor = vec4(vec3(thresholdResult), 1.0); + } +); +#endif + +@interface GPUImageAdaptiveThresholdFilter() +{ + GPUImageBoxBlurFilter *boxBlurFilter; +} +@end + +@implementation GPUImageAdaptiveThresholdFilter + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + // First pass: reduce to luminance + GPUImageGrayscaleFilter *luminanceFilter = [[GPUImageGrayscaleFilter alloc] init]; + [self addFilter:luminanceFilter]; + + // Second pass: perform a box blur + boxBlurFilter = [[GPUImageBoxBlurFilter alloc] init]; + [self addFilter:boxBlurFilter]; + + // Third pass: compare the blurred background luminance to the local value + GPUImageFilter *adaptiveThresholdFilter = [[GPUImageTwoInputFilter alloc] initWithFragmentShaderFromString:kGPUImageAdaptiveThresholdFragmentShaderString]; + [self addFilter:adaptiveThresholdFilter]; + + [luminanceFilter addTarget:boxBlurFilter]; + + [boxBlurFilter addTarget:adaptiveThresholdFilter]; + // To prevent double updating of this filter, disable updates from the sharp luminance image side + [luminanceFilter addTarget:adaptiveThresholdFilter]; + + self.initialFilters = [NSArray arrayWithObject:luminanceFilter]; + self.terminalFilter = adaptiveThresholdFilter; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setBlurRadiusInPixels:(CGFloat)newValue; +{ + boxBlurFilter.blurRadiusInPixels = newValue; +} + +- (CGFloat)blurRadiusInPixels; +{ + return boxBlurFilter.blurRadiusInPixels; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageAddBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageAddBlendFilter.h new file mode 100644 index 00000000..b14c60c6 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageAddBlendFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageAddBlendFilter : GPUImageTwoInputFilter + +@end diff --git 
a/LFLiveKit/Vendor/GPUImage/GPUImageAddBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageAddBlendFilter.m new file mode 100644 index 00000000..c89054a8 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageAddBlendFilter.m @@ -0,0 +1,100 @@ +#import "GPUImageAddBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageAddBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + lowp vec4 base = texture2D(inputImageTexture, textureCoordinate); + lowp vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); + + mediump float r; + if (overlay.r * base.a + base.r * overlay.a >= overlay.a * base.a) { + r = overlay.a * base.a + overlay.r * (1.0 - base.a) + base.r * (1.0 - overlay.a); + } else { + r = overlay.r + base.r; + } + + mediump float g; + if (overlay.g * base.a + base.g * overlay.a >= overlay.a * base.a) { + g = overlay.a * base.a + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + } else { + g = overlay.g + base.g; + } + + mediump float b; + if (overlay.b * base.a + base.b * overlay.a >= overlay.a * base.a) { + b = overlay.a * base.a + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + } else { + b = overlay.b + base.b; + } + + mediump float a = overlay.a + base.a - overlay.a * base.a; + + gl_FragColor = vec4(r, g, b, a); + } +); +#else +NSString *const kGPUImageAddBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 base = texture2D(inputImageTexture, textureCoordinate); + vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); + + float r; + if (overlay.r * base.a + base.r * overlay.a >= overlay.a * base.a) { + r = overlay.a * base.a + overlay.r * (1.0 - 
base.a) + base.r * (1.0 - overlay.a); + } else { + r = overlay.r + base.r; + } + + float g; + if (overlay.g * base.a + base.g * overlay.a >= overlay.a * base.a) { + g = overlay.a * base.a + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + } else { + g = overlay.g + base.g; + } + + float b; + if (overlay.b * base.a + base.b * overlay.a >= overlay.a * base.a) { + b = overlay.a * base.a + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + } else { + b = overlay.b + base.b; + } + + float a = overlay.a + base.a - overlay.a * base.a; + + gl_FragColor = vec4(r, g, b, a); + } +); +#endif + + + +@implementation GPUImageAddBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageAddBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageAlphaBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageAlphaBlendFilter.h new file mode 100755 index 00000000..c4d75759 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageAlphaBlendFilter.h @@ -0,0 +1,11 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageAlphaBlendFilter : GPUImageTwoInputFilter +{ + GLint mixUniform; +} + +// Mix ranges from 0.0 (only image 1) to 1.0 (only image 2), with 1.0 as the normal level +@property(readwrite, nonatomic) CGFloat mix; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageAlphaBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageAlphaBlendFilter.m new file mode 100755 index 00000000..077df790 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageAlphaBlendFilter.m @@ -0,0 +1,72 @@ +#import "GPUImageAlphaBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageAlphaBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform lowp float mixturePercent; + + void main() + { + lowp 
vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(mix(textureColor.rgb, textureColor2.rgb, textureColor2.a * mixturePercent), textureColor.a); + } +); +#else +NSString *const kGPUImageAlphaBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform float mixturePercent; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(mix(textureColor.rgb, textureColor2.rgb, textureColor2.a * mixturePercent), textureColor.a); + } +); +#endif + +@implementation GPUImageAlphaBlendFilter + +@synthesize mix = _mix; + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageAlphaBlendFragmentShaderString])) + { + return nil; + } + + mixUniform = [filterProgram uniformIndex:@"mixturePercent"]; + self.mix = 0.5; + + return self; +} + + +#pragma mark - +#pragma mark Accessors + +- (void)setMix:(CGFloat)newValue; +{ + _mix = newValue; + + [self setFloat:_mix forUniform:mixUniform program:filterProgram]; +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageAmatorkaFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageAmatorkaFilter.h new file mode 100755 index 00000000..1dbe096d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageAmatorkaFilter.h @@ -0,0 +1,17 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImagePicture; + +/** A photo filter based on Photoshop action by Amatorka + http://amatorka.deviantart.com/art/Amatorka-Action-2-121069631 + */ + +// Note: If you want to use this effect you have to add lookup_amatorka.png +// from Resources folder to your application bundle. 
+ +@interface GPUImageAmatorkaFilter : GPUImageFilterGroup +{ + GPUImagePicture *lookupImageSource; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageAmatorkaFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageAmatorkaFilter.m new file mode 100755 index 00000000..1ab3ec4e --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageAmatorkaFilter.m @@ -0,0 +1,38 @@ +#import "GPUImageAmatorkaFilter.h" +#import "GPUImagePicture.h" +#import "GPUImageLookupFilter.h" + +@implementation GPUImageAmatorkaFilter + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + UIImage *image = [UIImage imageNamed:@"lookup_amatorka.png"]; +#else + NSImage *image = [NSImage imageNamed:@"lookup_amatorka.png"]; +#endif + + NSAssert(image, @"To use GPUImageAmatorkaFilter you need to add lookup_amatorka.png from GPUImage/framework/Resources to your application bundle."); + + lookupImageSource = [[GPUImagePicture alloc] initWithImage:image]; + GPUImageLookupFilter *lookupFilter = [[GPUImageLookupFilter alloc] init]; + [self addFilter:lookupFilter]; + + [lookupImageSource addTarget:lookupFilter atTextureLocation:1]; + [lookupImageSource processImage]; + + self.initialFilters = [NSArray arrayWithObjects:lookupFilter, nil]; + self.terminalFilter = lookupFilter; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageAverageColor.h b/LFLiveKit/Vendor/GPUImage/GPUImageAverageColor.h new file mode 100644 index 00000000..e3d957d0 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageAverageColor.h @@ -0,0 +1,20 @@ +#import "GPUImageFilter.h" + +extern NSString *const kGPUImageColorAveragingVertexShaderString; + +@interface GPUImageAverageColor : GPUImageFilter +{ + GLint texelWidthUniform, texelHeightUniform; + + NSUInteger numberOfStages; + + GLubyte *rawImagePixels; + CGSize finalStageSize; +} + +// This block is called on the completion of color averaging for a frame 
+@property(nonatomic, copy) void(^colorAverageProcessingFinishedBlock)(CGFloat redComponent, CGFloat greenComponent, CGFloat blueComponent, CGFloat alphaComponent, CMTime frameTime); + +- (void)extractAverageColorAtFrameTime:(CMTime)frameTime; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageAverageColor.m b/LFLiveKit/Vendor/GPUImage/GPUImageAverageColor.m new file mode 100644 index 00000000..e2dd7e73 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageAverageColor.m @@ -0,0 +1,204 @@ +#import "GPUImageAverageColor.h" + +NSString *const kGPUImageColorAveragingVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + + uniform float texelWidth; + uniform float texelHeight; + + varying vec2 upperLeftInputTextureCoordinate; + varying vec2 upperRightInputTextureCoordinate; + varying vec2 lowerLeftInputTextureCoordinate; + varying vec2 lowerRightInputTextureCoordinate; + + void main() + { + gl_Position = position; + + upperLeftInputTextureCoordinate = inputTextureCoordinate.xy + vec2(-texelWidth, -texelHeight); + upperRightInputTextureCoordinate = inputTextureCoordinate.xy + vec2(texelWidth, -texelHeight); + lowerLeftInputTextureCoordinate = inputTextureCoordinate.xy + vec2(-texelWidth, texelHeight); + lowerRightInputTextureCoordinate = inputTextureCoordinate.xy + vec2(texelWidth, texelHeight); + } + ); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageColorAveragingFragmentShaderString = SHADER_STRING +( + precision highp float; + + uniform sampler2D inputImageTexture; + + varying highp vec2 outputTextureCoordinate; + + varying highp vec2 upperLeftInputTextureCoordinate; + varying highp vec2 upperRightInputTextureCoordinate; + varying highp vec2 lowerLeftInputTextureCoordinate; + varying highp vec2 lowerRightInputTextureCoordinate; + + void main() + { + highp vec4 upperLeftColor = texture2D(inputImageTexture, upperLeftInputTextureCoordinate); + highp vec4 upperRightColor = 
texture2D(inputImageTexture, upperRightInputTextureCoordinate); + highp vec4 lowerLeftColor = texture2D(inputImageTexture, lowerLeftInputTextureCoordinate); + highp vec4 lowerRightColor = texture2D(inputImageTexture, lowerRightInputTextureCoordinate); + + gl_FragColor = 0.25 * (upperLeftColor + upperRightColor + lowerLeftColor + lowerRightColor); + } +); +#else +NSString *const kGPUImageColorAveragingFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + varying vec2 outputTextureCoordinate; + + varying vec2 upperLeftInputTextureCoordinate; + varying vec2 upperRightInputTextureCoordinate; + varying vec2 lowerLeftInputTextureCoordinate; + varying vec2 lowerRightInputTextureCoordinate; + + void main() + { + vec4 upperLeftColor = texture2D(inputImageTexture, upperLeftInputTextureCoordinate); + vec4 upperRightColor = texture2D(inputImageTexture, upperRightInputTextureCoordinate); + vec4 lowerLeftColor = texture2D(inputImageTexture, lowerLeftInputTextureCoordinate); + vec4 lowerRightColor = texture2D(inputImageTexture, lowerRightInputTextureCoordinate); + + gl_FragColor = 0.25 * (upperLeftColor + upperRightColor + lowerLeftColor + lowerRightColor); + } +); +#endif + +@implementation GPUImageAverageColor + +@synthesize colorAverageProcessingFinishedBlock = _colorAverageProcessingFinishedBlock; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithVertexShaderFromString:kGPUImageColorAveragingVertexShaderString fragmentShaderFromString:kGPUImageColorAveragingFragmentShaderString])) + { + return nil; + } + + texelWidthUniform = [filterProgram uniformIndex:@"texelWidth"]; + texelHeightUniform = [filterProgram uniformIndex:@"texelHeight"]; + finalStageSize = CGSizeMake(1.0, 1.0); + + __unsafe_unretained GPUImageAverageColor *weakSelf = self; + [self setFrameProcessingCompletionBlock:^(GPUImageOutput *filter, CMTime frameTime) { + [weakSelf extractAverageColorAtFrameTime:frameTime]; + }]; + + 
return self; +} + +- (void)dealloc; +{ + if (rawImagePixels != NULL) + { + free(rawImagePixels); + } +} + +#pragma mark - +#pragma mark Managing the display FBOs + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + if (self.preventRendering) + { + [firstInputFramebuffer unlock]; + return; + } + + outputFramebuffer = nil; + [GPUImageContext setActiveShaderProgram:filterProgram]; + + glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + glVertexAttribPointer(filterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); + + GLuint currentTexture = [firstInputFramebuffer texture]; + + NSUInteger numberOfReductionsInX = floor(log(inputTextureSize.width) / log(4.0)); + NSUInteger numberOfReductionsInY = floor(log(inputTextureSize.height) / log(4.0)); + NSUInteger reductionsToHitSideLimit = MIN(numberOfReductionsInX, numberOfReductionsInY); + for (NSUInteger currentReduction = 0; currentReduction < reductionsToHitSideLimit; currentReduction++) + { + CGSize currentStageSize = CGSizeMake(floor(inputTextureSize.width / pow(4.0, currentReduction + 1.0)), floor(inputTextureSize.height / pow(4.0, currentReduction + 1.0))); + + [outputFramebuffer unlock]; + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:currentStageSize textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT); + + glActiveTexture(GL_TEXTURE2); + glBindTexture(GL_TEXTURE_2D, currentTexture); + + glUniform1i(filterInputTextureUniform, 2); + + glUniform1f(texelWidthUniform, 0.25 / currentStageSize.width); + glUniform1f(texelHeightUniform, 0.25 / currentStageSize.height); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + + currentTexture = [outputFramebuffer texture]; + finalStageSize = currentStageSize; + } + + [firstInputFramebuffer unlock]; +} + +- 
(void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + inputRotation = kGPUImageNoRotation; +} + +- (void)extractAverageColorAtFrameTime:(CMTime)frameTime; +{ + runSynchronouslyOnVideoProcessingQueue(^{ + // we need a normal color texture for averaging the color values + NSAssert(self.outputTextureOptions.internalFormat == GL_RGBA, @"The output texture internal format for this filter must be GL_RGBA."); + NSAssert(self.outputTextureOptions.type == GL_UNSIGNED_BYTE, @"The type of the output texture of this filter must be GL_UNSIGNED_BYTE."); + + NSUInteger totalNumberOfPixels = round(finalStageSize.width * finalStageSize.height); + + if (rawImagePixels == NULL) + { + rawImagePixels = (GLubyte *)malloc(totalNumberOfPixels * 4); + } + + [GPUImageContext useImageProcessingContext]; + [outputFramebuffer activateFramebuffer]; + glReadPixels(0, 0, (int)finalStageSize.width, (int)finalStageSize.height, GL_RGBA, GL_UNSIGNED_BYTE, rawImagePixels); + + NSUInteger redTotal = 0, greenTotal = 0, blueTotal = 0, alphaTotal = 0; + NSUInteger byteIndex = 0; + for (NSUInteger currentPixel = 0; currentPixel < totalNumberOfPixels; currentPixel++) + { + redTotal += rawImagePixels[byteIndex++]; + greenTotal += rawImagePixels[byteIndex++]; + blueTotal += rawImagePixels[byteIndex++]; + alphaTotal += rawImagePixels[byteIndex++]; + } + + CGFloat normalizedRedTotal = (CGFloat)redTotal / (CGFloat)totalNumberOfPixels / 255.0; + CGFloat normalizedGreenTotal = (CGFloat)greenTotal / (CGFloat)totalNumberOfPixels / 255.0; + CGFloat normalizedBlueTotal = (CGFloat)blueTotal / (CGFloat)totalNumberOfPixels / 255.0; + CGFloat normalizedAlphaTotal = (CGFloat)alphaTotal / (CGFloat)totalNumberOfPixels / 255.0; + + if (_colorAverageProcessingFinishedBlock != NULL) + { + _colorAverageProcessingFinishedBlock(normalizedRedTotal, normalizedGreenTotal, normalizedBlueTotal, normalizedAlphaTotal, frameTime); + } + }); +} + +@end diff --git 
a/LFLiveKit/Vendor/GPUImage/GPUImageAverageLuminanceThresholdFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageAverageLuminanceThresholdFilter.h new file mode 100644 index 00000000..7f1ae464 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageAverageLuminanceThresholdFilter.h @@ -0,0 +1,8 @@ +#import "GPUImageFilterGroup.h" + +@interface GPUImageAverageLuminanceThresholdFilter : GPUImageFilterGroup + +// This is multiplied by the continually calculated average image luminosity to arrive at the final threshold. Default is 1.0. +@property(readwrite, nonatomic) CGFloat thresholdMultiplier; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageAverageLuminanceThresholdFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageAverageLuminanceThresholdFilter.m new file mode 100644 index 00000000..eb2796f4 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageAverageLuminanceThresholdFilter.m @@ -0,0 +1,47 @@ +#import "GPUImageAverageLuminanceThresholdFilter.h" +#import "GPUImageLuminosity.h" +#import "GPUImageLuminanceThresholdFilter.h" + +@interface GPUImageAverageLuminanceThresholdFilter() +{ + GPUImageLuminosity *luminosityFilter; + GPUImageLuminanceThresholdFilter *luminanceThresholdFilter; +} +@end + +@implementation GPUImageAverageLuminanceThresholdFilter + +@synthesize thresholdMultiplier = _thresholdMultiplier; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + self.thresholdMultiplier = 1.0; + + luminosityFilter = [[GPUImageLuminosity alloc] init]; + [self addFilter:luminosityFilter]; + + luminanceThresholdFilter = [[GPUImageLuminanceThresholdFilter alloc] init]; + [self addFilter:luminanceThresholdFilter]; + + __unsafe_unretained GPUImageAverageLuminanceThresholdFilter *weakSelf = self; + __unsafe_unretained GPUImageLuminanceThresholdFilter *weakThreshold = luminanceThresholdFilter; + + [luminosityFilter setLuminosityProcessingFinishedBlock:^(CGFloat luminosity, CMTime frameTime) { + 
weakThreshold.threshold = luminosity * weakSelf.thresholdMultiplier; + }]; + + self.initialFilters = [NSArray arrayWithObjects:luminosityFilter, luminanceThresholdFilter, nil]; + self.terminalFilter = luminanceThresholdFilter; + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageBilateralFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageBilateralFilter.h new file mode 100644 index 00000000..6b736ccf --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageBilateralFilter.h @@ -0,0 +1,10 @@ +#import "GPUImageGaussianBlurFilter.h" + +@interface GPUImageBilateralFilter : GPUImageGaussianBlurFilter +{ + CGFloat firstDistanceNormalizationFactorUniform; + CGFloat secondDistanceNormalizationFactorUniform; +} +// A normalization factor for the distance between central color and sample color. +@property(nonatomic, readwrite) CGFloat distanceNormalizationFactor; +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageBilateralFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageBilateralFilter.m new file mode 100644 index 00000000..c2a8c867 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageBilateralFilter.m @@ -0,0 +1,231 @@ +#import "GPUImageBilateralFilter.h" + +NSString *const kGPUImageBilateralBlurVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + + const int GAUSSIAN_SAMPLES = 9; + + uniform float texelWidthOffset; + uniform float texelHeightOffset; + + varying vec2 textureCoordinate; + varying vec2 blurCoordinates[GAUSSIAN_SAMPLES]; + + void main() + { + gl_Position = position; + textureCoordinate = inputTextureCoordinate.xy; + + // Calculate the positions for the blur + int multiplier = 0; + vec2 blurStep; + vec2 singleStepOffset = vec2(texelWidthOffset, texelHeightOffset); + + for (int i = 0; i < GAUSSIAN_SAMPLES; i++) + { + multiplier = (i - ((GAUSSIAN_SAMPLES - 1) / 2)); + // Blur in x (horizontal) + blurStep = float(multiplier) * singleStepOffset; + blurCoordinates[i] = inputTextureCoordinate.xy + 
blurStep; + } + } +); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageBilateralFilterFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + const lowp int GAUSSIAN_SAMPLES = 9; + + varying highp vec2 textureCoordinate; + varying highp vec2 blurCoordinates[GAUSSIAN_SAMPLES]; + + uniform mediump float distanceNormalizationFactor; + + void main() + { + lowp vec4 centralColor; + lowp float gaussianWeightTotal; + lowp vec4 sum; + lowp vec4 sampleColor; + lowp float distanceFromCentralColor; + lowp float gaussianWeight; + + centralColor = texture2D(inputImageTexture, blurCoordinates[4]); + gaussianWeightTotal = 0.18; + sum = centralColor * 0.18; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[0]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.05 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[1]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[2]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.12 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[3]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.15 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = 
texture2D(inputImageTexture, blurCoordinates[5]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.15 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[6]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.12 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[7]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[8]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.05 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + gl_FragColor = sum / gaussianWeightTotal; + } +); +#else +NSString *const kGPUImageBilateralFilterFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + const int GAUSSIAN_SAMPLES = 9; + + varying vec2 textureCoordinate; + varying vec2 blurCoordinates[GAUSSIAN_SAMPLES]; + + uniform float distanceNormalizationFactor; + + void main() + { + vec4 centralColor; + float gaussianWeightTotal; + vec4 sum; + vec4 sampleColor; + float distanceFromCentralColor; + float gaussianWeight; + + centralColor = texture2D(inputImageTexture, blurCoordinates[4]); + gaussianWeightTotal = 0.18; + sum = centralColor * 0.18; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[0]); + distanceFromCentralColor = min(distance(centralColor, 
sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.05 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[1]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[2]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.12 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[3]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.15 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[5]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.15 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[6]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.12 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[7]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor); + 
gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + sampleColor = texture2D(inputImageTexture, blurCoordinates[8]); + distanceFromCentralColor = min(distance(centralColor, sampleColor) * distanceNormalizationFactor, 1.0); + gaussianWeight = 0.05 * (1.0 - distanceFromCentralColor); + gaussianWeightTotal += gaussianWeight; + sum += sampleColor * gaussianWeight; + + gl_FragColor = sum / gaussianWeightTotal; + } +); +#endif + +@implementation GPUImageBilateralFilter + +@synthesize distanceNormalizationFactor = _distanceNormalizationFactor; + +- (id)init; +{ + + if (!(self = [super initWithFirstStageVertexShaderFromString:kGPUImageBilateralBlurVertexShaderString + firstStageFragmentShaderFromString:kGPUImageBilateralFilterFragmentShaderString + secondStageVertexShaderFromString:kGPUImageBilateralBlurVertexShaderString + secondStageFragmentShaderFromString:kGPUImageBilateralFilterFragmentShaderString])) { + return nil; + } + + firstDistanceNormalizationFactorUniform = [filterProgram uniformIndex:@"distanceNormalizationFactor"]; + secondDistanceNormalizationFactorUniform = [filterProgram uniformIndex:@"distanceNormalizationFactor"]; + + self.texelSpacingMultiplier = 4.0; + self.distanceNormalizationFactor = 8.0; + + + return self; +} + + +#pragma mark - +#pragma mark Accessors + +- (void)setDistanceNormalizationFactor:(CGFloat)newValue +{ + _distanceNormalizationFactor = newValue; + + [self setFloat:newValue + forUniform:firstDistanceNormalizationFactorUniform + program:filterProgram]; + + [self setFloat:newValue + forUniform:secondDistanceNormalizationFactorUniform + program:secondFilterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageBoxBlurFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageBoxBlurFilter.h new file mode 100755 index 00000000..3fd880bf --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageBoxBlurFilter.h @@ -0,0 +1,7 @@ +#import "GPUImageGaussianBlurFilter.h" + +/** A hardware-accelerated box blur of an image + 
*/ +@interface GPUImageBoxBlurFilter : GPUImageGaussianBlurFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageBoxBlurFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageBoxBlurFilter.m new file mode 100755 index 00000000..5a49385b --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageBoxBlurFilter.m @@ -0,0 +1,178 @@ +#import "GPUImageBoxBlurFilter.h" + + +@implementation GPUImageBoxBlurFilter + ++ (NSString *)vertexShaderForOptimizedBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; +{ + if (blurRadius < 1) + { + return kGPUImageVertexShaderString; + } + + // From these weights we calculate the offsets to read interpolated values from + NSUInteger numberOfOptimizedOffsets = MIN(blurRadius / 2 + (blurRadius % 2), 7); + + NSMutableString *shaderString = [[NSMutableString alloc] init]; + // Header + [shaderString appendFormat:@"\ + attribute vec4 position;\n\ + attribute vec4 inputTextureCoordinate;\n\ + \n\ + uniform float texelWidthOffset;\n\ + uniform float texelHeightOffset;\n\ + \n\ + varying vec2 blurCoordinates[%lu];\n\ + \n\ + void main()\n\ + {\n\ + gl_Position = position;\n\ + \n\ + vec2 singleStepOffset = vec2(texelWidthOffset, texelHeightOffset);\n", (unsigned long)(1 + (numberOfOptimizedOffsets * 2))]; + + // Inner offset loop + [shaderString appendString:@"blurCoordinates[0] = inputTextureCoordinate.xy;\n"]; + for (NSUInteger currentOptimizedOffset = 0; currentOptimizedOffset < numberOfOptimizedOffsets; currentOptimizedOffset++) + { + GLfloat optimizedOffset = (GLfloat)(currentOptimizedOffset * 2) + 1.5; + + [shaderString appendFormat:@"\ + blurCoordinates[%lu] = inputTextureCoordinate.xy + singleStepOffset * %f;\n\ + blurCoordinates[%lu] = inputTextureCoordinate.xy - singleStepOffset * %f;\n", (unsigned long)((currentOptimizedOffset * 2) + 1), optimizedOffset, (unsigned long)((currentOptimizedOffset * 2) + 2), optimizedOffset]; + } + + // Footer + [shaderString appendString:@"}\n"]; + + return shaderString; +} + ++ (NSString 
*)fragmentShaderForOptimizedBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; +{ + if (blurRadius < 1) + { + return kGPUImagePassthroughFragmentShaderString; + } + + NSUInteger numberOfOptimizedOffsets = MIN(blurRadius / 2 + (blurRadius % 2), 7); + NSUInteger trueNumberOfOptimizedOffsets = blurRadius / 2 + (blurRadius % 2); + + NSMutableString *shaderString = [[NSMutableString alloc] init]; + + // Header +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [shaderString appendFormat:@"\ + uniform sampler2D inputImageTexture;\n\ + uniform highp float texelWidthOffset;\n\ + uniform highp float texelHeightOffset;\n\ + \n\ + varying highp vec2 blurCoordinates[%lu];\n\ + \n\ + void main()\n\ + {\n\ + lowp vec4 sum = vec4(0.0);\n", (unsigned long)(1 + (numberOfOptimizedOffsets * 2)) ]; +#else + [shaderString appendFormat:@"\ + uniform sampler2D inputImageTexture;\n\ + uniform float texelWidthOffset;\n\ + uniform float texelHeightOffset;\n\ + \n\ + varying vec2 blurCoordinates[%lu];\n\ + \n\ + void main()\n\ + {\n\ + vec4 sum = vec4(0.0);\n", 1 + (numberOfOptimizedOffsets * 2) ]; +#endif + + GLfloat boxWeight = 1.0 / (GLfloat)((blurRadius * 2) + 1); + + // Inner texture loop + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[0]) * %f;\n", boxWeight]; + + for (NSUInteger currentBlurCoordinateIndex = 0; currentBlurCoordinateIndex < numberOfOptimizedOffsets; currentBlurCoordinateIndex++) + { + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[%lu]) * %f;\n", (unsigned long)((currentBlurCoordinateIndex * 2) + 1), boxWeight * 2.0]; + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[%lu]) * %f;\n", (unsigned long)((currentBlurCoordinateIndex * 2) + 2), boxWeight * 2.0]; + } + + // If the number of required samples exceeds the amount we can pass in via varyings, we have to do dependent texture reads in the fragment shader + if (trueNumberOfOptimizedOffsets > 
numberOfOptimizedOffsets) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [shaderString appendString:@"highp vec2 singleStepOffset = vec2(texelWidthOffset, texelHeightOffset);\n"]; +#else + [shaderString appendString:@"vec2 singleStepOffset = vec2(texelWidthOffset, texelHeightOffset);\n"]; +#endif + + for (NSUInteger currentOverlowTextureRead = numberOfOptimizedOffsets; currentOverlowTextureRead < trueNumberOfOptimizedOffsets; currentOverlowTextureRead++) + { + GLfloat optimizedOffset = (GLfloat)(currentOverlowTextureRead * 2) + 1.5; + + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[0] + singleStepOffset * %f) * %f;\n", optimizedOffset, boxWeight * 2.0]; + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[0] - singleStepOffset * %f) * %f;\n", optimizedOffset, boxWeight * 2.0]; + } + } + + // Footer + [shaderString appendString:@"\ + gl_FragColor = sum;\n\ + }\n"]; + + return shaderString; +} + +- (void)setupFilterForSize:(CGSize)filterFrameSize; +{ + [super setupFilterForSize:filterFrameSize]; + + if (shouldResizeBlurRadiusWithImageSize == YES) + { + + } +} + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + // NSString *currentGaussianBlurVertexShader = [GPUImageGaussianBlurFilter vertexShaderForStandardGaussianOfRadius:4 sigma:2.0]; + // NSString *currentGaussianBlurFragmentShader = [GPUImageGaussianBlurFilter fragmentShaderForStandardGaussianOfRadius:4 sigma:2.0]; + + NSString *currentBoxBlurVertexShader = [[self class] vertexShaderForOptimizedBlurOfRadius:4 sigma:0.0]; + NSString *currentBoxBlurFragmentShader = [[self class] fragmentShaderForOptimizedBlurOfRadius:4 sigma:0.0]; + + if (!(self = [super initWithFirstStageVertexShaderFromString:currentBoxBlurVertexShader firstStageFragmentShaderFromString:currentBoxBlurFragmentShader secondStageVertexShaderFromString:currentBoxBlurVertexShader secondStageFragmentShaderFromString:currentBoxBlurFragmentShader])) + { + 
return nil; + } + + _blurRadiusInPixels = 4.0; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setBlurRadiusInPixels:(CGFloat)newValue; +{ + CGFloat newBlurRadius = round(round(newValue / 2.0) * 2.0); // For now, only do even radii + + if (newBlurRadius != _blurRadiusInPixels) + { + _blurRadiusInPixels = newBlurRadius; + + NSString *newGaussianBlurVertexShader = [[self class] vertexShaderForOptimizedBlurOfRadius:_blurRadiusInPixels sigma:0.0]; + NSString *newGaussianBlurFragmentShader = [[self class] fragmentShaderForOptimizedBlurOfRadius:_blurRadiusInPixels sigma:0.0]; + + // NSLog(@"Optimized vertex shader: \n%@", newGaussianBlurVertexShader); + // NSLog(@"Optimized fragment shader: \n%@", newGaussianBlurFragmentShader); + // + [self switchToVertexShader:newGaussianBlurVertexShader fragmentShader:newGaussianBlurFragmentShader]; + } + shouldResizeBlurRadiusWithImageSize = NO; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageBrightnessFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageBrightnessFilter.h new file mode 100755 index 00000000..046473b9 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageBrightnessFilter.h @@ -0,0 +1,11 @@ +#import "GPUImageFilter.h" + +@interface GPUImageBrightnessFilter : GPUImageFilter +{ + GLint brightnessUniform; +} + +// Brightness ranges from -1.0 to 1.0, with 0.0 as the normal level +@property(readwrite, nonatomic) CGFloat brightness; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageBrightnessFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageBrightnessFilter.m new file mode 100755 index 00000000..7e526d85 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageBrightnessFilter.m @@ -0,0 +1,66 @@ +#import "GPUImageBrightnessFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageBrightnessFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform lowp float brightness; + + void main() + { + lowp 
vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4((textureColor.rgb + vec3(brightness)), textureColor.w); + } +); +#else +NSString *const kGPUImageBrightnessFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float brightness; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4((textureColor.rgb + vec3(brightness)), textureColor.w); + } + ); +#endif + +@implementation GPUImageBrightnessFilter + +@synthesize brightness = _brightness; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageBrightnessFragmentShaderString])) + { + return nil; + } + + brightnessUniform = [filterProgram uniformIndex:@"brightness"]; + self.brightness = 0.0; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setBrightness:(CGFloat)newValue; +{ + _brightness = newValue; + + [self setFloat:_brightness forUniform:brightnessUniform program:filterProgram]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageBuffer.h b/LFLiveKit/Vendor/GPUImage/GPUImageBuffer.h new file mode 100644 index 00000000..caf09c8d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageBuffer.h @@ -0,0 +1,10 @@ +#import "GPUImageFilter.h" + +@interface GPUImageBuffer : GPUImageFilter +{ + NSMutableArray *bufferedFramebuffers; +} + +@property(readwrite, nonatomic) NSUInteger bufferSize; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageBuffer.m b/LFLiveKit/Vendor/GPUImage/GPUImageBuffer.m new file mode 100644 index 00000000..c90d020a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageBuffer.m @@ -0,0 +1,112 @@ +#import "GPUImageBuffer.h" + +@interface GPUImageBuffer() + +@end + +@implementation GPUImageBuffer + +@synthesize bufferSize = _bufferSize; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ 
+ if (!(self = [self initWithFragmentShaderFromString:kGPUImagePassthroughFragmentShaderString])) + { + return nil; + } + + bufferedFramebuffers = [[NSMutableArray alloc] init]; +// [bufferedTextures addObject:[NSNumber numberWithInt:outputTexture]]; + _bufferSize = 1; + + return self; +} + +- (void)dealloc +{ + for (GPUImageFramebuffer *currentFramebuffer in bufferedFramebuffers) + { + [currentFramebuffer unlock]; + } +} + +#pragma mark - +#pragma mark GPUImageInput + +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +{ + if ([bufferedFramebuffers count] >= _bufferSize) + { + outputFramebuffer = [bufferedFramebuffers objectAtIndex:0]; + [bufferedFramebuffers removeObjectAtIndex:0]; + } + else + { + // Nothing yet in the buffer, so don't process further until the buffer is full + outputFramebuffer = firstInputFramebuffer; + [firstInputFramebuffer lock]; + } + + [bufferedFramebuffers addObject:firstInputFramebuffer]; + + // Need to pass along rotation information, as we're just holding on to buffered framebuffers and not rotating them ourselves + for (id currentTarget in targets) + { + if (currentTarget != self.targetToIgnoreForUpdates) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + NSInteger textureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + + [currentTarget setInputRotation:inputRotation atIndex:textureIndex]; + } + } + + // Let the downstream video elements see the previous frame from the buffer before rendering a new one into place + [self informTargetsAboutNewFrameAtTime:frameTime]; + +// [self renderToTextureWithVertices:imageVertices textureCoordinates:[[self class] textureCoordinatesForRotation:inputRotation]]; +} + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + // No need to render to another texture anymore, since we'll be hanging on to the textures in our buffer +} + +#pragma mark - +#pragma 
mark Accessors + +- (void)setBufferSize:(NSUInteger)newValue; +{ + if ( (newValue == _bufferSize) || (newValue < 1) ) + { + return; + } + + if (newValue > _bufferSize) + { + NSUInteger texturesToAdd = newValue - _bufferSize; + for (NSUInteger currentTextureIndex = 0; currentTextureIndex < texturesToAdd; currentTextureIndex++) + { + // TODO: Deal with the growth of the size of the buffer by rotating framebuffers, no textures + } + } + else + { + NSUInteger texturesToRemove = _bufferSize - newValue; + for (NSUInteger currentTextureIndex = 0; currentTextureIndex < texturesToRemove; currentTextureIndex++) + { + GPUImageFramebuffer *lastFramebuffer = [bufferedFramebuffers lastObject]; + [bufferedFramebuffers removeObjectAtIndex:([bufferedFramebuffers count] - 1)]; + + [lastFramebuffer unlock]; + lastFramebuffer = nil; + } + } + + _bufferSize = newValue; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageBulgeDistortionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageBulgeDistortionFilter.h new file mode 100755 index 00000000..d416e536 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageBulgeDistortionFilter.h @@ -0,0 +1,16 @@ +#import "GPUImageFilter.h" + +/// Creates a bulge distortion on the image +@interface GPUImageBulgeDistortionFilter : GPUImageFilter +{ + GLint aspectRatioUniform, radiusUniform, centerUniform, scaleUniform; +} + +/// The center about which to apply the distortion, with a default of (0.5, 0.5) +@property(readwrite, nonatomic) CGPoint center; +/// The radius of the distortion, ranging from 0.0 to 1.0, with a default of 0.25 +@property(readwrite, nonatomic) CGFloat radius; +/// The amount of distortion to apply, from -1.0 to 1.0, with a default of 0.5 +@property(readwrite, nonatomic) CGFloat scale; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageBulgeDistortionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageBulgeDistortionFilter.m new file mode 100755 index 00000000..ad002678 --- /dev/null +++ 
b/LFLiveKit/Vendor/GPUImage/GPUImageBulgeDistortionFilter.m @@ -0,0 +1,174 @@ +#import "GPUImageBulgeDistortionFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageBulgeDistortionFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp float aspectRatio; + uniform highp vec2 center; + uniform highp float radius; + uniform highp float scale; + + void main() + { + highp vec2 textureCoordinateToUse = vec2(textureCoordinate.x, ((textureCoordinate.y - center.y) * aspectRatio) + center.y); + highp float dist = distance(center, textureCoordinateToUse); + textureCoordinateToUse = textureCoordinate; + + if (dist < radius) + { + textureCoordinateToUse -= center; + highp float percent = 1.0 - ((radius - dist) / radius) * scale; + percent = percent * percent; + + textureCoordinateToUse = textureCoordinateToUse * percent; + textureCoordinateToUse += center; + } + + gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse ); + } +); +#else +NSString *const kGPUImageBulgeDistortionFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform float aspectRatio; + uniform vec2 center; + uniform float radius; + uniform float scale; + + void main() + { + vec2 textureCoordinateToUse = vec2(textureCoordinate.x, ((textureCoordinate.y - center.y) * aspectRatio) + center.y); + float dist = distance(center, textureCoordinateToUse); + textureCoordinateToUse = textureCoordinate; + + if (dist < radius) + { + textureCoordinateToUse -= center; + float percent = 1.0 - ((radius - dist) / radius) * scale; + percent = percent * percent; + + textureCoordinateToUse = textureCoordinateToUse * percent; + textureCoordinateToUse += center; + } + + gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse ); + } +); +#endif + + +@interface GPUImageBulgeDistortionFilter () + +- (void)adjustAspectRatio; + 
+@property (readwrite, nonatomic) CGFloat aspectRatio; + +@end + +@implementation GPUImageBulgeDistortionFilter + +@synthesize aspectRatio = _aspectRatio; +@synthesize center = _center; +@synthesize radius = _radius; +@synthesize scale = _scale; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageBulgeDistortionFragmentShaderString])) + { + return nil; + } + + aspectRatioUniform = [filterProgram uniformIndex:@"aspectRatio"]; + radiusUniform = [filterProgram uniformIndex:@"radius"]; + scaleUniform = [filterProgram uniformIndex:@"scale"]; + centerUniform = [filterProgram uniformIndex:@"center"]; + + self.radius = 0.25; + self.scale = 0.5; + self.center = CGPointMake(0.5, 0.5); + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)adjustAspectRatio; +{ + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + [self setAspectRatio:(inputTextureSize.width / inputTextureSize.height)]; + } + else + { + [self setAspectRatio:(inputTextureSize.height / inputTextureSize.width)]; + } +} + +- (void)forceProcessingAtSize:(CGSize)frameSize; +{ + [super forceProcessingAtSize:frameSize]; + [self adjustAspectRatio]; +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + CGSize oldInputSize = inputTextureSize; + [super setInputSize:newSize atIndex:textureIndex]; + + if ( (!CGSizeEqualToSize(oldInputSize, inputTextureSize)) && (!CGSizeEqualToSize(newSize, CGSizeZero)) ) + { + [self adjustAspectRatio]; + } +} + +- (void)setAspectRatio:(CGFloat)newValue; +{ + _aspectRatio = newValue; + + [self setFloat:_aspectRatio forUniform:aspectRatioUniform program:filterProgram]; +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + [super setInputRotation:newInputRotation atIndex:textureIndex]; + [self setCenter:self.center]; + [self adjustAspectRatio]; +} + +- (void)setRadius:(CGFloat)newValue; +{ + 
_radius = newValue; + + [self setFloat:_radius forUniform:radiusUniform program:filterProgram]; +} + +- (void)setScale:(CGFloat)newValue; +{ + _scale = newValue; + + [self setFloat:_scale forUniform:scaleUniform program:filterProgram]; +} + +- (void)setCenter:(CGPoint)newValue; +{ + _center = newValue; + + CGPoint rotatedPoint = [self rotatedPoint:_center forRotation:inputRotation]; + + [self setPoint:rotatedPoint forUniform:centerUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageCGAColorspaceFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageCGAColorspaceFilter.h new file mode 100755 index 00000000..4f97804b --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageCGAColorspaceFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageFilter.h" + +@interface GPUImageCGAColorspaceFilter : GPUImageFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageCGAColorspaceFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageCGAColorspaceFilter.m new file mode 100755 index 00000000..eee939a0 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageCGAColorspaceFilter.m @@ -0,0 +1,113 @@ +// +// GPUImageCGAColorspaceFilter.m +// + +#import "GPUImageCGAColorspaceFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageCGAColorspaceFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + highp vec2 sampleDivisor = vec2(1.0 / 200.0, 1.0 / 320.0); + //highp vec4 colorDivisor = vec4(colorDepth); + + highp vec2 samplePos = textureCoordinate - mod(textureCoordinate, sampleDivisor); + highp vec4 color = texture2D(inputImageTexture, samplePos ); + + //gl_FragColor = texture2D(inputImageTexture, samplePos ); + mediump vec4 colorCyan = vec4(85.0 / 255.0, 1.0, 1.0, 1.0); + mediump vec4 colorMagenta = vec4(1.0, 85.0 / 255.0, 1.0, 1.0); + mediump vec4 colorWhite = vec4(1.0, 1.0, 1.0, 1.0); + mediump vec4 colorBlack = vec4(0.0, 0.0, 0.0, 1.0); + + mediump vec4 
endColor; + highp float blackDistance = distance(color, colorBlack); + highp float whiteDistance = distance(color, colorWhite); + highp float magentaDistance = distance(color, colorMagenta); + highp float cyanDistance = distance(color, colorCyan); + + mediump vec4 finalColor; + + highp float colorDistance = min(magentaDistance, cyanDistance); + colorDistance = min(colorDistance, whiteDistance); + colorDistance = min(colorDistance, blackDistance); + + if (colorDistance == blackDistance) { + finalColor = colorBlack; + } else if (colorDistance == whiteDistance) { + finalColor = colorWhite; + } else if (colorDistance == cyanDistance) { + finalColor = colorCyan; + } else { + finalColor = colorMagenta; + } + + gl_FragColor = finalColor; + } +); +#else +NSString *const kGPUImageCGAColorspaceFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + vec2 sampleDivisor = vec2(1.0 / 200.0, 1.0 / 320.0); + //highp vec4 colorDivisor = vec4(colorDepth); + + vec2 samplePos = textureCoordinate - mod(textureCoordinate, sampleDivisor); + vec4 color = texture2D(inputImageTexture, samplePos ); + + //gl_FragColor = texture2D(inputImageTexture, samplePos ); + vec4 colorCyan = vec4(85.0 / 255.0, 1.0, 1.0, 1.0); + vec4 colorMagenta = vec4(1.0, 85.0 / 255.0, 1.0, 1.0); + vec4 colorWhite = vec4(1.0, 1.0, 1.0, 1.0); + vec4 colorBlack = vec4(0.0, 0.0, 0.0, 1.0); + + vec4 endColor; + float blackDistance = distance(color, colorBlack); + float whiteDistance = distance(color, colorWhite); + float magentaDistance = distance(color, colorMagenta); + float cyanDistance = distance(color, colorCyan); + + vec4 finalColor; + + float colorDistance = min(magentaDistance, cyanDistance); + colorDistance = min(colorDistance, whiteDistance); + colorDistance = min(colorDistance, blackDistance); + + if (colorDistance == blackDistance) { + finalColor = colorBlack; + } else if (colorDistance == whiteDistance) { + finalColor = 
colorWhite; + } else if (colorDistance == cyanDistance) { + finalColor = colorCyan; + } else { + finalColor = colorMagenta; + } + + gl_FragColor = finalColor; + } +); +#endif + +@implementation GPUImageCGAColorspaceFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageCGAColorspaceFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageCannyEdgeDetectionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageCannyEdgeDetectionFilter.h new file mode 100755 index 00000000..53c58502 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageCannyEdgeDetectionFilter.h @@ -0,0 +1,62 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImageGrayscaleFilter; +@class GPUImageSingleComponentGaussianBlurFilter; +@class GPUImageDirectionalSobelEdgeDetectionFilter; +@class GPUImageDirectionalNonMaximumSuppressionFilter; +@class GPUImageWeakPixelInclusionFilter; + +/** This applies the edge detection process described by John Canny in + + Canny, J., A Computational Approach To Edge Detection, IEEE Trans. Pattern Analysis and Machine Intelligence, 8(6):679–698, 1986. + + and implemented in OpenGL ES by + + A. Ensor, S. Hall. GPU-based Image Analysis on Mobile Devices. Proceedings of Image and Vision Computing New Zealand 2011. + + It starts with a conversion to luminance, followed by an accelerated 9-hit Gaussian blur. A Sobel operator is applied to obtain the overall + gradient strength in the blurred image, as well as the direction (in texture sampling steps) of the gradient. A non-maximum suppression filter + acts along the direction of the gradient, highlighting strong edges that pass the threshold and completely removing those that fail the lower + threshold. Finally, pixels from in-between these thresholds are either included in edges or rejected based on neighboring pixels. 
+ */ +@interface GPUImageCannyEdgeDetectionFilter : GPUImageFilterGroup +{ + GPUImageGrayscaleFilter *luminanceFilter; + GPUImageSingleComponentGaussianBlurFilter *blurFilter; + GPUImageDirectionalSobelEdgeDetectionFilter *edgeDetectionFilter; + GPUImageDirectionalNonMaximumSuppressionFilter *nonMaximumSuppressionFilter; + GPUImageWeakPixelInclusionFilter *weakPixelInclusionFilter; +} + +/** The image width and height factors tweak the appearance of the edges. + + These parameters affect the visibility of the detected edges + + By default, they match the inverse of the filter size in pixels + */ +@property(readwrite, nonatomic) CGFloat texelWidth; +/** The image width and height factors tweak the appearance of the edges. + + These parameters affect the visibility of the detected edges + + By default, they match the inverse of the filter size in pixels + */ +@property(readwrite, nonatomic) CGFloat texelHeight; + +/** The underlying blur radius for the Gaussian blur. Default is 2.0. + */ +@property (readwrite, nonatomic) CGFloat blurRadiusInPixels; + +/** The underlying blur texel spacing multiplier. Default is 1.0. + */ +@property (readwrite, nonatomic) CGFloat blurTexelSpacingMultiplier; + +/** Any edge with a gradient magnitude above this threshold will pass and show up in the final result. + */ +@property(readwrite, nonatomic) CGFloat upperThreshold; + +/** Any edge with a gradient magnitude below this threshold will fail and be removed from the final result. 
+ */ +@property(readwrite, nonatomic) CGFloat lowerThreshold; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageCannyEdgeDetectionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageCannyEdgeDetectionFilter.m new file mode 100755 index 00000000..7327d532 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageCannyEdgeDetectionFilter.m @@ -0,0 +1,125 @@ +#import "GPUImageCannyEdgeDetectionFilter.h" + +#import "GPUImageGrayscaleFilter.h" +#import "GPUImageDirectionalSobelEdgeDetectionFilter.h" +#import "GPUImageDirectionalNonMaximumSuppressionFilter.h" +#import "GPUImageWeakPixelInclusionFilter.h" +#import "GPUImageSingleComponentGaussianBlurFilter.h" + +@implementation GPUImageCannyEdgeDetectionFilter + +@synthesize upperThreshold; +@synthesize lowerThreshold; +@synthesize blurRadiusInPixels; +@synthesize blurTexelSpacingMultiplier; +@synthesize texelWidth; +@synthesize texelHeight; + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + // First pass: convert image to luminance + luminanceFilter = [[GPUImageGrayscaleFilter alloc] init]; + [self addFilter:luminanceFilter]; + + // Second pass: apply a variable Gaussian blur + blurFilter = [[GPUImageSingleComponentGaussianBlurFilter alloc] init]; + [self addFilter:blurFilter]; + + // Third pass: run the Sobel edge detection, with calculated gradient directions, on this blurred image + edgeDetectionFilter = [[GPUImageDirectionalSobelEdgeDetectionFilter alloc] init]; + [self addFilter:edgeDetectionFilter]; + + // Fourth pass: apply non-maximum suppression + nonMaximumSuppressionFilter = [[GPUImageDirectionalNonMaximumSuppressionFilter alloc] init]; + [self addFilter:nonMaximumSuppressionFilter]; + + // Fifth pass: include weak pixels to complete edges + weakPixelInclusionFilter = [[GPUImageWeakPixelInclusionFilter alloc] init]; + [self addFilter:weakPixelInclusionFilter]; + + [luminanceFilter addTarget:blurFilter]; + [blurFilter addTarget:edgeDetectionFilter]; + [edgeDetectionFilter 
addTarget:nonMaximumSuppressionFilter]; + [nonMaximumSuppressionFilter addTarget:weakPixelInclusionFilter]; + + self.initialFilters = [NSArray arrayWithObject:luminanceFilter]; +// self.terminalFilter = nonMaximumSuppressionFilter; + self.terminalFilter = weakPixelInclusionFilter; + + self.blurRadiusInPixels = 2.0; + self.blurTexelSpacingMultiplier = 1.0; + self.upperThreshold = 0.4; + self.lowerThreshold = 0.1; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setBlurRadiusInPixels:(CGFloat)newValue; +{ + blurFilter.blurRadiusInPixels = newValue; +} + +- (CGFloat)blurRadiusInPixels; +{ + return blurFilter.blurRadiusInPixels; +} + +- (void)setBlurTexelSpacingMultiplier:(CGFloat)newValue; +{ + blurFilter.texelSpacingMultiplier = newValue; +} + +- (CGFloat)blurTexelSpacingMultiplier; +{ + return blurFilter.texelSpacingMultiplier; +} + +- (void)setTexelWidth:(CGFloat)newValue; +{ + edgeDetectionFilter.texelWidth = newValue; +} + +- (CGFloat)texelWidth; +{ + return edgeDetectionFilter.texelWidth; +} + +- (void)setTexelHeight:(CGFloat)newValue; +{ + edgeDetectionFilter.texelHeight = newValue; +} + +- (CGFloat)texelHeight; +{ + return edgeDetectionFilter.texelHeight; +} + +- (void)setUpperThreshold:(CGFloat)newValue; +{ + nonMaximumSuppressionFilter.upperThreshold = newValue; +} + +- (CGFloat)upperThreshold; +{ + return nonMaximumSuppressionFilter.upperThreshold; +} + +- (void)setLowerThreshold:(CGFloat)newValue; +{ + nonMaximumSuppressionFilter.lowerThreshold = newValue; +} + +- (CGFloat)lowerThreshold; +{ + return nonMaximumSuppressionFilter.lowerThreshold; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyBlendFilter.h new file mode 100755 index 00000000..00a5aed7 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyBlendFilter.h @@ -0,0 +1,32 @@ +#import "GPUImageTwoInputFilter.h" + +/** Selectively replaces a color in the first image with the second image + 
*/ +@interface GPUImageChromaKeyBlendFilter : GPUImageTwoInputFilter +{ + GLint colorToReplaceUniform, thresholdSensitivityUniform, smoothingUniform; +} + +/** The threshold sensitivity controls how similar pixels need to be colored to be replaced + + The default value is 0.3 + */ +@property(readwrite, nonatomic) CGFloat thresholdSensitivity; + +/** The degree of smoothing controls how gradually similar colors are replaced in the image + + The default value is 0.1 + */ +@property(readwrite, nonatomic) CGFloat smoothing; + +/** The color to be replaced is specified using individual red, green, and blue components (normalized to 1.0). + + The default is green: (0.0, 1.0, 0.0). + + @param redComponent Red component of color to be replaced + @param greenComponent Green component of color to be replaced + @param blueComponent Blue component of color to be replaced + */ +- (void)setColorToReplaceRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyBlendFilter.m new file mode 100755 index 00000000..4e6b4661 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyBlendFilter.m @@ -0,0 +1,117 @@ +#import "GPUImageChromaKeyBlendFilter.h" + +// Shader code based on Apple's CIChromaKeyFilter example: https://developer.apple.com/library/mac/#samplecode/CIChromaKeyFilter/Introduction/Intro.html + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageChromaKeyBlendFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform float thresholdSensitivity; + uniform float smoothing; + uniform vec3 colorToReplace; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + vec4 textureColor2 = 
texture2D(inputImageTexture2, textureCoordinate2); + + float maskY = 0.2989 * colorToReplace.r + 0.5866 * colorToReplace.g + 0.1145 * colorToReplace.b; + float maskCr = 0.7132 * (colorToReplace.r - maskY); + float maskCb = 0.5647 * (colorToReplace.b - maskY); + + float Y = 0.2989 * textureColor.r + 0.5866 * textureColor.g + 0.1145 * textureColor.b; + float Cr = 0.7132 * (textureColor.r - Y); + float Cb = 0.5647 * (textureColor.b - Y); + +// float blendValue = 1.0 - smoothstep(thresholdSensitivity - smoothing, thresholdSensitivity , abs(Cr - maskCr) + abs(Cb - maskCb)); + float blendValue = 1.0 - smoothstep(thresholdSensitivity, thresholdSensitivity + smoothing, distance(vec2(Cr, Cb), vec2(maskCr, maskCb))); + gl_FragColor = mix(textureColor, textureColor2, blendValue); + } +); +#else +NSString *const kGPUImageChromaKeyBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform float thresholdSensitivity; + uniform float smoothing; + uniform vec3 colorToReplace; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + + float maskY = 0.2989 * colorToReplace.r + 0.5866 * colorToReplace.g + 0.1145 * colorToReplace.b; + float maskCr = 0.7132 * (colorToReplace.r - maskY); + float maskCb = 0.5647 * (colorToReplace.b - maskY); + + float Y = 0.2989 * textureColor.r + 0.5866 * textureColor.g + 0.1145 * textureColor.b; + float Cr = 0.7132 * (textureColor.r - Y); + float Cb = 0.5647 * (textureColor.b - Y); + + // float blendValue = 1.0 - smoothstep(thresholdSensitivity - smoothing, thresholdSensitivity , abs(Cr - maskCr) + abs(Cb - maskCb)); + float blendValue = 1.0 - smoothstep(thresholdSensitivity, thresholdSensitivity + smoothing, distance(vec2(Cr, Cb), vec2(maskCr, maskCb))); + gl_FragColor = mix(textureColor, 
textureColor2, blendValue); + } +); +#endif + +@implementation GPUImageChromaKeyBlendFilter + +@synthesize thresholdSensitivity = _thresholdSensitivity; +@synthesize smoothing = _smoothing; + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageChromaKeyBlendFragmentShaderString])) + { + return nil; + } + + thresholdSensitivityUniform = [filterProgram uniformIndex:@"thresholdSensitivity"]; + smoothingUniform = [filterProgram uniformIndex:@"smoothing"]; + colorToReplaceUniform = [filterProgram uniformIndex:@"colorToReplace"]; + + self.thresholdSensitivity = 0.4; + self.smoothing = 0.1; + [self setColorToReplaceRed:0.0 green:1.0 blue:0.0]; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setColorToReplaceRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; +{ + GPUVector3 colorToReplace = {redComponent, greenComponent, blueComponent}; + + [self setVec3:colorToReplace forUniform:colorToReplaceUniform program:filterProgram]; +} + +- (void)setThresholdSensitivity:(CGFloat)newValue; +{ + _thresholdSensitivity = newValue; + + [self setFloat:(GLfloat)_thresholdSensitivity forUniform:thresholdSensitivityUniform program:filterProgram]; +} + +- (void)setSmoothing:(CGFloat)newValue; +{ + _smoothing = newValue; + + [self setFloat:(GLfloat)_smoothing forUniform:smoothingUniform program:filterProgram]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyFilter.h new file mode 100644 index 00000000..5ee7e498 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyFilter.h @@ -0,0 +1,30 @@ +#import "GPUImageFilter.h" + +@interface GPUImageChromaKeyFilter : GPUImageFilter +{ + GLint colorToReplaceUniform, thresholdSensitivityUniform, smoothingUniform; +} + +/** The threshold sensitivity controls how similar pixels need to be colored to be replaced + + The default value is 0.3 + */ +@property(readwrite, nonatomic) CGFloat 
thresholdSensitivity; + +/** The degree of smoothing controls how gradually similar colors are replaced in the image + + The default value is 0.1 + */ +@property(readwrite, nonatomic) CGFloat smoothing; + +/** The color to be replaced is specified using individual red, green, and blue components (normalized to 1.0). + + The default is green: (0.0, 1.0, 0.0). + + @param redComponent Red component of color to be replaced + @param greenComponent Green component of color to be replaced + @param blueComponent Blue component of color to be replaced + */ +- (void)setColorToReplaceRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyFilter.m new file mode 100644 index 00000000..c70ef61d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyFilter.m @@ -0,0 +1,113 @@ +#import "GPUImageChromaKeyFilter.h" + +// Shader code based on Apple's CIChromaKeyFilter example: https://developer.apple.com/library/mac/#samplecode/CIChromaKeyFilter/Introduction/Intro.html + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageChromaKeyFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying highp vec2 textureCoordinate; + + uniform float thresholdSensitivity; + uniform float smoothing; + uniform vec3 colorToReplace; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + float maskY = 0.2989 * colorToReplace.r + 0.5866 * colorToReplace.g + 0.1145 * colorToReplace.b; + float maskCr = 0.7132 * (colorToReplace.r - maskY); + float maskCb = 0.5647 * (colorToReplace.b - maskY); + + float Y = 0.2989 * textureColor.r + 0.5866 * textureColor.g + 0.1145 * textureColor.b; + float Cr = 0.7132 * (textureColor.r - Y); + float Cb = 0.5647 * (textureColor.b - Y); + + // float blendValue = 1.0 
- smoothstep(thresholdSensitivity - smoothing, thresholdSensitivity , abs(Cr - maskCr) + abs(Cb - maskCb)); + float blendValue = smoothstep(thresholdSensitivity, thresholdSensitivity + smoothing, distance(vec2(Cr, Cb), vec2(maskCr, maskCb))); + gl_FragColor = vec4(textureColor.rgb, textureColor.a * blendValue); + } +); +#else +NSString *const kGPUImageChromaKeyFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform float thresholdSensitivity; + uniform float smoothing; + uniform vec3 colorToReplace; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + float maskY = 0.2989 * colorToReplace.r + 0.5866 * colorToReplace.g + 0.1145 * colorToReplace.b; + float maskCr = 0.7132 * (colorToReplace.r - maskY); + float maskCb = 0.5647 * (colorToReplace.b - maskY); + + float Y = 0.2989 * textureColor.r + 0.5866 * textureColor.g + 0.1145 * textureColor.b; + float Cr = 0.7132 * (textureColor.r - Y); + float Cb = 0.5647 * (textureColor.b - Y); + + // float blendValue = 1.0 - smoothstep(thresholdSensitivity - smoothing, thresholdSensitivity , abs(Cr - maskCr) + abs(Cb - maskCb)); + float blendValue = smoothstep(thresholdSensitivity, thresholdSensitivity + smoothing, distance(vec2(Cr, Cb), vec2(maskCr, maskCb))); + gl_FragColor = vec4(textureColor.rgb, textureColor.a * blendValue); + } + ); +#endif + +@implementation GPUImageChromaKeyFilter + +@synthesize thresholdSensitivity = _thresholdSensitivity; +@synthesize smoothing = _smoothing; + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageChromaKeyFragmentShaderString])) + { + return nil; + } + + thresholdSensitivityUniform = [filterProgram uniformIndex:@"thresholdSensitivity"]; + smoothingUniform = [filterProgram uniformIndex:@"smoothing"]; + colorToReplaceUniform = [filterProgram uniformIndex:@"colorToReplace"]; + + self.thresholdSensitivity = 0.4; + 
self.smoothing = 0.1; + [self setColorToReplaceRed:0.0 green:1.0 blue:0.0]; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setColorToReplaceRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; +{ + GPUVector3 colorToReplace = {redComponent, greenComponent, blueComponent}; + + [self setVec3:colorToReplace forUniform:colorToReplaceUniform program:filterProgram]; +} + +- (void)setThresholdSensitivity:(CGFloat)newValue; +{ + _thresholdSensitivity = newValue; + + [self setFloat:(GLfloat)_thresholdSensitivity forUniform:thresholdSensitivityUniform program:filterProgram]; +} + +- (void)setSmoothing:(CGFloat)newValue; +{ + _smoothing = newValue; + + [self setFloat:(GLfloat)_smoothing forUniform:smoothingUniform program:filterProgram]; +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageClosingFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageClosingFilter.h new file mode 100644 index 00000000..61e34c41 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageClosingFilter.h @@ -0,0 +1,19 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImageErosionFilter; +@class GPUImageDilationFilter; + +// A filter that first performs a dilation on the red channel of an image, followed by an erosion of the same radius. +// This helps to filter out smaller dark elements. 
+ +@interface GPUImageClosingFilter : GPUImageFilterGroup +{ + GPUImageErosionFilter *erosionFilter; + GPUImageDilationFilter *dilationFilter; +} + +@property(readwrite, nonatomic) CGFloat verticalTexelSpacing, horizontalTexelSpacing; + +- (id)initWithRadius:(NSUInteger)radius; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageClosingFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageClosingFilter.m new file mode 100644 index 00000000..01e9b29b --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageClosingFilter.m @@ -0,0 +1,57 @@ +#import "GPUImageClosingFilter.h" +#import "GPUImageErosionFilter.h" +#import "GPUImageDilationFilter.h" + +@implementation GPUImageClosingFilter + +@synthesize verticalTexelSpacing = _verticalTexelSpacing; +@synthesize horizontalTexelSpacing = _horizontalTexelSpacing; + +- (id)init; +{ + if (!(self = [self initWithRadius:1])) + { + return nil; + } + + return self; +} + +- (id)initWithRadius:(NSUInteger)radius; +{ + if (!(self = [super init])) + { + return nil; + } + + // First pass: dilation + dilationFilter = [[GPUImageDilationFilter alloc] initWithRadius:radius]; + [self addFilter:dilationFilter]; + + // Second pass: erosion + erosionFilter = [[GPUImageErosionFilter alloc] initWithRadius:radius]; + [self addFilter:erosionFilter]; + + [dilationFilter addTarget:erosionFilter]; + + self.initialFilters = [NSArray arrayWithObjects:dilationFilter, nil]; + self.terminalFilter = erosionFilter; + + return self; +} + +- (void)setVerticalTexelSpacing:(CGFloat)newValue; +{ + _verticalTexelSpacing = newValue; + erosionFilter.verticalTexelSpacing = newValue; + dilationFilter.verticalTexelSpacing = newValue; +} + +- (void)setHorizontalTexelSpacing:(CGFloat)newValue; +{ + _horizontalTexelSpacing = newValue; + erosionFilter.horizontalTexelSpacing = newValue; + dilationFilter.horizontalTexelSpacing = newValue; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageColorBlendFilter.h new file mode 
100644 index 00000000..302a16c6 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorBlendFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageColorBlendFilter : GPUImageTwoInputFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageColorBlendFilter.m new file mode 100644 index 00000000..ced1cb8e --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorBlendFilter.m @@ -0,0 +1,113 @@ +#import "GPUImageColorBlendFilter.h" + +/** + * Color blend mode based upon pseudo code from the PDF specification. + */ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageColorBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + highp float lum(lowp vec3 c) { + return dot(c, vec3(0.3, 0.59, 0.11)); + } + + lowp vec3 clipcolor(lowp vec3 c) { + highp float l = lum(c); + lowp float n = min(min(c.r, c.g), c.b); + lowp float x = max(max(c.r, c.g), c.b); + + if (n < 0.0) { + c.r = l + ((c.r - l) * l) / (l - n); + c.g = l + ((c.g - l) * l) / (l - n); + c.b = l + ((c.b - l) * l) / (l - n); + } + if (x > 1.0) { + c.r = l + ((c.r - l) * (1.0 - l)) / (x - l); + c.g = l + ((c.g - l) * (1.0 - l)) / (x - l); + c.b = l + ((c.b - l) * (1.0 - l)) / (x - l); + } + + return c; + } + + lowp vec3 setlum(lowp vec3 c, highp float l) { + highp float d = l - lum(c); + c = c + vec3(d); + return clipcolor(c); + } + + void main() + { + highp vec4 baseColor = texture2D(inputImageTexture, textureCoordinate); + highp vec4 overlayColor = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(baseColor.rgb * (1.0 - overlayColor.a) + setlum(overlayColor.rgb, lum(baseColor.rgb)) * overlayColor.a, baseColor.a); + } +); +#else +NSString *const kGPUImageColorBlendFragmentShaderString = SHADER_STRING +( + varying vec2 
textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + float lum(vec3 c) { + return dot(c, vec3(0.3, 0.59, 0.11)); + } + + vec3 clipcolor(vec3 c) { + float l = lum(c); + float n = min(min(c.r, c.g), c.b); + float x = max(max(c.r, c.g), c.b); + + if (n < 0.0) { + c.r = l + ((c.r - l) * l) / (l - n); + c.g = l + ((c.g - l) * l) / (l - n); + c.b = l + ((c.b - l) * l) / (l - n); + } + if (x > 1.0) { + c.r = l + ((c.r - l) * (1.0 - l)) / (x - l); + c.g = l + ((c.g - l) * (1.0 - l)) / (x - l); + c.b = l + ((c.b - l) * (1.0 - l)) / (x - l); + } + + return c; + } + + vec3 setlum(vec3 c, float l) { + float d = l - lum(c); + c = c + vec3(d); + return clipcolor(c); + } + + void main() + { + vec4 baseColor = texture2D(inputImageTexture, textureCoordinate); + vec4 overlayColor = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(baseColor.rgb * (1.0 - overlayColor.a) + setlum(overlayColor.rgb, lum(baseColor.rgb)) * overlayColor.a, baseColor.a); + } +); +#endif + + +@implementation GPUImageColorBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageColorBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorBurnBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageColorBurnBlendFilter.h new file mode 100755 index 00000000..50ebb3f4 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorBurnBlendFilter.h @@ -0,0 +1,9 @@ +#import "GPUImageTwoInputFilter.h" + +/** Applies a color burn blend of two images + */ +@interface GPUImageColorBurnBlendFilter : GPUImageTwoInputFilter +{ +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorBurnBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageColorBurnBlendFilter.m new file mode 100755 index 00000000..5d6ff60d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorBurnBlendFilter.m 
@@ -0,0 +1,52 @@ +#import "GPUImageColorBurnBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageColorBurnBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + mediump vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + mediump vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + mediump vec4 whiteColor = vec4(1.0); + gl_FragColor = whiteColor - (whiteColor - textureColor) / textureColor2; + } +); +#else +NSString *const kGPUImageColorBurnBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + vec4 whiteColor = vec4(1.0); + gl_FragColor = whiteColor - (whiteColor - textureColor) / textureColor2; + } +); +#endif + +@implementation GPUImageColorBurnBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageColorBurnBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorConversion.h b/LFLiveKit/Vendor/GPUImage/GPUImageColorConversion.h new file mode 100644 index 00000000..02f82bf8 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorConversion.h @@ -0,0 +1,12 @@ +#ifndef GPUImageColorConversion_h +#define GPUImageColorConversion_h + +extern GLfloat *kColorConversion601; +extern GLfloat *kColorConversion601FullRange; +extern GLfloat *kColorConversion709; +extern NSString *const kGPUImageYUVVideoRangeConversionForRGFragmentShaderString; +extern NSString *const 
kGPUImageYUVFullRangeConversionForLAFragmentShaderString; +extern NSString *const kGPUImageYUVVideoRangeConversionForLAFragmentShaderString; + + +#endif /* GPUImageColorConversion_h */ diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorConversion.m b/LFLiveKit/Vendor/GPUImage/GPUImageColorConversion.m new file mode 100644 index 00000000..35534c20 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorConversion.m @@ -0,0 +1,159 @@ +#import "GPUImageFilter.h" + +// Color Conversion Constants (YUV to RGB) including adjustment from 16-235/16-240 (video range) + +// BT.601, which is the standard for SDTV. +GLfloat kColorConversion601Default[] = { + 1.164, 1.164, 1.164, + 0.0, -0.392, 2.017, + 1.596, -0.813, 0.0, +}; + +// BT.601 full range (ref: http://www.equasys.de/colorconversion.html) +GLfloat kColorConversion601FullRangeDefault[] = { + 1.0, 1.0, 1.0, + 0.0, -0.343, 1.765, + 1.4, -0.711, 0.0, +}; + +// BT.709, which is the standard for HDTV. +GLfloat kColorConversion709Default[] = { + 1.164, 1.164, 1.164, + 0.0, -0.213, 2.112, + 1.793, -0.533, 0.0, +}; + + +GLfloat *kColorConversion601 = kColorConversion601Default; +GLfloat *kColorConversion601FullRange = kColorConversion601FullRangeDefault; +GLfloat *kColorConversion709 = kColorConversion709Default; + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageYUVVideoRangeConversionForRGFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D luminanceTexture; + uniform sampler2D chrominanceTexture; + uniform mediump mat3 colorConversionMatrix; + + void main() + { + mediump vec3 yuv; + lowp vec3 rgb; + + yuv.x = texture2D(luminanceTexture, textureCoordinate).r; + yuv.yz = texture2D(chrominanceTexture, textureCoordinate).rg - vec2(0.5, 0.5); + rgb = colorConversionMatrix * yuv; + + gl_FragColor = vec4(rgb, 1); + } + ); +#else +NSString *const kGPUImageYUVVideoRangeConversionForRGFragmentShaderString = SHADER_STRING +( + varying vec2 
textureCoordinate; + + uniform sampler2D luminanceTexture; + uniform sampler2D chrominanceTexture; + + void main() + { + vec3 yuv; + vec3 rgb; + + yuv.x = texture2D(luminanceTexture, textureCoordinate).r; + yuv.yz = texture2D(chrominanceTexture, textureCoordinate).rg - vec2(0.5, 0.5); + + // BT.601, which is the standard for SDTV is provided as a reference + /* + rgb = mat3( 1, 1, 1, + 0, -.39465, 2.03211, + 1.13983, -.58060, 0) * yuv; + */ + + // Using BT.709 which is the standard for HDTV + rgb = mat3( 1, 1, 1, + 0, -.21482, 2.12798, + 1.28033, -.38059, 0) * yuv; + + gl_FragColor = vec4(rgb, 1); + } + ); +#endif + +NSString *const kGPUImageYUVFullRangeConversionForLAFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D luminanceTexture; + uniform sampler2D chrominanceTexture; + uniform mediump mat3 colorConversionMatrix; + + void main() + { + mediump vec3 yuv; + lowp vec3 rgb; + + yuv.x = texture2D(luminanceTexture, textureCoordinate).r; + yuv.yz = texture2D(chrominanceTexture, textureCoordinate).ra - vec2(0.5, 0.5); + rgb = colorConversionMatrix * yuv; + + gl_FragColor = vec4(rgb, 1); + } + ); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageYUVVideoRangeConversionForLAFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D luminanceTexture; + uniform sampler2D chrominanceTexture; + uniform mediump mat3 colorConversionMatrix; + + void main() + { + mediump vec3 yuv; + lowp vec3 rgb; + + yuv.x = texture2D(luminanceTexture, textureCoordinate).r - (16.0/255.0); + yuv.yz = texture2D(chrominanceTexture, textureCoordinate).ra - vec2(0.5, 0.5); + rgb = colorConversionMatrix * yuv; + + gl_FragColor = vec4(rgb, 1); + } + ); +#else +NSString *const kGPUImageYUVVideoRangeConversionForLAFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D luminanceTexture; + uniform sampler2D chrominanceTexture; + + void main() + 
{ + vec3 yuv; + vec3 rgb; + + yuv.x = texture2D(luminanceTexture, textureCoordinate).r; + yuv.yz = texture2D(chrominanceTexture, textureCoordinate).ra - vec2(0.5, 0.5); + + // BT.601, which is the standard for SDTV is provided as a reference + /* + rgb = mat3( 1, 1, 1, + 0, -.39465, 2.03211, + 1.13983, -.58060, 0) * yuv; + */ + + // Using BT.709 which is the standard for HDTV + rgb = mat3( 1, 1, 1, + 0, -.21482, 2.12798, + 1.28033, -.38059, 0) * yuv; + + gl_FragColor = vec4(rgb, 1); + } + ); +#endif + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorDodgeBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageColorDodgeBlendFilter.h new file mode 100755 index 00000000..0f541c42 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorDodgeBlendFilter.h @@ -0,0 +1,9 @@ +#import "GPUImageTwoInputFilter.h" + +/** Applies a color dodge blend of two images + */ +@interface GPUImageColorDodgeBlendFilter : GPUImageTwoInputFilter +{ +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorDodgeBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageColorDodgeBlendFilter.m new file mode 100755 index 00000000..6a38827c --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorDodgeBlendFilter.m @@ -0,0 +1,75 @@ +#import "GPUImageColorDodgeBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageColorDodgeBlendFragmentShaderString = SHADER_STRING +( + + precision mediump float; + + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 base = texture2D(inputImageTexture, textureCoordinate); + vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); + + vec3 baseOverlayAlphaProduct = vec3(overlay.a * base.a); + vec3 rightHandProduct = overlay.rgb * (1.0 - base.a) + base.rgb * (1.0 - overlay.a); + + vec3 firstBlendColor = baseOverlayAlphaProduct + rightHandProduct; + vec3 overlayRGB = clamp((overlay.rgb / 
clamp(overlay.a, 0.01, 1.0)) * step(0.0, overlay.a), 0.0, 0.99); + + vec3 secondBlendColor = (base.rgb * overlay.a) / (1.0 - overlayRGB) + rightHandProduct; + + vec3 colorChoice = step((overlay.rgb * base.a + base.rgb * overlay.a), baseOverlayAlphaProduct); + + gl_FragColor = vec4(mix(firstBlendColor, secondBlendColor, colorChoice), 1.0); + } +); +#else +NSString *const kGPUImageColorDodgeBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 base = texture2D(inputImageTexture, textureCoordinate); + vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); + + vec3 baseOverlayAlphaProduct = vec3(overlay.a * base.a); + vec3 rightHandProduct = overlay.rgb * (1.0 - base.a) + base.rgb * (1.0 - overlay.a); + + vec3 firstBlendColor = baseOverlayAlphaProduct + rightHandProduct; + vec3 overlayRGB = clamp((overlay.rgb / clamp(overlay.a, 0.01, 1.0)) * step(0.0, overlay.a), 0.0, 0.99); + + vec3 secondBlendColor = (base.rgb * overlay.a) / (1.0 - overlayRGB) + rightHandProduct; + + vec3 colorChoice = step((overlay.rgb * base.a + base.rgb * overlay.a), baseOverlayAlphaProduct); + + gl_FragColor = vec4(mix(firstBlendColor, secondBlendColor, colorChoice), 1.0); + } +); +#endif + +@implementation GPUImageColorDodgeBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageColorDodgeBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorInvertFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageColorInvertFilter.h new file mode 100755 index 00000000..aaeec438 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorInvertFilter.h @@ -0,0 +1,7 @@ +#import "GPUImageFilter.h" + +@interface GPUImageColorInvertFilter : GPUImageFilter +{ +} + +@end diff --git 
a/LFLiveKit/Vendor/GPUImage/GPUImageColorInvertFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageColorInvertFilter.m new file mode 100755 index 00000000..0a8798b1 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorInvertFilter.m @@ -0,0 +1,46 @@ +#import "GPUImageColorInvertFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageInvertFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4((1.0 - textureColor.rgb), textureColor.w); + } +); +#else +NSString *const kGPUImageInvertFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4((1.0 - textureColor.rgb), textureColor.w); + } + ); +#endif + +@implementation GPUImageColorInvertFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageInvertFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorLocalBinaryPatternFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageColorLocalBinaryPatternFilter.h new file mode 100644 index 00000000..8b57cd7c --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorLocalBinaryPatternFilter.h @@ -0,0 +1,5 @@ +#import "GPUImage3x3TextureSamplingFilter.h" + +@interface GPUImageColorLocalBinaryPatternFilter : GPUImage3x3TextureSamplingFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorLocalBinaryPatternFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageColorLocalBinaryPatternFilter.m new file mode 100644 index 00000000..d51aedf6 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorLocalBinaryPatternFilter.m @@ -0,0 +1,159 @@ +#import "GPUImageColorLocalBinaryPatternFilter.h" + +// This is 
based on "Accelerating image recognition on mobile devices using GPGPU" by Miguel Bordallo Lopez, Henri Nykanen, Jari Hannuksela, Olli Silven and Markku Vehvilainen +// http://www.ee.oulu.fi/~jhannuks/publications/SPIE2011a.pdf + +// Right pixel is the most significant bit, traveling clockwise to get to the upper right, which is the least significant +// If the external pixel is greater than or equal to the center, set to 1, otherwise 0 +// +// 2 1 0 +// 3 7 +// 4 5 6 + +// 01101101 +// 76543210 + +@implementation GPUImageColorLocalBinaryPatternFilter + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageColorLocalBinaryPatternFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + lowp vec3 centerColor = texture2D(inputImageTexture, textureCoordinate).rgb; + lowp vec3 bottomLeftColor = texture2D(inputImageTexture, bottomLeftTextureCoordinate).rgb; + lowp vec3 topRightColor = texture2D(inputImageTexture, topRightTextureCoordinate).rgb; + lowp vec3 topLeftColor = texture2D(inputImageTexture, topLeftTextureCoordinate).rgb; + lowp vec3 bottomRightColor = texture2D(inputImageTexture, bottomRightTextureCoordinate).rgb; + lowp vec3 leftColor = texture2D(inputImageTexture, leftTextureCoordinate).rgb; + lowp vec3 rightColor = texture2D(inputImageTexture, rightTextureCoordinate).rgb; + lowp vec3 bottomColor = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; + lowp vec3 topColor = texture2D(inputImageTexture, topTextureCoordinate).rgb; + + lowp float redByteTally = 1.0 / 255.0 * step(centerColor.r, 
topRightColor.r); + redByteTally += 2.0 / 255.0 * step(centerColor.r, topColor.r); + redByteTally += 4.0 / 255.0 * step(centerColor.r, topLeftColor.r); + redByteTally += 8.0 / 255.0 * step(centerColor.r, leftColor.r); + redByteTally += 16.0 / 255.0 * step(centerColor.r, bottomLeftColor.r); + redByteTally += 32.0 / 255.0 * step(centerColor.r, bottomColor.r); + redByteTally += 64.0 / 255.0 * step(centerColor.r, bottomRightColor.r); + redByteTally += 128.0 / 255.0 * step(centerColor.r, rightColor.r); + + lowp float blueByteTally = 1.0 / 255.0 * step(centerColor.b, topRightColor.b); + blueByteTally += 2.0 / 255.0 * step(centerColor.b, topColor.b); + blueByteTally += 4.0 / 255.0 * step(centerColor.b, topLeftColor.b); + blueByteTally += 8.0 / 255.0 * step(centerColor.b, leftColor.b); + blueByteTally += 16.0 / 255.0 * step(centerColor.b, bottomLeftColor.b); + blueByteTally += 32.0 / 255.0 * step(centerColor.b, bottomColor.b); + blueByteTally += 64.0 / 255.0 * step(centerColor.b, bottomRightColor.b); + blueByteTally += 128.0 / 255.0 * step(centerColor.b, rightColor.b); + + lowp float greenByteTally = 1.0 / 255.0 * step(centerColor.g, topRightColor.g); + greenByteTally += 2.0 / 255.0 * step(centerColor.g, topColor.g); + greenByteTally += 4.0 / 255.0 * step(centerColor.g, topLeftColor.g); + greenByteTally += 8.0 / 255.0 * step(centerColor.g, leftColor.g); + greenByteTally += 16.0 / 255.0 * step(centerColor.g, bottomLeftColor.g); + greenByteTally += 32.0 / 255.0 * step(centerColor.g, bottomColor.g); + greenByteTally += 64.0 / 255.0 * step(centerColor.g, bottomRightColor.g); + greenByteTally += 128.0 / 255.0 * step(centerColor.g, rightColor.g); + + // TODO: Replace the above with a dot product and two vec4s + // TODO: Apply step to a matrix, rather than individually + + gl_FragColor = vec4(redByteTally, blueByteTally, greenByteTally, 1.0); + } +); +#else +NSString *const kGPUImageColorLocalBinaryPatternFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + 
varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + vec3 centerColor = texture2D(inputImageTexture, textureCoordinate).rgb; + vec3 bottomLeftColor = texture2D(inputImageTexture, bottomLeftTextureCoordinate).rgb; + vec3 topRightColor = texture2D(inputImageTexture, topRightTextureCoordinate).rgb; + vec3 topLeftColor = texture2D(inputImageTexture, topLeftTextureCoordinate).rgb; + vec3 bottomRightColor = texture2D(inputImageTexture, bottomRightTextureCoordinate).rgb; + vec3 leftColor = texture2D(inputImageTexture, leftTextureCoordinate).rgb; + vec3 rightColor = texture2D(inputImageTexture, rightTextureCoordinate).rgb; + vec3 bottomColor = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; + vec3 topColor = texture2D(inputImageTexture, topTextureCoordinate).rgb; + + float redByteTally = 1.0 / 255.0 * step(centerColor.r, topRightColor.r); + redByteTally += 2.0 / 255.0 * step(centerColor.r, topColor.r); + redByteTally += 4.0 / 255.0 * step(centerColor.r, topLeftColor.r); + redByteTally += 8.0 / 255.0 * step(centerColor.r, leftColor.r); + redByteTally += 16.0 / 255.0 * step(centerColor.r, bottomLeftColor.r); + redByteTally += 32.0 / 255.0 * step(centerColor.r, bottomColor.r); + redByteTally += 64.0 / 255.0 * step(centerColor.r, bottomRightColor.r); + redByteTally += 128.0 / 255.0 * step(centerColor.r, rightColor.r); + + float blueByteTally = 1.0 / 255.0 * step(centerColor.b, topRightColor.b); + blueByteTally += 2.0 / 255.0 * step(centerColor.b, topColor.b); + blueByteTally += 4.0 / 255.0 * step(centerColor.b, topLeftColor.b); + blueByteTally += 8.0 / 255.0 * step(centerColor.b, leftColor.b); + blueByteTally += 16.0 / 255.0 * 
step(centerColor.b, bottomLeftColor.b); + blueByteTally += 32.0 / 255.0 * step(centerColor.b, bottomColor.b); + blueByteTally += 64.0 / 255.0 * step(centerColor.b, bottomRightColor.b); + blueByteTally += 128.0 / 255.0 * step(centerColor.b, rightColor.b); + + float greenByteTally = 1.0 / 255.0 * step(centerColor.g, topRightColor.g); + greenByteTally += 2.0 / 255.0 * step(centerColor.g, topColor.g); + greenByteTally += 4.0 / 255.0 * step(centerColor.g, topLeftColor.g); + greenByteTally += 8.0 / 255.0 * step(centerColor.g, leftColor.g); + greenByteTally += 16.0 / 255.0 * step(centerColor.g, bottomLeftColor.g); + greenByteTally += 32.0 / 255.0 * step(centerColor.g, bottomColor.g); + greenByteTally += 64.0 / 255.0 * step(centerColor.g, bottomRightColor.g); + greenByteTally += 128.0 / 255.0 * step(centerColor.g, rightColor.g); + + // TODO: Replace the above with a dot product and two vec4s + // TODO: Apply step to a matrix, rather than individually + + gl_FragColor = vec4(redByteTally, blueByteTally, greenByteTally, 1.0); + } +); +#endif + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageColorLocalBinaryPatternFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorMatrixFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageColorMatrixFilter.h new file mode 100755 index 00000000..75887276 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorMatrixFilter.h @@ -0,0 +1,19 @@ +#import "GPUImageFilter.h" + +/** Transforms the colors of an image by applying a matrix to them + */ +@interface GPUImageColorMatrixFilter : GPUImageFilter +{ + GLint colorMatrixUniform; + GLint intensityUniform; +} + +/** A 4x4 matrix used to transform each color in an image + */ +@property(readwrite, nonatomic) GPUMatrix4x4 colorMatrix; + +/** The degree to which the new transformed color replaces the original color for each pixel + */ 
+@property(readwrite, nonatomic) CGFloat intensity; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorMatrixFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageColorMatrixFilter.m new file mode 100755 index 00000000..0e21c7e8 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorMatrixFilter.m @@ -0,0 +1,87 @@ +#import "GPUImageColorMatrixFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageColorMatrixFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform lowp mat4 colorMatrix; + uniform lowp float intensity; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + lowp vec4 outputColor = textureColor * colorMatrix; + + gl_FragColor = (intensity * outputColor) + ((1.0 - intensity) * textureColor); + } +); +#else +NSString *const kGPUImageColorMatrixFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform mat4 colorMatrix; + uniform float intensity; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + vec4 outputColor = textureColor * colorMatrix; + + gl_FragColor = (intensity * outputColor) + ((1.0 - intensity) * textureColor); + } +); +#endif + +@implementation GPUImageColorMatrixFilter + +@synthesize intensity = _intensity; +@synthesize colorMatrix = _colorMatrix; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageColorMatrixFragmentShaderString])) + { + return nil; + } + + colorMatrixUniform = [filterProgram uniformIndex:@"colorMatrix"]; + intensityUniform = [filterProgram uniformIndex:@"intensity"]; + + self.intensity = 1.f; + self.colorMatrix = (GPUMatrix4x4){ + {1.f, 0.f, 0.f, 0.f}, + {0.f, 1.f, 0.f, 0.f}, + {0.f, 0.f, 1.f, 0.f}, + {0.f, 0.f, 0.f, 1.f} + }; + + return self; +} + +#pragma mark - +#pragma 
mark Accessors + +- (void)setIntensity:(CGFloat)newIntensity; +{ + _intensity = newIntensity; + + [self setFloat:_intensity forUniform:intensityUniform program:filterProgram]; +} + +- (void)setColorMatrix:(GPUMatrix4x4)newColorMatrix; +{ + _colorMatrix = newColorMatrix; + + [self setMatrix4f:_colorMatrix forUniform:colorMatrixUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorPackingFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageColorPackingFilter.h new file mode 100644 index 00000000..c2edca51 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorPackingFilter.h @@ -0,0 +1,10 @@ +#import "GPUImageFilter.h" + +@interface GPUImageColorPackingFilter : GPUImageFilter +{ + GLint texelWidthUniform, texelHeightUniform; + + CGFloat texelWidth, texelHeight; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColorPackingFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageColorPackingFilter.m new file mode 100644 index 00000000..1a087ca8 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColorPackingFilter.m @@ -0,0 +1,139 @@ +#import "GPUImageColorPackingFilter.h" + +NSString *const kGPUImageColorPackingVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + + uniform float texelWidth; + uniform float texelHeight; + + varying vec2 upperLeftInputTextureCoordinate; + varying vec2 upperRightInputTextureCoordinate; + varying vec2 lowerLeftInputTextureCoordinate; + varying vec2 lowerRightInputTextureCoordinate; + + void main() + { + gl_Position = position; + + upperLeftInputTextureCoordinate = inputTextureCoordinate.xy + vec2(-texelWidth, -texelHeight); + upperRightInputTextureCoordinate = inputTextureCoordinate.xy + vec2(texelWidth, -texelHeight); + lowerLeftInputTextureCoordinate = inputTextureCoordinate.xy + vec2(-texelWidth, texelHeight); + lowerRightInputTextureCoordinate = inputTextureCoordinate.xy + vec2(texelWidth, texelHeight); + } +); + +#if TARGET_IPHONE_SIMULATOR || 
TARGET_OS_IPHONE +NSString *const kGPUImageColorPackingFragmentShaderString = SHADER_STRING +( + precision lowp float; + + uniform sampler2D inputImageTexture; + + uniform mediump mat3 convolutionMatrix; + + varying highp vec2 outputTextureCoordinate; + + varying highp vec2 upperLeftInputTextureCoordinate; + varying highp vec2 upperRightInputTextureCoordinate; + varying highp vec2 lowerLeftInputTextureCoordinate; + varying highp vec2 lowerRightInputTextureCoordinate; + + void main() + { + float upperLeftIntensity = texture2D(inputImageTexture, upperLeftInputTextureCoordinate).r; + float upperRightIntensity = texture2D(inputImageTexture, upperRightInputTextureCoordinate).r; + float lowerLeftIntensity = texture2D(inputImageTexture, lowerLeftInputTextureCoordinate).r; + float lowerRightIntensity = texture2D(inputImageTexture, lowerRightInputTextureCoordinate).r; + + gl_FragColor = vec4(upperLeftIntensity, upperRightIntensity, lowerLeftIntensity, lowerRightIntensity); + } +); +#else +NSString *const kGPUImageColorPackingFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + uniform mat3 convolutionMatrix; + + varying vec2 outputTextureCoordinate; + + varying vec2 upperLeftInputTextureCoordinate; + varying vec2 upperRightInputTextureCoordinate; + varying vec2 lowerLeftInputTextureCoordinate; + varying vec2 lowerRightInputTextureCoordinate; + + void main() + { + float upperLeftIntensity = texture2D(inputImageTexture, upperLeftInputTextureCoordinate).r; + float upperRightIntensity = texture2D(inputImageTexture, upperRightInputTextureCoordinate).r; + float lowerLeftIntensity = texture2D(inputImageTexture, lowerLeftInputTextureCoordinate).r; + float lowerRightIntensity = texture2D(inputImageTexture, lowerRightInputTextureCoordinate).r; + + gl_FragColor = vec4(upperLeftIntensity, upperRightIntensity, lowerLeftIntensity, lowerRightIntensity); + } +); +#endif + +@implementation GPUImageColorPackingFilter + +#pragma mark - +#pragma mark 
Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithVertexShaderFromString:kGPUImageColorPackingVertexShaderString fragmentShaderFromString:kGPUImageColorPackingFragmentShaderString])) + { + return nil; + } + + texelWidthUniform = [filterProgram uniformIndex:@"texelWidth"]; + texelHeightUniform = [filterProgram uniformIndex:@"texelHeight"]; + + return self; +} + +- (void)setupFilterForSize:(CGSize)filterFrameSize; +{ + texelWidth = 0.5 / inputTextureSize.width; + texelHeight = 0.5 / inputTextureSize.height; + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:filterProgram]; + glUniform1f(texelWidthUniform, texelWidth); + glUniform1f(texelHeightUniform, texelHeight); + }); +} + +#pragma mark - +#pragma mark Managing the display FBOs + +- (CGSize)sizeOfFBO; +{ + CGSize outputSize = [self maximumOutputSize]; + if ( (CGSizeEqualToSize(outputSize, CGSizeZero)) || (inputTextureSize.width < outputSize.width) ) + { + CGSize quarterSize; + quarterSize.width = inputTextureSize.width / 2.0; + quarterSize.height = inputTextureSize.height / 2.0; + return quarterSize; + } + else + { + return outputSize; + } +} + +#pragma mark - +#pragma mark Rendering + +- (CGSize)outputFrameSize; +{ + CGSize quarterSize; + quarterSize.width = inputTextureSize.width / 2.0; + quarterSize.height = inputTextureSize.height / 2.0; + return quarterSize; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTFeatureDetector.h b/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTFeatureDetector.h new file mode 100755 index 00000000..3d51ed17 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTFeatureDetector.h @@ -0,0 +1,21 @@ +#import "GPUImageFilterGroup.h" + +// This generates image-wide feature descriptors using the ColourFAST process, as developed and described in +// +// A. Ensor and S. Hall. ColourFAST: GPU-based feature point detection and tracking on mobile devices. 
28th International Conference of Image and Vision Computing, New Zealand, 2013, p. 124-129. +// +// Seth Hall, "GPU accelerated feature algorithms for mobile devices", PhD thesis, School of Computing and Mathematical Sciences, Auckland University of Technology 2014. +// http://aut.researchgateway.ac.nz/handle/10292/7991 + +@class GPUImageColourFASTSamplingOperation; +@class GPUImageBoxBlurFilter; + +@interface GPUImageColourFASTFeatureDetector : GPUImageFilterGroup +{ + GPUImageBoxBlurFilter *blurFilter; + GPUImageColourFASTSamplingOperation *colourFASTSamplingOperation; +} +// The blur radius of the underlying box blur. The default is 3.0. +@property (readwrite, nonatomic) CGFloat blurRadiusInPixels; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTFeatureDetector.m b/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTFeatureDetector.m new file mode 100755 index 00000000..f4e3684c --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTFeatureDetector.m @@ -0,0 +1,48 @@ +#import "GPUImageColourFASTFeatureDetector.h" +#import "GPUImageColourFASTSamplingOperation.h" +#import "GPUImageBoxBlurFilter.h" + +@implementation GPUImageColourFASTFeatureDetector + +@synthesize blurRadiusInPixels; + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + // First pass: apply a variable Gaussian blur + blurFilter = [[GPUImageBoxBlurFilter alloc] init]; + [self addFilter:blurFilter]; + + // Second pass: combine the blurred image with the original sharp one + colourFASTSamplingOperation = [[GPUImageColourFASTSamplingOperation alloc] init]; + [self addFilter:colourFASTSamplingOperation]; + + // Texture location 0 needs to be the sharp image for both the blur and the second stage processing + [blurFilter addTarget:colourFASTSamplingOperation atTextureLocation:1]; + + self.initialFilters = [NSArray arrayWithObjects:blurFilter, colourFASTSamplingOperation, nil]; + self.terminalFilter = colourFASTSamplingOperation; + + self.blurRadiusInPixels = 3.0; + 
+ return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setBlurRadiusInPixels:(CGFloat)newValue; +{ + blurFilter.blurRadiusInPixels = newValue; +} + +- (CGFloat)blurRadiusInPixels; +{ + return blurFilter.blurRadiusInPixels; +} + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTSamplingOperation.h b/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTSamplingOperation.h new file mode 100755 index 00000000..78e6ede9 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTSamplingOperation.h @@ -0,0 +1,22 @@ +#import "GPUImageTwoInputFilter.h" + +// This is the feature extraction phase of the ColourFAST feature detector, as described in: +// +// A. Ensor and S. Hall. ColourFAST: GPU-based feature point detection and tracking on mobile devices. 28th International Conference of Image and Vision Computing, New Zealand, 2013, p. 124-129. +// +// Seth Hall, "GPU accelerated feature algorithms for mobile devices", PhD thesis, School of Computing and Mathematical Sciences, Auckland University of Technology 2014. +// http://aut.researchgateway.ac.nz/handle/10292/7991 + +@interface GPUImageColourFASTSamplingOperation : GPUImageTwoInputFilter +{ + GLint texelWidthUniform, texelHeightUniform; + + CGFloat texelWidth, texelHeight; + BOOL hasOverriddenImageSizeFactor; +} + +// The texel width and height determines how far out to sample from this texel. By default, this is the normalized width of a pixel, but this can be overridden for different effects. 
+@property(readwrite, nonatomic) CGFloat texelWidth; +@property(readwrite, nonatomic) CGFloat texelHeight; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTSamplingOperation.m b/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTSamplingOperation.m new file mode 100755 index 00000000..fc67f470 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTSamplingOperation.m @@ -0,0 +1,204 @@ +#import "GPUImageColourFASTSamplingOperation.h" + +NSString *const kGPUImageColourFASTSamplingVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + attribute vec4 inputTextureCoordinate2; + + uniform float texelWidth; + uniform float texelHeight; + + varying vec2 textureCoordinate; + varying vec2 pointATextureCoordinate; + varying vec2 pointBTextureCoordinate; + varying vec2 pointCTextureCoordinate; + varying vec2 pointDTextureCoordinate; + varying vec2 pointETextureCoordinate; + varying vec2 pointFTextureCoordinate; + varying vec2 pointGTextureCoordinate; + varying vec2 pointHTextureCoordinate; + + void main() + { + gl_Position = position; + + float tripleTexelWidth = 3.0 * texelWidth; + float tripleTexelHeight = 3.0 * texelHeight; + + textureCoordinate = inputTextureCoordinate.xy; + + pointATextureCoordinate = vec2(textureCoordinate.x + tripleTexelWidth, textureCoordinate.y + texelHeight); + pointBTextureCoordinate = vec2(textureCoordinate.x + texelWidth, textureCoordinate.y + tripleTexelHeight); + pointCTextureCoordinate = vec2(textureCoordinate.x - texelWidth, textureCoordinate.y + tripleTexelHeight); + pointDTextureCoordinate = vec2(textureCoordinate.x - tripleTexelWidth, textureCoordinate.y + texelHeight); + pointETextureCoordinate = vec2(textureCoordinate.x - tripleTexelWidth, textureCoordinate.y - texelHeight); + pointFTextureCoordinate = vec2(textureCoordinate.x - texelWidth, textureCoordinate.y - tripleTexelHeight); + pointGTextureCoordinate = vec2(textureCoordinate.x + texelWidth, textureCoordinate.y - 
tripleTexelHeight); + pointHTextureCoordinate = vec2(textureCoordinate.x + tripleTexelWidth, textureCoordinate.y - texelHeight); + } +); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageColourFASTSamplingFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 textureCoordinate; + varying vec2 pointATextureCoordinate; + varying vec2 pointBTextureCoordinate; + varying vec2 pointCTextureCoordinate; + varying vec2 pointDTextureCoordinate; + varying vec2 pointETextureCoordinate; + varying vec2 pointFTextureCoordinate; + varying vec2 pointGTextureCoordinate; + varying vec2 pointHTextureCoordinate; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + const float PITwo = 6.2832; + const float PI = 3.1416; + void main() + { + vec3 centerColor = texture2D(inputImageTexture, textureCoordinate).rgb; + + vec3 pointAColor = texture2D(inputImageTexture, pointATextureCoordinate).rgb; + vec3 pointBColor = texture2D(inputImageTexture, pointBTextureCoordinate).rgb; + vec3 pointCColor = texture2D(inputImageTexture, pointCTextureCoordinate).rgb; + vec3 pointDColor = texture2D(inputImageTexture, pointDTextureCoordinate).rgb; + vec3 pointEColor = texture2D(inputImageTexture, pointETextureCoordinate).rgb; + vec3 pointFColor = texture2D(inputImageTexture, pointFTextureCoordinate).rgb; + vec3 pointGColor = texture2D(inputImageTexture, pointGTextureCoordinate).rgb; + vec3 pointHColor = texture2D(inputImageTexture, pointHTextureCoordinate).rgb; + + vec3 colorComparison = ((pointAColor + pointBColor + pointCColor + pointDColor + pointEColor + pointFColor + pointGColor + pointHColor) * 0.125) - centerColor; + + // Direction calculation drawn from Appendix B of Seth Hall's Ph.D. 
thesis + + vec3 dirX = (pointAColor*0.94868) + (pointBColor*0.316227) - (pointCColor*0.316227) - (pointDColor*0.94868) - (pointEColor*0.94868) - (pointFColor*0.316227) + (pointGColor*0.316227) + (pointHColor*0.94868); + vec3 dirY = (pointAColor*0.316227) + (pointBColor*0.94868) + (pointCColor*0.94868) + (pointDColor*0.316227) - (pointEColor*0.316227) - (pointFColor*0.94868) - (pointGColor*0.94868) - (pointHColor*0.316227); + vec3 absoluteDifference = abs(colorComparison); + float componentLength = length(colorComparison); + float avgX = dot(absoluteDifference, dirX) / componentLength; + float avgY = dot(absoluteDifference, dirY) / componentLength; + float angle = atan(avgY, avgX); + + vec3 normalizedColorComparison = (colorComparison + 1.0) * 0.5; + + gl_FragColor = vec4(normalizedColorComparison, (angle+PI)/PITwo); + } +); +#else +NSString *const kGPUImageColourFASTSamplingFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 pointATextureCoordinate; + varying vec2 pointBTextureCoordinate; + varying vec2 pointCTextureCoordinate; + varying vec2 pointDTextureCoordinate; + varying vec2 pointETextureCoordinate; + varying vec2 pointFTextureCoordinate; + varying vec2 pointGTextureCoordinate; + varying vec2 pointHTextureCoordinate; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + const float PITwo = 6.2832; + const float PI = 3.1416; + void main() + { + vec3 centerColor = texture2D(inputImageTexture, textureCoordinate).rgb; + + vec3 pointAColor = texture2D(inputImageTexture, pointATextureCoordinate).rgb; + vec3 pointBColor = texture2D(inputImageTexture, pointBTextureCoordinate).rgb; + vec3 pointCColor = texture2D(inputImageTexture, pointCTextureCoordinate).rgb; + vec3 pointDColor = texture2D(inputImageTexture, pointDTextureCoordinate).rgb; + vec3 pointEColor = texture2D(inputImageTexture, pointETextureCoordinate).rgb; + vec3 pointFColor = texture2D(inputImageTexture, pointFTextureCoordinate).rgb; + vec3 
pointGColor = texture2D(inputImageTexture, pointGTextureCoordinate).rgb; + vec3 pointHColor = texture2D(inputImageTexture, pointHTextureCoordinate).rgb; + + vec3 colorComparison = ((pointAColor + pointBColor + pointCColor + pointDColor + pointEColor + pointFColor + pointGColor + pointHColor) * 0.125) - centerColor; + + // Direction calculation drawn from Appendix B of Seth Hall's Ph.D. thesis + + vec3 dirX = (pointAColor*0.94868) + (pointBColor*0.316227) - (pointCColor*0.316227) - (pointDColor*0.94868) - (pointEColor*0.94868) - (pointFColor*0.316227) + (pointGColor*0.316227) + (pointHColor*0.94868); + vec3 dirY = (pointAColor*0.316227) + (pointBColor*0.94868) + (pointCColor*0.94868) + (pointDColor*0.316227) - (pointEColor*0.316227) - (pointFColor*0.94868) - (pointGColor*0.94868) - (pointHColor*0.316227); + vec3 absoluteDifference = abs(colorComparison); + float componentLength = length(colorComparison); + float avgX = dot(absoluteDifference, dirX) / componentLength; + float avgY = dot(absoluteDifference, dirY) / componentLength; + float angle = atan(avgY, avgX); + + vec3 normalizedColorComparison = (colorComparison + 1.0) * 0.5; + + gl_FragColor = vec4(normalizedColorComparison, (angle+PI)/PITwo); + } +); +#endif + + +@implementation GPUImageColourFASTSamplingOperation + +@synthesize texelWidth = _texelWidth; +@synthesize texelHeight = _texelHeight; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [super initWithVertexShaderFromString:kGPUImageColourFASTSamplingVertexShaderString fragmentShaderFromString:kGPUImageColourFASTSamplingFragmentShaderString])) + { + return nil; + } + + texelWidthUniform = [filterProgram uniformIndex:@"texelWidth"]; + texelHeightUniform = [filterProgram uniformIndex:@"texelHeight"]; + + return self; +} + +- (void)setupFilterForSize:(CGSize)filterFrameSize; +{ + if (!hasOverriddenImageSizeFactor) + { + _texelWidth = 1.0 / 
filterFrameSize.width; + _texelHeight = 1.0 / filterFrameSize.height; + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:filterProgram]; + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + glUniform1f(texelWidthUniform, _texelHeight); + glUniform1f(texelHeightUniform, _texelWidth); + } + else + { + glUniform1f(texelWidthUniform, _texelWidth); + glUniform1f(texelHeightUniform, _texelHeight); + } + }); + } +} + +#pragma mark - +#pragma mark Accessors + +- (void)setTexelWidth:(CGFloat)newValue; +{ + hasOverriddenImageSizeFactor = YES; + _texelWidth = newValue; + + [self setFloat:_texelWidth forUniform:texelWidthUniform program:filterProgram]; +} + +- (void)setTexelHeight:(CGFloat)newValue; +{ + hasOverriddenImageSizeFactor = YES; + _texelHeight = newValue; + + [self setFloat:_texelHeight forUniform:texelHeightUniform program:filterProgram]; +} + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageContrastFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageContrastFilter.h new file mode 100755 index 00000000..e09e6dc4 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageContrastFilter.h @@ -0,0 +1,14 @@ +#import "GPUImageFilter.h" + +/** Adjusts the contrast of the image + */ +@interface GPUImageContrastFilter : GPUImageFilter +{ + GLint contrastUniform; +} + +/** Contrast ranges from 0.0 to 4.0 (max contrast), with 1.0 as the normal level + */ +@property(readwrite, nonatomic) CGFloat contrast; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageContrastFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageContrastFilter.m new file mode 100755 index 00000000..5ed1e22f --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageContrastFilter.m @@ -0,0 +1,66 @@ +#import "GPUImageContrastFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageContrastFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform 
lowp float contrast; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4(((textureColor.rgb - vec3(0.5)) * contrast + vec3(0.5)), textureColor.w); + } +); +#else +NSString *const kGPUImageContrastFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float contrast; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4(((textureColor.rgb - vec3(0.5)) * contrast + vec3(0.5)), textureColor.w); + } + ); +#endif + +@implementation GPUImageContrastFilter + +@synthesize contrast = _contrast; + +#pragma mark - +#pragma mark Initialization + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageContrastFragmentShaderString])) + { + return nil; + } + + contrastUniform = [filterProgram uniformIndex:@"contrast"]; + self.contrast = 1.0; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setContrast:(CGFloat)newValue; +{ + _contrast = newValue; + + [self setFloat:_contrast forUniform:contrastUniform program:filterProgram]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageCropFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageCropFilter.h new file mode 100755 index 00000000..641fb7bf --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageCropFilter.h @@ -0,0 +1,14 @@ +#import "GPUImageFilter.h" + +@interface GPUImageCropFilter : GPUImageFilter +{ + GLfloat cropTextureCoordinates[8]; +} + +// The crop region is the rectangle within the image to crop. 
It is normalized to a coordinate space from 0.0 to 1.0, with 0.0, 0.0 being the upper left corner of the image +@property(readwrite, nonatomic) CGRect cropRegion; + +// Initialization and teardown +- (id)initWithCropRegion:(CGRect)newCropRegion; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageCropFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageCropFilter.m new file mode 100755 index 00000000..22e33c17 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageCropFilter.m @@ -0,0 +1,274 @@ +#import "GPUImageCropFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageCropFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + gl_FragColor = texture2D(inputImageTexture, textureCoordinate); + } +); +#else +NSString *const kGPUImageCropFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + gl_FragColor = texture2D(inputImageTexture, textureCoordinate); + } +); +#endif + +@interface GPUImageCropFilter () + +- (void)calculateCropTextureCoordinates; + +@end + +@interface GPUImageCropFilter() +{ + CGSize originallySuppliedInputSize; +} + +@end + +@implementation GPUImageCropFilter + +@synthesize cropRegion = _cropRegion; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithCropRegion:(CGRect)newCropRegion; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageCropFragmentShaderString])) + { + return nil; + } + + self.cropRegion = newCropRegion; + + return self; +} + +- (id)init; +{ + if (!(self = [self initWithCropRegion:CGRectMake(0.0, 0.0, 1.0, 1.0)])) + { + return nil; + } + + return self; +} + +#pragma mark - +#pragma mark Rendering + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + if (self.preventRendering) + { + return; + } + +// if (overrideInputSize) +// { +// if (CGSizeEqualToSize(forcedMaximumSize, 
CGSizeZero)) +// { +// return; +// } +// else +// { +// CGRect insetRect = AVMakeRectWithAspectRatioInsideRect(newSize, CGRectMake(0.0, 0.0, forcedMaximumSize.width, forcedMaximumSize.height)); +// inputTextureSize = insetRect.size; +// return; +// } +// } + + CGSize rotatedSize = [self rotatedSize:newSize forIndex:textureIndex]; + originallySuppliedInputSize = rotatedSize; + + CGSize scaledSize; + scaledSize.width = rotatedSize.width * _cropRegion.size.width; + scaledSize.height = rotatedSize.height * _cropRegion.size.height; + + + if (CGSizeEqualToSize(scaledSize, CGSizeZero)) + { + inputTextureSize = scaledSize; + } + else if (!CGSizeEqualToSize(inputTextureSize, scaledSize)) + { + inputTextureSize = scaledSize; + } +} + +#pragma mark - +#pragma mark GPUImageInput + +- (void)calculateCropTextureCoordinates; +{ + CGFloat minX = _cropRegion.origin.x; + CGFloat minY = _cropRegion.origin.y; + CGFloat maxX = CGRectGetMaxX(_cropRegion); + CGFloat maxY = CGRectGetMaxY(_cropRegion); + + switch(inputRotation) + { + case kGPUImageNoRotation: // Works + { + cropTextureCoordinates[0] = minX; // 0,0 + cropTextureCoordinates[1] = minY; + + cropTextureCoordinates[2] = maxX; // 1,0 + cropTextureCoordinates[3] = minY; + + cropTextureCoordinates[4] = minX; // 0,1 + cropTextureCoordinates[5] = maxY; + + cropTextureCoordinates[6] = maxX; // 1,1 + cropTextureCoordinates[7] = maxY; + }; break; + case kGPUImageRotateLeft: // Fixed + { + cropTextureCoordinates[0] = maxY; // 1,0 + cropTextureCoordinates[1] = 1.0 - maxX; + + cropTextureCoordinates[2] = maxY; // 1,1 + cropTextureCoordinates[3] = 1.0 - minX; + + cropTextureCoordinates[4] = minY; // 0,0 + cropTextureCoordinates[5] = 1.0 - maxX; + + cropTextureCoordinates[6] = minY; // 0,1 + cropTextureCoordinates[7] = 1.0 - minX; + }; break; + case kGPUImageRotateRight: // Fixed + { + cropTextureCoordinates[0] = minY; // 0,1 + cropTextureCoordinates[1] = 1.0 - minX; + + cropTextureCoordinates[2] = minY; // 0,0 + cropTextureCoordinates[3] = 
1.0 - maxX; + + cropTextureCoordinates[4] = maxY; // 1,1 + cropTextureCoordinates[5] = 1.0 - minX; + + cropTextureCoordinates[6] = maxY; // 1,0 + cropTextureCoordinates[7] = 1.0 - maxX; + }; break; + case kGPUImageFlipVertical: // Works for me + { + cropTextureCoordinates[0] = minX; // 0,1 + cropTextureCoordinates[1] = maxY; + + cropTextureCoordinates[2] = maxX; // 1,1 + cropTextureCoordinates[3] = maxY; + + cropTextureCoordinates[4] = minX; // 0,0 + cropTextureCoordinates[5] = minY; + + cropTextureCoordinates[6] = maxX; // 1,0 + cropTextureCoordinates[7] = minY; + }; break; + case kGPUImageFlipHorizonal: // Works for me + { + cropTextureCoordinates[0] = maxX; // 1,0 + cropTextureCoordinates[1] = minY; + + cropTextureCoordinates[2] = minX; // 0,0 + cropTextureCoordinates[3] = minY; + + cropTextureCoordinates[4] = maxX; // 1,1 + cropTextureCoordinates[5] = maxY; + + cropTextureCoordinates[6] = minX; // 0,1 + cropTextureCoordinates[7] = maxY; + }; break; + case kGPUImageRotate180: // Fixed + { + cropTextureCoordinates[0] = maxX; // 1,1 + cropTextureCoordinates[1] = maxY; + + cropTextureCoordinates[2] = minX; // 0,1 + cropTextureCoordinates[3] = maxY; + + cropTextureCoordinates[4] = maxX; // 1,0 + cropTextureCoordinates[5] = minY; + + cropTextureCoordinates[6] = minX; // 0,0 + cropTextureCoordinates[7] = minY; + }; break; + case kGPUImageRotateRightFlipVertical: // Fixed + { + cropTextureCoordinates[0] = minY; // 0,0 + cropTextureCoordinates[1] = 1.0 - maxX; + + cropTextureCoordinates[2] = minY; // 0,1 + cropTextureCoordinates[3] = 1.0 - minX; + + cropTextureCoordinates[4] = maxY; // 1,0 + cropTextureCoordinates[5] = 1.0 - maxX; + + cropTextureCoordinates[6] = maxY; // 1,1 + cropTextureCoordinates[7] = 1.0 - minX; + }; break; + case kGPUImageRotateRightFlipHorizontal: // Fixed + { + cropTextureCoordinates[0] = maxY; // 1,1 + cropTextureCoordinates[1] = 1.0 - minX; + + cropTextureCoordinates[2] = maxY; // 1,0 + cropTextureCoordinates[3] = 1.0 - maxX; + + 
cropTextureCoordinates[4] = minY; // 0,1 + cropTextureCoordinates[5] = 1.0 - minX; + + cropTextureCoordinates[6] = minY; // 0,0 + cropTextureCoordinates[7] = 1.0 - maxX; + }; break; + } +} + +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +{ + static const GLfloat cropSquareVertices[] = { + -1.0f, -1.0f, + 1.0f, -1.0f, + -1.0f, 1.0f, + 1.0f, 1.0f, + }; + + [self renderToTextureWithVertices:cropSquareVertices textureCoordinates:cropTextureCoordinates]; + + [self informTargetsAboutNewFrameAtTime:frameTime]; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setCropRegion:(CGRect)newValue; +{ + NSParameterAssert(newValue.origin.x >= 0 && newValue.origin.x <= 1 && + newValue.origin.y >= 0 && newValue.origin.y <= 1 && + newValue.size.width >= 0 && newValue.size.width <= 1 && + newValue.size.height >= 0 && newValue.size.height <= 1); + + _cropRegion = newValue; + [self calculateCropTextureCoordinates]; +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + [super setInputRotation:newInputRotation atIndex:textureIndex]; + [self calculateCropTextureCoordinates]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageCrosshairGenerator.h b/LFLiveKit/Vendor/GPUImage/GPUImageCrosshairGenerator.h new file mode 100644 index 00000000..569774f5 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageCrosshairGenerator.h @@ -0,0 +1,17 @@ +#import "GPUImageFilter.h" + +@interface GPUImageCrosshairGenerator : GPUImageFilter +{ + GLint crosshairWidthUniform, crosshairColorUniform; +} + +// The width of the displayed crosshairs, in pixels. Currently this only works well for odd widths. The default is 5. +@property(readwrite, nonatomic) CGFloat crosshairWidth; + +// The color of the crosshairs is specified using individual red, green, and blue components (normalized to 1.0). The default is green: (0.0, 1.0, 0.0). 
+- (void)setCrosshairColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; + +// Rendering +- (void)renderCrosshairsFromArray:(GLfloat *)crosshairCoordinates count:(NSUInteger)numberOfCrosshairs frameTime:(CMTime)frameTime; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageCrosshairGenerator.m b/LFLiveKit/Vendor/GPUImage/GPUImageCrosshairGenerator.m new file mode 100644 index 00000000..9e2a29a2 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageCrosshairGenerator.m @@ -0,0 +1,139 @@ +#import "GPUImageCrosshairGenerator.h" + +NSString *const kGPUImageCrosshairVertexShaderString = SHADER_STRING +( + attribute vec4 position; + + uniform float crosshairWidth; + + varying vec2 centerLocation; + varying float pointSpacing; + + void main() + { + gl_Position = vec4(((position.xy * 2.0) - 1.0), 0.0, 1.0); + gl_PointSize = crosshairWidth + 1.0; + pointSpacing = 1.0 / crosshairWidth; + centerLocation = vec2(pointSpacing * ceil(crosshairWidth / 2.0), pointSpacing * ceil(crosshairWidth / 2.0)); + } +); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageCrosshairFragmentShaderString = SHADER_STRING +( + uniform lowp vec3 crosshairColor; + + varying highp vec2 centerLocation; + varying highp float pointSpacing; + + void main() + { + lowp vec2 distanceFromCenter = abs(centerLocation - gl_PointCoord.xy); + lowp float axisTest = step(pointSpacing, gl_PointCoord.y) * step(distanceFromCenter.x, 0.09) + step(pointSpacing, gl_PointCoord.x) * step(distanceFromCenter.y, 0.09); + + gl_FragColor = vec4(crosshairColor * axisTest, axisTest); +// gl_FragColor = vec4(distanceFromCenterInX, distanceFromCenterInY, 0.0, 1.0); + } +); +#else +NSString *const kGPUImageCrosshairFragmentShaderString = SHADER_STRING +( + GPUImageEscapedHashIdentifier(version 120)\n + + uniform vec3 crosshairColor; + + varying vec2 centerLocation; + varying float pointSpacing; + + void main() + { + vec2 distanceFromCenter = abs(centerLocation - 
gl_PointCoord.xy); + float axisTest = step(pointSpacing, gl_PointCoord.y) * step(distanceFromCenter.x, 0.09) + step(pointSpacing, gl_PointCoord.x) * step(distanceFromCenter.y, 0.09); + + gl_FragColor = vec4(crosshairColor * axisTest, axisTest); + // gl_FragColor = vec4(distanceFromCenterInX, distanceFromCenterInY, 0.0, 1.0); + } +); +#endif + +@implementation GPUImageCrosshairGenerator + +@synthesize crosshairWidth = _crosshairWidth; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithVertexShaderFromString:kGPUImageCrosshairVertexShaderString fragmentShaderFromString:kGPUImageCrosshairFragmentShaderString])) + { + return nil; + } + + runSynchronouslyOnVideoProcessingQueue(^{ + crosshairWidthUniform = [filterProgram uniformIndex:@"crosshairWidth"]; + crosshairColorUniform = [filterProgram uniformIndex:@"crosshairColor"]; + + self.crosshairWidth = 5.0; + [self setCrosshairColorRed:0.0 green:1.0 blue:0.0]; + }); + + return self; +} + +#pragma mark - +#pragma mark Rendering + +- (void)renderCrosshairsFromArray:(GLfloat *)crosshairCoordinates count:(NSUInteger)numberOfCrosshairs frameTime:(CMTime)frameTime; +{ + if (self.preventRendering) + { + return; + } + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:filterProgram]; + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +#else + glEnable(GL_POINT_SPRITE); + glEnable(GL_VERTEX_PROGRAM_POINT_SIZE); +#endif + + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + + glClearColor(0.0, 0.0, 0.0, 0.0); + glClear(GL_COLOR_BUFFER_BIT); + + glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, crosshairCoordinates); + + glDrawArrays(GL_POINTS, 0, (GLsizei)numberOfCrosshairs); + + [self informTargetsAboutNewFrameAtTime:frameTime]; + }); +} + +- 
(void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + // Prevent rendering of the frame by normal means +} + +#pragma mark - +#pragma mark Accessors + +- (void)setCrosshairWidth:(CGFloat)newValue; +{ + _crosshairWidth = newValue; + + [self setFloat:_crosshairWidth forUniform:crosshairWidthUniform program:filterProgram]; +} + +- (void)setCrosshairColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; +{ + GPUVector3 crosshairColor = {redComponent, greenComponent, blueComponent}; + + [self setVec3:crosshairColor forUniform:crosshairColorUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageCrosshatchFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageCrosshatchFilter.h new file mode 100755 index 00000000..dab18967 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageCrosshatchFilter.h @@ -0,0 +1,13 @@ +#import "GPUImageFilter.h" + +@interface GPUImageCrosshatchFilter : GPUImageFilter +{ + GLint crossHatchSpacingUniform, lineWidthUniform; +} +// The fractional width of the image to use as the spacing for the crosshatch. The default is 0.03. +@property(readwrite, nonatomic) CGFloat crossHatchSpacing; + +// A relative width for the crosshatch lines. The default is 0.003. 
+@property(readwrite, nonatomic) CGFloat lineWidth; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageCrosshatchFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageCrosshatchFilter.m new file mode 100755 index 00000000..51dbd592 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageCrosshatchFilter.m @@ -0,0 +1,163 @@ +#import "GPUImageCrosshatchFilter.h" + +// Shader code based on http://machinesdontcare.wordpress.com/ + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageCrosshatchFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp float crossHatchSpacing; + uniform highp float lineWidth; + + const highp vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + highp float luminance = dot(texture2D(inputImageTexture, textureCoordinate).rgb, W); + + lowp vec4 colorToDisplay = vec4(1.0, 1.0, 1.0, 1.0); + if (luminance < 1.00) + { + if (mod(textureCoordinate.x + textureCoordinate.y, crossHatchSpacing) <= lineWidth) + { + colorToDisplay = vec4(0.0, 0.0, 0.0, 1.0); + } + } + if (luminance < 0.75) + { + if (mod(textureCoordinate.x - textureCoordinate.y, crossHatchSpacing) <= lineWidth) + { + colorToDisplay = vec4(0.0, 0.0, 0.0, 1.0); + } + } + if (luminance < 0.50) + { + if (mod(textureCoordinate.x + textureCoordinate.y - (crossHatchSpacing / 2.0), crossHatchSpacing) <= lineWidth) + { + colorToDisplay = vec4(0.0, 0.0, 0.0, 1.0); + } + } + if (luminance < 0.3) + { + if (mod(textureCoordinate.x - textureCoordinate.y - (crossHatchSpacing / 2.0), crossHatchSpacing) <= lineWidth) + { + colorToDisplay = vec4(0.0, 0.0, 0.0, 1.0); + } + } + + gl_FragColor = colorToDisplay; + } +); +#else +NSString *const kGPUImageCrosshatchFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform float crossHatchSpacing; + uniform float lineWidth; + + const vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + 
float luminance = dot(texture2D(inputImageTexture, textureCoordinate).rgb, W); + + vec4 colorToDisplay = vec4(1.0, 1.0, 1.0, 1.0); + if (luminance < 1.00) + { + if (mod(textureCoordinate.x + textureCoordinate.y, crossHatchSpacing) <= lineWidth) + { + colorToDisplay = vec4(0.0, 0.0, 0.0, 1.0); + } + } + if (luminance < 0.75) + { + if (mod(textureCoordinate.x - textureCoordinate.y, crossHatchSpacing) <= lineWidth) + { + colorToDisplay = vec4(0.0, 0.0, 0.0, 1.0); + } + } + if (luminance < 0.50) + { + if (mod(textureCoordinate.x + textureCoordinate.y - (crossHatchSpacing / 2.0), crossHatchSpacing) <= lineWidth) + { + colorToDisplay = vec4(0.0, 0.0, 0.0, 1.0); + } + } + if (luminance < 0.3) + { + if (mod(textureCoordinate.x - textureCoordinate.y - (crossHatchSpacing / 2.0), crossHatchSpacing) <= lineWidth) + { + colorToDisplay = vec4(0.0, 0.0, 0.0, 1.0); + } + } + + gl_FragColor = colorToDisplay; + } +); +#endif + +@implementation GPUImageCrosshatchFilter + +@synthesize crossHatchSpacing = _crossHatchSpacing; +@synthesize lineWidth = _lineWidth; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageCrosshatchFragmentShaderString])) + { + return nil; + } + + crossHatchSpacingUniform = [filterProgram uniformIndex:@"crossHatchSpacing"]; + lineWidthUniform = [filterProgram uniformIndex:@"lineWidth"]; + + self.crossHatchSpacing = 0.03; + self.lineWidth = 0.003; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setCrossHatchSpacing:(CGFloat)newValue; +{ + CGFloat singlePixelSpacing; + if (inputTextureSize.width != 0.0) + { + singlePixelSpacing = 1.0 / inputTextureSize.width; + } + else + { + singlePixelSpacing = 1.0 / 2048.0; + } + + if (newValue < singlePixelSpacing) + { + _crossHatchSpacing = singlePixelSpacing; + } + else + { + _crossHatchSpacing = newValue; + } + + [self setFloat:_crossHatchSpacing forUniform:crossHatchSpacingUniform program:filterProgram]; +} + 
+- (void)setLineWidth:(CGFloat)newValue; +{ + _lineWidth = newValue; + + [self setFloat:_lineWidth forUniform:lineWidthUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDarkenBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageDarkenBlendFilter.h new file mode 100755 index 00000000..5dfe3405 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDarkenBlendFilter.h @@ -0,0 +1,7 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageDarkenBlendFilter : GPUImageTwoInputFilter +{ +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDarkenBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageDarkenBlendFilter.m new file mode 100644 index 00000000..85ec9e8a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDarkenBlendFilter.m @@ -0,0 +1,52 @@ +#import "GPUImageDarkenBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageDarkenBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + lowp vec4 base = texture2D(inputImageTexture, textureCoordinate); + lowp vec4 overlayer = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(min(overlayer.rgb * base.a, base.rgb * overlayer.a) + overlayer.rgb * (1.0 - base.a) + base.rgb * (1.0 - overlayer.a), 1.0); + } +); +#else +NSString *const kGPUImageDarkenBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 base = texture2D(inputImageTexture, textureCoordinate); + vec4 overlayer = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(min(overlayer.rgb * base.a, base.rgb * overlayer.a) + overlayer.rgb * (1.0 - base.a) + base.rgb * (1.0 - overlayer.a), 1.0); + } + ); +#endif + 
+@implementation GPUImageDarkenBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageDarkenBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDifferenceBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageDifferenceBlendFilter.h new file mode 100755 index 00000000..7c7dfc23 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDifferenceBlendFilter.h @@ -0,0 +1,7 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageDifferenceBlendFilter : GPUImageTwoInputFilter +{ +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDifferenceBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageDifferenceBlendFilter.m new file mode 100755 index 00000000..01bf09b2 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDifferenceBlendFilter.m @@ -0,0 +1,50 @@ +#import "GPUImageDifferenceBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageDifferenceBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + mediump vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + mediump vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + gl_FragColor = vec4(abs(textureColor2.rgb - textureColor.rgb), textureColor.a); + } +); +#else +NSString *const kGPUImageDifferenceBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + gl_FragColor = vec4(abs(textureColor2.rgb - textureColor.rgb), textureColor.a); + } +); +#endif + 
+@implementation GPUImageDifferenceBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageDifferenceBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDilationFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageDilationFilter.h new file mode 100644 index 00000000..59423a37 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDilationFilter.h @@ -0,0 +1,16 @@ +#import "GPUImageTwoPassTextureSamplingFilter.h" + +// For each pixel, this sets it to the maximum value of the red channel in a rectangular neighborhood extending out dilationRadius pixels from the center. +// This extends out bright features, and is most commonly used with black-and-white thresholded images. + +extern NSString *const kGPUImageDilationRadiusOneVertexShaderString; +extern NSString *const kGPUImageDilationRadiusTwoVertexShaderString; +extern NSString *const kGPUImageDilationRadiusThreeVertexShaderString; +extern NSString *const kGPUImageDilationRadiusFourVertexShaderString; + +@interface GPUImageDilationFilter : GPUImageTwoPassTextureSamplingFilter + +// Acceptable values for dilationRadius, which sets the distance in pixels to sample out from the center, are 1, 2, 3, and 4. 
+- (id)initWithRadius:(NSUInteger)dilationRadius; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDilationFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageDilationFilter.m new file mode 100644 index 00000000..df065185 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDilationFilter.m @@ -0,0 +1,431 @@ +#import "GPUImageDilationFilter.h" + +@implementation GPUImageDilationFilter + +NSString *const kGPUImageDilationRadiusOneVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec2 inputTextureCoordinate; + + uniform float texelWidthOffset; + uniform float texelHeightOffset; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + + void main() + { + gl_Position = position; + + vec2 offset = vec2(texelWidthOffset, texelHeightOffset); + + centerTextureCoordinate = inputTextureCoordinate; + oneStepNegativeTextureCoordinate = inputTextureCoordinate - offset; + oneStepPositiveTextureCoordinate = inputTextureCoordinate + offset; + } +); + +NSString *const kGPUImageDilationRadiusTwoVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec2 inputTextureCoordinate; + + uniform float texelWidthOffset; + uniform float texelHeightOffset; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + + void main() + { + gl_Position = position; + + vec2 offset = vec2(texelWidthOffset, texelHeightOffset); + + centerTextureCoordinate = inputTextureCoordinate; + oneStepNegativeTextureCoordinate = inputTextureCoordinate - offset; + oneStepPositiveTextureCoordinate = inputTextureCoordinate + offset; + twoStepsNegativeTextureCoordinate = inputTextureCoordinate - (offset * 2.0); + twoStepsPositiveTextureCoordinate = inputTextureCoordinate + (offset * 2.0); + } +); + +NSString 
*const kGPUImageDilationRadiusThreeVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec2 inputTextureCoordinate; + + uniform float texelWidthOffset; + uniform float texelHeightOffset; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + + void main() + { + gl_Position = position; + + vec2 offset = vec2(texelWidthOffset, texelHeightOffset); + + centerTextureCoordinate = inputTextureCoordinate; + oneStepNegativeTextureCoordinate = inputTextureCoordinate - offset; + oneStepPositiveTextureCoordinate = inputTextureCoordinate + offset; + twoStepsNegativeTextureCoordinate = inputTextureCoordinate - (offset * 2.0); + twoStepsPositiveTextureCoordinate = inputTextureCoordinate + (offset * 2.0); + threeStepsNegativeTextureCoordinate = inputTextureCoordinate - (offset * 3.0); + threeStepsPositiveTextureCoordinate = inputTextureCoordinate + (offset * 3.0); + } +); + +NSString *const kGPUImageDilationRadiusFourVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec2 inputTextureCoordinate; + + uniform float texelWidthOffset; + uniform float texelHeightOffset; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + varying vec2 fourStepsPositiveTextureCoordinate; + varying vec2 fourStepsNegativeTextureCoordinate; + + void main() + { + gl_Position = position; + + vec2 offset = vec2(texelWidthOffset, texelHeightOffset); + + centerTextureCoordinate = 
inputTextureCoordinate; + oneStepNegativeTextureCoordinate = inputTextureCoordinate - offset; + oneStepPositiveTextureCoordinate = inputTextureCoordinate + offset; + twoStepsNegativeTextureCoordinate = inputTextureCoordinate - (offset * 2.0); + twoStepsPositiveTextureCoordinate = inputTextureCoordinate + (offset * 2.0); + threeStepsNegativeTextureCoordinate = inputTextureCoordinate - (offset * 3.0); + threeStepsPositiveTextureCoordinate = inputTextureCoordinate + (offset * 3.0); + fourStepsNegativeTextureCoordinate = inputTextureCoordinate - (offset * 4.0); + fourStepsPositiveTextureCoordinate = inputTextureCoordinate + (offset * 4.0); + } +); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageDilationRadiusOneFragmentShaderString = SHADER_STRING +( + precision lowp float; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + + lowp float maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = max(maxValue, oneStepNegativeIntensity); + + gl_FragColor = vec4(vec3(maxValue), 1.0); + } +); + +NSString *const kGPUImageDilationRadiusTwoFragmentShaderString = SHADER_STRING +( + precision lowp float; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float 
oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + float twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate).r; + float twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate).r; + + lowp float maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = max(maxValue, oneStepNegativeIntensity); + maxValue = max(maxValue, twoStepsPositiveIntensity); + maxValue = max(maxValue, twoStepsNegativeIntensity); + + gl_FragColor = vec4(vec3(maxValue), 1.0); + } +); + +NSString *const kGPUImageDilationRadiusThreeFragmentShaderString = SHADER_STRING +( + precision lowp float; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + float twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate).r; + float twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate).r; + float threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate).r; + float threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate).r; + + lowp float maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = 
max(maxValue, oneStepNegativeIntensity); + maxValue = max(maxValue, twoStepsPositiveIntensity); + maxValue = max(maxValue, twoStepsNegativeIntensity); + maxValue = max(maxValue, threeStepsPositiveIntensity); + maxValue = max(maxValue, threeStepsNegativeIntensity); + + gl_FragColor = vec4(vec3(maxValue), 1.0); + } +); + +NSString *const kGPUImageDilationRadiusFourFragmentShaderString = SHADER_STRING +( + precision lowp float; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + varying vec2 fourStepsPositiveTextureCoordinate; + varying vec2 fourStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + float twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate).r; + float twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate).r; + float threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate).r; + float threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate).r; + float fourStepsPositiveIntensity = texture2D(inputImageTexture, fourStepsPositiveTextureCoordinate).r; + float fourStepsNegativeIntensity = texture2D(inputImageTexture, fourStepsNegativeTextureCoordinate).r; + + lowp float maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = max(maxValue, oneStepNegativeIntensity); + maxValue = 
max(maxValue, twoStepsPositiveIntensity); + maxValue = max(maxValue, twoStepsNegativeIntensity); + maxValue = max(maxValue, threeStepsPositiveIntensity); + maxValue = max(maxValue, threeStepsNegativeIntensity); + maxValue = max(maxValue, fourStepsPositiveIntensity); + maxValue = max(maxValue, fourStepsNegativeIntensity); + + gl_FragColor = vec4(vec3(maxValue), 1.0); + } +); +#else +NSString *const kGPUImageDilationRadiusOneFragmentShaderString = SHADER_STRING +( + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + + float maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = max(maxValue, oneStepNegativeIntensity); + + gl_FragColor = vec4(vec3(maxValue), 1.0); + } +); + +NSString *const kGPUImageDilationRadiusTwoFragmentShaderString = SHADER_STRING +( + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + float twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate).r; + float twoStepsNegativeIntensity = texture2D(inputImageTexture, 
twoStepsNegativeTextureCoordinate).r; + + float maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = max(maxValue, oneStepNegativeIntensity); + maxValue = max(maxValue, twoStepsPositiveIntensity); + maxValue = max(maxValue, twoStepsNegativeIntensity); + + gl_FragColor = vec4(vec3(maxValue), 1.0); + } +); + +NSString *const kGPUImageDilationRadiusThreeFragmentShaderString = SHADER_STRING +( + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + float twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate).r; + float twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate).r; + float threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate).r; + float threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate).r; + + float maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = max(maxValue, oneStepNegativeIntensity); + maxValue = max(maxValue, twoStepsPositiveIntensity); + maxValue = max(maxValue, twoStepsNegativeIntensity); + maxValue = max(maxValue, threeStepsPositiveIntensity); + maxValue = max(maxValue, threeStepsNegativeIntensity); + + gl_FragColor = vec4(vec3(maxValue), 1.0); + } +); + +NSString *const kGPUImageDilationRadiusFourFragmentShaderString = 
SHADER_STRING +( + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + varying vec2 fourStepsPositiveTextureCoordinate; + varying vec2 fourStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + float twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate).r; + float twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate).r; + float threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate).r; + float threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate).r; + float fourStepsPositiveIntensity = texture2D(inputImageTexture, fourStepsPositiveTextureCoordinate).r; + float fourStepsNegativeIntensity = texture2D(inputImageTexture, fourStepsNegativeTextureCoordinate).r; + + float maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = max(maxValue, oneStepNegativeIntensity); + maxValue = max(maxValue, twoStepsPositiveIntensity); + maxValue = max(maxValue, twoStepsNegativeIntensity); + maxValue = max(maxValue, threeStepsPositiveIntensity); + maxValue = max(maxValue, threeStepsNegativeIntensity); + maxValue = max(maxValue, fourStepsPositiveIntensity); + maxValue = max(maxValue, fourStepsNegativeIntensity); + + gl_FragColor = vec4(vec3(maxValue), 1.0); + } +); +#endif + +#pragma mark - +#pragma mark 
Initialization and teardown + +- (id)initWithRadius:(NSUInteger)dilationRadius; +{ + NSString *fragmentShaderForThisRadius = nil; + NSString *vertexShaderForThisRadius = nil; + + switch (dilationRadius) + { + case 0: + case 1: + { + vertexShaderForThisRadius = kGPUImageDilationRadiusOneVertexShaderString; + fragmentShaderForThisRadius = kGPUImageDilationRadiusOneFragmentShaderString; + }; break; + case 2: + { + vertexShaderForThisRadius = kGPUImageDilationRadiusTwoVertexShaderString; + fragmentShaderForThisRadius = kGPUImageDilationRadiusTwoFragmentShaderString; + }; break; + case 3: + { + vertexShaderForThisRadius = kGPUImageDilationRadiusThreeVertexShaderString; + fragmentShaderForThisRadius = kGPUImageDilationRadiusThreeFragmentShaderString; + }; break; + case 4: + { + vertexShaderForThisRadius = kGPUImageDilationRadiusFourVertexShaderString; + fragmentShaderForThisRadius = kGPUImageDilationRadiusFourFragmentShaderString; + }; break; + default: + { + vertexShaderForThisRadius = kGPUImageDilationRadiusFourVertexShaderString; + fragmentShaderForThisRadius = kGPUImageDilationRadiusFourFragmentShaderString; + }; break; + } + + if (!(self = [super initWithFirstStageVertexShaderFromString:vertexShaderForThisRadius firstStageFragmentShaderFromString:fragmentShaderForThisRadius secondStageVertexShaderFromString:vertexShaderForThisRadius secondStageFragmentShaderFromString:fragmentShaderForThisRadius])) + { + return nil; + } + + return self; +} + +- (id)init; +{ + if (!(self = [self initWithRadius:1])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalNonMaximumSuppressionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalNonMaximumSuppressionFilter.h new file mode 100644 index 00000000..fdffb9fb --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalNonMaximumSuppressionFilter.h @@ -0,0 +1,19 @@ +#import "GPUImageFilter.h" + +@interface GPUImageDirectionalNonMaximumSuppressionFilter : 
GPUImageFilter +{ + GLint texelWidthUniform, texelHeightUniform; + GLint upperThresholdUniform, lowerThresholdUniform; + + BOOL hasOverriddenImageSizeFactor; +} + +// The texel width and height determines how far out to sample from this texel. By default, this is the normalized width of a pixel, but this can be overridden for different effects. +@property(readwrite, nonatomic) CGFloat texelWidth; +@property(readwrite, nonatomic) CGFloat texelHeight; + +// These thresholds set cutoffs for the intensities that definitely get registered (upper threshold) and those that definitely don't (lower threshold) +@property(readwrite, nonatomic) CGFloat upperThreshold; +@property(readwrite, nonatomic) CGFloat lowerThreshold; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalNonMaximumSuppressionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalNonMaximumSuppressionFilter.m new file mode 100644 index 00000000..b442f3aa --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalNonMaximumSuppressionFilter.m @@ -0,0 +1,141 @@ +#import "GPUImageDirectionalNonMaximumSuppressionFilter.h" + +@implementation GPUImageDirectionalNonMaximumSuppressionFilter + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageDirectionalNonmaximumSuppressionFragmentShaderString = SHADER_STRING +( + precision mediump float; + + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform highp float texelWidth; + uniform highp float texelHeight; + uniform mediump float upperThreshold; + uniform mediump float lowerThreshold; + + void main() + { + vec3 currentGradientAndDirection = texture2D(inputImageTexture, textureCoordinate).rgb; + vec2 gradientDirection = ((currentGradientAndDirection.gb * 2.0) - 1.0) * vec2(texelWidth, texelHeight); + + float firstSampledGradientMagnitude = texture2D(inputImageTexture, textureCoordinate + gradientDirection).r; + float secondSampledGradientMagnitude = texture2D(inputImageTexture, 
textureCoordinate - gradientDirection).r; + + float multiplier = step(firstSampledGradientMagnitude, currentGradientAndDirection.r); + multiplier = multiplier * step(secondSampledGradientMagnitude, currentGradientAndDirection.r); + + float thresholdCompliance = smoothstep(lowerThreshold, upperThreshold, currentGradientAndDirection.r); + multiplier = multiplier * thresholdCompliance; + + gl_FragColor = vec4(multiplier, multiplier, multiplier, 1.0); + } +); +#else +NSString *const kGPUImageDirectionalNonmaximumSuppressionFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float texelWidth; + uniform float texelHeight; + uniform float upperThreshold; + uniform float lowerThreshold; + + void main() + { + vec3 currentGradientAndDirection = texture2D(inputImageTexture, textureCoordinate).rgb; + vec2 gradientDirection = ((currentGradientAndDirection.gb * 2.0) - 1.0) * vec2(texelWidth, texelHeight); + + float firstSampledGradientMagnitude = texture2D(inputImageTexture, textureCoordinate + gradientDirection).r; + float secondSampledGradientMagnitude = texture2D(inputImageTexture, textureCoordinate - gradientDirection).r; + + float multiplier = step(firstSampledGradientMagnitude, currentGradientAndDirection.r); + multiplier = multiplier * step(secondSampledGradientMagnitude, currentGradientAndDirection.r); + + float thresholdCompliance = smoothstep(lowerThreshold, upperThreshold, currentGradientAndDirection.r); + multiplier = multiplier * thresholdCompliance; + + gl_FragColor = vec4(multiplier, multiplier, multiplier, 1.0); + } +); +#endif + +@synthesize texelWidth = _texelWidth; +@synthesize texelHeight = _texelHeight; +@synthesize upperThreshold = _upperThreshold; +@synthesize lowerThreshold = _lowerThreshold; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super 
initWithFragmentShaderFromString:kGPUImageDirectionalNonmaximumSuppressionFragmentShaderString])) + { + return nil; + } + + texelWidthUniform = [filterProgram uniformIndex:@"texelWidth"]; + texelHeightUniform = [filterProgram uniformIndex:@"texelHeight"]; + upperThresholdUniform = [filterProgram uniformIndex:@"upperThreshold"]; + lowerThresholdUniform = [filterProgram uniformIndex:@"lowerThreshold"]; + + self.upperThreshold = 0.5; + self.lowerThreshold = 0.1; + + return self; +} + +- (void)setupFilterForSize:(CGSize)filterFrameSize; +{ + if (!hasOverriddenImageSizeFactor) + { + _texelWidth = 1.0 / filterFrameSize.width; + _texelHeight = 1.0 / filterFrameSize.height; + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:filterProgram]; + glUniform1f(texelWidthUniform, _texelWidth); + glUniform1f(texelHeightUniform, _texelHeight); + }); + } +} + +#pragma mark - +#pragma mark Accessors + +- (void)setTexelWidth:(CGFloat)newValue; +{ + hasOverriddenImageSizeFactor = YES; + _texelWidth = newValue; + + [self setFloat:_texelWidth forUniform:texelWidthUniform program:filterProgram]; +} + +- (void)setTexelHeight:(CGFloat)newValue; +{ + hasOverriddenImageSizeFactor = YES; + _texelHeight = newValue; + + [self setFloat:_texelHeight forUniform:texelHeightUniform program:filterProgram]; +} + +- (void)setLowerThreshold:(CGFloat)newValue; +{ + _lowerThreshold = newValue; + + [self setFloat:_lowerThreshold forUniform:lowerThresholdUniform program:filterProgram]; +} + +- (void)setUpperThreshold:(CGFloat)newValue; +{ + _upperThreshold = newValue; + + [self setFloat:_upperThreshold forUniform:upperThresholdUniform program:filterProgram]; +} + + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalSobelEdgeDetectionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalSobelEdgeDetectionFilter.h new file mode 100644 index 00000000..cfccc897 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalSobelEdgeDetectionFilter.h @@ -0,0 
+1,5 @@ +#import "GPUImage3x3TextureSamplingFilter.h" + +@interface GPUImageDirectionalSobelEdgeDetectionFilter : GPUImage3x3TextureSamplingFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalSobelEdgeDetectionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalSobelEdgeDetectionFilter.m new file mode 100644 index 00000000..a3575e3a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalSobelEdgeDetectionFilter.m @@ -0,0 +1,103 @@ +#import "GPUImageDirectionalSobelEdgeDetectionFilter.h" + +@implementation GPUImageDirectionalSobelEdgeDetectionFilter + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageDirectionalSobelEdgeDetectionFragmentShaderString = SHADER_STRING +( + precision mediump float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + + vec2 gradientDirection; + gradientDirection.x = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + 
bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity; + gradientDirection.y = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity; + + float gradientMagnitude = length(gradientDirection); + vec2 normalizedDirection = normalize(gradientDirection); + normalizedDirection = sign(normalizedDirection) * floor(abs(normalizedDirection) + 0.617316); // Offset by 1-sin(pi/8) to set to 0 if near axis, 1 if away + normalizedDirection = (normalizedDirection + 1.0) * 0.5; // Place -1.0 - 1.0 within 0 - 1.0 + + gl_FragColor = vec4(gradientMagnitude, normalizedDirection.x, normalizedDirection.y, 1.0); + } +); +#else +NSString *const kGPUImageDirectionalSobelEdgeDetectionFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + + vec2 gradientDirection; + gradientDirection.x = -bottomLeftIntensity - 2.0 * leftIntensity - 
topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity; + gradientDirection.y = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity; + + float gradientMagnitude = length(gradientDirection); + vec2 normalizedDirection = normalize(gradientDirection); + normalizedDirection = sign(normalizedDirection) * floor(abs(normalizedDirection) + 0.617316); // Offset by 1-sin(pi/8) to set to 0 if near axis, 1 if away + normalizedDirection = (normalizedDirection + 1.0) * 0.5; // Place -1.0 - 1.0 within 0 - 1.0 + + gl_FragColor = vec4(gradientMagnitude, normalizedDirection.x, normalizedDirection.y, 1.0); + } +); +#endif + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageDirectionalSobelEdgeDetectionFragmentShaderString])) + { + return nil; + } + + return self; +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDissolveBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageDissolveBlendFilter.h new file mode 100755 index 00000000..b4e5720a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDissolveBlendFilter.h @@ -0,0 +1,11 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageDissolveBlendFilter : GPUImageTwoInputFilter +{ + GLint mixUniform; +} + +// Mix ranges from 0.0 (only image 1) to 1.0 (only image 2), with 0.5 (half of either) as the normal level +@property(readwrite, nonatomic) CGFloat mix; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDissolveBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageDissolveBlendFilter.m new file mode 100755 index 00000000..b4a5609f --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDissolveBlendFilter.m @@ -0,0 +1,72 @@ +#import "GPUImageDissolveBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageDissolveBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + 
varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + uniform lowp float mixturePercent; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = mix(textureColor, textureColor2, mixturePercent); + } +); +#else +NSString *const kGPUImageDissolveBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + uniform float mixturePercent; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = mix(textureColor, textureColor2, mixturePercent); + } +); +#endif + +@implementation GPUImageDissolveBlendFilter + +@synthesize mix = _mix; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageDissolveBlendFragmentShaderString])) + { + return nil; + } + + mixUniform = [filterProgram uniformIndex:@"mixturePercent"]; + self.mix = 0.5; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setMix:(CGFloat)newValue; +{ + _mix = newValue; + + [self setFloat:_mix forUniform:mixUniform program:filterProgram]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDivideBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageDivideBlendFilter.h new file mode 100644 index 00000000..ad798e29 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDivideBlendFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageDivideBlendFilter : GPUImageTwoInputFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageDivideBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageDivideBlendFilter.m new file mode 
100644 index 00000000..63ee071e --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageDivideBlendFilter.m @@ -0,0 +1,96 @@ +#import "GPUImageDivideBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageDivideBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + mediump vec4 base = texture2D(inputImageTexture, textureCoordinate); + mediump vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); + + mediump float ra; + if (overlay.a == 0.0 || ((base.r / overlay.r) > (base.a / overlay.a))) + ra = overlay.a * base.a + overlay.r * (1.0 - base.a) + base.r * (1.0 - overlay.a); + else + ra = (base.r * overlay.a * overlay.a) / overlay.r + overlay.r * (1.0 - base.a) + base.r * (1.0 - overlay.a); + + + mediump float ga; + if (overlay.a == 0.0 || ((base.g / overlay.g) > (base.a / overlay.a))) + ga = overlay.a * base.a + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + else + ga = (base.g * overlay.a * overlay.a) / overlay.g + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + + + mediump float ba; + if (overlay.a == 0.0 || ((base.b / overlay.b) > (base.a / overlay.a))) + ba = overlay.a * base.a + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + else + ba = (base.b * overlay.a * overlay.a) / overlay.b + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + + mediump float a = overlay.a + base.a - overlay.a * base.a; + + gl_FragColor = vec4(ra, ga, ba, a); + } +); +#else +NSString *const kGPUImageDivideBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 base = texture2D(inputImageTexture, textureCoordinate); + vec4 overlay = texture2D(inputImageTexture2, 
textureCoordinate2); + + float ra; + if (overlay.a == 0.0 || ((base.r / overlay.r) > (base.a / overlay.a))) + ra = overlay.a * base.a + overlay.r * (1.0 - base.a) + base.r * (1.0 - overlay.a); + else + ra = (base.r * overlay.a * overlay.a) / overlay.r + overlay.r * (1.0 - base.a) + base.r * (1.0 - overlay.a); + + + float ga; + if (overlay.a == 0.0 || ((base.g / overlay.g) > (base.a / overlay.a))) + ga = overlay.a * base.a + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + else + ga = (base.g * overlay.a * overlay.a) / overlay.g + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + + + float ba; + if (overlay.a == 0.0 || ((base.b / overlay.b) > (base.a / overlay.a))) + ba = overlay.a * base.a + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + else + ba = (base.b * overlay.a * overlay.a) / overlay.b + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + + float a = overlay.a + base.a - overlay.a * base.a; + + gl_FragColor = vec4(ra, ga, ba, a); + } + ); +#endif + +@implementation GPUImageDivideBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageDivideBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageEmbossFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageEmbossFilter.h new file mode 100755 index 00000000..dbd21e82 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageEmbossFilter.h @@ -0,0 +1,8 @@ +#import "GPUImage3x3ConvolutionFilter.h" + +@interface GPUImageEmbossFilter : GPUImage3x3ConvolutionFilter + +// The strength of the embossing, from 0.0 to 4.0, with 1.0 as the normal level +@property(readwrite, nonatomic) CGFloat intensity; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageEmbossFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageEmbossFilter.m new file mode 100755 index 00000000..6ba48cd4 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageEmbossFilter.m @@ -0,0 +1,49 @@ +#import "GPUImageEmbossFilter.h" + 
+@implementation GPUImageEmbossFilter + +@synthesize intensity = _intensity; + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + self.intensity = 1.0; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setIntensity:(CGFloat)newValue; +{ +// [(GPUImage3x3ConvolutionFilter *)filter setConvolutionMatrix:(GPUMatrix3x3){ +// {-2.0f, -1.0f, 0.0f}, +// {-1.0f, 1.0f, 1.0f}, +// { 0.0f, 1.0f, 2.0f} +// }]; + + _intensity = newValue; + + GPUMatrix3x3 newConvolutionMatrix; + newConvolutionMatrix.one.one = _intensity * (-2.0); + newConvolutionMatrix.one.two = -_intensity; + newConvolutionMatrix.one.three = 0.0f; + + newConvolutionMatrix.two.one = -_intensity; + newConvolutionMatrix.two.two = 1.0; + newConvolutionMatrix.two.three = _intensity; + + newConvolutionMatrix.three.one = 0.0f; + newConvolutionMatrix.three.two = _intensity; + newConvolutionMatrix.three.three = _intensity * 2.0; + + self.convolutionKernel = newConvolutionMatrix; +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageErosionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageErosionFilter.h new file mode 100644 index 00000000..b311a265 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageErosionFilter.h @@ -0,0 +1,11 @@ +#import "GPUImageTwoPassTextureSamplingFilter.h" + +// For each pixel, this sets it to the minimum value of the red channel in a rectangular neighborhood extending out dilationRadius pixels from the center. +// This extends out dark features, and is most commonly used with black-and-white thresholded images. + +@interface GPUImageErosionFilter : GPUImageTwoPassTextureSamplingFilter + +// Acceptable values for erosionRadius, which sets the distance in pixels to sample out from the center, are 1, 2, 3, and 4. 
+- (id)initWithRadius:(NSUInteger)erosionRadius; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageErosionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageErosionFilter.m new file mode 100644 index 00000000..05f4f28d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageErosionFilter.m @@ -0,0 +1,312 @@ +#import "GPUImageErosionFilter.h" +#import "GPUImageDilationFilter.h" + +@implementation GPUImageErosionFilter + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageErosionRadiusOneFragmentShaderString = SHADER_STRING +( + precision lowp float; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + + lowp float minValue = min(centerIntensity, oneStepPositiveIntensity); + minValue = min(minValue, oneStepNegativeIntensity); + + gl_FragColor = vec4(vec3(minValue), 1.0); + } +); + +NSString *const kGPUImageErosionRadiusTwoFragmentShaderString = SHADER_STRING +( + precision lowp float; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + float twoStepsPositiveIntensity = 
texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate).r; + float twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate).r; + + lowp float minValue = min(centerIntensity, oneStepPositiveIntensity); + minValue = min(minValue, oneStepNegativeIntensity); + minValue = min(minValue, twoStepsPositiveIntensity); + minValue = min(minValue, twoStepsNegativeIntensity); + + gl_FragColor = vec4(vec3(minValue), 1.0); + } +); + +NSString *const kGPUImageErosionRadiusThreeFragmentShaderString = SHADER_STRING +( + precision lowp float; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + float twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate).r; + float twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate).r; + float threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate).r; + float threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate).r; + + lowp float minValue = min(centerIntensity, oneStepPositiveIntensity); + minValue = min(minValue, oneStepNegativeIntensity); + minValue = min(minValue, twoStepsPositiveIntensity); + minValue = min(minValue, twoStepsNegativeIntensity); + minValue = min(minValue, threeStepsPositiveIntensity); + minValue = 
min(minValue, threeStepsNegativeIntensity); + + gl_FragColor = vec4(vec3(minValue), 1.0); + } +); + +NSString *const kGPUImageErosionRadiusFourFragmentShaderString = SHADER_STRING +( + precision lowp float; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + varying vec2 fourStepsPositiveTextureCoordinate; + varying vec2 fourStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + float twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate).r; + float twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate).r; + float threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate).r; + float threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate).r; + float fourStepsPositiveIntensity = texture2D(inputImageTexture, fourStepsPositiveTextureCoordinate).r; + float fourStepsNegativeIntensity = texture2D(inputImageTexture, fourStepsNegativeTextureCoordinate).r; + + lowp float minValue = min(centerIntensity, oneStepPositiveIntensity); + minValue = min(minValue, oneStepNegativeIntensity); + minValue = min(minValue, twoStepsPositiveIntensity); + minValue = min(minValue, twoStepsNegativeIntensity); + minValue = min(minValue, threeStepsPositiveIntensity); + minValue = min(minValue, threeStepsNegativeIntensity); + minValue = 
min(minValue, fourStepsPositiveIntensity); + minValue = min(minValue, fourStepsNegativeIntensity); + + gl_FragColor = vec4(vec3(minValue), 1.0); + } +); +#else +NSString *const kGPUImageErosionRadiusOneFragmentShaderString = SHADER_STRING +( + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + + float minValue = min(centerIntensity, oneStepPositiveIntensity); + minValue = min(minValue, oneStepNegativeIntensity); + + gl_FragColor = vec4(vec3(minValue), 1.0); + } +); + +NSString *const kGPUImageErosionRadiusTwoFragmentShaderString = SHADER_STRING +( + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + float twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate).r; + float twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate).r; + + float minValue = min(centerIntensity, oneStepPositiveIntensity); + minValue = min(minValue, oneStepNegativeIntensity); + minValue = min(minValue, twoStepsPositiveIntensity); + minValue = min(minValue, 
twoStepsNegativeIntensity); + + gl_FragColor = vec4(vec3(minValue), 1.0); + } +); + +NSString *const kGPUImageErosionRadiusThreeFragmentShaderString = SHADER_STRING +( + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + float twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate).r; + float twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate).r; + float threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate).r; + float threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate).r; + + float minValue = min(centerIntensity, oneStepPositiveIntensity); + minValue = min(minValue, oneStepNegativeIntensity); + minValue = min(minValue, twoStepsPositiveIntensity); + minValue = min(minValue, twoStepsNegativeIntensity); + minValue = min(minValue, threeStepsPositiveIntensity); + minValue = min(minValue, threeStepsNegativeIntensity); + + gl_FragColor = vec4(vec3(minValue), 1.0); + } +); + +NSString *const kGPUImageErosionRadiusFourFragmentShaderString = SHADER_STRING +( + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; 
+ varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + varying vec2 fourStepsPositiveTextureCoordinate; + varying vec2 fourStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate).r; + float oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate).r; + float oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate).r; + float twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate).r; + float twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate).r; + float threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate).r; + float threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate).r; + float fourStepsPositiveIntensity = texture2D(inputImageTexture, fourStepsPositiveTextureCoordinate).r; + float fourStepsNegativeIntensity = texture2D(inputImageTexture, fourStepsNegativeTextureCoordinate).r; + + float minValue = min(centerIntensity, oneStepPositiveIntensity); + minValue = min(minValue, oneStepNegativeIntensity); + minValue = min(minValue, twoStepsPositiveIntensity); + minValue = min(minValue, twoStepsNegativeIntensity); + minValue = min(minValue, threeStepsPositiveIntensity); + minValue = min(minValue, threeStepsNegativeIntensity); + minValue = min(minValue, fourStepsPositiveIntensity); + minValue = min(minValue, fourStepsNegativeIntensity); + + gl_FragColor = vec4(vec3(minValue), 1.0); + } +); +#endif + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithRadius:(NSUInteger)dilationRadius; +{ + NSString *fragmentShaderForThisRadius = nil; + NSString *vertexShaderForThisRadius = nil; + + switch (dilationRadius) + { + case 0: + case 1: + { + vertexShaderForThisRadius = 
kGPUImageDilationRadiusOneVertexShaderString; + fragmentShaderForThisRadius = kGPUImageErosionRadiusOneFragmentShaderString; + }; break; + case 2: + { + vertexShaderForThisRadius = kGPUImageDilationRadiusTwoVertexShaderString; + fragmentShaderForThisRadius = kGPUImageErosionRadiusTwoFragmentShaderString; + }; break; + case 3: + { + vertexShaderForThisRadius = kGPUImageDilationRadiusThreeVertexShaderString; + fragmentShaderForThisRadius = kGPUImageErosionRadiusThreeFragmentShaderString; + }; break; + case 4: + { + vertexShaderForThisRadius = kGPUImageDilationRadiusFourVertexShaderString; + fragmentShaderForThisRadius = kGPUImageErosionRadiusFourFragmentShaderString; + }; break; + default: + { + vertexShaderForThisRadius = kGPUImageDilationRadiusFourVertexShaderString; + fragmentShaderForThisRadius = kGPUImageErosionRadiusFourFragmentShaderString; + }; break; + } + + if (!(self = [super initWithFirstStageVertexShaderFromString:vertexShaderForThisRadius firstStageFragmentShaderFromString:fragmentShaderForThisRadius secondStageVertexShaderFromString:vertexShaderForThisRadius secondStageFragmentShaderFromString:fragmentShaderForThisRadius])) + { + return nil; + } + + return self; +} + +- (id)init; +{ + if (!(self = [self initWithRadius:1])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageExclusionBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageExclusionBlendFilter.h new file mode 100755 index 00000000..f7c83f57 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageExclusionBlendFilter.h @@ -0,0 +1,7 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageExclusionBlendFilter : GPUImageTwoInputFilter +{ +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageExclusionBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageExclusionBlendFilter.m new file mode 100755 index 00000000..c364159f --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageExclusionBlendFilter.m @@ -0,0 +1,56 @@ +#import 
"GPUImageExclusionBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageExclusionBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + mediump vec4 base = texture2D(inputImageTexture, textureCoordinate); + mediump vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); + + // Dca = (Sca.Da + Dca.Sa - 2.Sca.Dca) + Sca.(1 - Da) + Dca.(1 - Sa) + + gl_FragColor = vec4((overlay.rgb * base.a + base.rgb * overlay.a - 2.0 * overlay.rgb * base.rgb) + overlay.rgb * (1.0 - base.a) + base.rgb * (1.0 - overlay.a), base.a); + } +); +#else +NSString *const kGPUImageExclusionBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 base = texture2D(inputImageTexture, textureCoordinate); + vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); + + // Dca = (Sca.Da + Dca.Sa - 2.Sca.Dca) + Sca.(1 - Da) + Dca.(1 - Sa) + + gl_FragColor = vec4((overlay.rgb * base.a + base.rgb * overlay.a - 2.0 * overlay.rgb * base.rgb) + overlay.rgb * (1.0 - base.a) + base.rgb * (1.0 - overlay.a), base.a); + } + ); +#endif + +@implementation GPUImageExclusionBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageExclusionBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageExposureFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageExposureFilter.h new file mode 100755 index 00000000..886a052f --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageExposureFilter.h @@ -0,0 +1,11 @@ +#import "GPUImageFilter.h" + +@interface GPUImageExposureFilter : GPUImageFilter +{ + GLint exposureUniform; +} + +// Exposure 
ranges from -10.0 to 10.0, with 0.0 as the normal level +@property(readwrite, nonatomic) CGFloat exposure; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageExposureFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageExposureFilter.m new file mode 100755 index 00000000..d5ee2c9f --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageExposureFilter.m @@ -0,0 +1,66 @@ +#import "GPUImageExposureFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageExposureFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform highp float exposure; + + void main() + { + highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4(textureColor.rgb * pow(2.0, exposure), textureColor.w); + } +); +#else +NSString *const kGPUImageExposureFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float exposure; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4(textureColor.rgb * pow(2.0, exposure), textureColor.w); + } +); +#endif + +@implementation GPUImageExposureFilter + +@synthesize exposure = _exposure; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageExposureFragmentShaderString])) + { + return nil; + } + + exposureUniform = [filterProgram uniformIndex:@"exposure"]; + self.exposure = 0.0; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setExposure:(CGFloat)newValue; +{ + _exposure = newValue; + + [self setFloat:_exposure forUniform:exposureUniform program:filterProgram]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFASTCornerDetectionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageFASTCornerDetectionFilter.h new file mode 100644 index 00000000..86e7cf42 --- /dev/null +++ 
b/LFLiveKit/Vendor/GPUImage/GPUImageFASTCornerDetectionFilter.h @@ -0,0 +1,33 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImageGrayscaleFilter; +@class GPUImage3x3TextureSamplingFilter; +@class GPUImageNonMaximumSuppressionFilter; + +/* + An implementation of the Features from Accelerated Segment Test (FAST) feature detector as described in the following publications: + + E. Rosten and T. Drummond. Fusing points and lines for high performance tracking. IEEE International Conference on Computer Vision, 2005. + E. Rosten and T. Drummond. Machine learning for high-speed corner detection. European Conference on Computer Vision, 2006. + + For more about the FAST feature detector, see the resources here: + http://www.edwardrosten.com/work/fast.html + */ + +typedef enum { kGPUImageFAST12Contiguous, kGPUImageFAST12ContiguousNonMaximumSuppressed} GPUImageFASTDetectorType; + +@interface GPUImageFASTCornerDetectionFilter : GPUImageFilterGroup +{ + GPUImageGrayscaleFilter *luminanceReductionFilter; + GPUImage3x3TextureSamplingFilter *featureDetectionFilter; + GPUImageNonMaximumSuppressionFilter *nonMaximumSuppressionFilter; +// Generate a lookup texture based on the bit patterns + +// Step 1: convert to monochrome if necessary +// Step 2: do a lookup at each pixel based on the Bresenham circle, encode comparison in two color components +// Step 3: do non-maximum suppression of close corner points +} + +- (id)initWithFASTDetectorVariant:(GPUImageFASTDetectorType)detectorType; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFASTCornerDetectionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageFASTCornerDetectionFilter.m new file mode 100644 index 00000000..b04a24aa --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFASTCornerDetectionFilter.m @@ -0,0 +1,89 @@ +#import "GPUImageFASTCornerDetectionFilter.h" + +#import "GPUImageGrayscaleFilter.h" +#import "GPUImage3x3TextureSamplingFilter.h" +#import "GPUImageNonMaximumSuppressionFilter.h" + +// 14 total texture 
coordinates from vertex shader for non-dependent reads +// 3 texture coordinates for dependent reads, then + +NSString *const kGPUImageFASTDetectorFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + uniform sampler2D lookupTable; + + void main() + { + lowp float centerIntensity = texture2D(inputImageTexture, textureCoordinate).r; + lowp float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + lowp float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + lowp float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + lowp float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + lowp float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + lowp float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + lowp float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + lowp float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + + lowp float byteTally = 1.0 / 255.0 * step(centerIntensity, topRightIntensity); + byteTally += 2.0 / 255.0 * step(centerIntensity, topIntensity); + byteTally += 4.0 / 255.0 * step(centerIntensity, topLeftIntensity); + byteTally += 8.0 / 255.0 * step(centerIntensity, leftIntensity); + byteTally += 16.0 / 255.0 * step(centerIntensity, bottomLeftIntensity); + byteTally += 32.0 / 255.0 * step(centerIntensity, bottomIntensity); + byteTally += 64.0 / 255.0 * step(centerIntensity, bottomRightIntensity); + byteTally += 128.0 / 
255.0 * step(centerIntensity, rightIntensity); + + // TODO: Replace the above with a dot product and two vec4s + // TODO: Apply step to a matrix, rather than individually + + gl_FragColor = vec4(byteTally, byteTally, byteTally, 1.0); + } + ); + + +@implementation GPUImageFASTCornerDetectionFilter + +- (id)init; +{ + if (!(self = [self initWithFASTDetectorVariant:kGPUImageFAST12ContiguousNonMaximumSuppressed])) + { + return nil; + } + + return self; +} + +- (id)initWithFASTDetectorVariant:(GPUImageFASTDetectorType)detectorType; +{ + if (!(self = [super init])) + { + return nil; + } + +// [derivativeFilter addTarget:blurFilter]; +// [blurFilter addTarget:harrisCornerDetectionFilter]; +// [harrisCornerDetectionFilter addTarget:nonMaximumSuppressionFilter]; + // [simpleThresholdFilter addTarget:colorPackingFilter]; + +// self.initialFilters = [NSArray arrayWithObjects:derivativeFilter, nil]; + // self.terminalFilter = colorPackingFilter; +// self.terminalFilter = nonMaximumSuppressionFilter; + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFalseColorFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageFalseColorFilter.h new file mode 100644 index 00000000..cb0b82f7 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFalseColorFilter.h @@ -0,0 +1,15 @@ +#import "GPUImageFilter.h" + +@interface GPUImageFalseColorFilter : GPUImageFilter +{ + GLint firstColorUniform, secondColorUniform; +} + +// The first and second colors specify what colors replace the dark and light areas of the image, respectively. The defaults are (0.0, 0.0, 0.5) amd (1.0, 0.0, 0.0). 
+@property(readwrite, nonatomic) GPUVector4 firstColor; +@property(readwrite, nonatomic) GPUVector4 secondColor; + +- (void)setFirstColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; +- (void)setSecondColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFalseColorFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageFalseColorFilter.m new file mode 100644 index 00000000..f514dbab --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFalseColorFilter.m @@ -0,0 +1,101 @@ +#import "GPUImageFalseColorFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUFalseColorFragmentShaderString = SHADER_STRING +( + precision lowp float; + + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float intensity; + uniform vec3 firstColor; + uniform vec3 secondColor; + + const mediump vec3 luminanceWeighting = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float luminance = dot(textureColor.rgb, luminanceWeighting); + + gl_FragColor = vec4( mix(firstColor.rgb, secondColor.rgb, luminance), textureColor.a); + } +); +#else +NSString *const kGPUFalseColorFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float intensity; + uniform vec3 firstColor; + uniform vec3 secondColor; + + const vec3 luminanceWeighting = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float luminance = dot(textureColor.rgb, luminanceWeighting); + + gl_FragColor = vec4( mix(firstColor.rgb, secondColor.rgb, luminance), textureColor.a); + } +); +#endif + + +@implementation GPUImageFalseColorFilter + +@synthesize secondColor = _secondColor; +@synthesize firstColor = _firstColor; + +- (id)init; +{ + if (!(self = 
[super initWithFragmentShaderFromString:kGPUFalseColorFragmentShaderString])) + { + return nil; + } + + firstColorUniform = [filterProgram uniformIndex:@"firstColor"]; + secondColorUniform = [filterProgram uniformIndex:@"secondColor"]; + + self.firstColor = (GPUVector4){0.0f, 0.0f, 0.5f, 1.0f}; + self.secondColor = (GPUVector4){1.0f, 0.0f, 0.0f, 1.0f}; + + return self; +} + + +#pragma mark - +#pragma mark Accessors + +- (void)setFirstColor:(GPUVector4)newValue; +{ + _firstColor = newValue; + + [self setFirstColorRed:_firstColor.one green:_firstColor.two blue:_firstColor.three]; +} + +- (void)setSecondColor:(GPUVector4)newValue; +{ + _secondColor = newValue; + + [self setSecondColorRed:_secondColor.one green:_secondColor.two blue:_secondColor.three]; +} + +- (void)setFirstColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; +{ + GPUVector3 firstColor = {redComponent, greenComponent, blueComponent}; + + [self setVec3:firstColor forUniform:firstColorUniform program:filterProgram]; +} + +- (void)setSecondColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; +{ + GPUVector3 secondColor = {redComponent, greenComponent, blueComponent}; + + [self setVec3:secondColor forUniform:secondColorUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageFilter.h new file mode 100755 index 00000000..0171aa80 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFilter.h @@ -0,0 +1,134 @@ +#import "GPUImageOutput.h" + +#define STRINGIZE(x) #x +#define STRINGIZE2(x) STRINGIZE(x) +#define SHADER_STRING(text) @ STRINGIZE2(text) + +#define GPUImageHashIdentifier # +#define GPUImageWrappedLabel(x) x +#define GPUImageEscapedHashIdentifier(a) GPUImageWrappedLabel(GPUImageHashIdentifier)a + +extern NSString *const kGPUImageVertexShaderString; +extern NSString *const kGPUImagePassthroughFragmentShaderString; + +struct GPUVector4 { + GLfloat one; 
+ GLfloat two; + GLfloat three; + GLfloat four; +}; +typedef struct GPUVector4 GPUVector4; + +struct GPUVector3 { + GLfloat one; + GLfloat two; + GLfloat three; +}; +typedef struct GPUVector3 GPUVector3; + +struct GPUMatrix4x4 { + GPUVector4 one; + GPUVector4 two; + GPUVector4 three; + GPUVector4 four; +}; +typedef struct GPUMatrix4x4 GPUMatrix4x4; + +struct GPUMatrix3x3 { + GPUVector3 one; + GPUVector3 two; + GPUVector3 three; +}; +typedef struct GPUMatrix3x3 GPUMatrix3x3; + +/** GPUImage's base filter class + + Filters and other subsequent elements in the chain conform to the GPUImageInput protocol, which lets them take in the supplied or processed texture from the previous link in the chain and do something with it. Objects one step further down the chain are considered targets, and processing can be branched by adding multiple targets to a single output or filter. + */ +@interface GPUImageFilter : GPUImageOutput +{ + GPUImageFramebuffer *firstInputFramebuffer; + + GLProgram *filterProgram; + GLint filterPositionAttribute, filterTextureCoordinateAttribute; + GLint filterInputTextureUniform; + GLfloat backgroundColorRed, backgroundColorGreen, backgroundColorBlue, backgroundColorAlpha; + + BOOL isEndProcessing; + + CGSize currentFilterSize; + GPUImageRotationMode inputRotation; + + BOOL currentlyReceivingMonochromeInput; + + NSMutableDictionary *uniformStateRestorationBlocks; + dispatch_semaphore_t imageCaptureSemaphore; +} + +@property(readonly) CVPixelBufferRef renderTarget; +@property(readwrite, nonatomic) BOOL preventRendering; +@property(readwrite, nonatomic) BOOL currentlyReceivingMonochromeInput; + +/// @name Initialization and teardown + +/** + Initialize with vertex and fragment shaders + + You make take advantage of the SHADER_STRING macro to write your shaders in-line. 
+ @param vertexShaderString Source code of the vertex shader to use + @param fragmentShaderString Source code of the fragment shader to use + */ +- (id)initWithVertexShaderFromString:(NSString *)vertexShaderString fragmentShaderFromString:(NSString *)fragmentShaderString; + +/** + Initialize with a fragment shader + + You may take advantage of the SHADER_STRING macro to write your shader in-line. + @param fragmentShaderString Source code of fragment shader to use + */ +- (id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +/** + Initialize with a fragment shader + @param fragmentShaderFilename Filename of fragment shader to load + */ +- (id)initWithFragmentShaderFromFile:(NSString *)fragmentShaderFilename; +- (void)initializeAttributes; +- (void)setupFilterForSize:(CGSize)filterFrameSize; +- (CGSize)rotatedSize:(CGSize)sizeToRotate forIndex:(NSInteger)textureIndex; +- (CGPoint)rotatedPoint:(CGPoint)pointToRotate forRotation:(GPUImageRotationMode)rotation; + +/// @name Managing the display FBOs +/** Size of the frame buffer object + */ +- (CGSize)sizeOfFBO; + +/// @name Rendering ++ (const GLfloat *)textureCoordinatesForRotation:(GPUImageRotationMode)rotationMode; +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +- (void)informTargetsAboutNewFrameAtTime:(CMTime)frameTime; +- (CGSize)outputFrameSize; + +/// @name Input parameters +- (void)setBackgroundColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent; +- (void)setInteger:(GLint)newInteger forUniformName:(NSString *)uniformName; +- (void)setFloat:(GLfloat)newFloat forUniformName:(NSString *)uniformName; +- (void)setSize:(CGSize)newSize forUniformName:(NSString *)uniformName; +- (void)setPoint:(CGPoint)newPoint forUniformName:(NSString *)uniformName; +- (void)setFloatVec3:(GPUVector3)newVec3 forUniformName:(NSString *)uniformName; +- 
(void)setFloatVec4:(GPUVector4)newVec4 forUniform:(NSString *)uniformName; +- (void)setFloatArray:(GLfloat *)array length:(GLsizei)count forUniform:(NSString*)uniformName; + +- (void)setMatrix3f:(GPUMatrix3x3)matrix forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +- (void)setMatrix4f:(GPUMatrix4x4)matrix forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +- (void)setFloat:(GLfloat)floatValue forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +- (void)setPoint:(CGPoint)pointValue forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +- (void)setSize:(CGSize)sizeValue forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +- (void)setVec3:(GPUVector3)vectorValue forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +- (void)setVec4:(GPUVector4)vectorValue forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +- (void)setFloatArray:(GLfloat *)arrayValue length:(GLsizei)arrayLength forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +- (void)setInteger:(GLint)intValue forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; + +- (void)setAndExecuteUniformStateCallbackAtIndex:(GLint)uniform forProgram:(GLProgram *)shaderProgram toBlock:(dispatch_block_t)uniformStateBlock; +- (void)setUniformsForProgramAtIndex:(NSUInteger)programIndex; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageFilter.m new file mode 100755 index 00000000..406d707a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFilter.m @@ -0,0 +1,753 @@ +#import "GPUImageFilter.h" +#import "GPUImagePicture.h" +#import + +// Hardcode the vertex shader for standard filters, but this can be overridden +NSString *const kGPUImageVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + + varying vec2 textureCoordinate; + + void main() + { + gl_Position = position; + textureCoordinate = inputTextureCoordinate.xy; + } + ); + +#if 
TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + +NSString *const kGPUImagePassthroughFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + gl_FragColor = texture2D(inputImageTexture, textureCoordinate); + } +); + +#else + +NSString *const kGPUImagePassthroughFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + gl_FragColor = texture2D(inputImageTexture, textureCoordinate); + } +); +#endif + + +@implementation GPUImageFilter + +@synthesize preventRendering = _preventRendering; +@synthesize currentlyReceivingMonochromeInput; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithVertexShaderFromString:(NSString *)vertexShaderString fragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [super init])) + { + return nil; + } + + uniformStateRestorationBlocks = [NSMutableDictionary dictionaryWithCapacity:10]; + _preventRendering = NO; + currentlyReceivingMonochromeInput = NO; + inputRotation = kGPUImageNoRotation; + backgroundColorRed = 0.0; + backgroundColorGreen = 0.0; + backgroundColorBlue = 0.0; + backgroundColorAlpha = 0.0; + imageCaptureSemaphore = dispatch_semaphore_create(0); + dispatch_semaphore_signal(imageCaptureSemaphore); + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + + filterProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:vertexShaderString fragmentShaderString:fragmentShaderString]; + + if (!filterProgram.initialized) + { + [self initializeAttributes]; + + if (![filterProgram link]) + { + NSString *progLog = [filterProgram programLog]; + NSLog(@"Program link log: %@", progLog); + NSString *fragLog = [filterProgram fragmentShaderLog]; + NSLog(@"Fragment shader compile log: %@", fragLog); + NSString *vertLog = [filterProgram vertexShaderLog]; + NSLog(@"Vertex 
shader compile log: %@", vertLog); + filterProgram = nil; + NSAssert(NO, @"Filter shader link failed"); + } + } + + filterPositionAttribute = [filterProgram attributeIndex:@"position"]; + filterTextureCoordinateAttribute = [filterProgram attributeIndex:@"inputTextureCoordinate"]; + filterInputTextureUniform = [filterProgram uniformIndex:@"inputImageTexture"]; // This does assume a name of "inputImageTexture" for the fragment shader + + [GPUImageContext setActiveShaderProgram:filterProgram]; + + glEnableVertexAttribArray(filterPositionAttribute); + glEnableVertexAttribArray(filterTextureCoordinateAttribute); + }); + + return self; +} + +- (id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [self initWithVertexShaderFromString:kGPUImageVertexShaderString fragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + return self; +} + +- (id)initWithFragmentShaderFromFile:(NSString *)fragmentShaderFilename; +{ + NSString *fragmentShaderPathname = [[NSBundle mainBundle] pathForResource:fragmentShaderFilename ofType:@"fsh"]; + NSString *fragmentShaderString = [NSString stringWithContentsOfFile:fragmentShaderPathname encoding:NSUTF8StringEncoding error:nil]; + + if (!(self = [self initWithFragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + return self; +} + +- (id)init; +{ + if (!(self = [self initWithFragmentShaderFromString:kGPUImagePassthroughFragmentShaderString])) + { + return nil; + } + + return self; +} + +- (void)initializeAttributes; +{ + [filterProgram addAttribute:@"position"]; + [filterProgram addAttribute:@"inputTextureCoordinate"]; + + // Override this, calling back to this super method, in order to add new attributes to your vertex shader +} + +- (void)setupFilterForSize:(CGSize)filterFrameSize; +{ + // This is where you can override to provide some custom setup, if your filter has a size-dependent element +} + +- (void)dealloc +{ +#if !OS_OBJECT_USE_OBJC + if (imageCaptureSemaphore != 
NULL) + { + dispatch_release(imageCaptureSemaphore); + } +#endif + +} + +#pragma mark - +#pragma mark Still image processing + +- (void)useNextFrameForImageCapture; +{ + usingNextFrameForImageCapture = YES; + + // Set the semaphore high, if it isn't already + if (dispatch_semaphore_wait(imageCaptureSemaphore, DISPATCH_TIME_NOW) != 0) + { + return; + } +} + +- (CGImageRef)newCGImageFromCurrentlyProcessedOutput +{ + // Give it three seconds to process, then abort if they forgot to set up the image capture properly + double timeoutForImageCapture = 3.0; + dispatch_time_t convertedTimeout = dispatch_time(DISPATCH_TIME_NOW, timeoutForImageCapture * NSEC_PER_SEC); + + if (dispatch_semaphore_wait(imageCaptureSemaphore, convertedTimeout) != 0) + { + return NULL; + } + + GPUImageFramebuffer* framebuffer = [self framebufferForOutput]; + + usingNextFrameForImageCapture = NO; + dispatch_semaphore_signal(imageCaptureSemaphore); + + CGImageRef image = [framebuffer newCGImageFromFramebufferContents]; + return image; +} + +#pragma mark - +#pragma mark Managing the display FBOs + +- (CGSize)sizeOfFBO; +{ + CGSize outputSize = [self maximumOutputSize]; + if ( (CGSizeEqualToSize(outputSize, CGSizeZero)) || (inputTextureSize.width < outputSize.width) ) + { + return inputTextureSize; + } + else + { + return outputSize; + } +} + +#pragma mark - +#pragma mark Rendering + ++ (const GLfloat *)textureCoordinatesForRotation:(GPUImageRotationMode)rotationMode; +{ + static const GLfloat noRotationTextureCoordinates[] = { + 0.0f, 0.0f, + 1.0f, 0.0f, + 0.0f, 1.0f, + 1.0f, 1.0f, + }; + + static const GLfloat rotateLeftTextureCoordinates[] = { + 1.0f, 0.0f, + 1.0f, 1.0f, + 0.0f, 0.0f, + 0.0f, 1.0f, + }; + + static const GLfloat rotateRightTextureCoordinates[] = { + 0.0f, 1.0f, + 0.0f, 0.0f, + 1.0f, 1.0f, + 1.0f, 0.0f, + }; + + static const GLfloat verticalFlipTextureCoordinates[] = { + 0.0f, 1.0f, + 1.0f, 1.0f, + 0.0f, 0.0f, + 1.0f, 0.0f, + }; + + static const GLfloat 
horizontalFlipTextureCoordinates[] = { + 1.0f, 0.0f, + 0.0f, 0.0f, + 1.0f, 1.0f, + 0.0f, 1.0f, + }; + + static const GLfloat rotateRightVerticalFlipTextureCoordinates[] = { + 0.0f, 0.0f, + 0.0f, 1.0f, + 1.0f, 0.0f, + 1.0f, 1.0f, + }; + + static const GLfloat rotateRightHorizontalFlipTextureCoordinates[] = { + 1.0f, 1.0f, + 1.0f, 0.0f, + 0.0f, 1.0f, + 0.0f, 0.0f, + }; + + static const GLfloat rotate180TextureCoordinates[] = { + 1.0f, 1.0f, + 0.0f, 1.0f, + 1.0f, 0.0f, + 0.0f, 0.0f, + }; + + switch(rotationMode) + { + case kGPUImageNoRotation: return noRotationTextureCoordinates; + case kGPUImageRotateLeft: return rotateLeftTextureCoordinates; + case kGPUImageRotateRight: return rotateRightTextureCoordinates; + case kGPUImageFlipVertical: return verticalFlipTextureCoordinates; + case kGPUImageFlipHorizonal: return horizontalFlipTextureCoordinates; + case kGPUImageRotateRightFlipVertical: return rotateRightVerticalFlipTextureCoordinates; + case kGPUImageRotateRightFlipHorizontal: return rotateRightHorizontalFlipTextureCoordinates; + case kGPUImageRotate180: return rotate180TextureCoordinates; + } +} + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + if (self.preventRendering) + { + [firstInputFramebuffer unlock]; + return; + } + + [GPUImageContext setActiveShaderProgram:filterProgram]; + + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + if (usingNextFrameForImageCapture) + { + [outputFramebuffer lock]; + } + + [self setUniformsForProgramAtIndex:0]; + + glClearColor(backgroundColorRed, backgroundColorGreen, backgroundColorBlue, backgroundColorAlpha); + glClear(GL_COLOR_BUFFER_BIT); + + glActiveTexture(GL_TEXTURE2); + glBindTexture(GL_TEXTURE_2D, [firstInputFramebuffer texture]); + + glUniform1i(filterInputTextureUniform, 2); + + 
glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + glVertexAttribPointer(filterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + + [firstInputFramebuffer unlock]; + + if (usingNextFrameForImageCapture) + { + dispatch_semaphore_signal(imageCaptureSemaphore); + } +} + +- (void)informTargetsAboutNewFrameAtTime:(CMTime)frameTime; +{ + if (self.frameProcessingCompletionBlock != NULL) + { + self.frameProcessingCompletionBlock(self, frameTime); + } + + // Get all targets the framebuffer so they can grab a lock on it + for (id currentTarget in targets) + { + if (currentTarget != self.targetToIgnoreForUpdates) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + NSInteger textureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + + [self setInputFramebufferForTarget:currentTarget atIndex:textureIndex]; + [currentTarget setInputSize:[self outputFrameSize] atIndex:textureIndex]; + } + } + + // Release our hold so it can return to the cache immediately upon processing + [[self framebufferForOutput] unlock]; + + if (usingNextFrameForImageCapture) + { +// usingNextFrameForImageCapture = NO; + } + else + { + [self removeOutputFramebuffer]; + } + + // Trigger processing last, so that our unlock comes first in serial execution, avoiding the need for a callback + for (id currentTarget in targets) + { + if (currentTarget != self.targetToIgnoreForUpdates) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + NSInteger textureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + [currentTarget newFrameReadyAtTime:frameTime atIndex:textureIndex]; + } + } +} + +- (CGSize)outputFrameSize; +{ + return inputTextureSize; +} + +#pragma mark - +#pragma mark Input parameters + +- (void)setBackgroundColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent; +{ 
+ backgroundColorRed = redComponent; + backgroundColorGreen = greenComponent; + backgroundColorBlue = blueComponent; + backgroundColorAlpha = alphaComponent; +} + +- (void)setInteger:(GLint)newInteger forUniformName:(NSString *)uniformName; +{ + GLint uniformIndex = [filterProgram uniformIndex:uniformName]; + [self setInteger:newInteger forUniform:uniformIndex program:filterProgram]; +} + +- (void)setFloat:(GLfloat)newFloat forUniformName:(NSString *)uniformName; +{ + GLint uniformIndex = [filterProgram uniformIndex:uniformName]; + [self setFloat:newFloat forUniform:uniformIndex program:filterProgram]; +} + +- (void)setSize:(CGSize)newSize forUniformName:(NSString *)uniformName; +{ + GLint uniformIndex = [filterProgram uniformIndex:uniformName]; + [self setSize:newSize forUniform:uniformIndex program:filterProgram]; +} + +- (void)setPoint:(CGPoint)newPoint forUniformName:(NSString *)uniformName; +{ + GLint uniformIndex = [filterProgram uniformIndex:uniformName]; + [self setPoint:newPoint forUniform:uniformIndex program:filterProgram]; +} + +- (void)setFloatVec3:(GPUVector3)newVec3 forUniformName:(NSString *)uniformName; +{ + GLint uniformIndex = [filterProgram uniformIndex:uniformName]; + [self setVec3:newVec3 forUniform:uniformIndex program:filterProgram]; +} + +- (void)setFloatVec4:(GPUVector4)newVec4 forUniform:(NSString *)uniformName; +{ + GLint uniformIndex = [filterProgram uniformIndex:uniformName]; + [self setVec4:newVec4 forUniform:uniformIndex program:filterProgram]; +} + +- (void)setFloatArray:(GLfloat *)array length:(GLsizei)count forUniform:(NSString*)uniformName +{ + GLint uniformIndex = [filterProgram uniformIndex:uniformName]; + + [self setFloatArray:array length:count forUniform:uniformIndex program:filterProgram]; +} + +- (void)setMatrix3f:(GPUMatrix3x3)matrix forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +{ + runAsynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:shaderProgram]; + [self 
setAndExecuteUniformStateCallbackAtIndex:uniform forProgram:shaderProgram toBlock:^{ + glUniformMatrix3fv(uniform, 1, GL_FALSE, (GLfloat *)&matrix); + }]; + }); +} + +- (void)setMatrix4f:(GPUMatrix4x4)matrix forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +{ + runAsynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:shaderProgram]; + [self setAndExecuteUniformStateCallbackAtIndex:uniform forProgram:shaderProgram toBlock:^{ + glUniformMatrix4fv(uniform, 1, GL_FALSE, (GLfloat *)&matrix); + }]; + }); +} + +- (void)setFloat:(GLfloat)floatValue forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +{ + runAsynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:shaderProgram]; + [self setAndExecuteUniformStateCallbackAtIndex:uniform forProgram:shaderProgram toBlock:^{ + glUniform1f(uniform, floatValue); + }]; + }); +} + +- (void)setPoint:(CGPoint)pointValue forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +{ + runAsynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:shaderProgram]; + [self setAndExecuteUniformStateCallbackAtIndex:uniform forProgram:shaderProgram toBlock:^{ + GLfloat positionArray[2]; + positionArray[0] = pointValue.x; + positionArray[1] = pointValue.y; + + glUniform2fv(uniform, 1, positionArray); + }]; + }); +} + +- (void)setSize:(CGSize)sizeValue forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +{ + runAsynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:shaderProgram]; + + [self setAndExecuteUniformStateCallbackAtIndex:uniform forProgram:shaderProgram toBlock:^{ + GLfloat sizeArray[2]; + sizeArray[0] = sizeValue.width; + sizeArray[1] = sizeValue.height; + + glUniform2fv(uniform, 1, sizeArray); + }]; + }); +} + +- (void)setVec3:(GPUVector3)vectorValue forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +{ + runAsynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext 
setActiveShaderProgram:shaderProgram]; + + [self setAndExecuteUniformStateCallbackAtIndex:uniform forProgram:shaderProgram toBlock:^{ + glUniform3fv(uniform, 1, (GLfloat *)&vectorValue); + }]; + }); +} + +- (void)setVec4:(GPUVector4)vectorValue forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +{ + runAsynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:shaderProgram]; + + [self setAndExecuteUniformStateCallbackAtIndex:uniform forProgram:shaderProgram toBlock:^{ + glUniform4fv(uniform, 1, (GLfloat *)&vectorValue); + }]; + }); +} + +- (void)setFloatArray:(GLfloat *)arrayValue length:(GLsizei)arrayLength forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +{ + // Make a copy of the data, so it doesn't get overwritten before async call executes + NSData* arrayData = [NSData dataWithBytes:arrayValue length:arrayLength * sizeof(arrayValue[0])]; + + runAsynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:shaderProgram]; + + [self setAndExecuteUniformStateCallbackAtIndex:uniform forProgram:shaderProgram toBlock:^{ + glUniform1fv(uniform, arrayLength, [arrayData bytes]); + }]; + }); +} + +- (void)setInteger:(GLint)intValue forUniform:(GLint)uniform program:(GLProgram *)shaderProgram; +{ + runAsynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:shaderProgram]; + + [self setAndExecuteUniformStateCallbackAtIndex:uniform forProgram:shaderProgram toBlock:^{ + glUniform1i(uniform, intValue); + }]; + }); +} + +- (void)setAndExecuteUniformStateCallbackAtIndex:(GLint)uniform forProgram:(GLProgram *)shaderProgram toBlock:(dispatch_block_t)uniformStateBlock; +{ + [uniformStateRestorationBlocks setObject:[uniformStateBlock copy] forKey:[NSNumber numberWithInt:uniform]]; + uniformStateBlock(); +} + +- (void)setUniformsForProgramAtIndex:(NSUInteger)programIndex; +{ + [uniformStateRestorationBlocks enumerateKeysAndObjectsUsingBlock:^(id key, id obj, BOOL *stop){ + dispatch_block_t 
currentBlock = obj; + currentBlock(); + }]; +} + +#pragma mark - +#pragma mark GPUImageInput + +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +{ + static const GLfloat imageVertices[] = { + -1.0f, -1.0f, + 1.0f, -1.0f, + -1.0f, 1.0f, + 1.0f, 1.0f, + }; + + [self renderToTextureWithVertices:imageVertices textureCoordinates:[[self class] textureCoordinatesForRotation:inputRotation]]; + + [self informTargetsAboutNewFrameAtTime:frameTime]; +} + +- (NSInteger)nextAvailableTextureIndex; +{ + return 0; +} + +- (void)setInputFramebuffer:(GPUImageFramebuffer *)newInputFramebuffer atIndex:(NSInteger)textureIndex; +{ + firstInputFramebuffer = newInputFramebuffer; + [firstInputFramebuffer lock]; +} + +- (CGSize)rotatedSize:(CGSize)sizeToRotate forIndex:(NSInteger)textureIndex; +{ + CGSize rotatedSize = sizeToRotate; + + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + rotatedSize.width = sizeToRotate.height; + rotatedSize.height = sizeToRotate.width; + } + + return rotatedSize; +} + +- (CGPoint)rotatedPoint:(CGPoint)pointToRotate forRotation:(GPUImageRotationMode)rotation; +{ + CGPoint rotatedPoint; + switch(rotation) + { + case kGPUImageNoRotation: return pointToRotate; break; + case kGPUImageFlipHorizonal: + { + rotatedPoint.x = 1.0 - pointToRotate.x; + rotatedPoint.y = pointToRotate.y; + }; break; + case kGPUImageFlipVertical: + { + rotatedPoint.x = pointToRotate.x; + rotatedPoint.y = 1.0 - pointToRotate.y; + }; break; + case kGPUImageRotateLeft: + { + rotatedPoint.x = 1.0 - pointToRotate.y; + rotatedPoint.y = pointToRotate.x; + }; break; + case kGPUImageRotateRight: + { + rotatedPoint.x = pointToRotate.y; + rotatedPoint.y = 1.0 - pointToRotate.x; + }; break; + case kGPUImageRotateRightFlipVertical: + { + rotatedPoint.x = pointToRotate.y; + rotatedPoint.y = pointToRotate.x; + }; break; + case kGPUImageRotateRightFlipHorizontal: + { + rotatedPoint.x = 1.0 - pointToRotate.y; + rotatedPoint.y = 1.0 - pointToRotate.x; + }; break; + 
case kGPUImageRotate180: + { + rotatedPoint.x = 1.0 - pointToRotate.x; + rotatedPoint.y = 1.0 - pointToRotate.y; + }; break; + } + + return rotatedPoint; +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + if (self.preventRendering) + { + return; + } + + if (overrideInputSize) + { + if (CGSizeEqualToSize(forcedMaximumSize, CGSizeZero)) + { + } + else + { + CGRect insetRect = AVMakeRectWithAspectRatioInsideRect(newSize, CGRectMake(0.0, 0.0, forcedMaximumSize.width, forcedMaximumSize.height)); + inputTextureSize = insetRect.size; + } + } + else + { + CGSize rotatedSize = [self rotatedSize:newSize forIndex:textureIndex]; + + if (CGSizeEqualToSize(rotatedSize, CGSizeZero)) + { + inputTextureSize = rotatedSize; + } + else if (!CGSizeEqualToSize(inputTextureSize, rotatedSize)) + { + inputTextureSize = rotatedSize; + } + } + + [self setupFilterForSize:[self sizeOfFBO]]; +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + inputRotation = newInputRotation; +} + +- (void)forceProcessingAtSize:(CGSize)frameSize; +{ + if (CGSizeEqualToSize(frameSize, CGSizeZero)) + { + overrideInputSize = NO; + } + else + { + overrideInputSize = YES; + inputTextureSize = frameSize; + forcedMaximumSize = CGSizeZero; + } +} + +- (void)forceProcessingAtSizeRespectingAspectRatio:(CGSize)frameSize; +{ + if (CGSizeEqualToSize(frameSize, CGSizeZero)) + { + overrideInputSize = NO; + inputTextureSize = CGSizeZero; + forcedMaximumSize = CGSizeZero; + } + else + { + overrideInputSize = YES; + forcedMaximumSize = frameSize; + } +} + +- (CGSize)maximumOutputSize; +{ + // I'm temporarily disabling adjustments for smaller output sizes until I figure out how to make this work better + return CGSizeZero; + + /* + if (CGSizeEqualToSize(cachedMaximumOutputSize, CGSizeZero)) + { + for (id currentTarget in targets) + { + if ([currentTarget maximumOutputSize].width > cachedMaximumOutputSize.width) + { + cachedMaximumOutputSize = 
[currentTarget maximumOutputSize]; + } + } + } + + return cachedMaximumOutputSize; + */ +} + +- (void)endProcessing +{ + if (!isEndProcessing) + { + isEndProcessing = YES; + + for (id currentTarget in targets) + { + [currentTarget endProcessing]; + } + } +} + +- (BOOL)wantsMonochromeInput; +{ + return NO; +} + +#pragma mark - +#pragma mark Accessors + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFilterGroup.h b/LFLiveKit/Vendor/GPUImage/GPUImageFilterGroup.h new file mode 100755 index 00000000..6817cdf0 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFilterGroup.h @@ -0,0 +1,19 @@ +#import "GPUImageOutput.h" +#import "GPUImageFilter.h" + +@interface GPUImageFilterGroup : GPUImageOutput +{ + NSMutableArray *filters; + BOOL isEndProcessing; +} + +@property(readwrite, nonatomic, strong) GPUImageOutput *terminalFilter; +@property(readwrite, nonatomic, strong) NSArray *initialFilters; +@property(readwrite, nonatomic, strong) GPUImageOutput *inputFilterToIgnoreForUpdates; + +// Filter management +- (void)addFilter:(GPUImageOutput *)newFilter; +- (GPUImageOutput *)filterAtIndex:(NSUInteger)filterIndex; +- (NSUInteger)filterCount; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFilterGroup.m b/LFLiveKit/Vendor/GPUImage/GPUImageFilterGroup.m new file mode 100755 index 00000000..72cfe5e7 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFilterGroup.m @@ -0,0 +1,208 @@ +#import "GPUImageFilterGroup.h" +#import "GPUImagePicture.h" + +@implementation GPUImageFilterGroup + +@synthesize terminalFilter = _terminalFilter; +@synthesize initialFilters = _initialFilters; +@synthesize inputFilterToIgnoreForUpdates = _inputFilterToIgnoreForUpdates; + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + filters = [[NSMutableArray alloc] init]; + + return self; +} + +#pragma mark - +#pragma mark Filter management + +- (void)addFilter:(GPUImageOutput *)newFilter; +{ + [filters addObject:newFilter]; +} + +- (GPUImageOutput 
*)filterAtIndex:(NSUInteger)filterIndex; +{ + return [filters objectAtIndex:filterIndex]; +} + +- (NSUInteger)filterCount; +{ + return [filters count]; +} + +#pragma mark - +#pragma mark Still image processing + +- (void)useNextFrameForImageCapture; +{ + [self.terminalFilter useNextFrameForImageCapture]; +} + +- (CGImageRef)newCGImageFromCurrentlyProcessedOutput; +{ + return [self.terminalFilter newCGImageFromCurrentlyProcessedOutput]; +} + +#pragma mark - +#pragma mark GPUImageOutput overrides + +- (void)setTargetToIgnoreForUpdates:(id)targetToIgnoreForUpdates; +{ + [_terminalFilter setTargetToIgnoreForUpdates:targetToIgnoreForUpdates]; +} + +- (void)addTarget:(id)newTarget atTextureLocation:(NSInteger)textureLocation; +{ + [_terminalFilter addTarget:newTarget atTextureLocation:textureLocation]; +} + +- (void)removeTarget:(id)targetToRemove; +{ + [_terminalFilter removeTarget:targetToRemove]; +} + +- (void)removeAllTargets; +{ + [_terminalFilter removeAllTargets]; +} + +- (NSArray *)targets; +{ + return [_terminalFilter targets]; +} + +- (void)setFrameProcessingCompletionBlock:(void (^)(GPUImageOutput *, CMTime))frameProcessingCompletionBlock; +{ + [_terminalFilter setFrameProcessingCompletionBlock:frameProcessingCompletionBlock]; +} + +- (void (^)(GPUImageOutput *, CMTime))frameProcessingCompletionBlock; +{ + return [_terminalFilter frameProcessingCompletionBlock]; +} + +#pragma mark - +#pragma mark GPUImageInput protocol + +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +{ + for (GPUImageOutput *currentFilter in _initialFilters) + { + if (currentFilter != self.inputFilterToIgnoreForUpdates) + { + [currentFilter newFrameReadyAtTime:frameTime atIndex:textureIndex]; + } + } +} + +- (void)setInputFramebuffer:(GPUImageFramebuffer *)newInputFramebuffer atIndex:(NSInteger)textureIndex; +{ + for (GPUImageOutput *currentFilter in _initialFilters) + { + [currentFilter setInputFramebuffer:newInputFramebuffer atIndex:textureIndex]; + } +} + 
+- (NSInteger)nextAvailableTextureIndex; +{ +// if ([_initialFilters count] > 0) +// { +// return [[_initialFilters objectAtIndex:0] nextAvailableTextureIndex]; +// } + + return 0; +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + for (GPUImageOutput *currentFilter in _initialFilters) + { + [currentFilter setInputSize:newSize atIndex:textureIndex]; + } +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + for (GPUImageOutput *currentFilter in _initialFilters) + { + [currentFilter setInputRotation:newInputRotation atIndex:(NSInteger)textureIndex]; + } +} + +- (void)forceProcessingAtSize:(CGSize)frameSize; +{ + for (GPUImageOutput *currentFilter in filters) + { + [currentFilter forceProcessingAtSize:frameSize]; + } +} + +- (void)forceProcessingAtSizeRespectingAspectRatio:(CGSize)frameSize; +{ + for (GPUImageOutput *currentFilter in filters) + { + [currentFilter forceProcessingAtSizeRespectingAspectRatio:frameSize]; + } +} + +- (CGSize)maximumOutputSize; +{ + // I'm temporarily disabling adjustments for smaller output sizes until I figure out how to make this work better + return CGSizeZero; + + /* + if (CGSizeEqualToSize(cachedMaximumOutputSize, CGSizeZero)) + { + for (id currentTarget in _initialFilters) + { + if ([currentTarget maximumOutputSize].width > cachedMaximumOutputSize.width) + { + cachedMaximumOutputSize = [currentTarget maximumOutputSize]; + } + } + } + + return cachedMaximumOutputSize; + */ +} + +- (void)endProcessing; +{ + if (!isEndProcessing) + { + isEndProcessing = YES; + + for (id currentTarget in _initialFilters) + { + [currentTarget endProcessing]; + } + } +} + +- (BOOL)wantsMonochromeInput; +{ + BOOL allInputsWantMonochromeInput = YES; + for (GPUImageOutput *currentFilter in _initialFilters) + { + allInputsWantMonochromeInput = allInputsWantMonochromeInput && [currentFilter wantsMonochromeInput]; + } + + return allInputsWantMonochromeInput; +} + +- 
(void)setCurrentlyReceivingMonochromeInput:(BOOL)newValue; +{ + for (GPUImageOutput *currentFilter in _initialFilters) + { + [currentFilter setCurrentlyReceivingMonochromeInput:newValue]; + } +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFilterPipeline.h b/LFLiveKit/Vendor/GPUImage/GPUImageFilterPipeline.h new file mode 100755 index 00000000..dc2baeac --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFilterPipeline.h @@ -0,0 +1,30 @@ +#import +#import "GPUImageOutput.h" + +@interface GPUImageFilterPipeline : NSObject +{ + NSString *stringValue; +} + +@property (strong) NSMutableArray *filters; + +@property (strong) GPUImageOutput *input; +@property (strong) id output; + +- (id) initWithOrderedFilters:(NSArray*) filters input:(GPUImageOutput*)input output:(id )output; +- (id) initWithConfiguration:(NSDictionary*) configuration input:(GPUImageOutput*)input output:(id )output; +- (id) initWithConfigurationFile:(NSURL*) configuration input:(GPUImageOutput*)input output:(id )output; + +- (void) addFilter:(GPUImageOutput *)filter; +- (void) addFilter:(GPUImageOutput *)filter atIndex:(NSUInteger)insertIndex; +- (void) replaceFilterAtIndex:(NSUInteger)index withFilter:(GPUImageOutput *)filter; +- (void) replaceAllFilters:(NSArray *) newFilters; +- (void) removeFilter:(GPUImageOutput *)filter; +- (void) removeFilterAtIndex:(NSUInteger)index; +- (void) removeAllFilters; + +- (UIImage *) currentFilteredFrame; +- (UIImage *) currentFilteredFrameWithOrientation:(UIImageOrientation)imageOrientation; +- (CGImageRef) newCGImageFromCurrentFilteredFrame; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFilterPipeline.m b/LFLiveKit/Vendor/GPUImage/GPUImageFilterPipeline.m new file mode 100755 index 00000000..4fbe5ebb --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFilterPipeline.m @@ -0,0 +1,218 @@ +#import "GPUImageFilterPipeline.h" + +@interface GPUImageFilterPipeline () + +- (BOOL)_parseConfiguration:(NSDictionary *)configuration; + +- 
(void)_refreshFilters; + +@end + +@implementation GPUImageFilterPipeline + +@synthesize filters = _filters, input = _input, output = _output; + +#pragma mark Config file init + +- (id)initWithConfiguration:(NSDictionary *)configuration input:(GPUImageOutput *)input output:(id )output { + self = [super init]; + if (self) { + self.input = input; + self.output = output; + if (![self _parseConfiguration:configuration]) { + NSLog(@"Sorry, a parsing error occurred."); + abort(); + } + [self _refreshFilters]; + } + return self; +} + +- (id)initWithConfigurationFile:(NSURL *)configuration input:(GPUImageOutput *)input output:(id )output { + return [self initWithConfiguration:[NSDictionary dictionaryWithContentsOfURL:configuration] input:input output:output]; +} + +- (BOOL)_parseConfiguration:(NSDictionary *)configuration { + NSArray *filters = [configuration objectForKey:@"Filters"]; + if (!filters) { + return NO; + } + + NSError *regexError = nil; + NSRegularExpression *parsingRegex = [NSRegularExpression regularExpressionWithPattern:@"(float|CGPoint|NSString)\\((.*?)(?:,\\s*(.*?))*\\)" + options:0 + error:®exError]; + + // It's faster to put them into an array and then pass it to the filters property than it is to call [self addFilter:] every time + NSMutableArray *orderedFilters = [NSMutableArray arrayWithCapacity:[filters count]]; + for (NSDictionary *filter in filters) { + NSString *filterName = [filter objectForKey:@"FilterName"]; + Class theClass = NSClassFromString(filterName); + GPUImageOutput *genericFilter = [[theClass alloc] init]; + // Set up the properties + NSDictionary *filterAttributes; + if ((filterAttributes = [filter objectForKey:@"Attributes"])) { + for (NSString *propertyKey in filterAttributes) { + // Set up the selector + SEL theSelector = NSSelectorFromString(propertyKey); + NSInvocation *inv = [NSInvocation invocationWithMethodSignature:[theClass instanceMethodSignatureForSelector:theSelector]]; + [inv setSelector:theSelector]; + [inv 
setTarget:genericFilter]; + + // check selector given with parameter + if ([propertyKey hasSuffix:@":"]) { + + stringValue = nil; + + // Then parse the arguments + NSMutableArray *parsedArray; + if ([[filterAttributes objectForKey:propertyKey] isKindOfClass:[NSArray class]]) { + NSArray *array = [filterAttributes objectForKey:propertyKey]; + parsedArray = [NSMutableArray arrayWithCapacity:[array count]]; + for (NSString *string in array) { + NSTextCheckingResult *parse = [parsingRegex firstMatchInString:string + options:0 + range:NSMakeRange(0, [string length])]; + + NSString *modifier = [string substringWithRange:[parse rangeAtIndex:1]]; + if ([modifier isEqualToString:@"float"]) { + // Float modifier, one argument + CGFloat value = [[string substringWithRange:[parse rangeAtIndex:2]] floatValue]; + [parsedArray addObject:[NSNumber numberWithFloat:value]]; + [inv setArgument:&value atIndex:2]; + } else if ([modifier isEqualToString:@"CGPoint"]) { + // CGPoint modifier, two float arguments + CGFloat x = [[string substringWithRange:[parse rangeAtIndex:2]] floatValue]; + CGFloat y = [[string substringWithRange:[parse rangeAtIndex:3]] floatValue]; + CGPoint value = CGPointMake(x, y); + [parsedArray addObject:[NSValue valueWithCGPoint:value]]; + } else if ([modifier isEqualToString:@"NSString"]) { + // NSString modifier, one string argument + stringValue = [[string substringWithRange:[parse rangeAtIndex:2]] copy]; + [inv setArgument:&stringValue atIndex:2]; + + } else { + return NO; + } + } + [inv setArgument:&parsedArray atIndex:2]; + } else { + NSString *string = [filterAttributes objectForKey:propertyKey]; + NSTextCheckingResult *parse = [parsingRegex firstMatchInString:string + options:0 + range:NSMakeRange(0, [string length])]; + + NSString *modifier = [string substringWithRange:[parse rangeAtIndex:1]]; + if ([modifier isEqualToString:@"float"]) { + // Float modifier, one argument + CGFloat value = [[string substringWithRange:[parse rangeAtIndex:2]] floatValue]; + 
[inv setArgument:&value atIndex:2]; + } else if ([modifier isEqualToString:@"CGPoint"]) { + // CGPoint modifier, two float arguments + CGFloat x = [[string substringWithRange:[parse rangeAtIndex:2]] floatValue]; + CGFloat y = [[string substringWithRange:[parse rangeAtIndex:3]] floatValue]; + CGPoint value = CGPointMake(x, y); + [inv setArgument:&value atIndex:2]; + } else if ([modifier isEqualToString:@"NSString"]) { + // NSString modifier, one string argument + stringValue = [[string substringWithRange:[parse rangeAtIndex:2]] copy]; + [inv setArgument:&stringValue atIndex:2]; + + } else { + return NO; + } + } + } + + + [inv invoke]; + } + } + [orderedFilters addObject:genericFilter]; + } + self.filters = orderedFilters; + + return YES; +} + +#pragma mark Regular init + +- (id)initWithOrderedFilters:(NSArray *)filters input:(GPUImageOutput *)input output:(id )output { + self = [super init]; + if (self) { + self.input = input; + self.output = output; + self.filters = [NSMutableArray arrayWithArray:filters]; + [self _refreshFilters]; + } + return self; +} + +- (void)addFilter:(GPUImageOutput *)filter atIndex:(NSUInteger)insertIndex { + [self.filters insertObject:filter atIndex:insertIndex]; + [self _refreshFilters]; +} + +- (void)addFilter:(GPUImageOutput *)filter { + [self.filters addObject:filter]; + [self _refreshFilters]; +} + +- (void)replaceFilterAtIndex:(NSUInteger)index withFilter:(GPUImageOutput *)filter { + [self.filters replaceObjectAtIndex:index withObject:filter]; + [self _refreshFilters]; +} + +- (void) removeFilter:(GPUImageOutput *)filter; +{ + [self.filters removeObject:filter]; + [self _refreshFilters]; +} + +- (void)removeFilterAtIndex:(NSUInteger)index { + [self.filters removeObjectAtIndex:index]; + [self _refreshFilters]; +} + +- (void)removeAllFilters { + [self.filters removeAllObjects]; + [self _refreshFilters]; +} + +- (void)replaceAllFilters:(NSArray *)newFilters { + self.filters = [NSMutableArray arrayWithArray:newFilters]; + [self 
_refreshFilters]; +} + +- (void)_refreshFilters { + + id prevFilter = self.input; + GPUImageOutput *theFilter = nil; + + for (int i = 0; i < [self.filters count]; i++) { + theFilter = [self.filters objectAtIndex:i]; + [prevFilter removeAllTargets]; + [prevFilter addTarget:theFilter]; + prevFilter = theFilter; + } + + [prevFilter removeAllTargets]; + + if (self.output != nil) { + [prevFilter addTarget:self.output]; + } +} + +- (UIImage *)currentFilteredFrame { + return [(GPUImageOutput *)[_filters lastObject] imageFromCurrentFramebuffer]; +} + +- (UIImage *)currentFilteredFrameWithOrientation:(UIImageOrientation)imageOrientation { + return [(GPUImageOutput *)[_filters lastObject] imageFromCurrentFramebufferWithOrientation:imageOrientation]; +} + +- (CGImageRef)newCGImageFromCurrentFilteredFrame { + return [(GPUImageOutput *)[_filters lastObject] newCGImageFromCurrentlyProcessedOutput]; +} + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFourInputFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageFourInputFilter.h new file mode 100644 index 00000000..3beab2df --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFourInputFilter.h @@ -0,0 +1,21 @@ +#import "GPUImageThreeInputFilter.h" + +extern NSString *const kGPUImageFourInputTextureVertexShaderString; + +@interface GPUImageFourInputFilter : GPUImageThreeInputFilter +{ + GPUImageFramebuffer *fourthInputFramebuffer; + + GLint filterFourthTextureCoordinateAttribute; + GLint filterInputTextureUniform4; + GPUImageRotationMode inputRotation4; + GLuint filterSourceTexture4; + CMTime fourthFrameTime; + + BOOL hasSetThirdTexture, hasReceivedFourthFrame, fourthFrameWasVideo; + BOOL fourthFrameCheckDisabled; +} + +- (void)disableFourthFrameCheck; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFourInputFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageFourInputFilter.m new file mode 100644 index 00000000..bc660a3d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFourInputFilter.m @@ -0,0 
+1,401 @@ +#import "GPUImageFourInputFilter.h" + + +NSString *const kGPUImageFourInputTextureVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + attribute vec4 inputTextureCoordinate2; + attribute vec4 inputTextureCoordinate3; + attribute vec4 inputTextureCoordinate4; + + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + varying vec2 textureCoordinate3; + varying vec2 textureCoordinate4; + + void main() + { + gl_Position = position; + textureCoordinate = inputTextureCoordinate.xy; + textureCoordinate2 = inputTextureCoordinate2.xy; + textureCoordinate3 = inputTextureCoordinate3.xy; + textureCoordinate4 = inputTextureCoordinate4.xy; + } +); + +@implementation GPUImageFourInputFilter + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [self initWithVertexShaderFromString:kGPUImageFourInputTextureVertexShaderString fragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + return self; +} + +- (id)initWithVertexShaderFromString:(NSString *)vertexShaderString fragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [super initWithVertexShaderFromString:vertexShaderString fragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + inputRotation4 = kGPUImageNoRotation; + + hasSetThirdTexture = NO; + + hasReceivedFourthFrame = NO; + fourthFrameWasVideo = NO; + fourthFrameCheckDisabled = NO; + + fourthFrameTime = kCMTimeInvalid; + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + filterFourthTextureCoordinateAttribute = [filterProgram attributeIndex:@"inputTextureCoordinate4"]; + + filterInputTextureUniform4 = [filterProgram uniformIndex:@"inputImageTexture4"]; // This does assume a name of "inputImageTexture3" for the third input texture in the fragment shader + 
glEnableVertexAttribArray(filterFourthTextureCoordinateAttribute); + }); + + return self; +} + +- (void)initializeAttributes; +{ + [super initializeAttributes]; + [filterProgram addAttribute:@"inputTextureCoordinate4"]; +} + +- (void)disableFourthFrameCheck; +{ + fourthFrameCheckDisabled = YES; +} + +#pragma mark - +#pragma mark Rendering + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + if (self.preventRendering) + { + [firstInputFramebuffer unlock]; + [secondInputFramebuffer unlock]; + [thirdInputFramebuffer unlock]; + [fourthInputFramebuffer unlock]; + return; + } + + [GPUImageContext setActiveShaderProgram:filterProgram]; + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + if (usingNextFrameForImageCapture) + { + [outputFramebuffer lock]; + } + + [self setUniformsForProgramAtIndex:0]; + + glClearColor(backgroundColorRed, backgroundColorGreen, backgroundColorBlue, backgroundColorAlpha); + glClear(GL_COLOR_BUFFER_BIT); + + glActiveTexture(GL_TEXTURE2); + glBindTexture(GL_TEXTURE_2D, [firstInputFramebuffer texture]); + glUniform1i(filterInputTextureUniform, 2); + + glActiveTexture(GL_TEXTURE3); + glBindTexture(GL_TEXTURE_2D, [secondInputFramebuffer texture]); + glUniform1i(filterInputTextureUniform2, 3); + + glActiveTexture(GL_TEXTURE4); + glBindTexture(GL_TEXTURE_2D, [thirdInputFramebuffer texture]); + glUniform1i(filterInputTextureUniform3, 4); + + glActiveTexture(GL_TEXTURE5); + glBindTexture(GL_TEXTURE_2D, [fourthInputFramebuffer texture]); + glUniform1i(filterInputTextureUniform4, 5); + + glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + glVertexAttribPointer(filterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); + glVertexAttribPointer(filterSecondTextureCoordinateAttribute, 2, 
GL_FLOAT, 0, 0, [[self class] textureCoordinatesForRotation:inputRotation2]); + glVertexAttribPointer(filterThirdTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, [[self class] textureCoordinatesForRotation:inputRotation3]); + glVertexAttribPointer(filterFourthTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, [[self class] textureCoordinatesForRotation:inputRotation4]); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + [firstInputFramebuffer unlock]; + [secondInputFramebuffer unlock]; + [thirdInputFramebuffer unlock]; + [fourthInputFramebuffer unlock]; + if (usingNextFrameForImageCapture) + { + dispatch_semaphore_signal(imageCaptureSemaphore); + } +} + +#pragma mark - +#pragma mark GPUImageInput + +- (NSInteger)nextAvailableTextureIndex; +{ + if (hasSetThirdTexture) + { + return 3; + } + else if (hasSetSecondTexture) + { + return 2; + } + else if (hasSetFirstTexture) + { + return 1; + } + else + { + return 0; + } +} + +- (void)setInputFramebuffer:(GPUImageFramebuffer *)newInputFramebuffer atIndex:(NSInteger)textureIndex; +{ + if (textureIndex == 0) + { + firstInputFramebuffer = newInputFramebuffer; + hasSetFirstTexture = YES; + [firstInputFramebuffer lock]; + } + else if (textureIndex == 1) + { + secondInputFramebuffer = newInputFramebuffer; + hasSetSecondTexture = YES; + [secondInputFramebuffer lock]; + } + else if (textureIndex == 2) + { + thirdInputFramebuffer = newInputFramebuffer; + hasSetThirdTexture = YES; + [thirdInputFramebuffer lock]; + } + else + { + fourthInputFramebuffer = newInputFramebuffer; + [fourthInputFramebuffer lock]; + } +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + if (textureIndex == 0) + { + [super setInputSize:newSize atIndex:textureIndex]; + + if (CGSizeEqualToSize(newSize, CGSizeZero)) + { + hasSetFirstTexture = NO; + } + } + else if (textureIndex == 1) + { + if (CGSizeEqualToSize(newSize, CGSizeZero)) + { + hasSetSecondTexture = NO; + } + } + else if (textureIndex == 2) + { + if (CGSizeEqualToSize(newSize, 
CGSizeZero)) + { + hasSetThirdTexture = NO; + } + } +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + if (textureIndex == 0) + { + inputRotation = newInputRotation; + } + else if (textureIndex == 1) + { + inputRotation2 = newInputRotation; + } + else if (textureIndex == 2) + { + inputRotation3 = newInputRotation; + } + else + { + inputRotation4 = newInputRotation; + } +} + +- (CGSize)rotatedSize:(CGSize)sizeToRotate forIndex:(NSInteger)textureIndex; +{ + CGSize rotatedSize = sizeToRotate; + + GPUImageRotationMode rotationToCheck; + if (textureIndex == 0) + { + rotationToCheck = inputRotation; + } + else if (textureIndex == 1) + { + rotationToCheck = inputRotation2; + } + else if (textureIndex == 2) + { + rotationToCheck = inputRotation3; + } + else + { + rotationToCheck = inputRotation4; + } + + if (GPUImageRotationSwapsWidthAndHeight(rotationToCheck)) + { + rotatedSize.width = sizeToRotate.height; + rotatedSize.height = sizeToRotate.width; + } + + return rotatedSize; +} + +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +{ + // You can set up infinite update loops, so this helps to short circuit them + if (hasReceivedFirstFrame && hasReceivedSecondFrame && hasReceivedThirdFrame) + { + return; + } + + BOOL updatedMovieFrameOppositeStillImage = NO; + + if (textureIndex == 0) + { + hasReceivedFirstFrame = YES; + firstFrameTime = frameTime; + if (secondFrameCheckDisabled) + { + hasReceivedSecondFrame = YES; + } + if (thirdFrameCheckDisabled) + { + hasReceivedThirdFrame = YES; + } + if (fourthFrameCheckDisabled) + { + hasReceivedThirdFrame = YES; + } + + if (!CMTIME_IS_INDEFINITE(frameTime)) + { + if CMTIME_IS_INDEFINITE(secondFrameTime) + { + updatedMovieFrameOppositeStillImage = YES; + } + } + } + else if (textureIndex == 1) + { + hasReceivedSecondFrame = YES; + secondFrameTime = frameTime; + if (firstFrameCheckDisabled) + { + hasReceivedFirstFrame = YES; + } + if 
(thirdFrameCheckDisabled) + { + hasReceivedThirdFrame = YES; + } + if (fourthFrameCheckDisabled) + { + hasReceivedFourthFrame = YES; + } + + if (!CMTIME_IS_INDEFINITE(frameTime)) + { + if CMTIME_IS_INDEFINITE(firstFrameTime) + { + updatedMovieFrameOppositeStillImage = YES; + } + } + } + else if (textureIndex == 2) + { + hasReceivedThirdFrame = YES; + thirdFrameTime = frameTime; + if (firstFrameCheckDisabled) + { + hasReceivedFirstFrame = YES; + } + if (secondFrameCheckDisabled) + { + hasReceivedSecondFrame = YES; + } + if (fourthFrameCheckDisabled) + { + hasReceivedFourthFrame = YES; + } + + if (!CMTIME_IS_INDEFINITE(frameTime)) + { + if CMTIME_IS_INDEFINITE(firstFrameTime) + { + updatedMovieFrameOppositeStillImage = YES; + } + } + } + else + { + hasReceivedFourthFrame = YES; + fourthFrameTime = frameTime; + if (firstFrameCheckDisabled) + { + hasReceivedFirstFrame = YES; + } + if (secondFrameCheckDisabled) + { + hasReceivedSecondFrame = YES; + } + if (thirdFrameCheckDisabled) + { + hasReceivedThirdFrame = YES; + } + + if (!CMTIME_IS_INDEFINITE(frameTime)) + { + if CMTIME_IS_INDEFINITE(firstFrameTime) + { + updatedMovieFrameOppositeStillImage = YES; + } + } + } + + // || (hasReceivedFirstFrame && secondFrameCheckDisabled) || (hasReceivedSecondFrame && firstFrameCheckDisabled) + if ((hasReceivedFirstFrame && hasReceivedSecondFrame && hasReceivedThirdFrame && hasReceivedFourthFrame) || updatedMovieFrameOppositeStillImage) + { + static const GLfloat imageVertices[] = { + -1.0f, -1.0f, + 1.0f, -1.0f, + -1.0f, 1.0f, + 1.0f, 1.0f, + }; + + [self renderToTextureWithVertices:imageVertices textureCoordinates:[[self class] textureCoordinatesForRotation:inputRotation]]; + + [self informTargetsAboutNewFrameAtTime:frameTime]; + + hasReceivedFirstFrame = NO; + hasReceivedSecondFrame = NO; + hasReceivedThirdFrame = NO; + hasReceivedFourthFrame = NO; + } +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFramebuffer.h b/LFLiveKit/Vendor/GPUImage/GPUImageFramebuffer.h new 
file mode 100644 index 00000000..bdb6a1f1 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFramebuffer.h @@ -0,0 +1,59 @@ +#import + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +#import +#import +#import +#else +#import +#import +#endif + +#import +#import + + +typedef struct GPUTextureOptions { + GLenum minFilter; + GLenum magFilter; + GLenum wrapS; + GLenum wrapT; + GLenum internalFormat; + GLenum format; + GLenum type; +} GPUTextureOptions; + +@interface GPUImageFramebuffer : NSObject + +@property(readonly) CGSize size; +@property(readonly) GPUTextureOptions textureOptions; +@property(readonly) GLuint texture; +@property(readonly) BOOL missingFramebuffer; + +// Initialization and teardown +- (id)initWithSize:(CGSize)framebufferSize; +- (id)initWithSize:(CGSize)framebufferSize textureOptions:(GPUTextureOptions)fboTextureOptions onlyTexture:(BOOL)onlyGenerateTexture; +- (id)initWithSize:(CGSize)framebufferSize overriddenTexture:(GLuint)inputTexture; + +// Usage +- (void)activateFramebuffer; + +// Reference counting +- (void)lock; +- (void)unlock; +- (void)clearAllLocks; +- (void)disableReferenceCounting; +- (void)enableReferenceCounting; + +// Image capture +- (CGImageRef)newCGImageFromFramebufferContents; +- (void)restoreRenderTarget; + +// Raw data bytes +- (void)lockForReading; +- (void)unlockAfterReading; +- (NSUInteger)bytesPerRow; +- (GLubyte *)byteBuffer; +- (CVPixelBufferRef)pixelBuffer; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFramebuffer.m b/LFLiveKit/Vendor/GPUImage/GPUImageFramebuffer.m new file mode 100644 index 00000000..ea55c266 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFramebuffer.m @@ -0,0 +1,457 @@ +#import "GPUImageFramebuffer.h" +#import "GPUImageOutput.h" + +@interface GPUImageFramebuffer() +{ + GLuint framebuffer; +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + CVPixelBufferRef renderTarget; + CVOpenGLESTextureRef renderTexture; + NSUInteger readLockCount; +#else +#endif + NSUInteger 
framebufferReferenceCount; + BOOL referenceCountingDisabled; +} + +- (void)generateFramebuffer; +- (void)generateTexture; +- (void)destroyFramebuffer; + +@end + +void dataProviderReleaseCallback (void *info, const void *data, size_t size); +void dataProviderUnlockCallback (void *info, const void *data, size_t size); + +@implementation GPUImageFramebuffer + +@synthesize size = _size; +@synthesize textureOptions = _textureOptions; +@synthesize texture = _texture; +@synthesize missingFramebuffer = _missingFramebuffer; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithSize:(CGSize)framebufferSize textureOptions:(GPUTextureOptions)fboTextureOptions onlyTexture:(BOOL)onlyGenerateTexture; +{ + if (!(self = [super init])) + { + return nil; + } + + _textureOptions = fboTextureOptions; + _size = framebufferSize; + framebufferReferenceCount = 0; + referenceCountingDisabled = NO; + _missingFramebuffer = onlyGenerateTexture; + + if (_missingFramebuffer) + { + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + [self generateTexture]; + framebuffer = 0; + }); + } + else + { + [self generateFramebuffer]; + } + return self; +} + +- (id)initWithSize:(CGSize)framebufferSize overriddenTexture:(GLuint)inputTexture; +{ + if (!(self = [super init])) + { + return nil; + } + + GPUTextureOptions defaultTextureOptions; + defaultTextureOptions.minFilter = GL_LINEAR; + defaultTextureOptions.magFilter = GL_LINEAR; + defaultTextureOptions.wrapS = GL_CLAMP_TO_EDGE; + defaultTextureOptions.wrapT = GL_CLAMP_TO_EDGE; + defaultTextureOptions.internalFormat = GL_RGBA; + defaultTextureOptions.format = GL_BGRA; + defaultTextureOptions.type = GL_UNSIGNED_BYTE; + + _textureOptions = defaultTextureOptions; + _size = framebufferSize; + framebufferReferenceCount = 0; + referenceCountingDisabled = YES; + + _texture = inputTexture; + + return self; +} + +- (id)initWithSize:(CGSize)framebufferSize; +{ + GPUTextureOptions defaultTextureOptions; + 
defaultTextureOptions.minFilter = GL_LINEAR; + defaultTextureOptions.magFilter = GL_LINEAR; + defaultTextureOptions.wrapS = GL_CLAMP_TO_EDGE; + defaultTextureOptions.wrapT = GL_CLAMP_TO_EDGE; + defaultTextureOptions.internalFormat = GL_RGBA; + defaultTextureOptions.format = GL_BGRA; + defaultTextureOptions.type = GL_UNSIGNED_BYTE; + + if (!(self = [self initWithSize:framebufferSize textureOptions:defaultTextureOptions onlyTexture:NO])) + { + return nil; + } + + return self; +} + +- (void)dealloc +{ + [self destroyFramebuffer]; +} + +#pragma mark - +#pragma mark Internal + +- (void)generateTexture; +{ + glActiveTexture(GL_TEXTURE1); + glGenTextures(1, &_texture); + glBindTexture(GL_TEXTURE_2D, _texture); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, _textureOptions.minFilter); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, _textureOptions.magFilter); + // This is necessary for non-power-of-two textures + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, _textureOptions.wrapS); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, _textureOptions.wrapT); + + // TODO: Handle mipmaps +} + +- (void)generateFramebuffer; +{ + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + + glGenFramebuffers(1, &framebuffer); + glBindFramebuffer(GL_FRAMEBUFFER, framebuffer); + + // By default, all framebuffers on iOS 5.0+ devices are backed by texture caches, using one shared cache + if ([GPUImageContext supportsFastTextureUpload]) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + CVOpenGLESTextureCacheRef coreVideoTextureCache = [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache]; + // Code originally sourced from http://allmybrain.com/2011/12/08/rendering-to-a-texture-with-ios-5-texture-cache-api/ + + CFDictionaryRef empty; // empty value for attr value. 
+ CFMutableDictionaryRef attrs; + empty = CFDictionaryCreate(kCFAllocatorDefault, NULL, NULL, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks); // our empty IOSurface properties dictionary + attrs = CFDictionaryCreateMutable(kCFAllocatorDefault, 1, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks); + CFDictionarySetValue(attrs, kCVPixelBufferIOSurfacePropertiesKey, empty); + + CVReturn err = CVPixelBufferCreate(kCFAllocatorDefault, (int)_size.width, (int)_size.height, kCVPixelFormatType_32BGRA, attrs, &renderTarget); + if (err) + { + NSLog(@"FBO size: %f, %f", _size.width, _size.height); + NSAssert(NO, @"Error at CVPixelBufferCreate %d", err); + } + + err = CVOpenGLESTextureCacheCreateTextureFromImage (kCFAllocatorDefault, coreVideoTextureCache, renderTarget, + NULL, // texture attributes + GL_TEXTURE_2D, + _textureOptions.internalFormat, // opengl format + (int)_size.width, + (int)_size.height, + _textureOptions.format, // native iOS format + _textureOptions.type, + 0, + &renderTexture); + if (err) + { + NSAssert(NO, @"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err); + } + + CFRelease(attrs); + CFRelease(empty); + + glBindTexture(CVOpenGLESTextureGetTarget(renderTexture), CVOpenGLESTextureGetName(renderTexture)); + _texture = CVOpenGLESTextureGetName(renderTexture); + glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, _textureOptions.wrapS); + glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, _textureOptions.wrapT); + + glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, CVOpenGLESTextureGetName(renderTexture), 0); +#endif + } + else + { + [self generateTexture]; + + glBindTexture(GL_TEXTURE_2D, _texture); + + glTexImage2D(GL_TEXTURE_2D, 0, _textureOptions.internalFormat, (int)_size.width, (int)_size.height, 0, _textureOptions.format, _textureOptions.type, 0); + glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, _texture, 0); + } + + #ifndef NS_BLOCK_ASSERTIONS + 
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER); + NSAssert(status == GL_FRAMEBUFFER_COMPLETE, @"Incomplete filter FBO: %d", status); + #endif + + glBindTexture(GL_TEXTURE_2D, 0); + }); +} + +- (void)destroyFramebuffer; +{ + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + + if (framebuffer) + { + glDeleteFramebuffers(1, &framebuffer); + framebuffer = 0; + } + + + if ([GPUImageContext supportsFastTextureUpload] && (!_missingFramebuffer)) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + if (renderTarget) + { + CFRelease(renderTarget); + renderTarget = NULL; + } + + if (renderTexture) + { + CFRelease(renderTexture); + renderTexture = NULL; + } +#endif + } + else + { + glDeleteTextures(1, &_texture); + } + + }); +} + +#pragma mark - +#pragma mark Usage + +- (void)activateFramebuffer; +{ + glBindFramebuffer(GL_FRAMEBUFFER, framebuffer); + glViewport(0, 0, (int)_size.width, (int)_size.height); +} + +#pragma mark - +#pragma mark Reference counting + +- (void)lock; +{ + if (referenceCountingDisabled) + { + return; + } + + framebufferReferenceCount++; +} + +- (void)unlock; +{ + if (referenceCountingDisabled) + { + return; + } + + NSAssert(framebufferReferenceCount > 0, @"Tried to overrelease a framebuffer, did you forget to call -useNextFrameForImageCapture before using -imageFromCurrentFramebuffer?"); + framebufferReferenceCount--; + if (framebufferReferenceCount < 1) + { + [[GPUImageContext sharedFramebufferCache] returnFramebufferToCache:self]; + } +} + +- (void)clearAllLocks; +{ + framebufferReferenceCount = 0; +} + +- (void)disableReferenceCounting; +{ + referenceCountingDisabled = YES; +} + +- (void)enableReferenceCounting; +{ + referenceCountingDisabled = NO; +} + +#pragma mark - +#pragma mark Image capture + +void dataProviderReleaseCallback (void *info, const void *data, size_t size) +{ + free((void *)data); +} + +void dataProviderUnlockCallback (void *info, const void *data, size_t size) +{ + 
GPUImageFramebuffer *framebuffer = (__bridge_transfer GPUImageFramebuffer*)info; + + [framebuffer restoreRenderTarget]; + [framebuffer unlock]; + [[GPUImageContext sharedFramebufferCache] removeFramebufferFromActiveImageCaptureList:framebuffer]; +} + +- (CGImageRef)newCGImageFromFramebufferContents; +{ + // a CGImage can only be created from a 'normal' color texture + NSAssert(self.textureOptions.internalFormat == GL_RGBA, @"For conversion to a CGImage the output texture format for this filter must be GL_RGBA."); + NSAssert(self.textureOptions.type == GL_UNSIGNED_BYTE, @"For conversion to a CGImage the type of the output texture of this filter must be GL_UNSIGNED_BYTE."); + + __block CGImageRef cgImageFromBytes; + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + + NSUInteger totalBytesForImage = (int)_size.width * (int)_size.height * 4; + // It appears that the width of a texture must be padded out to be a multiple of 8 (32 bytes) if reading from it using a texture cache + + GLubyte *rawImagePixels; + + CGDataProviderRef dataProvider = NULL; + if ([GPUImageContext supportsFastTextureUpload]) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + NSUInteger paddedWidthOfImage = CVPixelBufferGetBytesPerRow(renderTarget) / 4.0; + NSUInteger paddedBytesForImage = paddedWidthOfImage * (int)_size.height * 4; + + glFinish(); + CFRetain(renderTarget); // I need to retain the pixel buffer here and release in the data source callback to prevent its bytes from being prematurely deallocated during a photo write operation + [self lockForReading]; + rawImagePixels = (GLubyte *)CVPixelBufferGetBaseAddress(renderTarget); + dataProvider = CGDataProviderCreateWithData((__bridge_retained void*)self, rawImagePixels, paddedBytesForImage, dataProviderUnlockCallback); + [[GPUImageContext sharedFramebufferCache] addFramebufferToActiveImageCaptureList:self]; // In case the framebuffer is swapped out on the filter, need to have a strong reference 
to it somewhere for it to hang on while the image is in existence +#else +#endif + } + else + { + [self activateFramebuffer]; + rawImagePixels = (GLubyte *)malloc(totalBytesForImage); + glReadPixels(0, 0, (int)_size.width, (int)_size.height, GL_RGBA, GL_UNSIGNED_BYTE, rawImagePixels); + dataProvider = CGDataProviderCreateWithData(NULL, rawImagePixels, totalBytesForImage, dataProviderReleaseCallback); + [self unlock]; // Don't need to keep this around anymore + } + + CGColorSpaceRef defaultRGBColorSpace = CGColorSpaceCreateDeviceRGB(); + + if ([GPUImageContext supportsFastTextureUpload]) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + cgImageFromBytes = CGImageCreate((int)_size.width, (int)_size.height, 8, 32, CVPixelBufferGetBytesPerRow(renderTarget), defaultRGBColorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst, dataProvider, NULL, NO, kCGRenderingIntentDefault); +#else +#endif + } + else + { + cgImageFromBytes = CGImageCreate((int)_size.width, (int)_size.height, 8, 32, 4 * (int)_size.width, defaultRGBColorSpace, kCGBitmapByteOrderDefault | kCGImageAlphaLast, dataProvider, NULL, NO, kCGRenderingIntentDefault); + } + + // Capture image with current device orientation + CGDataProviderRelease(dataProvider); + CGColorSpaceRelease(defaultRGBColorSpace); + + }); + + return cgImageFromBytes; +} + +- (void)restoreRenderTarget; +{ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [self unlockAfterReading]; + CFRelease(renderTarget); +#else +#endif +} + +#pragma mark - +#pragma mark Raw data bytes + +- (void)lockForReading +{ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + if ([GPUImageContext supportsFastTextureUpload]) + { + if (readLockCount == 0) + { + CVPixelBufferLockBaseAddress(renderTarget, 0); + } + readLockCount++; + } +#endif +} + +- (void)unlockAfterReading +{ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + if ([GPUImageContext supportsFastTextureUpload]) + { + NSAssert(readLockCount > 0, @"Unbalanced call to 
-[GPUImageFramebuffer unlockAfterReading]"); + readLockCount--; + if (readLockCount == 0) + { + CVPixelBufferUnlockBaseAddress(renderTarget, 0); + } + } +#endif +} + +- (NSUInteger)bytesPerRow; +{ + if ([GPUImageContext supportsFastTextureUpload]) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + return CVPixelBufferGetBytesPerRow(renderTarget); +#else + return _size.width * 4; // TODO: do more with this on the non-texture-cache side +#endif + } + else + { + return _size.width * 4; + } +} + +- (GLubyte *)byteBuffer; +{ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [self lockForReading]; + GLubyte * bufferBytes = CVPixelBufferGetBaseAddress(renderTarget); + [self unlockAfterReading]; + return bufferBytes; +#else + return NULL; // TODO: do more with this on the non-texture-cache side +#endif +} + +- (CVPixelBufferRef )pixelBuffer; +{ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + return renderTarget; +#else + return NULL; // TODO: do more with this on the non-texture-cache side +#endif +} + +- (GLuint)texture; +{ +// NSLog(@"Accessing texture: %d from FB: %@", _texture, self); + return _texture; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFramebufferCache.h b/LFLiveKit/Vendor/GPUImage/GPUImageFramebufferCache.h new file mode 100644 index 00000000..e56a3456 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFramebufferCache.h @@ -0,0 +1,15 @@ +#import +#import +#import "GPUImageFramebuffer.h" + +@interface GPUImageFramebufferCache : NSObject + +// Framebuffer management +- (GPUImageFramebuffer *)fetchFramebufferForSize:(CGSize)framebufferSize textureOptions:(GPUTextureOptions)textureOptions onlyTexture:(BOOL)onlyTexture; +- (GPUImageFramebuffer *)fetchFramebufferForSize:(CGSize)framebufferSize onlyTexture:(BOOL)onlyTexture; +- (void)returnFramebufferToCache:(GPUImageFramebuffer *)framebuffer; +- (void)purgeAllUnassignedFramebuffers; +- (void)addFramebufferToActiveImageCaptureList:(GPUImageFramebuffer *)framebuffer; +- 
(void)removeFramebufferFromActiveImageCaptureList:(GPUImageFramebuffer *)framebuffer; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageFramebufferCache.m b/LFLiveKit/Vendor/GPUImage/GPUImageFramebufferCache.m new file mode 100644 index 00000000..53faf2c3 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageFramebufferCache.m @@ -0,0 +1,190 @@ +#import "GPUImageFramebufferCache.h" +#import "GPUImageContext.h" +#import "GPUImageOutput.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +#import +#else +#endif + +@interface GPUImageFramebufferCache() +{ +// NSCache *framebufferCache; + NSMutableDictionary *framebufferCache; + NSMutableDictionary *framebufferTypeCounts; + NSMutableArray *activeImageCaptureList; // Where framebuffers that may be lost by a filter, but which are still needed for a UIImage, etc., are stored + id memoryWarningObserver; + + dispatch_queue_t framebufferCacheQueue; +} + +- (NSString *)hashForSize:(CGSize)size textureOptions:(GPUTextureOptions)textureOptions onlyTexture:(BOOL)onlyTexture; + +@end + + +@implementation GPUImageFramebufferCache + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + __unsafe_unretained __typeof__ (self) weakSelf = self; + memoryWarningObserver = [[NSNotificationCenter defaultCenter] addObserverForName:UIApplicationDidReceiveMemoryWarningNotification object:nil queue:nil usingBlock:^(NSNotification *note) { + __typeof__ (self) strongSelf = weakSelf; + if (strongSelf) { + [strongSelf purgeAllUnassignedFramebuffers]; + } + }]; +#else +#endif + +// framebufferCache = [[NSCache alloc] init]; + framebufferCache = [[NSMutableDictionary alloc] init]; + framebufferTypeCounts = [[NSMutableDictionary alloc] init]; + activeImageCaptureList = [[NSMutableArray alloc] init]; + framebufferCacheQueue = dispatch_queue_create("com.sunsetlakesoftware.GPUImage.framebufferCacheQueue", 
GPUImageDefaultQueueAttribute()); + + return self; +} + +- (void)dealloc; +{ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [[NSNotificationCenter defaultCenter] removeObserver:self]; +#else +#endif +} + +#pragma mark - +#pragma mark Framebuffer management + +- (NSString *)hashForSize:(CGSize)size textureOptions:(GPUTextureOptions)textureOptions onlyTexture:(BOOL)onlyTexture; +{ + if (onlyTexture) + { + return [NSString stringWithFormat:@"%.1fx%.1f-%d:%d:%d:%d:%d:%d:%d-NOFB", size.width, size.height, textureOptions.minFilter, textureOptions.magFilter, textureOptions.wrapS, textureOptions.wrapT, textureOptions.internalFormat, textureOptions.format, textureOptions.type]; + } + else + { + return [NSString stringWithFormat:@"%.1fx%.1f-%d:%d:%d:%d:%d:%d:%d", size.width, size.height, textureOptions.minFilter, textureOptions.magFilter, textureOptions.wrapS, textureOptions.wrapT, textureOptions.internalFormat, textureOptions.format, textureOptions.type]; + } +} + +- (GPUImageFramebuffer *)fetchFramebufferForSize:(CGSize)framebufferSize textureOptions:(GPUTextureOptions)textureOptions onlyTexture:(BOOL)onlyTexture; +{ + __block GPUImageFramebuffer *framebufferFromCache = nil; +// dispatch_sync(framebufferCacheQueue, ^{ + runSynchronouslyOnVideoProcessingQueue(^{ + NSString *lookupHash = [self hashForSize:framebufferSize textureOptions:textureOptions onlyTexture:onlyTexture]; + NSNumber *numberOfMatchingTexturesInCache = [framebufferTypeCounts objectForKey:lookupHash]; + NSInteger numberOfMatchingTextures = [numberOfMatchingTexturesInCache integerValue]; + + if ([numberOfMatchingTexturesInCache integerValue] < 1) + { + // Nothing in the cache, create a new framebuffer to use + framebufferFromCache = [[GPUImageFramebuffer alloc] initWithSize:framebufferSize textureOptions:textureOptions onlyTexture:onlyTexture]; + } + else + { + // Something found, pull the old framebuffer and decrement the count + NSInteger currentTextureID = (numberOfMatchingTextures - 1); + while 
((framebufferFromCache == nil) && (currentTextureID >= 0)) + { + NSString *textureHash = [NSString stringWithFormat:@"%@-%ld", lookupHash, (long)currentTextureID]; + framebufferFromCache = [framebufferCache objectForKey:textureHash]; + // Test the values in the cache first, to see if they got invalidated behind our back + if (framebufferFromCache != nil) + { + // Withdraw this from the cache while it's in use + [framebufferCache removeObjectForKey:textureHash]; + } + currentTextureID--; + } + + currentTextureID++; + + [framebufferTypeCounts setObject:[NSNumber numberWithInteger:currentTextureID] forKey:lookupHash]; + + if (framebufferFromCache == nil) + { + framebufferFromCache = [[GPUImageFramebuffer alloc] initWithSize:framebufferSize textureOptions:textureOptions onlyTexture:onlyTexture]; + } + } + }); + + [framebufferFromCache lock]; + return framebufferFromCache; +} + +- (GPUImageFramebuffer *)fetchFramebufferForSize:(CGSize)framebufferSize onlyTexture:(BOOL)onlyTexture; +{ + GPUTextureOptions defaultTextureOptions; + defaultTextureOptions.minFilter = GL_LINEAR; + defaultTextureOptions.magFilter = GL_LINEAR; + defaultTextureOptions.wrapS = GL_CLAMP_TO_EDGE; + defaultTextureOptions.wrapT = GL_CLAMP_TO_EDGE; + defaultTextureOptions.internalFormat = GL_RGBA; + defaultTextureOptions.format = GL_BGRA; + defaultTextureOptions.type = GL_UNSIGNED_BYTE; + + return [self fetchFramebufferForSize:framebufferSize textureOptions:defaultTextureOptions onlyTexture:onlyTexture]; +} + +- (void)returnFramebufferToCache:(GPUImageFramebuffer *)framebuffer; +{ + [framebuffer clearAllLocks]; + +// dispatch_async(framebufferCacheQueue, ^{ + runAsynchronouslyOnVideoProcessingQueue(^{ + CGSize framebufferSize = framebuffer.size; + GPUTextureOptions framebufferTextureOptions = framebuffer.textureOptions; + NSString *lookupHash = [self hashForSize:framebufferSize textureOptions:framebufferTextureOptions onlyTexture:framebuffer.missingFramebuffer]; + NSNumber 
*numberOfMatchingTexturesInCache = [framebufferTypeCounts objectForKey:lookupHash]; + NSInteger numberOfMatchingTextures = [numberOfMatchingTexturesInCache integerValue]; + + NSString *textureHash = [NSString stringWithFormat:@"%@-%ld", lookupHash, (long)numberOfMatchingTextures]; + +// [framebufferCache setObject:framebuffer forKey:textureHash cost:round(framebufferSize.width * framebufferSize.height * 4.0)]; + [framebufferCache setObject:framebuffer forKey:textureHash]; + [framebufferTypeCounts setObject:[NSNumber numberWithInteger:(numberOfMatchingTextures + 1)] forKey:lookupHash]; + }); +} + +- (void)purgeAllUnassignedFramebuffers; +{ + runAsynchronouslyOnVideoProcessingQueue(^{ +// dispatch_async(framebufferCacheQueue, ^{ + [framebufferCache removeAllObjects]; + [framebufferTypeCounts removeAllObjects]; +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + CVOpenGLESTextureCacheFlush([[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], 0); +#else +#endif + }); +} + +- (void)addFramebufferToActiveImageCaptureList:(GPUImageFramebuffer *)framebuffer; +{ + runAsynchronouslyOnVideoProcessingQueue(^{ +// dispatch_async(framebufferCacheQueue, ^{ + [activeImageCaptureList addObject:framebuffer]; + }); +} + +- (void)removeFramebufferFromActiveImageCaptureList:(GPUImageFramebuffer *)framebuffer; +{ + runAsynchronouslyOnVideoProcessingQueue(^{ +// dispatch_async(framebufferCacheQueue, ^{ + [activeImageCaptureList removeObject:framebuffer]; + }); +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageGammaFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageGammaFilter.h new file mode 100755 index 00000000..0521d089 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageGammaFilter.h @@ -0,0 +1,11 @@ +#import "GPUImageFilter.h" + +@interface GPUImageGammaFilter : GPUImageFilter +{ + GLint gammaUniform; +} + +// Gamma ranges from 0.0 to 3.0, with 1.0 as the normal level +@property(readwrite, nonatomic) CGFloat gamma; + +@end diff --git 
a/LFLiveKit/Vendor/GPUImage/GPUImageGammaFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageGammaFilter.m new file mode 100755 index 00000000..35adaba2 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageGammaFilter.m @@ -0,0 +1,66 @@ +#import "GPUImageGammaFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageGammaFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform lowp float gamma; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4(pow(textureColor.rgb, vec3(gamma)), textureColor.w); + } +); +#else +NSString *const kGPUImageGammaFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float gamma; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4(pow(textureColor.rgb, vec3(gamma)), textureColor.w); + } +); +#endif + +@implementation GPUImageGammaFilter + +@synthesize gamma = _gamma; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageGammaFragmentShaderString])) + { + return nil; + } + + gammaUniform = [filterProgram uniformIndex:@"gamma"]; + self.gamma = 1.0; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setGamma:(CGFloat)newValue; +{ + _gamma = newValue; + + [self setFloat:_gamma forUniform:gammaUniform program:filterProgram]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurFilter.h new file mode 100755 index 00000000..1fb7a137 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurFilter.h @@ -0,0 +1,36 @@ +#import "GPUImageTwoPassTextureSamplingFilter.h" + +/** A Gaussian blur filter + Interpolated optimization based on Daniel Rákos' work at 
http://rastergrid.com/blog/2010/09/efficient-gaussian-blur-with-linear-sampling/ + */ + +@interface GPUImageGaussianBlurFilter : GPUImageTwoPassTextureSamplingFilter +{ + BOOL shouldResizeBlurRadiusWithImageSize; + CGFloat _blurRadiusInPixels; +} + +/** A multiplier for the spacing between texels, ranging from 0.0 on up, with a default of 1.0. Adjusting this may slightly increase the blur strength, but will introduce artifacts in the result. + */ +@property (readwrite, nonatomic) CGFloat texelSpacingMultiplier; + +/** A radius in pixels to use for the blur, with a default of 2.0. This adjusts the sigma variable in the Gaussian distribution function. + */ +@property (readwrite, nonatomic) CGFloat blurRadiusInPixels; + +/** Setting these properties will allow the blur radius to scale with the size of the image. These properties are mutually exclusive; setting either will set the other to 0. + */ +@property (readwrite, nonatomic) CGFloat blurRadiusAsFractionOfImageWidth; +@property (readwrite, nonatomic) CGFloat blurRadiusAsFractionOfImageHeight; + +/// The number of times to sequentially blur the incoming image. The more passes, the slower the filter. 
+@property(readwrite, nonatomic) NSUInteger blurPasses; + ++ (NSString *)vertexShaderForStandardBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; ++ (NSString *)fragmentShaderForStandardBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; ++ (NSString *)vertexShaderForOptimizedBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; ++ (NSString *)fragmentShaderForOptimizedBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; + +- (void)switchToVertexShader:(NSString *)newVertexShader fragmentShader:(NSString *)newFragmentShader; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurFilter.m new file mode 100755 index 00000000..ec99352a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurFilter.m @@ -0,0 +1,513 @@ +#import "GPUImageGaussianBlurFilter.h" + +@implementation GPUImageGaussianBlurFilter + +@synthesize texelSpacingMultiplier = _texelSpacingMultiplier; +@synthesize blurRadiusInPixels = _blurRadiusInPixels; +@synthesize blurRadiusAsFractionOfImageWidth = _blurRadiusAsFractionOfImageWidth; +@synthesize blurRadiusAsFractionOfImageHeight = _blurRadiusAsFractionOfImageHeight; +@synthesize blurPasses = _blurPasses; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithFirstStageVertexShaderFromString:(NSString *)firstStageVertexShaderString firstStageFragmentShaderFromString:(NSString *)firstStageFragmentShaderString secondStageVertexShaderFromString:(NSString *)secondStageVertexShaderString secondStageFragmentShaderFromString:(NSString *)secondStageFragmentShaderString +{ + if (!(self = [super initWithFirstStageVertexShaderFromString:firstStageVertexShaderString firstStageFragmentShaderFromString:firstStageFragmentShaderString secondStageVertexShaderFromString:secondStageVertexShaderString secondStageFragmentShaderFromString:secondStageFragmentShaderString])) + { + return nil; + } + + self.texelSpacingMultiplier = 1.0; + 
_blurRadiusInPixels = 2.0; + shouldResizeBlurRadiusWithImageSize = NO; + + return self; +} + +- (id)init; +{ + NSString *currentGaussianBlurVertexShader = [[self class] vertexShaderForOptimizedBlurOfRadius:4 sigma:2.0]; + NSString *currentGaussianBlurFragmentShader = [[self class] fragmentShaderForOptimizedBlurOfRadius:4 sigma:2.0]; + + return [self initWithFirstStageVertexShaderFromString:currentGaussianBlurVertexShader firstStageFragmentShaderFromString:currentGaussianBlurFragmentShader secondStageVertexShaderFromString:currentGaussianBlurVertexShader secondStageFragmentShaderFromString:currentGaussianBlurFragmentShader]; +} + +#pragma mark - +#pragma mark Auto-generation of optimized Gaussian shaders + +// "Implementation limit of 32 varying components exceeded" - Max number of varyings for these GPUs + ++ (NSString *)vertexShaderForStandardBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; +{ + if (blurRadius < 1) + { + return kGPUImageVertexShaderString; + } + +// NSLog(@"Max varyings: %d", [GPUImageContext maximumVaryingVectorsForThisDevice]); + NSMutableString *shaderString = [[NSMutableString alloc] init]; + + // Header + [shaderString appendFormat:@"\ + attribute vec4 position;\n\ + attribute vec4 inputTextureCoordinate;\n\ + \n\ + uniform float texelWidthOffset;\n\ + uniform float texelHeightOffset;\n\ + \n\ + varying vec2 blurCoordinates[%lu];\n\ + \n\ + void main()\n\ + {\n\ + gl_Position = position;\n\ + \n\ + vec2 singleStepOffset = vec2(texelWidthOffset, texelHeightOffset);\n", (unsigned long)(blurRadius * 2 + 1) ]; + + // Inner offset loop + for (NSUInteger currentBlurCoordinateIndex = 0; currentBlurCoordinateIndex < (blurRadius * 2 + 1); currentBlurCoordinateIndex++) + { + NSInteger offsetFromCenter = currentBlurCoordinateIndex - blurRadius; + if (offsetFromCenter < 0) + { + [shaderString appendFormat:@"blurCoordinates[%ld] = inputTextureCoordinate.xy - singleStepOffset * %f;\n", (unsigned long)currentBlurCoordinateIndex, 
(GLfloat)(-offsetFromCenter)]; + } + else if (offsetFromCenter > 0) + { + [shaderString appendFormat:@"blurCoordinates[%ld] = inputTextureCoordinate.xy + singleStepOffset * %f;\n", (unsigned long)currentBlurCoordinateIndex, (GLfloat)(offsetFromCenter)]; + } + else + { + [shaderString appendFormat:@"blurCoordinates[%ld] = inputTextureCoordinate.xy;\n", (unsigned long)currentBlurCoordinateIndex]; + } + } + + // Footer + [shaderString appendString:@"}\n"]; + + return shaderString; +} + ++ (NSString *)fragmentShaderForStandardBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; +{ + if (blurRadius < 1) + { + return kGPUImagePassthroughFragmentShaderString; + } + + // First, generate the normal Gaussian weights for a given sigma + GLfloat *standardGaussianWeights = calloc(blurRadius + 1, sizeof(GLfloat)); + GLfloat sumOfWeights = 0.0; + for (NSUInteger currentGaussianWeightIndex = 0; currentGaussianWeightIndex < blurRadius + 1; currentGaussianWeightIndex++) + { + standardGaussianWeights[currentGaussianWeightIndex] = (1.0 / sqrt(2.0 * M_PI * pow(sigma, 2.0))) * exp(-pow(currentGaussianWeightIndex, 2.0) / (2.0 * pow(sigma, 2.0))); + + if (currentGaussianWeightIndex == 0) + { + sumOfWeights += standardGaussianWeights[currentGaussianWeightIndex]; + } + else + { + sumOfWeights += 2.0 * standardGaussianWeights[currentGaussianWeightIndex]; + } + } + + // Next, normalize these weights to prevent the clipping of the Gaussian curve at the end of the discrete samples from reducing luminance + for (NSUInteger currentGaussianWeightIndex = 0; currentGaussianWeightIndex < blurRadius + 1; currentGaussianWeightIndex++) + { + standardGaussianWeights[currentGaussianWeightIndex] = standardGaussianWeights[currentGaussianWeightIndex] / sumOfWeights; + } + + // Finally, generate the shader from these weights + NSMutableString *shaderString = [[NSMutableString alloc] init]; + + // Header +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [shaderString appendFormat:@"\ + uniform sampler2D 
inputImageTexture;\n\ + \n\ + varying highp vec2 blurCoordinates[%lu];\n\ + \n\ + void main()\n\ + {\n\ + lowp vec4 sum = vec4(0.0);\n", (unsigned long)(blurRadius * 2 + 1) ]; +#else + [shaderString appendFormat:@"\ + uniform sampler2D inputImageTexture;\n\ + \n\ + varying vec2 blurCoordinates[%lu];\n\ + \n\ + void main()\n\ + {\n\ + vec4 sum = vec4(0.0);\n", (blurRadius * 2 + 1) ]; +#endif + + // Inner texture loop + for (NSUInteger currentBlurCoordinateIndex = 0; currentBlurCoordinateIndex < (blurRadius * 2 + 1); currentBlurCoordinateIndex++) + { + NSInteger offsetFromCenter = currentBlurCoordinateIndex - blurRadius; + if (offsetFromCenter < 0) + { + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[%lu]) * %f;\n", (unsigned long)currentBlurCoordinateIndex, standardGaussianWeights[-offsetFromCenter]]; + } + else + { + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[%lu]) * %f;\n", (unsigned long)currentBlurCoordinateIndex, standardGaussianWeights[offsetFromCenter]]; + } + } + + // Footer + [shaderString appendString:@"\ + gl_FragColor = sum;\n\ + }\n"]; + + free(standardGaussianWeights); + return shaderString; +} + ++ (NSString *)vertexShaderForOptimizedBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; +{ + if (blurRadius < 1) + { + return kGPUImageVertexShaderString; + } + + // First, generate the normal Gaussian weights for a given sigma + GLfloat *standardGaussianWeights = calloc(blurRadius + 1, sizeof(GLfloat)); + GLfloat sumOfWeights = 0.0; + for (NSUInteger currentGaussianWeightIndex = 0; currentGaussianWeightIndex < blurRadius + 1; currentGaussianWeightIndex++) + { + standardGaussianWeights[currentGaussianWeightIndex] = (1.0 / sqrt(2.0 * M_PI * pow(sigma, 2.0))) * exp(-pow(currentGaussianWeightIndex, 2.0) / (2.0 * pow(sigma, 2.0))); + + if (currentGaussianWeightIndex == 0) + { + sumOfWeights += standardGaussianWeights[currentGaussianWeightIndex]; + } + else + { + sumOfWeights += 2.0 * 
standardGaussianWeights[currentGaussianWeightIndex]; + } + } + + // Next, normalize these weights to prevent the clipping of the Gaussian curve at the end of the discrete samples from reducing luminance + for (NSUInteger currentGaussianWeightIndex = 0; currentGaussianWeightIndex < blurRadius + 1; currentGaussianWeightIndex++) + { + standardGaussianWeights[currentGaussianWeightIndex] = standardGaussianWeights[currentGaussianWeightIndex] / sumOfWeights; + } + + // From these weights we calculate the offsets to read interpolated values from + NSUInteger numberOfOptimizedOffsets = MIN(blurRadius / 2 + (blurRadius % 2), 7); + GLfloat *optimizedGaussianOffsets = calloc(numberOfOptimizedOffsets, sizeof(GLfloat)); + + for (NSUInteger currentOptimizedOffset = 0; currentOptimizedOffset < numberOfOptimizedOffsets; currentOptimizedOffset++) + { + GLfloat firstWeight = standardGaussianWeights[currentOptimizedOffset*2 + 1]; + GLfloat secondWeight = standardGaussianWeights[currentOptimizedOffset*2 + 2]; + + GLfloat optimizedWeight = firstWeight + secondWeight; + + optimizedGaussianOffsets[currentOptimizedOffset] = (firstWeight * (currentOptimizedOffset*2 + 1) + secondWeight * (currentOptimizedOffset*2 + 2)) / optimizedWeight; + } + + NSMutableString *shaderString = [[NSMutableString alloc] init]; + // Header + [shaderString appendFormat:@"\ + attribute vec4 position;\n\ + attribute vec4 inputTextureCoordinate;\n\ + \n\ + uniform float texelWidthOffset;\n\ + uniform float texelHeightOffset;\n\ + \n\ + varying vec2 blurCoordinates[%lu];\n\ + \n\ + void main()\n\ + {\n\ + gl_Position = position;\n\ + \n\ + vec2 singleStepOffset = vec2(texelWidthOffset, texelHeightOffset);\n", (unsigned long)(1 + (numberOfOptimizedOffsets * 2))]; + + // Inner offset loop + [shaderString appendString:@"blurCoordinates[0] = inputTextureCoordinate.xy;\n"]; + for (NSUInteger currentOptimizedOffset = 0; currentOptimizedOffset < numberOfOptimizedOffsets; currentOptimizedOffset++) + { + [shaderString 
appendFormat:@"\ + blurCoordinates[%lu] = inputTextureCoordinate.xy + singleStepOffset * %f;\n\ + blurCoordinates[%lu] = inputTextureCoordinate.xy - singleStepOffset * %f;\n", (unsigned long)((currentOptimizedOffset * 2) + 1), optimizedGaussianOffsets[currentOptimizedOffset], (unsigned long)((currentOptimizedOffset * 2) + 2), optimizedGaussianOffsets[currentOptimizedOffset]]; + } + + // Footer + [shaderString appendString:@"}\n"]; + + free(optimizedGaussianOffsets); + free(standardGaussianWeights); + return shaderString; +} + ++ (NSString *)fragmentShaderForOptimizedBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; +{ + if (blurRadius < 1) + { + return kGPUImagePassthroughFragmentShaderString; + } + + // First, generate the normal Gaussian weights for a given sigma + GLfloat *standardGaussianWeights = calloc(blurRadius + 1, sizeof(GLfloat)); + GLfloat sumOfWeights = 0.0; + for (NSUInteger currentGaussianWeightIndex = 0; currentGaussianWeightIndex < blurRadius + 1; currentGaussianWeightIndex++) + { + standardGaussianWeights[currentGaussianWeightIndex] = (1.0 / sqrt(2.0 * M_PI * pow(sigma, 2.0))) * exp(-pow(currentGaussianWeightIndex, 2.0) / (2.0 * pow(sigma, 2.0))); + + if (currentGaussianWeightIndex == 0) + { + sumOfWeights += standardGaussianWeights[currentGaussianWeightIndex]; + } + else + { + sumOfWeights += 2.0 * standardGaussianWeights[currentGaussianWeightIndex]; + } + } + + // Next, normalize these weights to prevent the clipping of the Gaussian curve at the end of the discrete samples from reducing luminance + for (NSUInteger currentGaussianWeightIndex = 0; currentGaussianWeightIndex < blurRadius + 1; currentGaussianWeightIndex++) + { + standardGaussianWeights[currentGaussianWeightIndex] = standardGaussianWeights[currentGaussianWeightIndex] / sumOfWeights; + } + + // From these weights we calculate the offsets to read interpolated values from + NSUInteger numberOfOptimizedOffsets = MIN(blurRadius / 2 + (blurRadius % 2), 7); + NSUInteger 
trueNumberOfOptimizedOffsets = blurRadius / 2 + (blurRadius % 2); + + NSMutableString *shaderString = [[NSMutableString alloc] init]; + + // Header +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [shaderString appendFormat:@"\ + uniform sampler2D inputImageTexture;\n\ + uniform highp float texelWidthOffset;\n\ + uniform highp float texelHeightOffset;\n\ + \n\ + varying highp vec2 blurCoordinates[%lu];\n\ + \n\ + void main()\n\ + {\n\ + lowp vec4 sum = vec4(0.0);\n", (unsigned long)(1 + (numberOfOptimizedOffsets * 2)) ]; +#else + [shaderString appendFormat:@"\ + uniform sampler2D inputImageTexture;\n\ + uniform float texelWidthOffset;\n\ + uniform float texelHeightOffset;\n\ + \n\ + varying vec2 blurCoordinates[%lu];\n\ + \n\ + void main()\n\ + {\n\ + vec4 sum = vec4(0.0);\n", 1 + (numberOfOptimizedOffsets * 2) ]; +#endif + + // Inner texture loop + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[0]) * %f;\n", standardGaussianWeights[0]]; + + for (NSUInteger currentBlurCoordinateIndex = 0; currentBlurCoordinateIndex < numberOfOptimizedOffsets; currentBlurCoordinateIndex++) + { + GLfloat firstWeight = standardGaussianWeights[currentBlurCoordinateIndex * 2 + 1]; + GLfloat secondWeight = standardGaussianWeights[currentBlurCoordinateIndex * 2 + 2]; + GLfloat optimizedWeight = firstWeight + secondWeight; + + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[%lu]) * %f;\n", (unsigned long)((currentBlurCoordinateIndex * 2) + 1), optimizedWeight]; + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[%lu]) * %f;\n", (unsigned long)((currentBlurCoordinateIndex * 2) + 2), optimizedWeight]; + } + + // If the number of required samples exceeds the amount we can pass in via varyings, we have to do dependent texture reads in the fragment shader + if (trueNumberOfOptimizedOffsets > numberOfOptimizedOffsets) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [shaderString 
appendString:@"highp vec2 singleStepOffset = vec2(texelWidthOffset, texelHeightOffset);\n"]; +#else + [shaderString appendString:@"vec2 singleStepOffset = vec2(texelWidthOffset, texelHeightOffset);\n"]; +#endif + + for (NSUInteger currentOverlowTextureRead = numberOfOptimizedOffsets; currentOverlowTextureRead < trueNumberOfOptimizedOffsets; currentOverlowTextureRead++) + { + GLfloat firstWeight = standardGaussianWeights[currentOverlowTextureRead * 2 + 1]; + GLfloat secondWeight = standardGaussianWeights[currentOverlowTextureRead * 2 + 2]; + + GLfloat optimizedWeight = firstWeight + secondWeight; + GLfloat optimizedOffset = (firstWeight * (currentOverlowTextureRead * 2 + 1) + secondWeight * (currentOverlowTextureRead * 2 + 2)) / optimizedWeight; + + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[0] + singleStepOffset * %f) * %f;\n", optimizedOffset, optimizedWeight]; + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[0] - singleStepOffset * %f) * %f;\n", optimizedOffset, optimizedWeight]; + } + } + + // Footer + [shaderString appendString:@"\ + gl_FragColor = sum;\n\ + }\n"]; + + free(standardGaussianWeights); + return shaderString; +} + +- (void)setupFilterForSize:(CGSize)filterFrameSize; +{ + [super setupFilterForSize:filterFrameSize]; + + if (shouldResizeBlurRadiusWithImageSize) + { + if (self.blurRadiusAsFractionOfImageWidth > 0) + { + self.blurRadiusInPixels = filterFrameSize.width * self.blurRadiusAsFractionOfImageWidth; + } + else + { + self.blurRadiusInPixels = filterFrameSize.height * self.blurRadiusAsFractionOfImageHeight; + } + } +} + +#pragma mark - +#pragma mark Rendering + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + [super renderToTextureWithVertices:vertices textureCoordinates:textureCoordinates]; + + for (NSUInteger currentAdditionalBlurPass = 1; currentAdditionalBlurPass < _blurPasses; 
currentAdditionalBlurPass++) + { + [super renderToTextureWithVertices:vertices textureCoordinates:[[self class] textureCoordinatesForRotation:kGPUImageNoRotation]]; + } +} + +- (void)switchToVertexShader:(NSString *)newVertexShader fragmentShader:(NSString *)newFragmentShader; +{ + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + + filterProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:newVertexShader fragmentShaderString:newFragmentShader]; + + if (!filterProgram.initialized) + { + [self initializeAttributes]; + + if (![filterProgram link]) + { + NSString *progLog = [filterProgram programLog]; + NSLog(@"Program link log: %@", progLog); + NSString *fragLog = [filterProgram fragmentShaderLog]; + NSLog(@"Fragment shader compile log: %@", fragLog); + NSString *vertLog = [filterProgram vertexShaderLog]; + NSLog(@"Vertex shader compile log: %@", vertLog); + filterProgram = nil; + NSAssert(NO, @"Filter shader link failed"); + } + } + + filterPositionAttribute = [filterProgram attributeIndex:@"position"]; + filterTextureCoordinateAttribute = [filterProgram attributeIndex:@"inputTextureCoordinate"]; + filterInputTextureUniform = [filterProgram uniformIndex:@"inputImageTexture"]; // This does assume a name of "inputImageTexture" for the fragment shader + verticalPassTexelWidthOffsetUniform = [filterProgram uniformIndex:@"texelWidthOffset"]; + verticalPassTexelHeightOffsetUniform = [filterProgram uniformIndex:@"texelHeightOffset"]; + [GPUImageContext setActiveShaderProgram:filterProgram]; + + glEnableVertexAttribArray(filterPositionAttribute); + glEnableVertexAttribArray(filterTextureCoordinateAttribute); + + secondFilterProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:newVertexShader fragmentShaderString:newFragmentShader]; + + if (!secondFilterProgram.initialized) + { + [self initializeSecondaryAttributes]; + + if (![secondFilterProgram link]) + { + 
NSString *progLog = [secondFilterProgram programLog]; + NSLog(@"Program link log: %@", progLog); + NSString *fragLog = [secondFilterProgram fragmentShaderLog]; + NSLog(@"Fragment shader compile log: %@", fragLog); + NSString *vertLog = [secondFilterProgram vertexShaderLog]; + NSLog(@"Vertex shader compile log: %@", vertLog); + secondFilterProgram = nil; + NSAssert(NO, @"Filter shader link failed"); + } + } + + secondFilterPositionAttribute = [secondFilterProgram attributeIndex:@"position"]; + secondFilterTextureCoordinateAttribute = [secondFilterProgram attributeIndex:@"inputTextureCoordinate"]; + secondFilterInputTextureUniform = [secondFilterProgram uniformIndex:@"inputImageTexture"]; // This does assume a name of "inputImageTexture" for the fragment shader + secondFilterInputTextureUniform2 = [secondFilterProgram uniformIndex:@"inputImageTexture2"]; // This does assume a name of "inputImageTexture2" for second input texture in the fragment shader + horizontalPassTexelWidthOffsetUniform = [secondFilterProgram uniformIndex:@"texelWidthOffset"]; + horizontalPassTexelHeightOffsetUniform = [secondFilterProgram uniformIndex:@"texelHeightOffset"]; + [GPUImageContext setActiveShaderProgram:secondFilterProgram]; + + glEnableVertexAttribArray(secondFilterPositionAttribute); + glEnableVertexAttribArray(secondFilterTextureCoordinateAttribute); + + [self setupFilterForSize:[self sizeOfFBO]]; + glFinish(); + }); + +} + +#pragma mark - +#pragma mark Accessors + +- (void)setTexelSpacingMultiplier:(CGFloat)newValue; +{ + _texelSpacingMultiplier = newValue; + + _verticalTexelSpacing = _texelSpacingMultiplier; + _horizontalTexelSpacing = _texelSpacingMultiplier; + + [self setupFilterForSize:[self sizeOfFBO]]; +} + +// inputRadius for Core Image's CIGaussianBlur is really sigma in the Gaussian equation, so I'm using that for my blur radius, to be consistent +- (void)setBlurRadiusInPixels:(CGFloat)newValue; +{ + // 7.0 is the limit for blur size for hardcoded varying offsets + + if 
(round(newValue) != _blurRadiusInPixels) + { + _blurRadiusInPixels = round(newValue); // For now, only do integral sigmas + + NSUInteger calculatedSampleRadius = 0; + if (_blurRadiusInPixels >= 1) // Avoid a divide-by-zero error here + { + // Calculate the number of pixels to sample from by setting a bottom limit for the contribution of the outermost pixel + CGFloat minimumWeightToFindEdgeOfSamplingArea = 1.0/256.0; + calculatedSampleRadius = floor(sqrt(-2.0 * pow(_blurRadiusInPixels, 2.0) * log(minimumWeightToFindEdgeOfSamplingArea * sqrt(2.0 * M_PI * pow(_blurRadiusInPixels, 2.0))) )); + calculatedSampleRadius += calculatedSampleRadius % 2; // There's nothing to gain from handling odd radius sizes, due to the optimizations I use + } + +// NSLog(@"Blur radius: %f, calculated sample radius: %d", _blurRadiusInPixels, calculatedSampleRadius); +// + NSString *newGaussianBlurVertexShader = [[self class] vertexShaderForOptimizedBlurOfRadius:calculatedSampleRadius sigma:_blurRadiusInPixels]; + NSString *newGaussianBlurFragmentShader = [[self class] fragmentShaderForOptimizedBlurOfRadius:calculatedSampleRadius sigma:_blurRadiusInPixels]; + +// NSLog(@"Optimized vertex shader: \n%@", newGaussianBlurVertexShader); +// NSLog(@"Optimized fragment shader: \n%@", newGaussianBlurFragmentShader); +// + [self switchToVertexShader:newGaussianBlurVertexShader fragmentShader:newGaussianBlurFragmentShader]; + } + shouldResizeBlurRadiusWithImageSize = NO; +} + +- (void)setBlurRadiusAsFractionOfImageWidth:(CGFloat)blurRadiusAsFractionOfImageWidth +{ + if (blurRadiusAsFractionOfImageWidth < 0) return; + + shouldResizeBlurRadiusWithImageSize = _blurRadiusAsFractionOfImageWidth != blurRadiusAsFractionOfImageWidth && blurRadiusAsFractionOfImageWidth > 0; + _blurRadiusAsFractionOfImageWidth = blurRadiusAsFractionOfImageWidth; + _blurRadiusAsFractionOfImageHeight = 0; +} + +- (void)setBlurRadiusAsFractionOfImageHeight:(CGFloat)blurRadiusAsFractionOfImageHeight +{ + if 
(blurRadiusAsFractionOfImageHeight < 0) return; + + shouldResizeBlurRadiusWithImageSize = _blurRadiusAsFractionOfImageHeight != blurRadiusAsFractionOfImageHeight && blurRadiusAsFractionOfImageHeight > 0; + _blurRadiusAsFractionOfImageHeight = blurRadiusAsFractionOfImageHeight; + _blurRadiusAsFractionOfImageWidth = 0; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurPositionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurPositionFilter.h new file mode 100755 index 00000000..dc88a563 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurPositionFilter.h @@ -0,0 +1,22 @@ +#import "GPUImageTwoPassTextureSamplingFilter.h" + +/** A more generalized 9x9 Gaussian blur filter + */ +@interface GPUImageGaussianBlurPositionFilter : GPUImageTwoPassTextureSamplingFilter +{ + GLint blurCenterUniform, blurRadiusUniform, aspectRatioUniform; +} + +/** A multiplier for the blur size, ranging from 0.0 on up, with a default of 1.0 + */ +@property (readwrite, nonatomic) CGFloat blurSize; + +/** Center for the blur, defaults to 0.5, 0.5 + */ +@property (readwrite, nonatomic) CGPoint blurCenter; + +/** Radius for the blur, defaults to 1.0 + */ +@property (readwrite, nonatomic) CGFloat blurRadius; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurPositionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurPositionFilter.m new file mode 100755 index 00000000..8ecd9246 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurPositionFilter.m @@ -0,0 +1,232 @@ +#import "GPUImageGaussianBlurPositionFilter.h" + +NSString *const kGPUImageGaussianBlurPositionVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + + const int GAUSSIAN_SAMPLES = 9; + + uniform float texelWidthOffset; + uniform float texelHeightOffset; + varying vec2 textureCoordinate; + varying vec2 blurCoordinates[GAUSSIAN_SAMPLES]; + + void main() + { + gl_Position = position; + textureCoordinate = 
inputTextureCoordinate.xy; + + // Calculate the positions for the blur + int multiplier = 0; + vec2 blurStep; + vec2 singleStepOffset = vec2(texelWidthOffset, texelHeightOffset); + + for (int i = 0; i < GAUSSIAN_SAMPLES; i++) { + multiplier = (i - ((GAUSSIAN_SAMPLES - 1) / 2)); + // Blur in x (horizontal) + blurStep = float(multiplier) * singleStepOffset; + blurCoordinates[i] = inputTextureCoordinate.xy + blurStep; + } + } +); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageGaussianBlurPositionFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + const lowp int GAUSSIAN_SAMPLES = 9; + + varying highp vec2 textureCoordinate; + varying highp vec2 blurCoordinates[GAUSSIAN_SAMPLES]; + + uniform highp float aspectRatio; + uniform lowp vec2 blurCenter; + uniform highp float blurRadius; + + void main() { + highp vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + highp float dist = distance(blurCenter, textureCoordinateToUse); + + if (dist < blurRadius) + { + lowp vec4 sum = vec4(0.0); + + sum += texture2D(inputImageTexture, blurCoordinates[0]) * 0.05; + sum += texture2D(inputImageTexture, blurCoordinates[1]) * 0.09; + sum += texture2D(inputImageTexture, blurCoordinates[2]) * 0.12; + sum += texture2D(inputImageTexture, blurCoordinates[3]) * 0.15; + sum += texture2D(inputImageTexture, blurCoordinates[4]) * 0.18; + sum += texture2D(inputImageTexture, blurCoordinates[5]) * 0.15; + sum += texture2D(inputImageTexture, blurCoordinates[6]) * 0.12; + sum += texture2D(inputImageTexture, blurCoordinates[7]) * 0.09; + sum += texture2D(inputImageTexture, blurCoordinates[8]) * 0.05; + + gl_FragColor = sum; + } + else + { + gl_FragColor = texture2D(inputImageTexture, textureCoordinate); + } + } +); +#else +NSString *const kGPUImageGaussianBlurPositionFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + const int GAUSSIAN_SAMPLES = 
9; + + varying vec2 textureCoordinate; + varying vec2 blurCoordinates[GAUSSIAN_SAMPLES]; + + uniform float aspectRatio; + uniform vec2 blurCenter; + uniform float blurRadius; + + void main() + { + vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + float dist = distance(blurCenter, textureCoordinateToUse); + + if (dist < blurRadius) + { + vec4 sum = vec4(0.0); + + sum += texture2D(inputImageTexture, blurCoordinates[0]) * 0.05; + sum += texture2D(inputImageTexture, blurCoordinates[1]) * 0.09; + sum += texture2D(inputImageTexture, blurCoordinates[2]) * 0.12; + sum += texture2D(inputImageTexture, blurCoordinates[3]) * 0.15; + sum += texture2D(inputImageTexture, blurCoordinates[4]) * 0.18; + sum += texture2D(inputImageTexture, blurCoordinates[5]) * 0.15; + sum += texture2D(inputImageTexture, blurCoordinates[6]) * 0.12; + sum += texture2D(inputImageTexture, blurCoordinates[7]) * 0.09; + sum += texture2D(inputImageTexture, blurCoordinates[8]) * 0.05; + + gl_FragColor = sum; + } + else + { + gl_FragColor = texture2D(inputImageTexture, textureCoordinate); + } + } +); +#endif + +@interface GPUImageGaussianBlurPositionFilter () + +- (void)adjustAspectRatio; + +@property (readwrite, nonatomic) CGFloat aspectRatio; + +@end + +@implementation GPUImageGaussianBlurPositionFilter + +@synthesize blurSize = _blurSize; +@synthesize blurCenter = _blurCenter; +@synthesize aspectRatio = _aspectRatio; + +- (id) initWithFirstStageVertexShaderFromString:(NSString *)firstStageVertexShaderString + firstStageFragmentShaderFromString:(NSString *)firstStageFragmentShaderString + secondStageVertexShaderFromString:(NSString *)secondStageVertexShaderString + secondStageFragmentShaderFromString:(NSString *)secondStageFragmentShaderString { + + if (!(self = [super initWithFirstStageVertexShaderFromString:firstStageVertexShaderString ? 
firstStageVertexShaderString : kGPUImageGaussianBlurPositionVertexShaderString + firstStageFragmentShaderFromString:firstStageFragmentShaderString ? firstStageFragmentShaderString : kGPUImageGaussianBlurPositionFragmentShaderString + secondStageVertexShaderFromString:secondStageVertexShaderString ? secondStageVertexShaderString : kGPUImageGaussianBlurPositionVertexShaderString + secondStageFragmentShaderFromString:secondStageFragmentShaderString ? secondStageFragmentShaderString : kGPUImageGaussianBlurPositionFragmentShaderString])) { + return nil; + } + + aspectRatioUniform = [secondFilterProgram uniformIndex:@"aspectRatio"]; + blurCenterUniform = [secondFilterProgram uniformIndex:@"blurCenter"]; + blurRadiusUniform = [secondFilterProgram uniformIndex:@"blurRadius"]; + + self.blurSize = 1.0; + self.blurRadius = 1.0; + self.blurCenter = CGPointMake(0.5, 0.5); + + return self; +} + +- (id)init; +{ + return [self initWithFirstStageVertexShaderFromString:nil + firstStageFragmentShaderFromString:nil + secondStageVertexShaderFromString:nil + secondStageFragmentShaderFromString:nil]; +} + +- (void)adjustAspectRatio; +{ + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + [self setAspectRatio:(inputTextureSize.width / inputTextureSize.height)]; + } + else + { + [self setAspectRatio:(inputTextureSize.height / inputTextureSize.width)]; + } +} + +- (void)forceProcessingAtSize:(CGSize)frameSize; +{ + [super forceProcessingAtSize:frameSize]; + [self adjustAspectRatio]; +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + CGSize oldInputSize = inputTextureSize; + [super setInputSize:newSize atIndex:textureIndex]; + + if ( (!CGSizeEqualToSize(oldInputSize, inputTextureSize)) && (!CGSizeEqualToSize(newSize, CGSizeZero)) ) + { + [self adjustAspectRatio]; + } +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + [super setInputRotation:newInputRotation atIndex:textureIndex]; + [self 
setBlurCenter:self.blurCenter]; + [self adjustAspectRatio]; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setBlurSize:(CGFloat)newValue; +{ + _blurSize = newValue; + + _verticalTexelSpacing = _blurSize; + _horizontalTexelSpacing = _blurSize; + + [self setupFilterForSize:[self sizeOfFBO]]; +} + +- (void) setBlurCenter:(CGPoint)blurCenter; +{ + _blurCenter = blurCenter; + CGPoint rotatedPoint = [self rotatedPoint:blurCenter forRotation:inputRotation]; + [self setPoint:rotatedPoint forUniform:blurCenterUniform program:secondFilterProgram]; +} + +- (void) setBlurRadius:(CGFloat)blurRadius; +{ + _blurRadius = blurRadius; + + [self setFloat:_blurRadius forUniform:blurRadiusUniform program:secondFilterProgram]; +} + +- (void) setAspectRatio:(CGFloat)newValue; +{ + _aspectRatio = newValue; + + [self setFloat:_aspectRatio forUniform:aspectRatioUniform program:secondFilterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageGaussianSelectiveBlurFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageGaussianSelectiveBlurFilter.h new file mode 100755 index 00000000..02324566 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageGaussianSelectiveBlurFilter.h @@ -0,0 +1,30 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImageGaussianBlurFilter; + +/** A Gaussian blur that preserves focus within a circular region + */ +@interface GPUImageGaussianSelectiveBlurFilter : GPUImageFilterGroup +{ + GPUImageGaussianBlurFilter *blurFilter; + GPUImageFilter *selectiveFocusFilter; + BOOL hasOverriddenAspectRatio; +} + +/** The radius of the circular area being excluded from the blur + */ +@property (readwrite, nonatomic) CGFloat excludeCircleRadius; +/** The center of the circular area being excluded from the blur + */ +@property (readwrite, nonatomic) CGPoint excludeCirclePoint; +/** The size of the area between the blurred portion and the clear circle + */ +@property (readwrite, nonatomic) CGFloat excludeBlurSize; +/** A radius in pixels to use for the blur, with a default 
of 5.0. This adjusts the sigma variable in the Gaussian distribution function. + */ +@property (readwrite, nonatomic) CGFloat blurRadiusInPixels; +/** The aspect ratio of the image, used to adjust the circularity of the in-focus region. By default, this matches the image aspect ratio, but you can override this value. + */ +@property (readwrite, nonatomic) CGFloat aspectRatio; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageGaussianSelectiveBlurFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageGaussianSelectiveBlurFilter.m new file mode 100755 index 00000000..7ebc9e19 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageGaussianSelectiveBlurFilter.m @@ -0,0 +1,147 @@ +#import "GPUImageGaussianSelectiveBlurFilter.h" +#import "GPUImageGaussianBlurFilter.h" +#import "GPUImageTwoInputFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageGaussianSelectiveBlurFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform lowp float excludeCircleRadius; + uniform lowp vec2 excludeCirclePoint; + uniform lowp float excludeBlurSize; + uniform highp float aspectRatio; + + void main() + { + lowp vec4 sharpImageColor = texture2D(inputImageTexture, textureCoordinate); + lowp vec4 blurredImageColor = texture2D(inputImageTexture2, textureCoordinate2); + + highp vec2 textureCoordinateToUse = vec2(textureCoordinate2.x, (textureCoordinate2.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + highp float distanceFromCenter = distance(excludeCirclePoint, textureCoordinateToUse); + + gl_FragColor = mix(sharpImageColor, blurredImageColor, smoothstep(excludeCircleRadius - excludeBlurSize, excludeCircleRadius, distanceFromCenter)); + } +); +#else +NSString *const kGPUImageGaussianSelectiveBlurFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform 
sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform float excludeCircleRadius; + uniform vec2 excludeCirclePoint; + uniform float excludeBlurSize; + uniform float aspectRatio; + + void main() + { + vec4 sharpImageColor = texture2D(inputImageTexture, textureCoordinate); + vec4 blurredImageColor = texture2D(inputImageTexture2, textureCoordinate2); + + vec2 textureCoordinateToUse = vec2(textureCoordinate2.x, (textureCoordinate2.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + float distanceFromCenter = distance(excludeCirclePoint, textureCoordinateToUse); + + gl_FragColor = mix(sharpImageColor, blurredImageColor, smoothstep(excludeCircleRadius - excludeBlurSize, excludeCircleRadius, distanceFromCenter)); + } +); +#endif + +@implementation GPUImageGaussianSelectiveBlurFilter + +@synthesize excludeCirclePoint = _excludeCirclePoint, excludeCircleRadius = _excludeCircleRadius, excludeBlurSize = _excludeBlurSize; +@synthesize blurRadiusInPixels = _blurRadiusInPixels; +@synthesize aspectRatio = _aspectRatio; + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + hasOverriddenAspectRatio = NO; + + // First pass: apply a variable Gaussian blur + blurFilter = [[GPUImageGaussianBlurFilter alloc] init]; + [self addFilter:blurFilter]; + + // Second pass: combine the blurred image with the original sharp one + selectiveFocusFilter = [[GPUImageTwoInputFilter alloc] initWithFragmentShaderFromString:kGPUImageGaussianSelectiveBlurFragmentShaderString]; + [self addFilter:selectiveFocusFilter]; + + // Texture location 0 needs to be the sharp image for both the blur and the second stage processing + [blurFilter addTarget:selectiveFocusFilter atTextureLocation:1]; + + // To prevent double updating of this filter, disable updates from the sharp image side + self.initialFilters = [NSArray arrayWithObjects:blurFilter, selectiveFocusFilter, nil]; + self.terminalFilter = selectiveFocusFilter; + + self.blurRadiusInPixels = 5.0; + + 
self.excludeCircleRadius = 60.0/320.0; + self.excludeCirclePoint = CGPointMake(0.5f, 0.5f); + self.excludeBlurSize = 30.0/320.0; + + return self; +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + CGSize oldInputSize = inputTextureSize; + [super setInputSize:newSize atIndex:textureIndex]; + inputTextureSize = newSize; + + if ( (!CGSizeEqualToSize(oldInputSize, inputTextureSize)) && (!hasOverriddenAspectRatio) && (!CGSizeEqualToSize(newSize, CGSizeZero)) ) + { + _aspectRatio = (inputTextureSize.width / inputTextureSize.height); + [selectiveFocusFilter setFloat:_aspectRatio forUniformName:@"aspectRatio"]; + } +} + +#pragma mark - +#pragma mark Accessors + +- (void)setBlurRadiusInPixels:(CGFloat)newValue; +{ + blurFilter.blurRadiusInPixels = newValue; +} + +- (CGFloat)blurRadiusInPixels; +{ + return blurFilter.blurRadiusInPixels; +} + +- (void)setExcludeCirclePoint:(CGPoint)newValue; +{ + _excludeCirclePoint = newValue; + [selectiveFocusFilter setPoint:newValue forUniformName:@"excludeCirclePoint"]; +} + +- (void)setExcludeCircleRadius:(CGFloat)newValue; +{ + _excludeCircleRadius = newValue; + [selectiveFocusFilter setFloat:newValue forUniformName:@"excludeCircleRadius"]; +} + +- (void)setExcludeBlurSize:(CGFloat)newValue; +{ + _excludeBlurSize = newValue; + [selectiveFocusFilter setFloat:newValue forUniformName:@"excludeBlurSize"]; +} + +- (void)setAspectRatio:(CGFloat)newValue; +{ + hasOverriddenAspectRatio = YES; + _aspectRatio = newValue; + [selectiveFocusFilter setFloat:_aspectRatio forUniformName:@"aspectRatio"]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageGlassSphereFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageGlassSphereFilter.h new file mode 100644 index 00000000..809a4ee8 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageGlassSphereFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageSphereRefractionFilter.h" + +@interface GPUImageGlassSphereFilter : GPUImageSphereRefractionFilter + +@end diff --git 
a/LFLiveKit/Vendor/GPUImage/GPUImageGlassSphereFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageGlassSphereFilter.m new file mode 100644 index 00000000..18662916 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageGlassSphereFilter.m @@ -0,0 +1,106 @@ +#import "GPUImageGlassSphereFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageGlassSphereFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp vec2 center; + uniform highp float radius; + uniform highp float aspectRatio; + uniform highp float refractiveIndex; +// uniform vec3 lightPosition; + const highp vec3 lightPosition = vec3(-0.5, 0.5, 1.0); + const highp vec3 ambientLightPosition = vec3(0.0, 0.0, 1.0); + + void main() + { + highp vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + highp float distanceFromCenter = distance(center, textureCoordinateToUse); + lowp float checkForPresenceWithinSphere = step(distanceFromCenter, radius); + + distanceFromCenter = distanceFromCenter / radius; + + highp float normalizedDepth = radius * sqrt(1.0 - distanceFromCenter * distanceFromCenter); + highp vec3 sphereNormal = normalize(vec3(textureCoordinateToUse - center, normalizedDepth)); + + highp vec3 refractedVector = 2.0 * refract(vec3(0.0, 0.0, -1.0), sphereNormal, refractiveIndex); + refractedVector.xy = -refractedVector.xy; + + highp vec3 finalSphereColor = texture2D(inputImageTexture, (refractedVector.xy + 1.0) * 0.5).rgb; + + // Grazing angle lighting + highp float lightingIntensity = 2.5 * (1.0 - pow(clamp(dot(ambientLightPosition, sphereNormal), 0.0, 1.0), 0.25)); + finalSphereColor += lightingIntensity; + + // Specular lighting + lightingIntensity = clamp(dot(normalize(lightPosition), sphereNormal), 0.0, 1.0); + lightingIntensity = pow(lightingIntensity, 15.0); + finalSphereColor += vec3(0.8, 0.8, 0.8) * lightingIntensity; + + 
gl_FragColor = vec4(finalSphereColor, 1.0) * checkForPresenceWithinSphere; + } +); +#else +NSString *const kGPUImageGlassSphereFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform vec2 center; + uniform float radius; + uniform float aspectRatio; + uniform float refractiveIndex; + // uniform vec3 lightPosition; + const vec3 lightPosition = vec3(-0.5, 0.5, 1.0); + const vec3 ambientLightPosition = vec3(0.0, 0.0, 1.0); + + void main() + { + vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + float distanceFromCenter = distance(center, textureCoordinateToUse); + float checkForPresenceWithinSphere = step(distanceFromCenter, radius); + + distanceFromCenter = distanceFromCenter / radius; + + float normalizedDepth = radius * sqrt(1.0 - distanceFromCenter * distanceFromCenter); + vec3 sphereNormal = normalize(vec3(textureCoordinateToUse - center, normalizedDepth)); + + vec3 refractedVector = 2.0 * refract(vec3(0.0, 0.0, -1.0), sphereNormal, refractiveIndex); + refractedVector.xy = -refractedVector.xy; + + vec3 finalSphereColor = texture2D(inputImageTexture, (refractedVector.xy + 1.0) * 0.5).rgb; + + // Grazing angle lighting + float lightingIntensity = 2.5 * (1.0 - pow(clamp(dot(ambientLightPosition, sphereNormal), 0.0, 1.0), 0.25)); + finalSphereColor += lightingIntensity; + + // Specular lighting + lightingIntensity = clamp(dot(normalize(lightPosition), sphereNormal), 0.0, 1.0); + lightingIntensity = pow(lightingIntensity, 15.0); + finalSphereColor += vec3(0.8, 0.8, 0.8) * lightingIntensity; + + gl_FragColor = vec4(finalSphereColor, 1.0) * checkForPresenceWithinSphere; + } +); +#endif + +@implementation GPUImageGlassSphereFilter + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageGlassSphereFragmentShaderString])) + { + return nil; + } + + 
return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageGrayscaleFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageGrayscaleFilter.h new file mode 100755 index 00000000..2d97f8c3 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageGrayscaleFilter.h @@ -0,0 +1,9 @@ +#import "GPUImageFilter.h" + +extern NSString *const kGPUImageLuminanceFragmentShaderString; + +/** Converts an image to grayscale (a slightly faster implementation of the saturation filter, without the ability to vary the color contribution) + */ +@interface GPUImageGrayscaleFilter : GPUImageFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageGrayscaleFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageGrayscaleFilter.m new file mode 100755 index 00000000..0066ca87 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageGrayscaleFilter.m @@ -0,0 +1,141 @@ +#import "GPUImageGrayscaleFilter.h" + +@implementation GPUImageGrayscaleFilter + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageLuminanceFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + const highp vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float luminance = dot(textureColor.rgb, W); + + gl_FragColor = vec4(vec3(luminance), textureColor.a); + } +); +#else +NSString *const kGPUImageLuminanceFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + const vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float luminance = dot(textureColor.rgb, W); + + gl_FragColor = vec4(vec3(luminance), textureColor.a); + } +); +#endif + + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + if (!currentlyReceivingMonochromeInput) + 
{ + [super renderToTextureWithVertices:vertices textureCoordinates:textureCoordinates]; + } +} + +//- (void)setInputTexture:(GLuint)newInputTexture atIndex:(NSInteger)textureIndex; +//{ +// [super setInputTexture:newInputTexture atIndex:textureIndex]; +// if (currentlyReceivingMonochromeInput) +// { +// [self notifyTargetsAboutNewOutputTexture]; +// } +//} + +//- (GLuint)textureForOutput; +//{ +// if (currentlyReceivingMonochromeInput) +// { +// return filterSourceTexture; +// } +// else +// { +// return outputTexture; +// } +//} + +- (BOOL)wantsMonochromeInput; +{ +// return YES; + return NO; +} + +- (BOOL)providesMonochromeOutput; +{ +// return YES; + return NO; +} + +// TODO: Rewrite this based on the new GPUImageFilter implementation +//- (void)informTargetsAboutNewFrameAtTime:(CMTime)frameTime; +//{ +// if (self.frameProcessingCompletionBlock != NULL) +// { +// self.frameProcessingCompletionBlock(self, frameTime); +// } +// +// for (id currentTarget in targets) +// { +// if (currentTarget != self.targetToIgnoreForUpdates) +// { +// NSInteger indexOfObject = [targets indexOfObject:currentTarget]; +// NSInteger textureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; +// +// if ([GPUImageContext supportsFastTextureUpload] && preparedToCaptureImage) +// { +// [self setInputTextureForTarget:currentTarget atIndex:textureIndex]; +// } +// +// if (currentlyReceivingMonochromeInput) +// { +// [currentTarget setInputRotation:inputRotation atIndex:textureIndex]; +// +// CGSize sizeToRotate = [self outputFrameSize]; +// CGSize rotatedSize = sizeToRotate; +// if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) +// { +// rotatedSize.width = sizeToRotate.height; +// rotatedSize.height = sizeToRotate.width; +// } +// [currentTarget setInputSize:rotatedSize atIndex:textureIndex]; +// } +// else +// { +// [currentTarget setInputSize:[self outputFrameSize] atIndex:textureIndex]; +// } +// [currentTarget newFrameReadyAtTime:frameTime 
atIndex:textureIndex]; +// } +// } +//} + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageLuminanceFragmentShaderString])) + { + return nil; + } + + return self; +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHSBFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageHSBFilter.h new file mode 100644 index 00000000..65a9e1de --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHSBFilter.h @@ -0,0 +1,27 @@ +#import "GPUImageColorMatrixFilter.h" + +@interface GPUImageHSBFilter : GPUImageColorMatrixFilter + +/** Reset the filter to have no transformations. + */ +- (void)reset; + +/** Add a hue rotation to the filter. + The hue rotation is in the range [-360, 360] with 0 being no-change. + Note that this adjustment is additive, so use the reset method if you need to. + */ +- (void)rotateHue:(float)h; + +/** Add a saturation adjustment to the filter. + The saturation adjustment is in the range [0.0, 2.0] with 1.0 being no-change. + Note that this adjustment is additive, so use the reset method if you need to. + */ +- (void)adjustSaturation:(float)s; + +/** Add a brightness adjustment to the filter. + The brightness adjustment is in the range [0.0, 2.0] with 1.0 being no-change. + Note that this adjustment is additive, so use the reset method if you need to. 
+ */ +- (void)adjustBrightness:(float)b; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHSBFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageHSBFilter.m new file mode 100644 index 00000000..eb668f7d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHSBFilter.m @@ -0,0 +1,414 @@ +#import "GPUImageHSBFilter.h" + +@implementation GPUImageHSBFilter { + float matrix[4][4]; +} + +- (id)init +{ + self = [super init]; + if (self) { + [self reset]; + } + return self; +} + +- (void)reset { + identmat(matrix); + [self _updateColorMatrix]; +} + +- (void)rotateHue:(float)h { + huerotatemat(matrix, h); + [self _updateColorMatrix]; +} + +- (void)adjustSaturation:(float)s { + saturatemat(matrix, s); + [self _updateColorMatrix]; +} + +- (void)adjustBrightness:(float)b { + cscalemat(matrix, b, b, b); + [self _updateColorMatrix]; +} + +- (void)_updateColorMatrix { + GPUMatrix4x4 gpuMatrix; + gpuMatrix.one.one = matrix[0][0]; + gpuMatrix.one.two = matrix[1][0]; + gpuMatrix.one.three = matrix[2][0]; + gpuMatrix.one.four = matrix[3][0]; + gpuMatrix.two.one = matrix[0][1]; + gpuMatrix.two.two = matrix[1][1]; + gpuMatrix.two.three = matrix[2][1]; + gpuMatrix.two.four = matrix[3][1]; + gpuMatrix.three.one = matrix[0][2]; + gpuMatrix.three.two = matrix[1][2]; + gpuMatrix.three.three = matrix[2][2]; + gpuMatrix.three.four = matrix[3][2]; + gpuMatrix.four.one = matrix[0][3]; + gpuMatrix.four.two = matrix[1][3]; + gpuMatrix.four.three = matrix[2][3]; + gpuMatrix.four.four = matrix[3][3]; + self.colorMatrix = gpuMatrix; +} + +#pragma mark - Matrix algorithms + +/* Matrix algorithms adapted from http://www.graficaobscura.com/matrix/index.html + + Note about luminance vector values below from that page: + Where rwgt is 0.3086, gwgt is 0.6094, and bwgt is 0.0820. This is the luminance vector. Notice here that we do not use the standard NTSC weights of 0.299, 0.587, and 0.114. The NTSC weights are only applicable to RGB colors in a gamma 2.2 color space. 
For linear RGB colors the values above are better. + */ +//#define RLUM (0.3086f) +//#define GLUM (0.6094f) +//#define BLUM (0.0820f) + +/* This is the vector value from the PDF specification, and may be closer to what Photoshop uses */ +#define RLUM (0.3f) +#define GLUM (0.59f) +#define BLUM (0.11f) + +/* + * matrixmult - + * multiply two matricies + */ +static void matrixmult(a,b,c) +float a[4][4], b[4][4], c[4][4]; +{ + int x, y; + float temp[4][4]; + + for(y=0; y<4 ; y++) + for(x=0 ; x<4 ; x++) { + temp[y][x] = b[y][0] * a[0][x] + + b[y][1] * a[1][x] + + b[y][2] * a[2][x] + + b[y][3] * a[3][x]; + } + for(y=0; y<4; y++) + for(x=0; x<4; x++) + c[y][x] = temp[y][x]; +} + +/* + * identmat - + * make an identity matrix + */ +static void identmat(matrix) +float matrix[4][4]; +{ + memset(matrix, 0, sizeof(float[4][4])); + matrix[0][0] = 1.0f; + matrix[1][1] = 1.0f; + matrix[2][2] = 1.0f; + matrix[3][3] = 1.0f; +} + +/* + * xformpnt - + * transform a 3D point using a matrix + */ +static void xformpnt(matrix,x,y,z,tx,ty,tz) +float matrix[4][4]; +float x,y,z; +float *tx,*ty,*tz; +{ + *tx = x*matrix[0][0] + y*matrix[1][0] + z*matrix[2][0] + matrix[3][0]; + *ty = x*matrix[0][1] + y*matrix[1][1] + z*matrix[2][1] + matrix[3][1]; + *tz = x*matrix[0][2] + y*matrix[1][2] + z*matrix[2][2] + matrix[3][2]; +} + +/* + * cscalemat - + * make a color scale marix + */ +static void cscalemat(mat,rscale,gscale,bscale) +float mat[4][4]; +float rscale, gscale, bscale; +{ + float mmat[4][4]; + + mmat[0][0] = rscale; + mmat[0][1] = 0.0; + mmat[0][2] = 0.0; + mmat[0][3] = 0.0; + + mmat[1][0] = 0.0; + mmat[1][1] = gscale; + mmat[1][2] = 0.0; + mmat[1][3] = 0.0; + + + mmat[2][0] = 0.0; + mmat[2][1] = 0.0; + mmat[2][2] = bscale; + mmat[2][3] = 0.0; + + mmat[3][0] = 0.0; + mmat[3][1] = 0.0; + mmat[3][2] = 0.0; + mmat[3][3] = 1.0; + matrixmult(mmat,mat,mat); +} + +/* + * saturatemat - + * make a saturation marix + */ +static void saturatemat(mat,sat) +float mat[4][4]; +float sat; +{ + float 
mmat[4][4]; + float a, b, c, d, e, f, g, h, i; + float rwgt, gwgt, bwgt; + + rwgt = RLUM; + gwgt = GLUM; + bwgt = BLUM; + + a = (1.0-sat)*rwgt + sat; + b = (1.0-sat)*rwgt; + c = (1.0-sat)*rwgt; + d = (1.0-sat)*gwgt; + e = (1.0-sat)*gwgt + sat; + f = (1.0-sat)*gwgt; + g = (1.0-sat)*bwgt; + h = (1.0-sat)*bwgt; + i = (1.0-sat)*bwgt + sat; + mmat[0][0] = a; + mmat[0][1] = b; + mmat[0][2] = c; + mmat[0][3] = 0.0; + + mmat[1][0] = d; + mmat[1][1] = e; + mmat[1][2] = f; + mmat[1][3] = 0.0; + + mmat[2][0] = g; + mmat[2][1] = h; + mmat[2][2] = i; + mmat[2][3] = 0.0; + + mmat[3][0] = 0.0; + mmat[3][1] = 0.0; + mmat[3][2] = 0.0; + mmat[3][3] = 1.0; + matrixmult(mmat,mat,mat); +} + +/* + * xrotate - + * rotate about the x (red) axis + */ +static void xrotatemat(mat,rs,rc) +float mat[4][4]; +float rs, rc; +{ + float mmat[4][4]; + + mmat[0][0] = 1.0; + mmat[0][1] = 0.0; + mmat[0][2] = 0.0; + mmat[0][3] = 0.0; + + mmat[1][0] = 0.0; + mmat[1][1] = rc; + mmat[1][2] = rs; + mmat[1][3] = 0.0; + + mmat[2][0] = 0.0; + mmat[2][1] = -rs; + mmat[2][2] = rc; + mmat[2][3] = 0.0; + + mmat[3][0] = 0.0; + mmat[3][1] = 0.0; + mmat[3][2] = 0.0; + mmat[3][3] = 1.0; + matrixmult(mmat,mat,mat); +} + +/* + * yrotate - + * rotate about the y (green) axis + */ +static void yrotatemat(mat,rs,rc) +float mat[4][4]; +float rs, rc; +{ + float mmat[4][4]; + + mmat[0][0] = rc; + mmat[0][1] = 0.0; + mmat[0][2] = -rs; + mmat[0][3] = 0.0; + + mmat[1][0] = 0.0; + mmat[1][1] = 1.0; + mmat[1][2] = 0.0; + mmat[1][3] = 0.0; + + mmat[2][0] = rs; + mmat[2][1] = 0.0; + mmat[2][2] = rc; + mmat[2][3] = 0.0; + + mmat[3][0] = 0.0; + mmat[3][1] = 0.0; + mmat[3][2] = 0.0; + mmat[3][3] = 1.0; + matrixmult(mmat,mat,mat); +} + +/* + * zrotate - + * rotate about the z (blue) axis + */ +static void zrotatemat(mat,rs,rc) +float mat[4][4]; +float rs, rc; +{ + float mmat[4][4]; + + mmat[0][0] = rc; + mmat[0][1] = rs; + mmat[0][2] = 0.0; + mmat[0][3] = 0.0; + + mmat[1][0] = -rs; + mmat[1][1] = rc; + mmat[1][2] = 0.0; + mmat[1][3] = 
0.0; + + mmat[2][0] = 0.0; + mmat[2][1] = 0.0; + mmat[2][2] = 1.0; + mmat[2][3] = 0.0; + + mmat[3][0] = 0.0; + mmat[3][1] = 0.0; + mmat[3][2] = 0.0; + mmat[3][3] = 1.0; + matrixmult(mmat,mat,mat); +} + +/* + * zshear - + * shear z using x and y. + */ +static void zshearmat(mat,dx,dy) +float mat[4][4]; +float dx, dy; +{ + float mmat[4][4]; + + mmat[0][0] = 1.0; + mmat[0][1] = 0.0; + mmat[0][2] = dx; + mmat[0][3] = 0.0; + + mmat[1][0] = 0.0; + mmat[1][1] = 1.0; + mmat[1][2] = dy; + mmat[1][3] = 0.0; + + mmat[2][0] = 0.0; + mmat[2][1] = 0.0; + mmat[2][2] = 1.0; + mmat[2][3] = 0.0; + + mmat[3][0] = 0.0; + mmat[3][1] = 0.0; + mmat[3][2] = 0.0; + mmat[3][3] = 1.0; + matrixmult(mmat,mat,mat); +} + +/* + * simplehuerotatemat - + * simple hue rotation. This changes luminance + */ +//static void simplehuerotatemat(mat,rot) +//float mat[4][4]; +//float rot; +//{ +// float mag; +// float xrs, xrc; +// float yrs, yrc; +// float zrs, zrc; +// +// /* rotate the grey vector into positive Z */ +// mag = sqrt(2.0); +// xrs = 1.0/mag; +// xrc = 1.0/mag; +// xrotatemat(mat,xrs,xrc); +// +// mag = sqrt(3.0); +// yrs = -1.0/mag; +// yrc = sqrt(2.0)/mag; +// yrotatemat(mat,yrs,yrc); +// +// /* rotate the hue */ +// zrs = sin(rot*M_PI/180.0); +// zrc = cos(rot*M_PI/180.0); +// zrotatemat(mat,zrs,zrc); +// +// /* rotate the grey vector back into place */ +// yrotatemat(mat,-yrs,yrc); +// xrotatemat(mat,-xrs,xrc); +//} + +/* + * huerotatemat - + * rotate the hue, while maintaining luminance. 
+ */ +static void huerotatemat(mat,rot) +float mat[4][4]; +float rot; +{ + float mmat[4][4]; + float mag; + float lx, ly, lz; + float xrs, xrc; + float yrs, yrc; + float zrs, zrc; + float zsx, zsy; + + identmat(mmat); + + /* rotate the grey vector into positive Z */ + mag = sqrt(2.0); + xrs = 1.0/mag; + xrc = 1.0/mag; + xrotatemat(mmat,xrs,xrc); + mag = sqrt(3.0); + yrs = -1.0/mag; + yrc = sqrt(2.0)/mag; + yrotatemat(mmat,yrs,yrc); + + /* shear the space to make the luminance plane horizontal */ + xformpnt(mmat,RLUM,GLUM,BLUM,&lx,&ly,&lz); + zsx = lx/lz; + zsy = ly/lz; + zshearmat(mmat,zsx,zsy); + + /* rotate the hue */ + zrs = sin(rot*M_PI/180.0); + zrc = cos(rot*M_PI/180.0); + zrotatemat(mmat,zrs,zrc); + + /* unshear the space to put the luminance plane back */ + zshearmat(mmat,-zsx,-zsy); + + /* rotate the grey vector back into place */ + yrotatemat(mmat,-yrs,yrc); + xrotatemat(mmat,-xrs,xrc); + + matrixmult(mmat,mat,mat); +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHalftoneFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageHalftoneFilter.h new file mode 100644 index 00000000..1860bc97 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHalftoneFilter.h @@ -0,0 +1,5 @@ +#import "GPUImagePixellateFilter.h" + +@interface GPUImageHalftoneFilter : GPUImagePixellateFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHalftoneFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageHalftoneFilter.m new file mode 100644 index 00000000..1b621c6a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHalftoneFilter.m @@ -0,0 +1,79 @@ +#import "GPUImageHalftoneFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageHalftoneFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp float fractionalWidthOfPixel; + uniform highp float aspectRatio; + uniform highp float dotScaling; + + const highp vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + 
highp vec2 sampleDivisor = vec2(fractionalWidthOfPixel, fractionalWidthOfPixel / aspectRatio); + + highp vec2 samplePos = textureCoordinate - mod(textureCoordinate, sampleDivisor) + 0.5 * sampleDivisor; + highp vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + highp vec2 adjustedSamplePos = vec2(samplePos.x, (samplePos.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + highp float distanceFromSamplePoint = distance(adjustedSamplePos, textureCoordinateToUse); + + lowp vec3 sampledColor = texture2D(inputImageTexture, samplePos ).rgb; + highp float dotScaling = 1.0 - dot(sampledColor, W); + + lowp float checkForPresenceWithinDot = 1.0 - step(distanceFromSamplePoint, (fractionalWidthOfPixel * 0.5) * dotScaling); + + gl_FragColor = vec4(vec3(checkForPresenceWithinDot), 1.0); + } +); +#else +NSString *const kGPUImageHalftoneFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform float fractionalWidthOfPixel; + uniform float aspectRatio; + uniform float dotScaling; + + const vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + vec2 sampleDivisor = vec2(fractionalWidthOfPixel, fractionalWidthOfPixel / aspectRatio); + + vec2 samplePos = textureCoordinate - mod(textureCoordinate, sampleDivisor) + 0.5 * sampleDivisor; + vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + vec2 adjustedSamplePos = vec2(samplePos.x, (samplePos.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + float distanceFromSamplePoint = distance(adjustedSamplePos, textureCoordinateToUse); + + vec3 sampledColor = texture2D(inputImageTexture, samplePos ).rgb; + float dotScaling = 1.0 - dot(sampledColor, W); + + float checkForPresenceWithinDot = 1.0 - step(distanceFromSamplePoint, (fractionalWidthOfPixel * 0.5) * dotScaling); + + gl_FragColor = vec4(vec3(checkForPresenceWithinDot), 1.0); + } +); 
+#endif + +@implementation GPUImageHalftoneFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageHalftoneFragmentShaderString])) + { + return nil; + } + + self.fractionalWidthOfAPixel = 0.01; + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHardLightBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageHardLightBlendFilter.h new file mode 100755 index 00000000..47d62609 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHardLightBlendFilter.h @@ -0,0 +1,7 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageHardLightBlendFilter : GPUImageTwoInputFilter +{ +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHardLightBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageHardLightBlendFilter.m new file mode 100755 index 00000000..2896ce82 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHardLightBlendFilter.m @@ -0,0 +1,99 @@ +#import "GPUImageHardLightBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageHardLightBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + const highp vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + mediump vec4 base = texture2D(inputImageTexture, textureCoordinate); + mediump vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); + + highp float ra; + if (2.0 * overlay.r < overlay.a) { + ra = 2.0 * overlay.r * base.r + overlay.r * (1.0 - base.a) + base.r * (1.0 - overlay.a); + } else { + ra = overlay.a * base.a - 2.0 * (base.a - base.r) * (overlay.a - overlay.r) + overlay.r * (1.0 - base.a) + base.r * (1.0 - overlay.a); + } + + highp float ga; + if (2.0 * overlay.g < overlay.a) { + ga = 2.0 * overlay.g * base.g + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + } else { + ga = overlay.a * base.a - 2.0 * (base.a - base.g) * (overlay.a - 
overlay.g) + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + } + + highp float ba; + if (2.0 * overlay.b < overlay.a) { + ba = 2.0 * overlay.b * base.b + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + } else { + ba = overlay.a * base.a - 2.0 * (base.a - base.b) * (overlay.a - overlay.b) + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + } + + gl_FragColor = vec4(ra, ga, ba, 1.0); + } +); +#else +NSString *const kGPUImageHardLightBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + const vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + vec4 base = texture2D(inputImageTexture, textureCoordinate); + vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); + + float ra; + if (2.0 * overlay.r < overlay.a) { + ra = 2.0 * overlay.r * base.r + overlay.r * (1.0 - base.a) + base.r * (1.0 - overlay.a); + } else { + ra = overlay.a * base.a - 2.0 * (base.a - base.r) * (overlay.a - overlay.r) + overlay.r * (1.0 - base.a) + base.r * (1.0 - overlay.a); + } + + float ga; + if (2.0 * overlay.g < overlay.a) { + ga = 2.0 * overlay.g * base.g + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + } else { + ga = overlay.a * base.a - 2.0 * (base.a - base.g) * (overlay.a - overlay.g) + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + } + + float ba; + if (2.0 * overlay.b < overlay.a) { + ba = 2.0 * overlay.b * base.b + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + } else { + ba = overlay.a * base.a - 2.0 * (base.a - base.b) * (overlay.a - overlay.b) + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + } + + gl_FragColor = vec4(ra, ga, ba, 1.0); + } +); +#endif + + +@implementation GPUImageHardLightBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageHardLightBlendFragmentShaderString])) + { + return nil; + } + + 
return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHarrisCornerDetectionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageHarrisCornerDetectionFilter.h new file mode 100755 index 00000000..1492b8b8 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHarrisCornerDetectionFilter.h @@ -0,0 +1,53 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImageGaussianBlurFilter; +@class GPUImageXYDerivativeFilter; +@class GPUImageGrayscaleFilter; +@class GPUImageGaussianBlurFilter; +@class GPUImageThresholdedNonMaximumSuppressionFilter; +@class GPUImageColorPackingFilter; + +//#define DEBUGFEATUREDETECTION + +/** Harris corner detector + + First pass: reduce to luminance and take the derivative of the luminance texture (GPUImageXYDerivativeFilter) + + Second pass: blur the derivative (GPUImageGaussianBlurFilter) + + Third pass: apply the Harris corner detection calculation + + This is the Harris corner detector, as described in + C. Harris and M. Stephens. A Combined Corner and Edge Detector. Proc. Alvey Vision Conf., Univ. Manchester, pp. 147-151, 1988. + */ +@interface GPUImageHarrisCornerDetectionFilter : GPUImageFilterGroup +{ + GPUImageXYDerivativeFilter *derivativeFilter; + GPUImageGaussianBlurFilter *blurFilter; + GPUImageFilter *harrisCornerDetectionFilter; + GPUImageThresholdedNonMaximumSuppressionFilter *nonMaximumSuppressionFilter; + GPUImageColorPackingFilter *colorPackingFilter; + GLfloat *cornersArray; + GLubyte *rawImagePixels; +} + +/** The radius of the underlying Gaussian blur. The default is 2.0. + */ +@property(readwrite, nonatomic) CGFloat blurRadiusInPixels; + +// This changes the dynamic range of the Harris corner detector by amplifying small cornerness values. Default is 5.0. +@property(readwrite, nonatomic) CGFloat sensitivity; + +// A threshold value at which a point is recognized as being a corner after the non-maximum suppression. Default is 0.20. 
+@property(readwrite, nonatomic) CGFloat threshold; + +// This block is called on the detection of new corner points, usually on every processed frame. A C array containing normalized coordinates in X, Y pairs is passed in, along with a count of the number of corners detected and the current timestamp of the video frame +@property(nonatomic, copy) void(^cornersDetectedBlock)(GLfloat* cornerArray, NSUInteger cornersDetected, CMTime frameTime); + +// These images are only enabled when built with DEBUGFEATUREDETECTION defined, and are used to examine the intermediate states of the feature detector +@property(nonatomic, readonly, strong) NSMutableArray *intermediateImages; + +// Initialization and teardown +- (id)initWithCornerDetectionFragmentShader:(NSString *)cornerDetectionFragmentShader; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHarrisCornerDetectionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageHarrisCornerDetectionFilter.m new file mode 100755 index 00000000..999748dd --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHarrisCornerDetectionFilter.m @@ -0,0 +1,292 @@ +#import "GPUImageHarrisCornerDetectionFilter.h" +#import "GPUImageGaussianBlurFilter.h" +#import "GPUImageXYDerivativeFilter.h" +#import "GPUImageGrayscaleFilter.h" +#import "GPUImageThresholdedNonMaximumSuppressionFilter.h" +#import "GPUImageColorPackingFilter.h" +#import "GPUImageGaussianBlurFilter.h" + +@interface GPUImageHarrisCornerDetectionFilter() + +- (void)extractCornerLocationsFromImageAtFrameTime:(CMTime)frameTime; + +@end + +// This is the Harris corner detector, as described in +// C. Harris and M. Stephens. A Combined Corner and Edge Detector. Proc. Alvey Vision Conf., Univ. Manchester, pp. 147-151, 1988. 
+ +@implementation GPUImageHarrisCornerDetectionFilter + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageHarrisCornerDetectionFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform lowp float sensitivity; + + const mediump float harrisConstant = 0.04; + + void main() + { + mediump vec3 derivativeElements = texture2D(inputImageTexture, textureCoordinate).rgb; + + mediump float derivativeSum = derivativeElements.x + derivativeElements.y; + + mediump float zElement = (derivativeElements.z * 2.0) - 1.0; + + // R = Ix^2 * Iy^2 - Ixy * Ixy - k * (Ix^2 + Iy^2)^2 + mediump float cornerness = derivativeElements.x * derivativeElements.y - (zElement * zElement) - harrisConstant * derivativeSum * derivativeSum; + + gl_FragColor = vec4(vec3(cornerness * sensitivity), 1.0); + } +); +#else +NSString *const kGPUImageHarrisCornerDetectionFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float sensitivity; + + const float harrisConstant = 0.04; + + void main() + { + vec3 derivativeElements = texture2D(inputImageTexture, textureCoordinate).rgb; + + float derivativeSum = derivativeElements.x + derivativeElements.y; + + float zElement = (derivativeElements.z * 2.0) - 1.0; + + // R = Ix^2 * Iy^2 - Ixy * Ixy - k * (Ix^2 + Iy^2)^2 + float cornerness = derivativeElements.x * derivativeElements.y - (zElement * zElement) - harrisConstant * derivativeSum * derivativeSum; + + gl_FragColor = vec4(vec3(cornerness * sensitivity), 1.0); + } +); +#endif + +@synthesize blurRadiusInPixels; +@synthesize cornersDetectedBlock; +@synthesize sensitivity = _sensitivity; +@synthesize threshold = _threshold; +@synthesize intermediateImages = _intermediateImages; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self 
initWithCornerDetectionFragmentShader:kGPUImageHarrisCornerDetectionFragmentShaderString])) + { + return nil; + } + + return self; +} + +- (id)initWithCornerDetectionFragmentShader:(NSString *)cornerDetectionFragmentShader; +{ + if (!(self = [super init])) + { + return nil; + } + +#ifdef DEBUGFEATUREDETECTION + _intermediateImages = [[NSMutableArray alloc] init]; +#endif + + // First pass: reduce to luminance and take the derivative of the luminance texture + derivativeFilter = [[GPUImageXYDerivativeFilter alloc] init]; + [self addFilter:derivativeFilter]; + +#ifdef DEBUGFEATUREDETECTION + __unsafe_unretained NSMutableArray *weakIntermediateImages = _intermediateImages; + __unsafe_unretained GPUImageFilter *weakFilter = derivativeFilter; + [derivativeFilter setFrameProcessingCompletionBlock:^(GPUImageOutput *filter, CMTime frameTime){ + UIImage *intermediateImage = [weakFilter imageFromCurrentlyProcessedOutput]; + [weakIntermediateImages addObject:intermediateImage]; + }]; +#endif + + // Second pass: blur the derivative + blurFilter = [[GPUImageGaussianBlurFilter alloc] init]; + [self addFilter:blurFilter]; + +#ifdef DEBUGFEATUREDETECTION + weakFilter = blurFilter; + [blurFilter setFrameProcessingCompletionBlock:^(GPUImageOutput *filter, CMTime frameTime){ + UIImage *intermediateImage = [weakFilter imageFromCurrentlyProcessedOutput]; + [weakIntermediateImages addObject:intermediateImage]; + }]; +#endif + + // Third pass: apply the Harris corner detection calculation + harrisCornerDetectionFilter = [[GPUImageFilter alloc] initWithFragmentShaderFromString:cornerDetectionFragmentShader]; + [self addFilter:harrisCornerDetectionFilter]; + +#ifdef DEBUGFEATUREDETECTION + weakFilter = harrisCornerDetectionFilter; + [harrisCornerDetectionFilter setFrameProcessingCompletionBlock:^(GPUImageOutput *filter, CMTime frameTime){ + UIImage *intermediateImage = [weakFilter imageFromCurrentlyProcessedOutput]; + [weakIntermediateImages addObject:intermediateImage]; + }]; +#endif + + 
// Fourth pass: apply non-maximum suppression and thresholding to find the local maxima + nonMaximumSuppressionFilter = [[GPUImageThresholdedNonMaximumSuppressionFilter alloc] init]; + [self addFilter:nonMaximumSuppressionFilter]; + + __unsafe_unretained GPUImageHarrisCornerDetectionFilter *weakSelf = self; +#ifdef DEBUGFEATUREDETECTION + weakFilter = nonMaximumSuppressionFilter; + [nonMaximumSuppressionFilter setFrameProcessingCompletionBlock:^(GPUImageOutput *filter, CMTime frameTime){ + UIImage *intermediateImage = [weakFilter imageFromCurrentlyProcessedOutput]; + [weakIntermediateImages addObject:intermediateImage]; + + [weakSelf extractCornerLocationsFromImageAtFrameTime:frameTime]; + }]; +#else + [nonMaximumSuppressionFilter setFrameProcessingCompletionBlock:^(GPUImageOutput *filter, CMTime frameTime) { + [weakSelf extractCornerLocationsFromImageAtFrameTime:frameTime]; + }]; +#endif + +// Sixth pass: compress the thresholded points into the RGBA channels +// colorPackingFilter = [[GPUImageColorPackingFilter alloc] init]; +// [self addFilter:colorPackingFilter]; +// +// +//#ifdef DEBUGFEATUREDETECTION +// __unsafe_unretained GPUImageHarrisCornerDetectionFilter *weakSelf = self; +// weakFilter = colorPackingFilter; +// [colorPackingFilter setFrameProcessingCompletionBlock:^(GPUImageOutput *filter, CMTime frameTime){ +// NSLog(@"Triggered response from compaction filter"); +// +// UIImage *intermediateImage = [weakFilter imageFromCurrentlyProcessedOutput]; +// [weakIntermediateImages addObject:intermediateImage]; +// +// [weakSelf extractCornerLocationsFromImageAtFrameTime:frameTime]; +// }]; +//#else +// __unsafe_unretained GPUImageHarrisCornerDetectionFilter *weakSelf = self; +// [colorPackingFilter setFrameProcessingCompletionBlock:^(GPUImageOutput *filter, CMTime frameTime) { +// [weakSelf extractCornerLocationsFromImageAtFrameTime:frameTime]; +// }]; +//#endif + + [derivativeFilter addTarget:blurFilter]; + [blurFilter addTarget:harrisCornerDetectionFilter]; 
+ [harrisCornerDetectionFilter addTarget:nonMaximumSuppressionFilter]; +// [simpleThresholdFilter addTarget:colorPackingFilter]; + + self.initialFilters = [NSArray arrayWithObjects:derivativeFilter, nil]; +// self.terminalFilter = colorPackingFilter; + self.terminalFilter = nonMaximumSuppressionFilter; + + self.blurRadiusInPixels = 2.0; + self.sensitivity = 5.0; + self.threshold = 0.20; + + return self; +} + +- (void)dealloc; +{ + free(rawImagePixels); + free(cornersArray); +} + +#pragma mark - +#pragma mark Corner extraction + +- (void)extractCornerLocationsFromImageAtFrameTime:(CMTime)frameTime; +{ + // we need a normal color texture for this filter + NSAssert(self.outputTextureOptions.internalFormat == GL_RGBA, @"The output texture format for this filter must be GL_RGBA."); + NSAssert(self.outputTextureOptions.type == GL_UNSIGNED_BYTE, @"The type of the output texture of this filter must be GL_UNSIGNED_BYTE."); + + NSUInteger numberOfCorners = 0; + CGSize imageSize = nonMaximumSuppressionFilter.outputFrameSize; + + unsigned int imageByteSize = imageSize.width * imageSize.height * 4; + + if (rawImagePixels == NULL) + { + rawImagePixels = (GLubyte *)malloc(imageByteSize); + cornersArray = calloc(512 * 2, sizeof(GLfloat)); + } + + glReadPixels(0, 0, (int)imageSize.width, (int)imageSize.height, GL_RGBA, GL_UNSIGNED_BYTE, rawImagePixels); + + CFAbsoluteTime startTime = CFAbsoluteTimeGetCurrent(); + + unsigned int imageWidth = imageSize.width * 4; + + unsigned int currentByte = 0; + unsigned int cornerStorageIndex = 0; + while (currentByte < imageByteSize) + { + GLubyte colorByte = rawImagePixels[currentByte]; + + if (colorByte > 0) + { + unsigned int xCoordinate = currentByte % imageWidth; + unsigned int yCoordinate = currentByte / imageWidth; + + cornersArray[cornerStorageIndex++] = (CGFloat)(xCoordinate / 4) / imageSize.width; + cornersArray[cornerStorageIndex++] = (CGFloat)(yCoordinate) / imageSize.height; + numberOfCorners++; + + numberOfCorners = 
MIN(numberOfCorners, 511); + cornerStorageIndex = MIN(cornerStorageIndex, 1021); + } + currentByte +=4; + } + + CFAbsoluteTime currentFrameTime = (CFAbsoluteTimeGetCurrent() - startTime); + NSLog(@"Processing time : %f ms", 1000.0 * currentFrameTime); + + if (cornersDetectedBlock != NULL) + { + cornersDetectedBlock(cornersArray, numberOfCorners, frameTime); + } +} + +- (BOOL)wantsMonochromeInput; +{ +// return YES; + return NO; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setBlurRadiusInPixels:(CGFloat)newValue; +{ + blurFilter.blurRadiusInPixels = newValue; +} + +- (CGFloat)blurRadiusInPixels; +{ + return blurFilter.blurRadiusInPixels; +} + +- (void)setSensitivity:(CGFloat)newValue; +{ + _sensitivity = newValue; + [harrisCornerDetectionFilter setFloat:newValue forUniformName:@"sensitivity"]; +} + +- (void)setThreshold:(CGFloat)newValue; +{ + nonMaximumSuppressionFilter.threshold = newValue; +} + +- (CGFloat)threshold; +{ + return nonMaximumSuppressionFilter.threshold; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHazeFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageHazeFilter.h new file mode 100755 index 00000000..eb3fbca6 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHazeFilter.h @@ -0,0 +1,29 @@ +#import "GPUImageFilter.h" + +/* + * The haze filter can be used to add or remove haze (similar to a UV filter) + * + * @author Alaric Cole + * @creationDate 03/10/12 + * + */ + +/** The haze filter can be used to add or remove haze + + This is similar to a UV filter + */ +@interface GPUImageHazeFilter : GPUImageFilter +{ + GLint distanceUniform; + GLint slopeUniform; +} + +/** Strength of the color applied. Default 0. Values between -.3 and .3 are best + */ +@property(readwrite, nonatomic) CGFloat distance; + +/** Amount of color change. Default 0. 
Values between -.3 and .3 are best + */ +@property(readwrite, nonatomic) CGFloat slope; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHazeFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageHazeFilter.m new file mode 100755 index 00000000..f90fc22f --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHazeFilter.m @@ -0,0 +1,96 @@ +#import "GPUImageHazeFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageHazeFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform lowp float hazeDistance; + uniform highp float slope; + + void main() + { + //todo reconsider precision modifiers + highp vec4 color = vec4(1.0);//todo reimplement as a parameter + + highp float d = textureCoordinate.y * slope + hazeDistance; + + highp vec4 c = texture2D(inputImageTexture, textureCoordinate) ; // consider using unpremultiply + + c = (c - d * color) / (1.0 -d); + + gl_FragColor = c; //consider using premultiply(c); + } +); +#else +NSString *const kGPUImageHazeFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform float hazeDistance; + uniform float slope; + + void main() + { + //todo reconsider precision modifiers + vec4 color = vec4(1.0);//todo reimplement as a parameter + + float d = textureCoordinate.y * slope + hazeDistance; + + vec4 c = texture2D(inputImageTexture, textureCoordinate) ; // consider using unpremultiply + + c = (c - d * color) / (1.0 -d); + + gl_FragColor = c; //consider using premultiply(c); + } +); +#endif + + + + +@implementation GPUImageHazeFilter + +@synthesize distance = _distance; +@synthesize slope = _slope; +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageHazeFragmentShaderString])) + { + return nil; + } + + distanceUniform = [filterProgram uniformIndex:@"hazeDistance"]; + slopeUniform = 
[filterProgram uniformIndex:@"slope"]; + + self.distance = 0.2; + self.slope = 0.0; + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setDistance:(CGFloat)newValue; +{ + _distance = newValue; + + [self setFloat:_distance forUniform:distanceUniform program:filterProgram]; +} + +- (void)setSlope:(CGFloat)newValue; +{ + _slope = newValue; + + [self setFloat:_slope forUniform:slopeUniform program:filterProgram]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHighPassFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageHighPassFilter.h new file mode 100644 index 00000000..263d8df1 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHighPassFilter.h @@ -0,0 +1,14 @@ +#import "GPUImageFilterGroup.h" +#import "GPUImageLowPassFilter.h" +#import "GPUImageDifferenceBlendFilter.h" + +@interface GPUImageHighPassFilter : GPUImageFilterGroup +{ + GPUImageLowPassFilter *lowPassFilter; + GPUImageDifferenceBlendFilter *differenceBlendFilter; +} + +// This controls the degree by which the previous accumulated frames are blended and then subtracted from the current one. This ranges from 0.0 to 1.0, with a default of 0.5. 
+@property(readwrite, nonatomic) CGFloat filterStrength; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHighPassFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageHighPassFilter.m new file mode 100644 index 00000000..511240d8 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHighPassFilter.m @@ -0,0 +1,46 @@ +#import "GPUImageHighPassFilter.h" + +@implementation GPUImageHighPassFilter + +@synthesize filterStrength; + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + // Start with a low pass filter to define the component to be removed + lowPassFilter = [[GPUImageLowPassFilter alloc] init]; + [self addFilter:lowPassFilter]; + + // Take the difference of the current frame from the low pass filtered result to get the high pass + differenceBlendFilter = [[GPUImageDifferenceBlendFilter alloc] init]; + [self addFilter:differenceBlendFilter]; + + // Texture location 0 needs to be the original image for the difference blend + [lowPassFilter addTarget:differenceBlendFilter atTextureLocation:1]; + + self.initialFilters = [NSArray arrayWithObjects:lowPassFilter, differenceBlendFilter, nil]; + self.terminalFilter = differenceBlendFilter; + + self.filterStrength = 0.5; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setFilterStrength:(CGFloat)newValue; +{ + lowPassFilter.filterStrength = newValue; +} + +- (CGFloat)filterStrength; +{ + return lowPassFilter.filterStrength; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowFilter.h new file mode 100644 index 00000000..35791298 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowFilter.h @@ -0,0 +1,20 @@ +#import "GPUImageFilter.h" + +@interface GPUImageHighlightShadowFilter : GPUImageFilter +{ + GLint shadowsUniform, highlightsUniform; +} + +/** + * 0 - 1, increase to lighten shadows. 
+ * @default 0 + */ +@property(readwrite, nonatomic) CGFloat shadows; + +/** + * 0 - 1, decrease to darken highlights. + * @default 1 + */ +@property(readwrite, nonatomic) CGFloat highlights; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowFilter.m new file mode 100644 index 00000000..aa92b740 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowFilter.m @@ -0,0 +1,93 @@ +#import "GPUImageHighlightShadowFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageHighlightShadowFragmentShaderString = SHADER_STRING +( +uniform sampler2D inputImageTexture; +varying highp vec2 textureCoordinate; + +uniform lowp float shadows; +uniform lowp float highlights; + +const mediump vec3 luminanceWeighting = vec3(0.3, 0.3, 0.3); + +void main() +{ + lowp vec4 source = texture2D(inputImageTexture, textureCoordinate); + mediump float luminance = dot(source.rgb, luminanceWeighting); + + mediump float shadow = clamp((pow(luminance, 1.0/(shadows+1.0)) + (-0.76)*pow(luminance, 2.0/(shadows+1.0))) - luminance, 0.0, 1.0); + mediump float highlight = clamp((1.0 - (pow(1.0-luminance, 1.0/(2.0-highlights)) + (-0.8)*pow(1.0-luminance, 2.0/(2.0-highlights)))) - luminance, -1.0, 0.0); + lowp vec3 result = vec3(0.0, 0.0, 0.0) + ((luminance + shadow + highlight) - 0.0) * ((source.rgb - vec3(0.0, 0.0, 0.0))/(luminance - 0.0)); + + gl_FragColor = vec4(result.rgb, source.a); +} +); +#else +NSString *const kGPUImageHighlightShadowFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + varying vec2 textureCoordinate; + + uniform float shadows; + uniform float highlights; + + const vec3 luminanceWeighting = vec3(0.3, 0.3, 0.3); + + void main() + { + vec4 source = texture2D(inputImageTexture, textureCoordinate); + float luminance = dot(source.rgb, luminanceWeighting); + + float shadow = clamp((pow(luminance, 1.0/(shadows+1.0)) + (-0.76)*pow(luminance, 
2.0/(shadows+1.0))) - luminance, 0.0, 1.0); + float highlight = clamp((1.0 - (pow(1.0-luminance, 1.0/(2.0-highlights)) + (-0.8)*pow(1.0-luminance, 2.0/(2.0-highlights)))) - luminance, -1.0, 0.0); + vec3 result = vec3(0.0, 0.0, 0.0) + ((luminance + shadow + highlight) - 0.0) * ((source.rgb - vec3(0.0, 0.0, 0.0))/(luminance - 0.0)); + + gl_FragColor = vec4(result.rgb, source.a); + } +); +#endif + +@implementation GPUImageHighlightShadowFilter + +@synthesize shadows = _shadows; +@synthesize highlights = _highlights; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageHighlightShadowFragmentShaderString])) + { + return nil; + } + + shadowsUniform = [filterProgram uniformIndex:@"shadows"]; + highlightsUniform = [filterProgram uniformIndex:@"highlights"]; + + self.shadows = 0.0; + self.highlights = 1.0; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setShadows:(CGFloat)newValue; +{ + _shadows = newValue; + + [self setFloat:_shadows forUniform:shadowsUniform program:filterProgram]; +} + +- (void)setHighlights:(CGFloat)newValue; +{ + _highlights = newValue; + + [self setFloat:_highlights forUniform:highlightsUniform program:filterProgram]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowTintFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowTintFilter.h new file mode 100644 index 00000000..166c5c8c --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowTintFilter.h @@ -0,0 +1,25 @@ +// +// GPUImageHighlightShadowTintFilter.h +// +// +// Created by github.com/r3mus on 8/14/15. 
+// +// + +#import "GPUImageFilter.h" + +@interface GPUImageHighlightShadowTintFilter : GPUImageFilter +{ + GLint shadowTintIntensityUniform, highlightTintIntensityUniform, shadowTintColorUniform, highlightTintColorUniform; +} + +// The shadowTint and highlightTint colors specify what colors replace the dark and light areas of the image, respectively. The defaults for shadows are black, highlights white. +@property(readwrite, nonatomic) GLfloat shadowTintIntensity; +@property(readwrite, nonatomic) GPUVector4 shadowTintColor; +@property(readwrite, nonatomic) GLfloat highlightTintIntensity; +@property(readwrite, nonatomic) GPUVector4 highlightTintColor; + +- (void)setShadowTintColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent; +- (void)setHighlightTintColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowTintFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowTintFilter.m new file mode 100644 index 00000000..54bc0850 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowTintFilter.m @@ -0,0 +1,136 @@ +// +// GPUImageHighlightShadowTintFilter.m +// +// Created by github.com/r3mus on 8/14/15.
+// +// + +#import "GPUImageHighlightShadowTintFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUHighlightShadowTintFragmentShaderString = SHADER_STRING +( + precision lowp float; + + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform lowp float shadowTintIntensity; + uniform lowp float highlightTintIntensity; + uniform highp vec4 shadowTintColor; + uniform highp vec4 highlightTintColor; + + const mediump vec3 luminanceWeighting = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + highp float luminance = dot(textureColor.rgb, luminanceWeighting); + + highp vec4 shadowResult = mix(textureColor, max(textureColor, vec4( mix(shadowTintColor.rgb, textureColor.rgb, luminance), textureColor.a)), shadowTintIntensity); + highp vec4 highlightResult = mix(textureColor, min(shadowResult, vec4( mix(shadowResult.rgb, highlightTintColor.rgb, luminance), textureColor.a)), highlightTintIntensity); + + gl_FragColor = vec4( mix(shadowResult.rgb, highlightResult.rgb, luminance), textureColor.a); + } + ); +#else +NSString *const kGPUHighlightShadowTintFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float shadowTintIntensity; + uniform float highlightTintIntensity; + uniform vec3 shadowTintColor; + uniform vec3 highlightTintColor; + + const vec3 luminanceWeighting = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float luminance = dot(textureColor.rgb, luminanceWeighting); + + vec4 shadowResult = mix(textureColor, max(textureColor, vec4( mix(shadowTintColor.rgb, textureColor.rgb, luminance), textureColor.a)), shadowTintIntensity); + vec4 highlightResult = mix(textureColor, min(shadowResult, vec4( mix(shadowResult.rgb, highlightTintColor.rgb, luminance), textureColor.a)), 
highlightTintIntensity); + + gl_FragColor = vec4( mix(shadowResult.rgb, highlightResult.rgb, luminance), textureColor.a); + } + ); +#endif + + +@implementation GPUImageHighlightShadowTintFilter + +@synthesize shadowTintIntensity = _shadowTintIntensity; +@synthesize highlightTintIntensity = _highlightTintIntensity; +@synthesize shadowTintColor = _shadowTintColor; +@synthesize highlightTintColor = _highlightTintColor; + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUHighlightShadowTintFragmentShaderString])) + { + return nil; + } + + shadowTintIntensityUniform = [filterProgram uniformIndex:@"shadowTintIntensity"]; + highlightTintIntensityUniform = [filterProgram uniformIndex:@"highlightTintIntensity"]; + shadowTintColorUniform = [filterProgram uniformIndex:@"shadowTintColor"]; + highlightTintColorUniform = [filterProgram uniformIndex:@"highlightTintColor"]; + + self.shadowTintIntensity = 0.0f; + self.highlightTintIntensity = 0.0f; + self.shadowTintColor = (GPUVector4){1.0f, 0.0f, 0.0f, 1.0f}; + self.highlightTintColor = (GPUVector4){0.0f, 0.0f, 1.0f, 1.0f}; + + return self; +} + + +#pragma mark - +#pragma mark Accessors + +- (void)setShadowTintIntensity:(GLfloat)newValue +{ + _shadowTintIntensity = newValue; + + [self setFloat:_shadowTintIntensity forUniform:shadowTintIntensityUniform program:filterProgram]; +} + +- (void)setHighlightTintIntensity:(GLfloat)newValue +{ + _highlightTintIntensity = newValue; + + [self setFloat:_highlightTintIntensity forUniform:highlightTintIntensityUniform program:filterProgram]; +} + +- (void)setShadowTintColor:(GPUVector4)newValue; +{ + _shadowTintColor = newValue; + + [self setShadowTintColorRed:_shadowTintColor.one green:_shadowTintColor.two blue:_shadowTintColor.three alpha:_shadowTintColor.four]; +} + +- (void)setHighlightTintColor:(GPUVector4)newValue; +{ + _highlightTintColor = newValue; + + [self setHighlightTintColorRed:_highlightTintColor.one green:_highlightTintColor.two 
blue:_highlightTintColor.three alpha:_highlightTintColor.four]; +} + +- (void)setShadowTintColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent; +{ + GPUVector4 shadowTintColor = {redComponent, greenComponent, blueComponent, alphaComponent}; + + [self setVec4:shadowTintColor forUniform:shadowTintColorUniform program:filterProgram]; +} + +- (void)setHighlightTintColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent; +{ + GPUVector4 highlightTintColor = {redComponent, greenComponent, blueComponent, alphaComponent}; + + [self setVec4:highlightTintColor forUniform:highlightTintColorUniform program:filterProgram]; +} + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHistogramEqualizationFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageHistogramEqualizationFilter.h new file mode 100644 index 00000000..2b71cda0 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHistogramEqualizationFilter.h @@ -0,0 +1,26 @@ +// +// GPUImageHistogramEqualizationFilter.h +// FilterShowcase +// +// Created by Adam Marcus on 19/08/2014. +// Copyright (c) 2014 Sunset Lake Software LLC. All rights reserved. 
+// + +#import "GPUImageFilterGroup.h" +#import "GPUImageHistogramFilter.h" +#import "GPUImageRawDataOutput.h" +#import "GPUImageRawDataInput.h" +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageHistogramEqualizationFilter : GPUImageFilterGroup +{ + GPUImageHistogramFilter *histogramFilter; + GPUImageRawDataOutput *rawDataOutputFilter; + GPUImageRawDataInput *rawDataInputFilter; +} + +@property(readwrite, nonatomic) NSUInteger downsamplingFactor; + +- (id)initWithHistogramType:(GPUImageHistogramType)newHistogramType; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHistogramEqualizationFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageHistogramEqualizationFilter.m new file mode 100644 index 00000000..7d6b9a2d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHistogramEqualizationFilter.m @@ -0,0 +1,307 @@ +// +// GPUImageHistogramEqualizationFilter.m +// FilterShowcase +// +// Created by Adam Marcus on 19/08/2014. +// Copyright (c) 2014 Sunset Lake Software LLC. All rights reserved. 
+// + +#import "GPUImageHistogramEqualizationFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageRedHistogramEqualizationFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + lowp float redCurveValue = texture2D(inputImageTexture2, vec2(textureColor.r, 0.0)).r; + + gl_FragColor = vec4(redCurveValue, textureColor.g, textureColor.b, textureColor.a); + } + ); +#else +NSString *const kGPUImageRedHistogramEqualizationFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float redCurveValue = texture2D(inputImageTexture2, vec2(textureColor.r, 0.0)).r; + + gl_FragColor = vec4(redCurveValue, textureColor.g, textureColor.b, textureColor.a); + } + ); +#endif + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageGreenHistogramEqualizationFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + lowp float greenCurveValue = texture2D(inputImageTexture2, vec2(textureColor.g, 0.0)).g; + + gl_FragColor = vec4(textureColor.r, greenCurveValue, textureColor.b, textureColor.a); + } + ); +#else +NSString *const kGPUImageGreenHistogramEqualizationFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float greenCurveValue = texture2D(inputImageTexture2, 
vec2(textureColor.g, 0.0)).g; + + gl_FragColor = vec4(textureColor.r, greenCurveValue, textureColor.b, textureColor.a); + } + ); +#endif + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageBlueHistogramEqualizationFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + lowp float blueCurveValue = texture2D(inputImageTexture2, vec2(textureColor.b, 0.0)).b; + + gl_FragColor = vec4(textureColor.r, textureColor.g, blueCurveValue, textureColor.a); + } + ); +#else +NSString *const kGPUImageBlueHistogramEqualizationFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float blueCurveValue = texture2D(inputImageTexture2, vec2(textureColor.b, 0.0)).b; + + gl_FragColor = vec4(textureColor.r, textureColor.g, blueCurveValue, textureColor.a); + } + ); +#endif + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageRGBHistogramEqualizationFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + lowp float redCurveValue = texture2D(inputImageTexture2, vec2(textureColor.r, 0.0)).r; + lowp float greenCurveValue = texture2D(inputImageTexture2, vec2(textureColor.g, 0.0)).g; + lowp float blueCurveValue = texture2D(inputImageTexture2, vec2(textureColor.b, 0.0)).b; + + gl_FragColor = vec4(redCurveValue, greenCurveValue, blueCurveValue, textureColor.a); + } + ); +#else +NSString *const kGPUImageRGBHistogramEqualizationFragmentShaderString = SHADER_STRING +( + 
varying vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float redCurveValue = texture2D(inputImageTexture2, vec2(textureColor.r, 0.0)).r; + float greenCurveValue = texture2D(inputImageTexture2, vec2(textureColor.g, 0.0)).g; + float blueCurveValue = texture2D(inputImageTexture2, vec2(textureColor.b, 0.0)).b; + + gl_FragColor = vec4(redCurveValue, greenCurveValue, blueCurveValue, textureColor.a); + } + ); +#endif + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageLuminanceHistogramEqualizationFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + const lowp vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + lowp float luminance = dot(textureColor.rgb, W); + lowp float newLuminance = texture2D(inputImageTexture2, vec2(luminance, 0.0)).r; + lowp float deltaLuminance = newLuminance - luminance; + + lowp float red = clamp(textureColor.r + deltaLuminance, 0.0, 1.0); + lowp float green = clamp(textureColor.g + deltaLuminance, 0.0, 1.0); + lowp float blue = clamp(textureColor.b + deltaLuminance, 0.0, 1.0); + + gl_FragColor = vec4(red, green, blue, textureColor.a); + } + ); +#else +NSString *const kGPUImageLuminanceHistogramEqualizationFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + const vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float luminance = dot(textureColor.rgb, W); + float newLuminance = texture2D(inputImageTexture2, vec2(luminance, 0.0)).r; + float deltaLuminance = newLuminance - luminance; + + float red = 
clamp(textureColor.r + deltaLuminance, 0.0, 1.0); + float green = clamp(textureColor.g + deltaLuminance, 0.0, 1.0); + float blue = clamp(textureColor.b + deltaLuminance, 0.0, 1.0); + + gl_FragColor = vec4(red, green, blue, textureColor.a); + } + ); +#endif + +@implementation GPUImageHistogramEqualizationFilter + +@synthesize downsamplingFactor = _downsamplingFactor; + +#pragma mark - +#pragma mark Initialization + +- (id)init; +{ + if (!(self = [self initWithHistogramType:kGPUImageHistogramRGB])) + { + return nil; + } + + return self; +} + +- (id)initWithHistogramType:(GPUImageHistogramType)newHistogramType +{ + if (!(self = [super init])) + { + return nil; + } + + histogramFilter = [[GPUImageHistogramFilter alloc] initWithHistogramType:newHistogramType]; + [self addFilter:histogramFilter]; + + GLubyte dummyInput[4 * 256]; // NB: No way to initialise GPUImageRawDataInput without providing bytes + rawDataInputFilter = [[GPUImageRawDataInput alloc] initWithBytes:dummyInput size:CGSizeMake(256.0, 1.0) pixelFormat:GPUPixelFormatBGRA type:GPUPixelTypeUByte]; + rawDataOutputFilter = [[GPUImageRawDataOutput alloc] initWithImageSize:CGSizeMake(256.0, 3.0) resultsInBGRAFormat:YES]; + + __unsafe_unretained GPUImageRawDataOutput *_rawDataOutputFilter = rawDataOutputFilter; + __unsafe_unretained GPUImageRawDataInput *_rawDataInputFilter = rawDataInputFilter; + [rawDataOutputFilter setNewFrameAvailableBlock:^{ + + unsigned int histogramBins[3][256]; + + [_rawDataOutputFilter lockFramebufferForReading]; + + GLubyte *data = [_rawDataOutputFilter rawBytesForImage]; + data += [_rawDataOutputFilter bytesPerRowInOutput]; + + histogramBins[0][0] = *data++; + histogramBins[1][0] = *data++; + histogramBins[2][0] = *data++; + data++; + + for (unsigned int x = 1; x < 256; x++) { + histogramBins[0][x] = histogramBins[0][x-1] + *data++; + histogramBins[1][x] = histogramBins[1][x-1] + *data++; + histogramBins[2][x] = histogramBins[2][x-1] + *data++; + data++; + } + + [_rawDataOutputFilter 
unlockFramebufferAfterReading]; + + GLubyte colorMapping[4 * 256]; + GLubyte *_colorMapping = colorMapping; + + for (unsigned int x = 0; x < 256; x++) { + *_colorMapping++ = (GLubyte) (((histogramBins[0][x] - histogramBins[0][0]) * 255) / histogramBins[0][255]); + *_colorMapping++ = (GLubyte) (((histogramBins[1][x] - histogramBins[1][0]) * 255) / histogramBins[1][255]); + *_colorMapping++ = (GLubyte) (((histogramBins[2][x] - histogramBins[2][0]) * 255) / histogramBins[2][255]); + *_colorMapping++ = 255; + } + + _colorMapping = colorMapping; + [_rawDataInputFilter updateDataFromBytes:_colorMapping size:CGSizeMake(256.0, 1.0)]; + [_rawDataInputFilter processData]; + }]; + [histogramFilter addTarget:rawDataOutputFilter]; + + NSString *fragmentShader = nil; + switch (newHistogramType) { + case kGPUImageHistogramRed: + fragmentShader = kGPUImageRedHistogramEqualizationFragmentShaderString; + break; + case kGPUImageHistogramGreen: + fragmentShader = kGPUImageGreenHistogramEqualizationFragmentShaderString; + break; + case kGPUImageHistogramBlue: + fragmentShader = kGPUImageBlueHistogramEqualizationFragmentShaderString; + break; + default: + case kGPUImageHistogramRGB: + fragmentShader = kGPUImageRGBHistogramEqualizationFragmentShaderString; + break; + case kGPUImageHistogramLuminance: + fragmentShader = kGPUImageLuminanceHistogramEqualizationFragmentShaderString; + break; + } + GPUImageFilter *equalizationFilter = [[GPUImageTwoInputFilter alloc] initWithFragmentShaderFromString:fragmentShader]; + [rawDataInputFilter addTarget:equalizationFilter atTextureLocation:1]; + + [self addFilter:equalizationFilter]; + + self.initialFilters = [NSArray arrayWithObjects:histogramFilter, equalizationFilter, nil]; + self.terminalFilter = equalizationFilter; + + self.downsamplingFactor = 16; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setDownsamplingFactor:(NSUInteger)newValue; +{ + if (_downsamplingFactor != newValue) + { + _downsamplingFactor = newValue; + 
histogramFilter.downsamplingFactor = newValue; + } +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHistogramFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageHistogramFilter.h new file mode 100755 index 00000000..32004bf7 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHistogramFilter.h @@ -0,0 +1,22 @@ +#import "GPUImageFilter.h" + +typedef enum { kGPUImageHistogramRed, kGPUImageHistogramGreen, kGPUImageHistogramBlue, kGPUImageHistogramRGB, kGPUImageHistogramLuminance} GPUImageHistogramType; + +@interface GPUImageHistogramFilter : GPUImageFilter +{ + GPUImageHistogramType histogramType; + + GLubyte *vertexSamplingCoordinates; + + GLProgram *secondFilterProgram, *thirdFilterProgram; + GLint secondFilterPositionAttribute, thirdFilterPositionAttribute; +} + +// Rather than sampling every pixel, this dictates what fraction of the image is sampled. By default, this is 16 with a minimum of 1. +@property(readwrite, nonatomic) NSUInteger downsamplingFactor; + +// Initialization and teardown +- (id)initWithHistogramType:(GPUImageHistogramType)newHistogramType; +- (void)initializeSecondaryAttributes; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHistogramFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageHistogramFilter.m new file mode 100755 index 00000000..bb7acce7 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHistogramFilter.m @@ -0,0 +1,341 @@ +#import "GPUImageHistogramFilter.h" + +// Unlike other filters, this one uses a grid of GL_POINTs to sample the incoming image in a grid. A custom vertex shader reads the color in the texture at its position +// and outputs a bin position in the final histogram as the vertex position. That point is then written into the image of the histogram using translucent pixels. +// The degree of translucency is controlled by the scalingFactor, which lets you adjust the dynamic range of the histogram. The histogram can only be generated for one +// color channel or luminance value at a time. 
+// +// This is based on this implementation: http://www.shaderwrangler.com/publications/histogram/histogram_cameraready.pdf +// +// Or at least that's how it would work if iOS could read from textures in a vertex shader, which it can't. Therefore, I read the texture data down from the +// incoming frame and process the texture colors as vertices. + +NSString *const kGPUImageRedHistogramSamplingVertexShaderString = SHADER_STRING +( + attribute vec4 position; + + varying vec3 colorFactor; + + void main() + { + colorFactor = vec3(1.0, 0.0, 0.0); + gl_Position = vec4(-1.0 + (position.x * 0.0078125), 0.0, 0.0, 1.0); + gl_PointSize = 1.0; + } +); + +NSString *const kGPUImageGreenHistogramSamplingVertexShaderString = SHADER_STRING +( + attribute vec4 position; + + varying vec3 colorFactor; + + void main() + { + colorFactor = vec3(0.0, 1.0, 0.0); + gl_Position = vec4(-1.0 + (position.y * 0.0078125), 0.0, 0.0, 1.0); + gl_PointSize = 1.0; + } +); + +NSString *const kGPUImageBlueHistogramSamplingVertexShaderString = SHADER_STRING +( + attribute vec4 position; + + varying vec3 colorFactor; + + void main() + { + colorFactor = vec3(0.0, 0.0, 1.0); + gl_Position = vec4(-1.0 + (position.z * 0.0078125), 0.0, 0.0, 1.0); + gl_PointSize = 1.0; + } +); + +NSString *const kGPUImageLuminanceHistogramSamplingVertexShaderString = SHADER_STRING +( + attribute vec4 position; + + varying vec3 colorFactor; + + const vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + float luminance = dot(position.xyz, W); + + colorFactor = vec3(1.0, 1.0, 1.0); + gl_Position = vec4(-1.0 + (luminance * 0.0078125), 0.0, 0.0, 1.0); + gl_PointSize = 1.0; + } +); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageHistogramAccumulationFragmentShaderString = SHADER_STRING +( + const lowp float scalingFactor = 1.0 / 256.0; + + varying lowp vec3 colorFactor; + + void main() + { + gl_FragColor = vec4(colorFactor * scalingFactor , 1.0); + } +); +#else +NSString *const 
kGPUImageHistogramAccumulationFragmentShaderString = SHADER_STRING +( + const float scalingFactor = 1.0 / 256.0; + + varying vec3 colorFactor; + + void main() + { + gl_FragColor = vec4(colorFactor * scalingFactor , 1.0); + } +); +#endif + +@implementation GPUImageHistogramFilter + +@synthesize downsamplingFactor = _downsamplingFactor; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithHistogramType:(GPUImageHistogramType)newHistogramType; +{ + switch (newHistogramType) + { + case kGPUImageHistogramRed: + { + if (!(self = [super initWithVertexShaderFromString:kGPUImageRedHistogramSamplingVertexShaderString fragmentShaderFromString:kGPUImageHistogramAccumulationFragmentShaderString])) + { + return nil; + } + }; break; + case kGPUImageHistogramGreen: + { + if (!(self = [super initWithVertexShaderFromString:kGPUImageGreenHistogramSamplingVertexShaderString fragmentShaderFromString:kGPUImageHistogramAccumulationFragmentShaderString])) + { + return nil; + } + }; break; + case kGPUImageHistogramBlue: + { + if (!(self = [super initWithVertexShaderFromString:kGPUImageBlueHistogramSamplingVertexShaderString fragmentShaderFromString:kGPUImageHistogramAccumulationFragmentShaderString])) + { + return nil; + } + }; break; + case kGPUImageHistogramLuminance: + { + if (!(self = [super initWithVertexShaderFromString:kGPUImageLuminanceHistogramSamplingVertexShaderString fragmentShaderFromString:kGPUImageHistogramAccumulationFragmentShaderString])) + { + return nil; + } + }; break; + case kGPUImageHistogramRGB: + { + if (!(self = [super initWithVertexShaderFromString:kGPUImageRedHistogramSamplingVertexShaderString fragmentShaderFromString:kGPUImageHistogramAccumulationFragmentShaderString])) + { + return nil; + } + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + + secondFilterProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:kGPUImageGreenHistogramSamplingVertexShaderString 
fragmentShaderString:kGPUImageHistogramAccumulationFragmentShaderString]; + thirdFilterProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:kGPUImageBlueHistogramSamplingVertexShaderString fragmentShaderString:kGPUImageHistogramAccumulationFragmentShaderString]; + + if (!secondFilterProgram.initialized) + { + [self initializeSecondaryAttributes]; + + if (![secondFilterProgram link]) + { + NSString *progLog = [secondFilterProgram programLog]; + NSLog(@"Program link log: %@", progLog); + NSString *fragLog = [secondFilterProgram fragmentShaderLog]; + NSLog(@"Fragment shader compile log: %@", fragLog); + NSString *vertLog = [secondFilterProgram vertexShaderLog]; + NSLog(@"Vertex shader compile log: %@", vertLog); + filterProgram = nil; + NSAssert(NO, @"Filter shader link failed"); + + } + + [GPUImageContext setActiveShaderProgram:secondFilterProgram]; + + glEnableVertexAttribArray(secondFilterPositionAttribute); + + if (![thirdFilterProgram link]) + { + NSString *progLog = [secondFilterProgram programLog]; + NSLog(@"Program link log: %@", progLog); + NSString *fragLog = [secondFilterProgram fragmentShaderLog]; + NSLog(@"Fragment shader compile log: %@", fragLog); + NSString *vertLog = [secondFilterProgram vertexShaderLog]; + NSLog(@"Vertex shader compile log: %@", vertLog); + filterProgram = nil; + NSAssert(NO, @"Filter shader link failed"); + } + } + + secondFilterPositionAttribute = [secondFilterProgram attributeIndex:@"position"]; + + + thirdFilterPositionAttribute = [thirdFilterProgram attributeIndex:@"position"]; + [GPUImageContext setActiveShaderProgram:thirdFilterProgram]; + + glEnableVertexAttribArray(thirdFilterPositionAttribute); + }); + }; break; + } + + histogramType = newHistogramType; + + self.downsamplingFactor = 16; + + return self; +} + +- (id)init; +{ + if (!(self = [self initWithHistogramType:kGPUImageHistogramRGB])) + { + return nil; + } + + return self; +} + +- (void)initializeSecondaryAttributes; +{ + 
[secondFilterProgram addAttribute:@"position"]; + [thirdFilterProgram addAttribute:@"position"]; +} + +- (void)dealloc; +{ + if (vertexSamplingCoordinates != NULL && ![GPUImageContext supportsFastTextureUpload]) + { + free(vertexSamplingCoordinates); + } +} + +#pragma mark - +#pragma mark Rendering + +- (CGSize)sizeOfFBO; +{ + return CGSizeMake(256.0, 3.0); +} + +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +{ + [self renderToTextureWithVertices:NULL textureCoordinates:NULL]; + + [self informTargetsAboutNewFrameAtTime:frameTime]; +} + +- (CGSize)outputFrameSize; +{ + return [self sizeOfFBO]; +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + if (self.preventRendering) + { + return; + } + + inputTextureSize = newSize; +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + inputRotation = kGPUImageNoRotation; +} + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + // we need a normal color texture for this filter + NSAssert(self.outputTextureOptions.internalFormat == GL_RGBA, @"The output texture format for this filter must be GL_RGBA."); + NSAssert(self.outputTextureOptions.type == GL_UNSIGNED_BYTE, @"The type of the output texture of this filter must be GL_UNSIGNED_BYTE."); + + if (self.preventRendering) + { + [firstInputFramebuffer unlock]; + return; + } + + [GPUImageContext useImageProcessingContext]; + + if ([GPUImageContext supportsFastTextureUpload]) + { + glFinish(); + vertexSamplingCoordinates = [firstInputFramebuffer byteBuffer]; + } else { + if (vertexSamplingCoordinates == NULL) + { + vertexSamplingCoordinates = calloc(inputTextureSize.width * inputTextureSize.height * 4, sizeof(GLubyte)); + } + glReadPixels(0, 0, inputTextureSize.width, inputTextureSize.height, GL_RGBA, GL_UNSIGNED_BYTE, vertexSamplingCoordinates); + } + + outputFramebuffer = [[GPUImageContext 
sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + if (usingNextFrameForImageCapture) + { + [outputFramebuffer lock]; + } + + [GPUImageContext setActiveShaderProgram:filterProgram]; + + glClearColor(0.0, 0.0, 0.0, 1.0); + glClear(GL_COLOR_BUFFER_BIT); + + glBlendEquation(GL_FUNC_ADD); + glBlendFunc(GL_ONE, GL_ONE); + glEnable(GL_BLEND); + + glVertexAttribPointer(filterPositionAttribute, 4, GL_UNSIGNED_BYTE, 0, ((unsigned int)_downsamplingFactor - 1) * 4, vertexSamplingCoordinates); + glDrawArrays(GL_POINTS, 0, inputTextureSize.width * inputTextureSize.height / (CGFloat)_downsamplingFactor); + + if (histogramType == kGPUImageHistogramRGB) + { + [GPUImageContext setActiveShaderProgram:secondFilterProgram]; + + glVertexAttribPointer(secondFilterPositionAttribute, 4, GL_UNSIGNED_BYTE, 0, ((unsigned int)_downsamplingFactor - 1) * 4, vertexSamplingCoordinates); + glDrawArrays(GL_POINTS, 0, inputTextureSize.width * inputTextureSize.height / (CGFloat)_downsamplingFactor); + + [GPUImageContext setActiveShaderProgram:thirdFilterProgram]; + + glVertexAttribPointer(thirdFilterPositionAttribute, 4, GL_UNSIGNED_BYTE, 0, ((unsigned int)_downsamplingFactor - 1) * 4, vertexSamplingCoordinates); + glDrawArrays(GL_POINTS, 0, inputTextureSize.width * inputTextureSize.height / (CGFloat)_downsamplingFactor); + } + + glDisable(GL_BLEND); + [firstInputFramebuffer unlock]; + + if (usingNextFrameForImageCapture) + { + dispatch_semaphore_signal(imageCaptureSemaphore); + } +} + +#pragma mark - +#pragma mark Accessors + +//- (void)setScalingFactor:(CGFloat)newValue; +//{ +// _scalingFactor = newValue; +// +// [GPUImageContext useImageProcessingContext]; +// [filterProgram use]; +// glUniform1f(scalingFactorUniform, _scalingFactor); +//} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHistogramGenerator.h b/LFLiveKit/Vendor/GPUImage/GPUImageHistogramGenerator.h new file 
mode 100755 index 00000000..f80c50f3 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHistogramGenerator.h @@ -0,0 +1,8 @@ +#import "GPUImageFilter.h" + +@interface GPUImageHistogramGenerator : GPUImageFilter +{ + GLint backgroundColorUniform; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHistogramGenerator.m b/LFLiveKit/Vendor/GPUImage/GPUImageHistogramGenerator.m new file mode 100755 index 00000000..703795df --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHistogramGenerator.m @@ -0,0 +1,87 @@ +#import "GPUImageHistogramGenerator.h" + +NSString *const kGPUImageHistogramGeneratorVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + + varying vec2 textureCoordinate; + varying float height; + + void main() + { + gl_Position = position; + textureCoordinate = vec2(inputTextureCoordinate.x, 0.5); + height = 1.0 - inputTextureCoordinate.y; + } +); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageHistogramGeneratorFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp float height; + + uniform sampler2D inputImageTexture; + uniform lowp vec4 backgroundColor; + + void main() + { + lowp vec3 colorChannels = texture2D(inputImageTexture, textureCoordinate).rgb; + lowp vec4 heightTest = vec4(step(height, colorChannels), 1.0); + gl_FragColor = mix(backgroundColor, heightTest, heightTest.r + heightTest.g + heightTest.b); + } +); +#else +NSString *const kGPUImageHistogramGeneratorFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying float height; + + uniform sampler2D inputImageTexture; + uniform vec4 backgroundColor; + + void main() + { + vec3 colorChannels = texture2D(inputImageTexture, textureCoordinate).rgb; + vec4 heightTest = vec4(step(height, colorChannels), 1.0); + gl_FragColor = mix(backgroundColor, heightTest, heightTest.r + heightTest.g + heightTest.b); + } +); +#endif + +@implementation 
GPUImageHistogramGenerator + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithVertexShaderFromString:kGPUImageHistogramGeneratorVertexShaderString fragmentShaderFromString:kGPUImageHistogramGeneratorFragmentShaderString])) + { + return nil; + } + + backgroundColorUniform = [filterProgram uniformIndex:@"backgroundColor"]; + + [self setBackgroundColorRed:0.0 green:0.0 blue:0.0 alpha:0.0]; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setBackgroundColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent; +{ +// GLfloat backgroundColor[4]; +// backgroundColor[0] = redComponent; +// backgroundColor[1] = greenComponent; +// backgroundColor[2] = blueComponent; +// backgroundColor[3] = alphaComponent; + GPUVector4 backgroundColor = {redComponent, greenComponent, blueComponent, alphaComponent}; + + [self setVec4:backgroundColor forUniform:backgroundColorUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHoughTransformLineDetector.h b/LFLiveKit/Vendor/GPUImage/GPUImageHoughTransformLineDetector.h new file mode 100644 index 00000000..3ab6977f --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHoughTransformLineDetector.h @@ -0,0 +1,49 @@ +#import "GPUImageFilterGroup.h" +#import "GPUImageThresholdEdgeDetectionFilter.h" +#import "GPUImageParallelCoordinateLineTransformFilter.h" +#import "GPUImageThresholdedNonMaximumSuppressionFilter.h" +#import "GPUImageCannyEdgeDetectionFilter.h" + +// This applies a Hough transform to detect lines in a scene. It starts with a thresholded Sobel edge detection pass, +// then takes those edge points in and applies a Hough transform to convert them to lines. The intersection of these lines +// is then determined via blending and accumulation, and a non-maximum suppression filter is applied to find local maxima. 
+// These local maxima are then converted back into lines in normal space and returned via a callback block. +// +// Rather than using one of the standard Hough transform types, this filter uses parallel coordinate space which is far more efficient +// to rasterize on a GPU. +// +// This approach is based entirely on the PC lines process developed by the Graph@FIT research group at the Brno University of Technology +// and described in their publications: +// +// M. Dubská, J. Havel, and A. Herout. Real-Time Detection of Lines using Parallel Coordinates and OpenGL. Proceedings of SCCG 2011, Bratislava, SK, p. 7. +// http://medusa.fit.vutbr.cz/public/data/papers/2011-SCCG-Dubska-Real-Time-Line-Detection-Using-PC-and-OpenGL.pdf +// M. Dubská, J. Havel, and A. Herout. PClines — Line detection using parallel coordinates. 2011 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), p. 1489- 1494. +// http://medusa.fit.vutbr.cz/public/data/papers/2011-CVPR-Dubska-PClines.pdf + +//#define DEBUGLINEDETECTION + +@interface GPUImageHoughTransformLineDetector : GPUImageFilterGroup +{ + GPUImageOutput *thresholdEdgeDetectionFilter; + +// GPUImageThresholdEdgeDetectionFilter *thresholdEdgeDetectionFilter; + GPUImageParallelCoordinateLineTransformFilter *parallelCoordinateLineTransformFilter; + GPUImageThresholdedNonMaximumSuppressionFilter *nonMaximumSuppressionFilter; + + GLfloat *linesArray; + GLubyte *rawImagePixels; +} + +// A threshold value for which a point is detected as belonging to an edge for determining lines. Default is 0.9. +@property(readwrite, nonatomic) CGFloat edgeThreshold; + +// A threshold value for which a local maximum is detected as belonging to a line in parallel coordinate space. Default is 0.20. +@property(readwrite, nonatomic) CGFloat lineDetectionThreshold; + +// This block is called on the detection of lines, usually on every processed frame. 
A C array containing normalized slopes and intercepts in m, b pairs (y=mx+b) is passed in, along with a count of the number of lines detected and the current timestamp of the video frame +@property(nonatomic, copy) void(^linesDetectedBlock)(GLfloat* lineArray, NSUInteger linesDetected, CMTime frameTime); + +// These images are only enabled when built with DEBUGLINEDETECTION defined, and are used to examine the intermediate states of the Hough transform +@property(nonatomic, readonly, strong) NSMutableArray *intermediateImages; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHoughTransformLineDetector.m b/LFLiveKit/Vendor/GPUImage/GPUImageHoughTransformLineDetector.m new file mode 100644 index 00000000..8289eb80 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHoughTransformLineDetector.m @@ -0,0 +1,241 @@ +#import "GPUImageHoughTransformLineDetector.h" + +@interface GPUImageHoughTransformLineDetector() + +- (void)extractLineParametersFromImageAtFrameTime:(CMTime)frameTime; + +@end + +@implementation GPUImageHoughTransformLineDetector + +@synthesize linesDetectedBlock; +@synthesize edgeThreshold; +@synthesize lineDetectionThreshold; +@synthesize intermediateImages = _intermediateImages; + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + // First pass: do edge detection and threshold that to just have white pixels for edges +// if ([GPUImageContext deviceSupportsFramebufferReads]) +// if ([GPUImageContext deviceSupportsFramebufferReads]) +// { +// thresholdEdgeDetectionFilter = [[GPUImageThresholdEdgeDetectionFilter alloc] init]; +// thresholdEdgeDetectionFilter = [[GPUImageSobelEdgeDetectionFilter alloc] init]; +// [(GPUImageThresholdEdgeDetectionFilter *)thresholdEdgeDetectionFilter setThreshold:0.07]; +// [(GPUImageThresholdEdgeDetectionFilter *)thresholdEdgeDetectionFilter setEdgeStrength:0.25]; +// [(GPUImageThresholdEdgeDetectionFilter *)thresholdEdgeDetectionFilter setEdgeStrength:1.0]; +// thresholdEdgeDetectionFilter = 
[[GPUImageCannyEdgeDetectionFilter alloc] init]; +// } +// else +// { + thresholdEdgeDetectionFilter = [[GPUImageCannyEdgeDetectionFilter alloc] init]; +// } + [self addFilter:thresholdEdgeDetectionFilter]; + + // Second pass: extract the white points and draw representative lines in parallel coordinate space + parallelCoordinateLineTransformFilter = [[GPUImageParallelCoordinateLineTransformFilter alloc] init]; + [self addFilter:parallelCoordinateLineTransformFilter]; + + // Third pass: apply non-maximum suppression + if ([GPUImageContext deviceSupportsFramebufferReads]) + { + nonMaximumSuppressionFilter = [[GPUImageThresholdedNonMaximumSuppressionFilter alloc] initWithPackedColorspace:YES]; + } + else + { + nonMaximumSuppressionFilter = [[GPUImageThresholdedNonMaximumSuppressionFilter alloc] initWithPackedColorspace:NO]; + } + [self addFilter:nonMaximumSuppressionFilter]; + + __unsafe_unretained GPUImageHoughTransformLineDetector *weakSelf = self; +#ifdef DEBUGLINEDETECTION + _intermediateImages = [[NSMutableArray alloc] init]; + __unsafe_unretained NSMutableArray *weakIntermediateImages = _intermediateImages; + +// __unsafe_unretained GPUImageOutput *weakEdgeDetectionFilter = thresholdEdgeDetectionFilter; +// [thresholdEdgeDetectionFilter setFrameProcessingCompletionBlock:^(GPUImageOutput *filter, CMTime frameTime){ +// [weakIntermediateImages removeAllObjects]; +// UIImage *intermediateImage = [weakEdgeDetectionFilter imageFromCurrentFramebuffer]; +// [weakIntermediateImages addObject:intermediateImage]; +// }]; +// +// __unsafe_unretained GPUImageOutput *weakParallelCoordinateLineTransformFilter = parallelCoordinateLineTransformFilter; +// [parallelCoordinateLineTransformFilter setFrameProcessingCompletionBlock:^(GPUImageOutput *filter, CMTime frameTime){ +// UIImage *intermediateImage = [weakParallelCoordinateLineTransformFilter imageFromCurrentFramebuffer]; +// [weakIntermediateImages addObject:intermediateImage]; +// }]; + + __unsafe_unretained 
GPUImageOutput *weakNonMaximumSuppressionFilter = nonMaximumSuppressionFilter; + [nonMaximumSuppressionFilter setFrameProcessingCompletionBlock:^(GPUImageOutput *filter, CMTime frameTime){ + UIImage *intermediateImage = [weakNonMaximumSuppressionFilter imageFromCurrentFramebuffer]; + [weakIntermediateImages addObject:intermediateImage]; + + [weakSelf extractLineParametersFromImageAtFrameTime:frameTime]; + }]; +#else + [nonMaximumSuppressionFilter setFrameProcessingCompletionBlock:^(GPUImageOutput *filter, CMTime frameTime) { + [weakSelf extractLineParametersFromImageAtFrameTime:frameTime]; + }]; +#endif + + [thresholdEdgeDetectionFilter addTarget:parallelCoordinateLineTransformFilter]; + [parallelCoordinateLineTransformFilter addTarget:nonMaximumSuppressionFilter]; + + self.initialFilters = [NSArray arrayWithObjects:thresholdEdgeDetectionFilter, nil]; + // self.terminalFilter = colorPackingFilter; + self.terminalFilter = nonMaximumSuppressionFilter; + +// self.edgeThreshold = 0.95; + self.lineDetectionThreshold = 0.12; + + return self; +} + +- (void)dealloc; +{ + free(rawImagePixels); + free(linesArray); +} + +#pragma mark - +#pragma mark Corner extraction + +- (void)extractLineParametersFromImageAtFrameTime:(CMTime)frameTime; +{ + // we need a normal color texture for this filter + NSAssert(self.outputTextureOptions.internalFormat == GL_RGBA, @"The output texture format for this filter must be GL_RGBA."); + NSAssert(self.outputTextureOptions.type == GL_UNSIGNED_BYTE, @"The type of the output texture of this filter must be GL_UNSIGNED_BYTE."); + + NSUInteger numberOfLines = 0; + CGSize imageSize = nonMaximumSuppressionFilter.outputFrameSize; + + unsigned int imageByteSize = imageSize.width * imageSize.height * 4; + + if (rawImagePixels == NULL) + { + rawImagePixels = (GLubyte *)malloc(imageByteSize); + linesArray = calloc(1024 * 2, sizeof(GLfloat)); + } + + glReadPixels(0, 0, (int)imageSize.width, (int)imageSize.height, GL_RGBA, GL_UNSIGNED_BYTE, rawImagePixels); + 
+// CFAbsoluteTime startTime = CFAbsoluteTimeGetCurrent(); + + unsigned int imageWidth = imageSize.width * 4; + + unsigned int currentByte = 0; + unsigned int cornerStorageIndex = 0; + unsigned long lineStrengthCounter = 0; + while (currentByte < imageByteSize) + { + GLubyte colorByte = rawImagePixels[currentByte]; +// NSLog(@"(%d,%d): [%d,%d,%d,%d]", xCoordinate, yCoordinate, rawImagePixels[currentByte], rawImagePixels[currentByte+1], rawImagePixels[currentByte+2], rawImagePixels[currentByte+3]); +// NSLog(@"[%d,%d,%d,%d]", rawImagePixels[currentByte], rawImagePixels[currentByte+1], rawImagePixels[currentByte+2], rawImagePixels[currentByte+3]); + + if (colorByte > 0) + { + unsigned int xCoordinate = currentByte % imageWidth; + unsigned int yCoordinate = currentByte / imageWidth; + + lineStrengthCounter += colorByte; +// NSLog(@"(%d,%d): [%d,%d,%d,%d]", xCoordinate, yCoordinate, rawImagePixels[currentByte], rawImagePixels[currentByte+1], rawImagePixels[currentByte+2], rawImagePixels[currentByte+3]); + + CGFloat normalizedXCoordinate = -1.0 + 2.0 * (CGFloat)(xCoordinate / 4) / imageSize.width; + CGFloat normalizedYCoordinate = -1.0 + 2.0 * (CGFloat)(yCoordinate) / imageSize.height; + + if (normalizedXCoordinate < 0.0) + { + // T space + // m = -1 - d/u + // b = d * v/u + if (normalizedXCoordinate > -0.05) // Test for the case right near the X axis, stamp the X intercept instead of the Y + { + linesArray[cornerStorageIndex++] = 100000.0; + linesArray[cornerStorageIndex++] = normalizedYCoordinate; + } + else + { + linesArray[cornerStorageIndex++] = -1.0 - 1.0 / normalizedXCoordinate; + linesArray[cornerStorageIndex++] = 1.0 * normalizedYCoordinate / normalizedXCoordinate; + } + } + else + { + // S space + // m = 1 - d/u + // b = d * v/u + if (normalizedXCoordinate < 0.05) // Test for the case right near the X axis, stamp the X intercept instead of the Y + { + linesArray[cornerStorageIndex++] = 100000.0; + linesArray[cornerStorageIndex++] = normalizedYCoordinate; + } + 
else + { + linesArray[cornerStorageIndex++] = 1.0 - 1.0 / normalizedXCoordinate; + linesArray[cornerStorageIndex++] = 1.0 * normalizedYCoordinate / normalizedXCoordinate; + } + } + + numberOfLines++; + + numberOfLines = MIN(numberOfLines, 1023); + cornerStorageIndex = MIN(cornerStorageIndex, 2040); + } + currentByte +=4; + } + +// CFAbsoluteTime currentFrameTime = (CFAbsoluteTimeGetCurrent() - startTime); +// NSLog(@"Processing time : %f ms", 1000.0 * currentFrameTime); + + if (linesDetectedBlock != NULL) + { + linesDetectedBlock(linesArray, numberOfLines, frameTime); + } +} + +- (BOOL)wantsMonochromeInput; +{ +// return YES; + return NO; +} + +#pragma mark - +#pragma mark Accessors + +//- (void)setEdgeThreshold:(CGFloat)newValue; +//{ +// [(GPUImageCannyEdgeDetectionFilter *)thresholdEdgeDetectionFilter setThreshold:newValue]; +//} +// +//- (CGFloat)edgeThreshold; +//{ +// return [(GPUImageCannyEdgeDetectionFilter *)thresholdEdgeDetectionFilter threshold]; +//} + +- (void)setLineDetectionThreshold:(CGFloat)newValue; +{ + nonMaximumSuppressionFilter.threshold = newValue; +} + +- (CGFloat)lineDetectionThreshold; +{ + return nonMaximumSuppressionFilter.threshold; +} + +#ifdef DEBUGLINEDETECTION +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +{ +// [thresholdEdgeDetectionFilter useNextFrameForImageCapture]; +// [parallelCoordinateLineTransformFilter useNextFrameForImageCapture]; + [nonMaximumSuppressionFilter useNextFrameForImageCapture]; + + [super newFrameReadyAtTime:frameTime atIndex:textureIndex]; +} +#endif + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHueBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageHueBlendFilter.h new file mode 100644 index 00000000..4399ffcf --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHueBlendFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageHueBlendFilter : GPUImageTwoInputFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHueBlendFilter.m 
b/LFLiveKit/Vendor/GPUImage/GPUImageHueBlendFilter.m new file mode 100644 index 00000000..f9dfbbb8 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHueBlendFilter.m @@ -0,0 +1,212 @@ +#import "GPUImageHueBlendFilter.h" + +/** + * Hue blend mode based upon pseudo code from the PDF specification. + */ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageHueBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + highp float lum(lowp vec3 c) { + return dot(c, vec3(0.3, 0.59, 0.11)); + } + + lowp vec3 clipcolor(lowp vec3 c) { + highp float l = lum(c); + lowp float n = min(min(c.r, c.g), c.b); + lowp float x = max(max(c.r, c.g), c.b); + + if (n < 0.0) { + c.r = l + ((c.r - l) * l) / (l - n); + c.g = l + ((c.g - l) * l) / (l - n); + c.b = l + ((c.b - l) * l) / (l - n); + } + if (x > 1.0) { + c.r = l + ((c.r - l) * (1.0 - l)) / (x - l); + c.g = l + ((c.g - l) * (1.0 - l)) / (x - l); + c.b = l + ((c.b - l) * (1.0 - l)) / (x - l); + } + + return c; + } + + lowp vec3 setlum(lowp vec3 c, highp float l) { + highp float d = l - lum(c); + c = c + vec3(d); + return clipcolor(c); + } + + highp float sat(lowp vec3 c) { + lowp float n = min(min(c.r, c.g), c.b); + lowp float x = max(max(c.r, c.g), c.b); + return x - n; + } + + lowp float mid(lowp float cmin, lowp float cmid, lowp float cmax, highp float s) { + return ((cmid - cmin) * s) / (cmax - cmin); + } + + lowp vec3 setsat(lowp vec3 c, highp float s) { + if (c.r > c.g) { + if (c.r > c.b) { + if (c.g > c.b) { + /* g is mid, b is min */ + c.g = mid(c.b, c.g, c.r, s); + c.b = 0.0; + } else { + /* b is mid, g is min */ + c.b = mid(c.g, c.b, c.r, s); + c.g = 0.0; + } + c.r = s; + } else { + /* b is max, r is mid, g is min */ + c.r = mid(c.g, c.r, c.b, s); + c.b = s; + c.r = 0.0; + } + } else if (c.r > c.b) { + /* g is max, r is mid, b is min */ + c.r = 
mid(c.b, c.r, c.g, s); + c.g = s; + c.b = 0.0; + } else if (c.g > c.b) { + /* g is max, b is mid, r is min */ + c.b = mid(c.r, c.b, c.g, s); + c.g = s; + c.r = 0.0; + } else if (c.b > c.g) { + /* b is max, g is mid, r is min */ + c.g = mid(c.r, c.g, c.b, s); + c.b = s; + c.r = 0.0; + } else { + c = vec3(0.0); + } + return c; + } + + void main() + { + highp vec4 baseColor = texture2D(inputImageTexture, textureCoordinate); + highp vec4 overlayColor = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(baseColor.rgb * (1.0 - overlayColor.a) + setlum(setsat(overlayColor.rgb, sat(baseColor.rgb)), lum(baseColor.rgb)) * overlayColor.a, baseColor.a); + } +); +#else +NSString *const kGPUImageHueBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + float lum(vec3 c) { + return dot(c, vec3(0.3, 0.59, 0.11)); + } + + vec3 clipcolor(vec3 c) { + float l = lum(c); + float n = min(min(c.r, c.g), c.b); + float x = max(max(c.r, c.g), c.b); + + if (n < 0.0) { + c.r = l + ((c.r - l) * l) / (l - n); + c.g = l + ((c.g - l) * l) / (l - n); + c.b = l + ((c.b - l) * l) / (l - n); + } + if (x > 1.0) { + c.r = l + ((c.r - l) * (1.0 - l)) / (x - l); + c.g = l + ((c.g - l) * (1.0 - l)) / (x - l); + c.b = l + ((c.b - l) * (1.0 - l)) / (x - l); + } + + return c; + } + + vec3 setlum(vec3 c, float l) { + float d = l - lum(c); + c = c + vec3(d); + return clipcolor(c); + } + + float sat(vec3 c) { + float n = min(min(c.r, c.g), c.b); + float x = max(max(c.r, c.g), c.b); + return x - n; + } + + float mid(float cmin, float cmid, float cmax, float s) { + return ((cmid - cmin) * s) / (cmax - cmin); + } + + vec3 setsat(vec3 c, float s) { + if (c.r > c.g) { + if (c.r > c.b) { + if (c.g > c.b) { + /* g is mid, b is min */ + c.g = mid(c.b, c.g, c.r, s); + c.b = 0.0; + } else { + /* b is mid, g is min */ + c.b = mid(c.g, c.b, c.r, s); + c.g = 
0.0; + } + c.r = s; + } else { + /* b is max, r is mid, g is min */ + c.r = mid(c.g, c.r, c.b, s); + c.b = s; + c.r = 0.0; + } + } else if (c.r > c.b) { + /* g is max, r is mid, b is min */ + c.r = mid(c.b, c.r, c.g, s); + c.g = s; + c.b = 0.0; + } else if (c.g > c.b) { + /* g is max, b is mid, r is min */ + c.b = mid(c.r, c.b, c.g, s); + c.g = s; + c.r = 0.0; + } else if (c.b > c.g) { + /* b is max, g is mid, r is min */ + c.g = mid(c.r, c.g, c.b, s); + c.b = s; + c.r = 0.0; + } else { + c = vec3(0.0); + } + return c; + } + + void main() + { + vec4 baseColor = texture2D(inputImageTexture, textureCoordinate); + vec4 overlayColor = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(baseColor.rgb * (1.0 - overlayColor.a) + setlum(setsat(overlayColor.rgb, sat(baseColor.rgb)), lum(baseColor.rgb)) * overlayColor.a, baseColor.a); + } +); +#endif + +@implementation GPUImageHueBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageHueBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHueFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageHueFilter.h new file mode 100644 index 00000000..eef24651 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHueFilter.h @@ -0,0 +1,11 @@ + +#import "GPUImageFilter.h" + +@interface GPUImageHueFilter : GPUImageFilter +{ + GLint hueAdjustUniform; + +} +@property (nonatomic, readwrite) CGFloat hue; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageHueFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageHueFilter.m new file mode 100644 index 00000000..5b42c86d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageHueFilter.m @@ -0,0 +1,123 @@ + +#import "GPUImageHueFilter.h" + +// Adapted from http://stackoverflow.com/questions/9234724/how-to-change-hue-of-a-texture-with-glsl - see for code and discussion +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageHueFragmentShaderString = 
SHADER_STRING +( + precision highp float; + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform mediump float hueAdjust; + const highp vec4 kRGBToYPrime = vec4 (0.299, 0.587, 0.114, 0.0); + const highp vec4 kRGBToI = vec4 (0.595716, -0.274453, -0.321263, 0.0); + const highp vec4 kRGBToQ = vec4 (0.211456, -0.522591, 0.31135, 0.0); + + const highp vec4 kYIQToR = vec4 (1.0, 0.9563, 0.6210, 0.0); + const highp vec4 kYIQToG = vec4 (1.0, -0.2721, -0.6474, 0.0); + const highp vec4 kYIQToB = vec4 (1.0, -1.1070, 1.7046, 0.0); + + void main () + { + // Sample the input pixel + highp vec4 color = texture2D(inputImageTexture, textureCoordinate); + + // Convert to YIQ + highp float YPrime = dot (color, kRGBToYPrime); + highp float I = dot (color, kRGBToI); + highp float Q = dot (color, kRGBToQ); + + // Calculate the hue and chroma + highp float hue = atan (Q, I); + highp float chroma = sqrt (I * I + Q * Q); + + // Make the user's adjustments + hue += (-hueAdjust); //why negative rotation? 
+ + // Convert back to YIQ + Q = chroma * sin (hue); + I = chroma * cos (hue); + + // Convert back to RGB + highp vec4 yIQ = vec4 (YPrime, I, Q, 0.0); + color.r = dot (yIQ, kYIQToR); + color.g = dot (yIQ, kYIQToG); + color.b = dot (yIQ, kYIQToB); + + // Save the result + gl_FragColor = color; + } +); +#else +NSString *const kGPUImageHueFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float hueAdjust; + const vec4 kRGBToYPrime = vec4 (0.299, 0.587, 0.114, 0.0); + const vec4 kRGBToI = vec4 (0.595716, -0.274453, -0.321263, 0.0); + const vec4 kRGBToQ = vec4 (0.211456, -0.522591, 0.31135, 0.0); + + const vec4 kYIQToR = vec4 (1.0, 0.9563, 0.6210, 0.0); + const vec4 kYIQToG = vec4 (1.0, -0.2721, -0.6474, 0.0); + const vec4 kYIQToB = vec4 (1.0, -1.1070, 1.7046, 0.0); + + void main () + { + // Sample the input pixel + vec4 color = texture2D(inputImageTexture, textureCoordinate); + + // Convert to YIQ + float YPrime = dot (color, kRGBToYPrime); + float I = dot (color, kRGBToI); + float Q = dot (color, kRGBToQ); + + // Calculate the hue and chroma + float hue = atan (Q, I); + float chroma = sqrt (I * I + Q * Q); + + // Make the user's adjustments + hue += (-hueAdjust); //why negative rotation? + + // Convert back to YIQ + Q = chroma * sin (hue); + I = chroma * cos (hue); + + // Convert back to RGB + vec4 yIQ = vec4 (YPrime, I, Q, 0.0); + color.r = dot (yIQ, kYIQToR); + color.g = dot (yIQ, kYIQToG); + color.b = dot (yIQ, kYIQToB); + + // Save the result + gl_FragColor = color; + } +); +#endif + +@implementation GPUImageHueFilter +@synthesize hue; + +- (id)init +{ + if(! 
(self = [super initWithFragmentShaderFromString:kGPUImageHueFragmentShaderString]) ) + { + return nil; + } + + hueAdjustUniform = [filterProgram uniformIndex:@"hueAdjust"]; + self.hue = 90; + + return self; +} + +- (void)setHue:(CGFloat)newHue +{ + // Convert degrees to radians for hue rotation + hue = fmodf(newHue, 360.0) * M_PI/180; + [self setFloat:hue forUniform:hueAdjustUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageJFAVoronoiFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageJFAVoronoiFilter.h new file mode 100644 index 00000000..4c50cc37 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageJFAVoronoiFilter.h @@ -0,0 +1,17 @@ +#import "GPUImageFilter.h" + +@interface GPUImageJFAVoronoiFilter : GPUImageFilter +{ + GLuint secondFilterOutputTexture; + GLuint secondFilterFramebuffer; + + + GLint sampleStepUniform; + GLint sizeUniform; + NSUInteger numPasses; + +} + +@property (nonatomic, readwrite) CGSize sizeInPixels; + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageJFAVoronoiFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageJFAVoronoiFilter.m new file mode 100644 index 00000000..15835917 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageJFAVoronoiFilter.m @@ -0,0 +1,446 @@ +// adapted from unitzeroone - http://unitzeroone.com/labs/jfavoronoi/ + +#import "GPUImageJFAVoronoiFilter.h" + +// The shaders are mostly taken from UnitZeroOne's WebGL example here: +// http://unitzeroone.com/blog/2011/03/22/jump-flood-voronoi-for-webgl/ + +NSString *const kGPUImageJFAVoronoiVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + + uniform float sampleStep; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 
bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + void main() + { + gl_Position = position; + + vec2 widthStep = vec2(sampleStep, 0.0); + vec2 heightStep = vec2(0.0, sampleStep); + vec2 widthHeightStep = vec2(sampleStep); + vec2 widthNegativeHeightStep = vec2(sampleStep, -sampleStep); + + textureCoordinate = inputTextureCoordinate.xy; + leftTextureCoordinate = inputTextureCoordinate.xy - widthStep; + rightTextureCoordinate = inputTextureCoordinate.xy + widthStep; + + topTextureCoordinate = inputTextureCoordinate.xy - heightStep; + topLeftTextureCoordinate = inputTextureCoordinate.xy - widthHeightStep; + topRightTextureCoordinate = inputTextureCoordinate.xy + widthNegativeHeightStep; + + bottomTextureCoordinate = inputTextureCoordinate.xy + heightStep; + bottomLeftTextureCoordinate = inputTextureCoordinate.xy - widthNegativeHeightStep; + bottomRightTextureCoordinate = inputTextureCoordinate.xy + widthHeightStep; + } + ); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageJFAVoronoiFragmentShaderString = SHADER_STRING +( + + precision highp float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + uniform vec2 size; + //varying vec2 textureCoordinate; + //uniform float sampleStep; + + vec2 getCoordFromColor(vec4 color) +{ + float z = color.z * 256.0; + float yoff = floor(z / 8.0); + float xoff = mod(z, 8.0); + float x = color.x*256.0 + xoff*256.0; + float y = color.y*256.0 + yoff*256.0; + return vec2(x,y) / size; +} + + void main(void) { + + vec2 sub; + vec4 dst; + vec4 local = texture2D(inputImageTexture, textureCoordinate); + vec4 sam; + float l; + float 
smallestDist; + if(local.a == 0.0){ + + smallestDist = dot(1.0,1.0); + }else{ + sub = getCoordFromColor(local)-textureCoordinate; + smallestDist = dot(sub,sub); + } + dst = local; + + + sam = texture2D(inputImageTexture, topRightTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, topTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, topLeftTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, bottomRightTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, bottomTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, bottomLeftTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, leftTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, rightTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + gl_FragColor = dst; + } +); 
+#else +NSString *const kGPUImageJFAVoronoiFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + uniform vec2 size; + //varying vec2 textureCoordinate; + //uniform float sampleStep; + + vec2 getCoordFromColor(vec4 color) +{ + float z = color.z * 256.0; + float yoff = floor(z / 8.0); + float xoff = mod(z, 8.0); + float x = color.x*256.0 + xoff*256.0; + float y = color.y*256.0 + yoff*256.0; + return vec2(x,y) / size; +} + + void main(void) { + + vec2 sub; + vec4 dst; + vec4 local = texture2D(inputImageTexture, textureCoordinate); + vec4 sam; + float l; + float smallestDist; + if(local.a == 0.0){ + + smallestDist = dot(1.0,1.0); + }else{ + sub = getCoordFromColor(local)-textureCoordinate; + smallestDist = dot(sub,sub); + } + dst = local; + + + sam = texture2D(inputImageTexture, topRightTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, topTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, topLeftTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, bottomRightTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + 
smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, bottomTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, bottomLeftTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, leftTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + + sam = texture2D(inputImageTexture, rightTextureCoordinate); + if(sam.a == 1.0){ + sub = (getCoordFromColor(sam)-textureCoordinate); + l = dot(sub,sub); + if(l < smallestDist){ + smallestDist = l; + dst = sam; + } + } + gl_FragColor = dst; + } +); +#endif + +@interface GPUImageJFAVoronoiFilter() { + int currentPass; +} + + +@end + +@implementation GPUImageJFAVoronoiFilter + +@synthesize sizeInPixels = _sizeInPixels; + +- (id)init; +{ + if (!(self = [super initWithVertexShaderFromString:kGPUImageJFAVoronoiVertexShaderString fragmentShaderFromString:kGPUImageJFAVoronoiFragmentShaderString])) + { + + NSLog(@"nil returned"); + return nil; + + } + + sampleStepUniform = [filterProgram uniformIndex:@"sampleStep"]; + sizeUniform = [filterProgram uniformIndex:@"size"]; + //[self disableSecondFrameCheck]; + + return self; +} + +-(void)setSizeInPixels:(CGSize)sizeInPixels { + _sizeInPixels = sizeInPixels; + + //validate that it's a power of 2 + + float width = log2(sizeInPixels.width); + float height = log2(sizeInPixels.height); + + if (width != height) { + NSLog(@"Voronoi point texture must be square"); + return; + } + if (width != floor(width) || height != floor(height)) { + NSLog(@"Voronoi point texture must be a power of 2. 
Texture size: %f, %f", sizeInPixels.width, sizeInPixels.height); + return; + } + glUniform2f(sizeUniform, _sizeInPixels.width, _sizeInPixels.height); +} + +#pragma mark - +#pragma mark Managing the display FBOs + +-(NSUInteger)nextPowerOfTwo:(CGPoint)input { + NSUInteger val; + if (input.x > input.y) { + val = (NSUInteger)input.x; + } else { + val = (NSUInteger)input.y; + } + + val--; + val = (val >> 1) | val; + val = (val >> 2) | val; + val = (val >> 4) | val; + val = (val >> 8) | val; + val = (val >> 16) | val; + val++; + return val; +} + +//- (void)setOutputFBO; +//{ +// if (currentPass % 2 == 1) { +// [self setSecondFilterFBO]; +// } else { +// [self setFilterFBO]; +// } +// +//} + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + // Run the first stage of the two-pass filter + [GPUImageContext setActiveShaderProgram:filterProgram]; + currentPass = 0; + + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + + glActiveTexture(GL_TEXTURE2); + + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT); + + glUniform1f(sampleStepUniform, 0.5); + + glUniform2f(sizeUniform, _sizeInPixels.width, _sizeInPixels.height); + + glBindTexture(GL_TEXTURE_2D, [firstInputFramebuffer texture]); + + glUniform1i(filterInputTextureUniform, 2); + + glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + glVertexAttribPointer(filterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + + for (int pass = 1; pass <= numPasses + 1; pass++) { + currentPass = pass; +// [self setOutputFBO]; + + //glClearColor(0.0f, 0.0f, 0.0f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT); + + glActiveTexture(GL_TEXTURE2); + if (pass % 2 == 0) { + glBindTexture(GL_TEXTURE_2D, 
secondFilterOutputTexture); + } else { + glBindTexture(GL_TEXTURE_2D, [outputFramebuffer texture]); + } + glUniform1i(filterInputTextureUniform, 2); + + float step = pow(2.0, numPasses - pass) / pow(2.0, numPasses); + glUniform1f(sampleStepUniform, step); + glUniform2f(sizeUniform, _sizeInPixels.width, _sizeInPixels.height); + + glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + glVertexAttribPointer(filterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + } +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaFilter.h new file mode 100755 index 00000000..4fb0bce8 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaFilter.h @@ -0,0 +1,13 @@ +#import "GPUImageFilter.h" + +/** Kuwahara image abstraction, drawn from the work of Kyprianidis, et. al. in their publication "Anisotropic Kuwahara Filtering on the GPU" within the GPU Pro collection. This produces an oil-painting-like image, but it is extremely computationally expensive, so it can take seconds to render a frame on an iPad 2. This might be best used for still images. + */ +@interface GPUImageKuwaharaFilter : GPUImageFilter +{ + GLint radiusUniform; +} + +/// The radius to sample from when creating the brush-stroke effect, with a default of 3. The larger the radius, the slower the filter. +@property(readwrite, nonatomic) NSUInteger radius; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaFilter.m new file mode 100755 index 00000000..9061dbf3 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaFilter.m @@ -0,0 +1,223 @@ +#import "GPUImageKuwaharaFilter.h" + +// Sourced from Kyprianidis, J. E., Kang, H., and Doellner, J. "Anisotropic Kuwahara Filtering on the GPU," GPU Pro p.247 (2010). 
+// +// Original header: +// +// Anisotropic Kuwahara Filtering on the GPU +// by Jan Eric Kyprianidis + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageKuwaharaFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform int radius; + + precision highp float; + + const vec2 src_size = vec2 (1.0 / 768.0, 1.0 / 1024.0); + + void main (void) + { + vec2 uv = textureCoordinate; + float n = float((radius + 1) * (radius + 1)); + int i; int j; + vec3 m0 = vec3(0.0); vec3 m1 = vec3(0.0); vec3 m2 = vec3(0.0); vec3 m3 = vec3(0.0); + vec3 s0 = vec3(0.0); vec3 s1 = vec3(0.0); vec3 s2 = vec3(0.0); vec3 s3 = vec3(0.0); + vec3 c; + + for (j = -radius; j <= 0; ++j) { + for (i = -radius; i <= 0; ++i) { + c = texture2D(inputImageTexture, uv + vec2(i,j) * src_size).rgb; + m0 += c; + s0 += c * c; + } + } + + for (j = -radius; j <= 0; ++j) { + for (i = 0; i <= radius; ++i) { + c = texture2D(inputImageTexture, uv + vec2(i,j) * src_size).rgb; + m1 += c; + s1 += c * c; + } + } + + for (j = 0; j <= radius; ++j) { + for (i = 0; i <= radius; ++i) { + c = texture2D(inputImageTexture, uv + vec2(i,j) * src_size).rgb; + m2 += c; + s2 += c * c; + } + } + + for (j = 0; j <= radius; ++j) { + for (i = -radius; i <= 0; ++i) { + c = texture2D(inputImageTexture, uv + vec2(i,j) * src_size).rgb; + m3 += c; + s3 += c * c; + } + } + + + float min_sigma2 = 1e+2; + m0 /= n; + s0 = abs(s0 / n - m0 * m0); + + float sigma2 = s0.r + s0.g + s0.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m0, 1.0); + } + + m1 /= n; + s1 = abs(s1 / n - m1 * m1); + + sigma2 = s1.r + s1.g + s1.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m1, 1.0); + } + + m2 /= n; + s2 = abs(s2 / n - m2 * m2); + + sigma2 = s2.r + s2.g + s2.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m2, 1.0); + } + + m3 /= n; + s3 = abs(s3 / n - m3 * m3); + + sigma2 = s3.r + 
s3.g + s3.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m3, 1.0); + } + } +); +#else +NSString *const kGPUImageKuwaharaFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform int radius; + + const vec2 src_size = vec2 (1.0 / 768.0, 1.0 / 1024.0); + + void main (void) + { + vec2 uv = textureCoordinate; + float n = float((radius + 1) * (radius + 1)); + int i; int j; + vec3 m0 = vec3(0.0); vec3 m1 = vec3(0.0); vec3 m2 = vec3(0.0); vec3 m3 = vec3(0.0); + vec3 s0 = vec3(0.0); vec3 s1 = vec3(0.0); vec3 s2 = vec3(0.0); vec3 s3 = vec3(0.0); + vec3 c; + + for (j = -radius; j <= 0; ++j) { + for (i = -radius; i <= 0; ++i) { + c = texture2D(inputImageTexture, uv + vec2(i,j) * src_size).rgb; + m0 += c; + s0 += c * c; + } + } + + for (j = -radius; j <= 0; ++j) { + for (i = 0; i <= radius; ++i) { + c = texture2D(inputImageTexture, uv + vec2(i,j) * src_size).rgb; + m1 += c; + s1 += c * c; + } + } + + for (j = 0; j <= radius; ++j) { + for (i = 0; i <= radius; ++i) { + c = texture2D(inputImageTexture, uv + vec2(i,j) * src_size).rgb; + m2 += c; + s2 += c * c; + } + } + + for (j = 0; j <= radius; ++j) { + for (i = -radius; i <= 0; ++i) { + c = texture2D(inputImageTexture, uv + vec2(i,j) * src_size).rgb; + m3 += c; + s3 += c * c; + } + } + + + float min_sigma2 = 1e+2; + m0 /= n; + s0 = abs(s0 / n - m0 * m0); + + float sigma2 = s0.r + s0.g + s0.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m0, 1.0); + } + + m1 /= n; + s1 = abs(s1 / n - m1 * m1); + + sigma2 = s1.r + s1.g + s1.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m1, 1.0); + } + + m2 /= n; + s2 = abs(s2 / n - m2 * m2); + + sigma2 = s2.r + s2.g + s2.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m2, 1.0); + } + + m3 /= n; + s3 = abs(s3 / n - m3 * m3); + + sigma2 = s3.r + s3.g + s3.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + 
gl_FragColor = vec4(m3, 1.0); + } + } +); +#endif + +@implementation GPUImageKuwaharaFilter + +@synthesize radius = _radius; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageKuwaharaFragmentShaderString])) + { + return nil; + } + + radiusUniform = [filterProgram uniformIndex:@"radius"]; + + self.radius = 3; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setRadius:(NSUInteger)newValue; +{ + _radius = newValue; + + [self setInteger:(GLint)_radius forUniform:radiusUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaRadius3Filter.h b/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaRadius3Filter.h new file mode 100644 index 00000000..c4591b81 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaRadius3Filter.h @@ -0,0 +1,8 @@ +// +// GPUImageKuwaharaRadius3Filter.h + +#import "GPUImageFilter.h" + +@interface GPUImageKuwaharaRadius3Filter : GPUImageFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaRadius3Filter.m b/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaRadius3Filter.m new file mode 100644 index 00000000..98b092c6 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaRadius3Filter.m @@ -0,0 +1,547 @@ +#import "GPUImageKuwaharaRadius3Filter.h" + +// Sourced from Kyprianidis, J. E., Kang, H., and Doellner, J. "Anisotropic Kuwahara Filtering on the GPU," GPU Pro p.247 (2010). 
+// +// Original header: +// +// Anisotropic Kuwahara Filtering on the GPU +// by Jan Eric Kyprianidis + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageKuwaharaRadius3FragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + + precision highp float; + + const vec2 src_size = vec2 (1.0 / 768.0, 1.0 / 1024.0); + + void main (void) + { + vec2 uv = textureCoordinate; + float n = float(16); // radius is assumed to be 3 + vec3 m0 = vec3(0.0); vec3 m1 = vec3(0.0); vec3 m2 = vec3(0.0); vec3 m3 = vec3(0.0); + vec3 s0 = vec3(0.0); vec3 s1 = vec3(0.0); vec3 s2 = vec3(0.0); vec3 s3 = vec3(0.0); + vec3 c; + vec3 cSq; + + c = texture2D(inputImageTexture, uv + vec2(-3,-3) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-3,-2) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-3,-1) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-3,0) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m1 += c; + s1 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(-2,-3) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-2,-2) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-2,-1) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-2,0) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m1 += c; + s1 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(-1,-3) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-1,-2) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-1,-1) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-1,0) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m1 += c; + s1 += cSq; + + c = texture2D(inputImageTexture, uv + 
vec2(0,-3) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m3 += c; + s3 += cSq; + c = texture2D(inputImageTexture, uv + vec2(0,-2) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m3 += c; + s3 += cSq; + c = texture2D(inputImageTexture, uv + vec2(0,-1) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m3 += c; + s3 += cSq; + c = texture2D(inputImageTexture, uv + vec2(0,0) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m1 += c; + s1 += cSq; + m2 += c; + s2 += cSq; + m3 += c; + s3 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(-3,3) * src_size).rgb; + m1 += c; + s1 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-3,2) * src_size).rgb; + m1 += c; + s1 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-3,1) * src_size).rgb; + m1 += c; + s1 += c * c; + + c = texture2D(inputImageTexture, uv + vec2(-2,3) * src_size).rgb; + m1 += c; + s1 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-2,2) * src_size).rgb; + m1 += c; + s1 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-2,1) * src_size).rgb; + m1 += c; + s1 += c * c; + + c = texture2D(inputImageTexture, uv + vec2(-1,3) * src_size).rgb; + m1 += c; + s1 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-1,2) * src_size).rgb; + m1 += c; + s1 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-1,1) * src_size).rgb; + m1 += c; + s1 += c * c; + + c = texture2D(inputImageTexture, uv + vec2(0,3) * src_size).rgb; + cSq = c * c; + m1 += c; + s1 += cSq; + m2 += c; + s2 += cSq; + c = texture2D(inputImageTexture, uv + vec2(0,2) * src_size).rgb; + cSq = c * c; + m1 += c; + s1 += cSq; + m2 += c; + s2 += cSq; + c = texture2D(inputImageTexture, uv + vec2(0,1) * src_size).rgb; + cSq = c * c; + m1 += c; + s1 += cSq; + m2 += c; + s2 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(3,3) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(3,2) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, 
uv + vec2(3,1) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(3,0) * src_size).rgb; + cSq = c * c; + m2 += c; + s2 += cSq; + m3 += c; + s3 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(2,3) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(2,2) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(2,1) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(2,0) * src_size).rgb; + cSq = c * c; + m2 += c; + s2 += cSq; + m3 += c; + s3 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(1,3) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(1,2) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(1,1) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(1,0) * src_size).rgb; + cSq = c * c; + m2 += c; + s2 += cSq; + m3 += c; + s3 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(3,-3) * src_size).rgb; + m3 += c; + s3 += c * c; + c = texture2D(inputImageTexture, uv + vec2(3,-2) * src_size).rgb; + m3 += c; + s3 += c * c; + c = texture2D(inputImageTexture, uv + vec2(3,-1) * src_size).rgb; + m3 += c; + s3 += c * c; + + c = texture2D(inputImageTexture, uv + vec2(2,-3) * src_size).rgb; + m3 += c; + s3 += c * c; + c = texture2D(inputImageTexture, uv + vec2(2,-2) * src_size).rgb; + m3 += c; + s3 += c * c; + c = texture2D(inputImageTexture, uv + vec2(2,-1) * src_size).rgb; + m3 += c; + s3 += c * c; + + c = texture2D(inputImageTexture, uv + vec2(1,-3) * src_size).rgb; + m3 += c; + s3 += c * c; + c = texture2D(inputImageTexture, uv + vec2(1,-2) * src_size).rgb; + m3 += c; + s3 += c * c; + c = texture2D(inputImageTexture, uv + vec2(1,-1) * src_size).rgb; + m3 += c; + s3 += c * c; + + float min_sigma2 = 1e+2; + m0 /= n; + s0 = abs(s0 / n - m0 * m0); + + float sigma2 = s0.r + s0.g + s0.b; + if (sigma2 < min_sigma2) { 
+ min_sigma2 = sigma2; + gl_FragColor = vec4(m0, 1.0); + } + + m1 /= n; + s1 = abs(s1 / n - m1 * m1); + + sigma2 = s1.r + s1.g + s1.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m1, 1.0); + } + + m2 /= n; + s2 = abs(s2 / n - m2 * m2); + + sigma2 = s2.r + s2.g + s2.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m2, 1.0); + } + + m3 /= n; + s3 = abs(s3 / n - m3 * m3); + + sigma2 = s3.r + s3.g + s3.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m3, 1.0); + } + } +); +#else +NSString *const kGPUImageKuwaharaRadius3FragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + + const vec2 src_size = vec2 (1.0 / 768.0, 1.0 / 1024.0); + + void main (void) + { + vec2 uv = textureCoordinate; + float n = float(16); // radius is assumed to be 3 + vec3 m0 = vec3(0.0); vec3 m1 = vec3(0.0); vec3 m2 = vec3(0.0); vec3 m3 = vec3(0.0); + vec3 s0 = vec3(0.0); vec3 s1 = vec3(0.0); vec3 s2 = vec3(0.0); vec3 s3 = vec3(0.0); + vec3 c; + vec3 cSq; + + c = texture2D(inputImageTexture, uv + vec2(-3,-3) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-3,-2) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-3,-1) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-3,0) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m1 += c; + s1 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(-2,-3) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-2,-2) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-2,-1) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-2,0) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m1 += c; + s1 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(-1,-3) * src_size).rgb; + m0 += c; 
+ s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-1,-2) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-1,-1) * src_size).rgb; + m0 += c; + s0 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-1,0) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m1 += c; + s1 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(0,-3) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m3 += c; + s3 += cSq; + c = texture2D(inputImageTexture, uv + vec2(0,-2) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m3 += c; + s3 += cSq; + c = texture2D(inputImageTexture, uv + vec2(0,-1) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m3 += c; + s3 += cSq; + c = texture2D(inputImageTexture, uv + vec2(0,0) * src_size).rgb; + cSq = c * c; + m0 += c; + s0 += cSq; + m1 += c; + s1 += cSq; + m2 += c; + s2 += cSq; + m3 += c; + s3 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(-3,3) * src_size).rgb; + m1 += c; + s1 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-3,2) * src_size).rgb; + m1 += c; + s1 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-3,1) * src_size).rgb; + m1 += c; + s1 += c * c; + + c = texture2D(inputImageTexture, uv + vec2(-2,3) * src_size).rgb; + m1 += c; + s1 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-2,2) * src_size).rgb; + m1 += c; + s1 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-2,1) * src_size).rgb; + m1 += c; + s1 += c * c; + + c = texture2D(inputImageTexture, uv + vec2(-1,3) * src_size).rgb; + m1 += c; + s1 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-1,2) * src_size).rgb; + m1 += c; + s1 += c * c; + c = texture2D(inputImageTexture, uv + vec2(-1,1) * src_size).rgb; + m1 += c; + s1 += c * c; + + c = texture2D(inputImageTexture, uv + vec2(0,3) * src_size).rgb; + cSq = c * c; + m1 += c; + s1 += cSq; + m2 += c; + s2 += cSq; + c = texture2D(inputImageTexture, uv + vec2(0,2) * src_size).rgb; + cSq = c * c; + m1 += c; + s1 += cSq; 
+ m2 += c; + s2 += cSq; + c = texture2D(inputImageTexture, uv + vec2(0,1) * src_size).rgb; + cSq = c * c; + m1 += c; + s1 += cSq; + m2 += c; + s2 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(3,3) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(3,2) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(3,1) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(3,0) * src_size).rgb; + cSq = c * c; + m2 += c; + s2 += cSq; + m3 += c; + s3 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(2,3) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(2,2) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(2,1) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(2,0) * src_size).rgb; + cSq = c * c; + m2 += c; + s2 += cSq; + m3 += c; + s3 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(1,3) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(1,2) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(1,1) * src_size).rgb; + m2 += c; + s2 += c * c; + c = texture2D(inputImageTexture, uv + vec2(1,0) * src_size).rgb; + cSq = c * c; + m2 += c; + s2 += cSq; + m3 += c; + s3 += cSq; + + c = texture2D(inputImageTexture, uv + vec2(3,-3) * src_size).rgb; + m3 += c; + s3 += c * c; + c = texture2D(inputImageTexture, uv + vec2(3,-2) * src_size).rgb; + m3 += c; + s3 += c * c; + c = texture2D(inputImageTexture, uv + vec2(3,-1) * src_size).rgb; + m3 += c; + s3 += c * c; + + c = texture2D(inputImageTexture, uv + vec2(2,-3) * src_size).rgb; + m3 += c; + s3 += c * c; + c = texture2D(inputImageTexture, uv + vec2(2,-2) * src_size).rgb; + m3 += c; + s3 += c * c; + c = texture2D(inputImageTexture, uv + vec2(2,-1) * src_size).rgb; + m3 += c; + s3 += c * c; + + c = texture2D(inputImageTexture, uv + 
vec2(1,-3) * src_size).rgb; + m3 += c; + s3 += c * c; + c = texture2D(inputImageTexture, uv + vec2(1,-2) * src_size).rgb; + m3 += c; + s3 += c * c; + c = texture2D(inputImageTexture, uv + vec2(1,-1) * src_size).rgb; + m3 += c; + s3 += c * c; + + float min_sigma2 = 1e+2; + m0 /= n; + s0 = abs(s0 / n - m0 * m0); + + float sigma2 = s0.r + s0.g + s0.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m0, 1.0); + } + + m1 /= n; + s1 = abs(s1 / n - m1 * m1); + + sigma2 = s1.r + s1.g + s1.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m1, 1.0); + } + + m2 /= n; + s2 = abs(s2 / n - m2 * m2); + + sigma2 = s2.r + s2.g + s2.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m2, 1.0); + } + + m3 /= n; + s3 = abs(s3 / n - m3 * m3); + + sigma2 = s3.r + s3.g + s3.b; + if (sigma2 < min_sigma2) { + min_sigma2 = sigma2; + gl_FragColor = vec4(m3, 1.0); + } + } +); +#endif + +@implementation GPUImageKuwaharaRadius3Filter + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageKuwaharaRadius3FragmentShaderString])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLanczosResamplingFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageLanczosResamplingFilter.h new file mode 100644 index 00000000..5d7409f5 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLanczosResamplingFilter.h @@ -0,0 +1,7 @@ +#import "GPUImageTwoPassTextureSamplingFilter.h" + +@interface GPUImageLanczosResamplingFilter : GPUImageTwoPassTextureSamplingFilter + +@property(readwrite, nonatomic) CGSize originalImageSize; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLanczosResamplingFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageLanczosResamplingFilter.m new file mode 100644 index 00000000..a655f486 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLanczosResamplingFilter.m @@ -0,0 +1,239 @@ +#import 
"GPUImageLanczosResamplingFilter.h" + +NSString *const kGPUImageLanczosVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec2 inputTextureCoordinate; + + uniform float texelWidthOffset; + uniform float texelHeightOffset; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepLeftTextureCoordinate; + varying vec2 twoStepsLeftTextureCoordinate; + varying vec2 threeStepsLeftTextureCoordinate; + varying vec2 fourStepsLeftTextureCoordinate; + varying vec2 oneStepRightTextureCoordinate; + varying vec2 twoStepsRightTextureCoordinate; + varying vec2 threeStepsRightTextureCoordinate; + varying vec2 fourStepsRightTextureCoordinate; + + void main() + { + gl_Position = position; + + vec2 firstOffset = vec2(texelWidthOffset, texelHeightOffset); + vec2 secondOffset = vec2(2.0 * texelWidthOffset, 2.0 * texelHeightOffset); + vec2 thirdOffset = vec2(3.0 * texelWidthOffset, 3.0 * texelHeightOffset); + vec2 fourthOffset = vec2(4.0 * texelWidthOffset, 4.0 * texelHeightOffset); + + centerTextureCoordinate = inputTextureCoordinate; + oneStepLeftTextureCoordinate = inputTextureCoordinate - firstOffset; + twoStepsLeftTextureCoordinate = inputTextureCoordinate - secondOffset; + threeStepsLeftTextureCoordinate = inputTextureCoordinate - thirdOffset; + fourStepsLeftTextureCoordinate = inputTextureCoordinate - fourthOffset; + oneStepRightTextureCoordinate = inputTextureCoordinate + firstOffset; + twoStepsRightTextureCoordinate = inputTextureCoordinate + secondOffset; + threeStepsRightTextureCoordinate = inputTextureCoordinate + thirdOffset; + fourStepsRightTextureCoordinate = inputTextureCoordinate + fourthOffset; + } +); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageLanczosFragmentShaderString = SHADER_STRING +( + precision highp float; + + uniform sampler2D inputImageTexture; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepLeftTextureCoordinate; + varying vec2 twoStepsLeftTextureCoordinate; + varying vec2 
threeStepsLeftTextureCoordinate; + varying vec2 fourStepsLeftTextureCoordinate; + varying vec2 oneStepRightTextureCoordinate; + varying vec2 twoStepsRightTextureCoordinate; + varying vec2 threeStepsRightTextureCoordinate; + varying vec2 fourStepsRightTextureCoordinate; + + // sinc(x) * sinc(x/a) = (a * sin(pi * x) * sin(pi * x / a)) / (pi^2 * x^2) + // Assuming a Lanczos constant of 2.0, and scaling values to max out at x = +/- 1.5 + + void main() + { + lowp vec4 fragmentColor = texture2D(inputImageTexture, centerTextureCoordinate) * 0.38026; + + fragmentColor += texture2D(inputImageTexture, oneStepLeftTextureCoordinate) * 0.27667; + fragmentColor += texture2D(inputImageTexture, oneStepRightTextureCoordinate) * 0.27667; + + fragmentColor += texture2D(inputImageTexture, twoStepsLeftTextureCoordinate) * 0.08074; + fragmentColor += texture2D(inputImageTexture, twoStepsRightTextureCoordinate) * 0.08074; + + fragmentColor += texture2D(inputImageTexture, threeStepsLeftTextureCoordinate) * -0.02612; + fragmentColor += texture2D(inputImageTexture, threeStepsRightTextureCoordinate) * -0.02612; + + fragmentColor += texture2D(inputImageTexture, fourStepsLeftTextureCoordinate) * -0.02143; + fragmentColor += texture2D(inputImageTexture, fourStepsRightTextureCoordinate) * -0.02143; + + gl_FragColor = fragmentColor; + } +); +#else +NSString *const kGPUImageLanczosFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepLeftTextureCoordinate; + varying vec2 twoStepsLeftTextureCoordinate; + varying vec2 threeStepsLeftTextureCoordinate; + varying vec2 fourStepsLeftTextureCoordinate; + varying vec2 oneStepRightTextureCoordinate; + varying vec2 twoStepsRightTextureCoordinate; + varying vec2 threeStepsRightTextureCoordinate; + varying vec2 fourStepsRightTextureCoordinate; + + // sinc(x) * sinc(x/a) = (a * sin(pi * x) * sin(pi * x / a)) / (pi^2 * x^2) + // Assuming a Lanczos constant of 2.0, and 
scaling values to max out at x = +/- 1.5 + + void main() + { + vec4 fragmentColor = texture2D(inputImageTexture, centerTextureCoordinate) * 0.38026; + + fragmentColor += texture2D(inputImageTexture, oneStepLeftTextureCoordinate) * 0.27667; + fragmentColor += texture2D(inputImageTexture, oneStepRightTextureCoordinate) * 0.27667; + + fragmentColor += texture2D(inputImageTexture, twoStepsLeftTextureCoordinate) * 0.08074; + fragmentColor += texture2D(inputImageTexture, twoStepsRightTextureCoordinate) * 0.08074; + + fragmentColor += texture2D(inputImageTexture, threeStepsLeftTextureCoordinate) * -0.02612; + fragmentColor += texture2D(inputImageTexture, threeStepsRightTextureCoordinate) * -0.02612; + + fragmentColor += texture2D(inputImageTexture, fourStepsLeftTextureCoordinate) * -0.02143; + fragmentColor += texture2D(inputImageTexture, fourStepsRightTextureCoordinate) * -0.02143; + + gl_FragColor = fragmentColor; + } +); +#endif + +@implementation GPUImageLanczosResamplingFilter + +@synthesize originalImageSize = _originalImageSize; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFirstStageVertexShaderFromString:kGPUImageLanczosVertexShaderString firstStageFragmentShaderFromString:kGPUImageLanczosFragmentShaderString secondStageVertexShaderFromString:kGPUImageLanczosVertexShaderString secondStageFragmentShaderFromString:kGPUImageLanczosFragmentShaderString])) + { + return nil; + } + + return self; +} + +// Base texture sampling offset on the input image, not the final size +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + self.originalImageSize = newSize; + [super setInputSize:newSize atIndex:textureIndex]; +} + +- (void)setupFilterForSize:(CGSize)filterFrameSize; +{ + runSynchronouslyOnVideoProcessingQueue(^{ + // The first pass through the framebuffer may rotate the inbound image, so need to account for that by changing up the kernel ordering for that pass + if 
(GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + verticalPassTexelWidthOffset = 1.0 / _originalImageSize.height; + verticalPassTexelHeightOffset = 0.0; + } + else + { + verticalPassTexelWidthOffset = 0.0; + verticalPassTexelHeightOffset = 1.0 / _originalImageSize.height; + } + + horizontalPassTexelWidthOffset = 1.0 / _originalImageSize.width; + horizontalPassTexelHeightOffset = 0.0; + }); +} + + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + if (self.preventRendering) + { + [firstInputFramebuffer unlock]; + return; + } + + [GPUImageContext setActiveShaderProgram:filterProgram]; + + CGSize currentFBOSize = [self sizeOfFBO]; + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + currentFBOSize.height = self.originalImageSize.height; + } + else + { + currentFBOSize.width = self.originalImageSize.width; + } + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:currentFBOSize textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + + [self setUniformsForProgramAtIndex:0]; + + glClearColor(backgroundColorRed, backgroundColorGreen, backgroundColorBlue, backgroundColorAlpha); + glClear(GL_COLOR_BUFFER_BIT); + + glActiveTexture(GL_TEXTURE2); + glBindTexture(GL_TEXTURE_2D, [firstInputFramebuffer texture]); + + glUniform1i(filterInputTextureUniform, 2); + + glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + glVertexAttribPointer(filterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + + [firstInputFramebuffer unlock]; + + // Run the second stage of the two-pass filter + [GPUImageContext setActiveShaderProgram:secondFilterProgram]; + glActiveTexture(GL_TEXTURE2); + glBindTexture(GL_TEXTURE_2D, 0); + glActiveTexture(GL_TEXTURE3); + glBindTexture(GL_TEXTURE_2D, 0); + secondOutputFramebuffer = [[GPUImageContext 
sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [secondOutputFramebuffer activateFramebuffer]; + if (usingNextFrameForImageCapture) + { + [secondOutputFramebuffer lock]; + } + + [self setUniformsForProgramAtIndex:1]; + + glActiveTexture(GL_TEXTURE3); + glBindTexture(GL_TEXTURE_2D, [outputFramebuffer texture]); + glVertexAttribPointer(secondFilterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, [[self class] textureCoordinatesForRotation:kGPUImageNoRotation]); + + glUniform1i(secondFilterInputTextureUniform, 3); + + glVertexAttribPointer(secondFilterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + [outputFramebuffer unlock]; + outputFramebuffer = nil; + if (usingNextFrameForImageCapture) + { + dispatch_semaphore_signal(imageCaptureSemaphore); + } +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLaplacianFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageLaplacianFilter.h new file mode 100644 index 00000000..267c1bab --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLaplacianFilter.h @@ -0,0 +1,5 @@ +#import "GPUImage3x3ConvolutionFilter.h" + +@interface GPUImageLaplacianFilter : GPUImage3x3ConvolutionFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLaplacianFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageLaplacianFilter.m new file mode 100644 index 00000000..98b78509 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLaplacianFilter.m @@ -0,0 +1,115 @@ +#import "GPUImageLaplacianFilter.h" +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageLaplacianFragmentShaderString = SHADER_STRING +( + precision highp float; + + uniform sampler2D inputImageTexture; + + uniform mediump mat3 convolutionMatrix; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 
topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + void main() + { + mediump vec3 bottomColor = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; + mediump vec3 bottomLeftColor = texture2D(inputImageTexture, bottomLeftTextureCoordinate).rgb; + mediump vec3 bottomRightColor = texture2D(inputImageTexture, bottomRightTextureCoordinate).rgb; + mediump vec4 centerColor = texture2D(inputImageTexture, textureCoordinate); + mediump vec3 leftColor = texture2D(inputImageTexture, leftTextureCoordinate).rgb; + mediump vec3 rightColor = texture2D(inputImageTexture, rightTextureCoordinate).rgb; + mediump vec3 topColor = texture2D(inputImageTexture, topTextureCoordinate).rgb; + mediump vec3 topRightColor = texture2D(inputImageTexture, topRightTextureCoordinate).rgb; + mediump vec3 topLeftColor = texture2D(inputImageTexture, topLeftTextureCoordinate).rgb; + + mediump vec3 resultColor = topLeftColor * convolutionMatrix[0][0] + topColor * convolutionMatrix[0][1] + topRightColor * convolutionMatrix[0][2]; + resultColor += leftColor * convolutionMatrix[1][0] + centerColor.rgb * convolutionMatrix[1][1] + rightColor * convolutionMatrix[1][2]; + resultColor += bottomLeftColor * convolutionMatrix[2][0] + bottomColor * convolutionMatrix[2][1] + bottomRightColor * convolutionMatrix[2][2]; + + // Normalize the results to allow for negative gradients in the 0.0-1.0 colorspace + resultColor = resultColor + 0.5; + + gl_FragColor = vec4(resultColor, centerColor.a); + } +); +#else +NSString *const kGPUImageLaplacianFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + uniform mat3 convolutionMatrix; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 
topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + void main() + { + vec3 bottomColor = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; + vec3 bottomLeftColor = texture2D(inputImageTexture, bottomLeftTextureCoordinate).rgb; + vec3 bottomRightColor = texture2D(inputImageTexture, bottomRightTextureCoordinate).rgb; + vec4 centerColor = texture2D(inputImageTexture, textureCoordinate); + vec3 leftColor = texture2D(inputImageTexture, leftTextureCoordinate).rgb; + vec3 rightColor = texture2D(inputImageTexture, rightTextureCoordinate).rgb; + vec3 topColor = texture2D(inputImageTexture, topTextureCoordinate).rgb; + vec3 topRightColor = texture2D(inputImageTexture, topRightTextureCoordinate).rgb; + vec3 topLeftColor = texture2D(inputImageTexture, topLeftTextureCoordinate).rgb; + + vec3 resultColor = topLeftColor * convolutionMatrix[0][0] + topColor * convolutionMatrix[0][1] + topRightColor * convolutionMatrix[0][2]; + resultColor += leftColor * convolutionMatrix[1][0] + centerColor.rgb * convolutionMatrix[1][1] + rightColor * convolutionMatrix[1][2]; + resultColor += bottomLeftColor * convolutionMatrix[2][0] + bottomColor * convolutionMatrix[2][1] + bottomRightColor * convolutionMatrix[2][2]; + + // Normalize the results to allow for negative gradients in the 0.0-1.0 colorspace + resultColor = resultColor + 0.5; + + gl_FragColor = vec4(resultColor, centerColor.a); + } +); +#endif + +@implementation GPUImageLaplacianFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageLaplacianFragmentShaderString])) + { + return nil; + } + + GPUMatrix3x3 newConvolutionMatrix; + newConvolutionMatrix.one.one = 0.5; + newConvolutionMatrix.one.two = 1.0; + newConvolutionMatrix.one.three = 0.5; + + newConvolutionMatrix.two.one = 1.0; + newConvolutionMatrix.two.two = -6.0; + 
newConvolutionMatrix.two.three = 1.0; + + newConvolutionMatrix.three.one = 0.5; + newConvolutionMatrix.three.two = 1.0; + newConvolutionMatrix.three.three = 0.5; + + self.convolutionKernel = newConvolutionMatrix; + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLevelsFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageLevelsFilter.h new file mode 100644 index 00000000..d0948fbf --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLevelsFilter.h @@ -0,0 +1,45 @@ +#import "GPUImageFilter.h" + +/** + * Levels like Photoshop. + * + * The min, max, minOut and maxOut parameters are floats in the range [0, 1]. + * If you have parameters from Photoshop in the range [0, 255] you must first + * convert them to be [0, 1]. + * The gamma/mid parameter is a float >= 0. This matches the value from Photoshop. + * + * If you want to apply levels to RGB as well as individual channels you need to use + * this filter twice - first for the individual channels and then for all channels. + */ +@interface GPUImageLevelsFilter : GPUImageFilter +{ + GLint minUniform; + GLint midUniform; + GLint maxUniform; + GLint minOutputUniform; + GLint maxOutputUniform; + + GPUVector3 minVector, midVector, maxVector, minOutputVector, maxOutputVector; +} + +/** Set levels for the red channel */ +- (void)setRedMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max minOut:(CGFloat)minOut maxOut:(CGFloat)maxOut; + +- (void)setRedMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max; + +/** Set levels for the green channel */ +- (void)setGreenMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max minOut:(CGFloat)minOut maxOut:(CGFloat)maxOut; + +- (void)setGreenMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max; + +/** Set levels for the blue channel */ +- (void)setBlueMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max minOut:(CGFloat)minOut maxOut:(CGFloat)maxOut; + +- (void)setBlueMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max; + +/** Set levels for all channels at once */ +- 
(void)setMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max minOut:(CGFloat)minOut maxOut:(CGFloat)maxOut; +- (void)setMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max; + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLevelsFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageLevelsFilter.m new file mode 100644 index 00000000..158815fd --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLevelsFilter.m @@ -0,0 +1,152 @@ +#import "GPUImageLevelsFilter.h" + +/* + ** Gamma correction + ** Details: http://blog.mouaif.org/2009/01/22/photoshop-gamma-correction-shader/ + */ + +#define GammaCorrection(color, gamma) pow(color, 1.0 / gamma) + +/* + ** Levels control (input (+gamma), output) + ** Details: http://blog.mouaif.org/2009/01/28/levels-control-shader/ + */ + +#define LevelsControlInputRange(color, minInput, maxInput) min(max(color - minInput, vec3(0.0)) / (maxInput - minInput), vec3(1.0)) +#define LevelsControlInput(color, minInput, gamma, maxInput) GammaCorrection(LevelsControlInputRange(color, minInput, maxInput), gamma) +#define LevelsControlOutputRange(color, minOutput, maxOutput) mix(minOutput, maxOutput, color) +#define LevelsControl(color, minInput, gamma, maxInput, minOutput, maxOutput) LevelsControlOutputRange(LevelsControlInput(color, minInput, gamma, maxInput), minOutput, maxOutput) + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageLevelsFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform mediump vec3 levelMinimum; + uniform mediump vec3 levelMiddle; + uniform mediump vec3 levelMaximum; + uniform mediump vec3 minOutput; + uniform mediump vec3 maxOutput; + + void main() + { + mediump vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4(LevelsControl(textureColor.rgb, levelMinimum, levelMiddle, levelMaximum, minOutput, maxOutput), textureColor.a); + } +); +#else +NSString *const 
kGPUImageLevelsFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform vec3 levelMinimum; + uniform vec3 levelMiddle; + uniform vec3 levelMaximum; + uniform vec3 minOutput; + uniform vec3 maxOutput; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4(LevelsControl(textureColor.rgb, levelMinimum, levelMiddle, levelMaximum, minOutput, maxOutput), textureColor.a); + } +); +#endif + +@implementation GPUImageLevelsFilter + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageLevelsFragmentShaderString])) + { + return nil; + } + + minUniform = [filterProgram uniformIndex:@"levelMinimum"]; + midUniform = [filterProgram uniformIndex:@"levelMiddle"]; + maxUniform = [filterProgram uniformIndex:@"levelMaximum"]; + minOutputUniform = [filterProgram uniformIndex:@"minOutput"]; + maxOutputUniform = [filterProgram uniformIndex:@"maxOutput"]; + + [self setRedMin:0.0 gamma:1.0 max:1.0 minOut:0.0 maxOut:1.0]; + [self setGreenMin:0.0 gamma:1.0 max:1.0 minOut:0.0 maxOut:1.0]; + [self setBlueMin:0.0 gamma:1.0 max:1.0 minOut:0.0 maxOut:1.0]; + + return self; +} + +#pragma mark - +#pragma mark Helpers + +- (void)updateUniforms { + [self setVec3:minVector forUniform:minUniform program:filterProgram]; + [self setVec3:midVector forUniform:midUniform program:filterProgram]; + [self setVec3:maxVector forUniform:maxUniform program:filterProgram]; + [self setVec3:minOutputVector forUniform:minOutputUniform program:filterProgram]; + [self setVec3:maxOutputVector forUniform:maxOutputUniform program:filterProgram]; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max minOut:(CGFloat)minOut maxOut:(CGFloat)maxOut { + [self setRedMin:min gamma:mid max:max minOut:minOut maxOut:maxOut]; + [self setGreenMin:min gamma:mid max:max 
minOut:minOut maxOut:maxOut]; + [self setBlueMin:min gamma:mid max:max minOut:minOut maxOut:maxOut]; +} + +- (void)setMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max { + [self setMin:min gamma:mid max:max minOut:0.0 maxOut:1.0]; +} + +- (void)setRedMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max minOut:(CGFloat)minOut maxOut:(CGFloat)maxOut { + minVector.one = min; + midVector.one = mid; + maxVector.one = max; + minOutputVector.one = minOut; + maxOutputVector.one = maxOut; + + [self updateUniforms]; +} + +- (void)setRedMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max { + [self setRedMin:min gamma:mid max:max minOut:0.0 maxOut:1.0]; +} + +- (void)setGreenMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max minOut:(CGFloat)minOut maxOut:(CGFloat)maxOut { + minVector.two = min; + midVector.two = mid; + maxVector.two = max; + minOutputVector.two = minOut; + maxOutputVector.two = maxOut; + + [self updateUniforms]; +} + +- (void)setGreenMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max { + [self setGreenMin:min gamma:mid max:max minOut:0.0 maxOut:1.0]; +} + +- (void)setBlueMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max minOut:(CGFloat)minOut maxOut:(CGFloat)maxOut { + minVector.three = min; + midVector.three = mid; + maxVector.three = max; + minOutputVector.three = minOut; + maxOutputVector.three = maxOut; + + [self updateUniforms]; +} + +- (void)setBlueMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max { + [self setBlueMin:min gamma:mid max:max minOut:0.0 maxOut:1.0]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLightenBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageLightenBlendFilter.h new file mode 100755 index 00000000..b0287c13 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLightenBlendFilter.h @@ -0,0 +1,8 @@ +#import "GPUImageTwoInputFilter.h" + +/// Blends two images by taking the maximum value of each color component between the images +@interface GPUImageLightenBlendFilter : GPUImageTwoInputFilter +{ +} + +@end 
diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLightenBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageLightenBlendFilter.m new file mode 100755 index 00000000..2bbd4b20 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLightenBlendFilter.m @@ -0,0 +1,52 @@ +#import "GPUImageLightenBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageLightenBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = max(textureColor, textureColor2); + } +); +#else +NSString *const kGPUImageLightenBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = max(textureColor, textureColor2); + } + ); +#endif + +@implementation GPUImageLightenBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageLightenBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLineGenerator.h b/LFLiveKit/Vendor/GPUImage/GPUImageLineGenerator.h new file mode 100644 index 00000000..4c467366 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLineGenerator.h @@ -0,0 +1,18 @@ +#import "GPUImageFilter.h" + +@interface GPUImageLineGenerator : GPUImageFilter +{ + GLint lineWidthUniform, lineColorUniform; + GLfloat *lineCoordinates; +} + +// The width of the displayed lines, in pixels. The default is 1. 
+@property(readwrite, nonatomic) CGFloat lineWidth; + +// The color of the lines is specified using individual red, green, and blue components (normalized to 1.0). The default is green: (0.0, 1.0, 0.0). +- (void)setLineColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; + +// Rendering +- (void)renderLinesFromArray:(GLfloat *)lineSlopeAndIntercepts count:(NSUInteger)numberOfLines frameTime:(CMTime)frameTime; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLineGenerator.m b/LFLiveKit/Vendor/GPUImage/GPUImageLineGenerator.m new file mode 100644 index 00000000..85d93bee --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLineGenerator.m @@ -0,0 +1,164 @@ +#import "GPUImageLineGenerator.h" + +NSString *const kGPUImageLineGeneratorVertexShaderString = SHADER_STRING +( + attribute vec4 position; + + void main() + { + gl_Position = position; + } +); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageLineGeneratorFragmentShaderString = SHADER_STRING +( + uniform lowp vec3 lineColor; + + void main() + { + gl_FragColor = vec4(lineColor, 1.0); + } +); +#else +NSString *const kGPUImageLineGeneratorFragmentShaderString = SHADER_STRING +( + uniform vec3 lineColor; + + void main() + { + gl_FragColor = vec4(lineColor, 1.0); + } +); +#endif + +@interface GPUImageLineGenerator() + +- (void)generateLineCoordinates; + +@end + +@implementation GPUImageLineGenerator + +@synthesize lineWidth = _lineWidth; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithVertexShaderFromString:kGPUImageLineGeneratorVertexShaderString fragmentShaderFromString:kGPUImageLineGeneratorFragmentShaderString])) + { + return nil; + } + + runSynchronouslyOnVideoProcessingQueue(^{ + lineWidthUniform = [filterProgram uniformIndex:@"lineWidth"]; + lineColorUniform = [filterProgram uniformIndex:@"lineColor"]; + + self.lineWidth = 1.0; + [self setLineColorRed:0.0 green:1.0 blue:0.0]; + }); 
+ + return self; +} + +- (void)dealloc +{ + if (lineCoordinates) + { + free(lineCoordinates); + } +} + +#pragma mark - +#pragma mark Rendering + +- (void)generateLineCoordinates; +{ + lineCoordinates = calloc(1024 * 4, sizeof(GLfloat)); +} + +- (void)renderLinesFromArray:(GLfloat *)lineSlopeAndIntercepts count:(NSUInteger)numberOfLines frameTime:(CMTime)frameTime; +{ + if (self.preventRendering) + { + return; + } + + if (lineCoordinates == NULL) + { + [self generateLineCoordinates]; + } + + // Iterate through and generate vertices from the slopes and intercepts + NSUInteger currentVertexIndex = 0; + NSUInteger currentLineIndex = 0; + NSUInteger maxLineIndex = numberOfLines *2; + while(currentLineIndex < maxLineIndex) + { + GLfloat slope = lineSlopeAndIntercepts[currentLineIndex++]; + GLfloat intercept = lineSlopeAndIntercepts[currentLineIndex++]; + + if (slope > 9000.0) // Vertical line + { + lineCoordinates[currentVertexIndex++] = intercept; + lineCoordinates[currentVertexIndex++] = -1.0; + lineCoordinates[currentVertexIndex++] = intercept; + lineCoordinates[currentVertexIndex++] = 1.0; + } + else + { + lineCoordinates[currentVertexIndex++] = -1.0; + lineCoordinates[currentVertexIndex++] = slope * -1.0 + intercept; + lineCoordinates[currentVertexIndex++] = 1.0; + lineCoordinates[currentVertexIndex++] = slope * 1.0 + intercept; + } + } + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:filterProgram]; + + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + + glClearColor(0.0, 0.0, 0.0, 0.0); + glClear(GL_COLOR_BUFFER_BIT); + + glBlendEquation(GL_FUNC_ADD); + glBlendFunc(GL_ONE, GL_ONE); + glEnable(GL_BLEND); + + glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, lineCoordinates); + glDrawArrays(GL_LINES, 0, ((unsigned int)numberOfLines * 2)); + + 
glDisable(GL_BLEND); + + [self informTargetsAboutNewFrameAtTime:frameTime]; + }); +} + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + // Prevent rendering of the frame by normal means +} + +#pragma mark - +#pragma mark Accessors + +- (void)setLineWidth:(CGFloat)newValue; +{ + _lineWidth = newValue; + [GPUImageContext setActiveShaderProgram:filterProgram]; + glLineWidth(newValue); +} + +- (void)setLineColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; +{ + GPUVector3 lineColor = {redComponent, greenComponent, blueComponent}; + + [self setVec3:lineColor forUniform:lineColorUniform program:filterProgram]; +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLinearBurnBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageLinearBurnBlendFilter.h new file mode 100644 index 00000000..7e5e415c --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLinearBurnBlendFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageLinearBurnBlendFilter : GPUImageTwoInputFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLinearBurnBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageLinearBurnBlendFilter.m new file mode 100644 index 00000000..47031967 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLinearBurnBlendFilter.m @@ -0,0 +1,51 @@ +#import "GPUImageLinearBurnBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageLinearBurnBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + mediump vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + mediump vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(clamp(textureColor.rgb + textureColor2.rgb - vec3(1.0), 
vec3(0.0), vec3(1.0)), textureColor.a); + } +); +#else +NSString *const kGPUImageLinearBurnBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(clamp(textureColor.rgb + textureColor2.rgb - vec3(1.0), vec3(0.0), vec3(1.0)), textureColor.a); + } +); +#endif + +@implementation GPUImageLinearBurnBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageLinearBurnBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLocalBinaryPatternFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageLocalBinaryPatternFilter.h new file mode 100644 index 00000000..431dbbd4 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLocalBinaryPatternFilter.h @@ -0,0 +1,5 @@ +#import "GPUImage3x3TextureSamplingFilter.h" + +@interface GPUImageLocalBinaryPatternFilter : GPUImage3x3TextureSamplingFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLocalBinaryPatternFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageLocalBinaryPatternFilter.m new file mode 100644 index 00000000..1ee1f8d9 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLocalBinaryPatternFilter.m @@ -0,0 +1,123 @@ +#import "GPUImageLocalBinaryPatternFilter.h" + +// This is based on "Accelerating image recognition on mobile devices using GPGPU" by Miguel Bordallo Lopez, Henri Nykanen, Jari Hannuksela, Olli Silven and Markku Vehvilainen +// http://www.ee.oulu.fi/~jhannuks/publications/SPIE2011a.pdf + +// Right pixel is the most significant bit, traveling clockwise to get to the upper right, which is the least significant +// If the external pixel is greater than or equal to the center, set to 1, 
otherwise 0 +// +// 2 1 0 +// 3 7 +// 4 5 6 + +// 01101101 +// 76543210 + +@implementation GPUImageLocalBinaryPatternFilter + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageLocalBinaryPatternFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + lowp float centerIntensity = texture2D(inputImageTexture, textureCoordinate).r; + lowp float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + lowp float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + lowp float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + lowp float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + lowp float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + lowp float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + lowp float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + lowp float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + + lowp float byteTally = 1.0 / 255.0 * step(centerIntensity, topRightIntensity); + byteTally += 2.0 / 255.0 * step(centerIntensity, topIntensity); + byteTally += 4.0 / 255.0 * step(centerIntensity, topLeftIntensity); + byteTally += 8.0 / 255.0 * step(centerIntensity, leftIntensity); + byteTally += 16.0 / 255.0 * step(centerIntensity, bottomLeftIntensity); + byteTally += 32.0 / 255.0 * step(centerIntensity, bottomIntensity); + byteTally += 64.0 / 255.0 * step(centerIntensity, 
bottomRightIntensity); + byteTally += 128.0 / 255.0 * step(centerIntensity, rightIntensity); + + // TODO: Replace the above with a dot product and two vec4s + // TODO: Apply step to a matrix, rather than individually + + gl_FragColor = vec4(byteTally, byteTally, byteTally, 1.0); + } +); +#else +NSString *const kGPUImageLocalBinaryPatternFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float centerIntensity = texture2D(inputImageTexture, textureCoordinate).r; + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + + float byteTally = 1.0 / 255.0 * step(centerIntensity, topRightIntensity); + byteTally += 2.0 / 255.0 * step(centerIntensity, topIntensity); + byteTally += 4.0 / 255.0 * step(centerIntensity, topLeftIntensity); + byteTally += 8.0 / 255.0 * step(centerIntensity, leftIntensity); + byteTally += 16.0 / 255.0 * step(centerIntensity, bottomLeftIntensity); + byteTally += 32.0 / 255.0 * step(centerIntensity, bottomIntensity); + byteTally += 
64.0 / 255.0 * step(centerIntensity, bottomRightIntensity); + byteTally += 128.0 / 255.0 * step(centerIntensity, rightIntensity); + + // TODO: Replace the above with a dot product and two vec4s + // TODO: Apply step to a matrix, rather than individually + + gl_FragColor = vec4(byteTally, byteTally, byteTally, 1.0); + } +); +#endif + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageLocalBinaryPatternFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLookupFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageLookupFilter.h new file mode 100644 index 00000000..23ebde2b --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLookupFilter.h @@ -0,0 +1,34 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageLookupFilter : GPUImageTwoInputFilter +{ + GLint intensityUniform; +} + +// How To Use: +// 1) Use your favourite photo editing application to apply a filter to lookup.png from GPUImage/framework/Resources. +// For this to work properly each pixel color must not depend on other pixels (e.g. blur will not work). +// If you need more complex filter you can create as many lookup tables as required. +// E.g. color_balance_lookup_1.png -> GPUImageGaussianBlurFilter -> color_balance_lookup_2.png +// 2) Use you new lookup.png file as a second input for GPUImageLookupFilter. + +// See GPUImageAmatorkaFilter, GPUImageMissEtikateFilter, and GPUImageSoftEleganceFilter for example. 
+ +// Additional Info: +// Lookup texture is organised as 8x8 quads of 64x64 pixels representing all possible RGB colors: +//for (int by = 0; by < 8; by++) { +// for (int bx = 0; bx < 8; bx++) { +// for (int g = 0; g < 64; g++) { +// for (int r = 0; r < 64; r++) { +// image.setPixel(r + bx * 64, g + by * 64, qRgb((int)(r * 255.0 / 63.0 + 0.5), +// (int)(g * 255.0 / 63.0 + 0.5), +// (int)((bx + by * 8.0) * 255.0 / 63.0 + 0.5))); +// } +// } +// } +//} + +// Opacity/intensity of lookup filter ranges from 0.0 to 1.0, with 1.0 as the normal setting +@property(readwrite, nonatomic) CGFloat intensity; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLookupFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageLookupFilter.m new file mode 100644 index 00000000..1c3505eb --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLookupFilter.m @@ -0,0 +1,115 @@ +#import "GPUImageLookupFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageLookupFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; // TODO: This is not used + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; // lookup texture + + uniform lowp float intensity; + + void main() + { + highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + highp float blueColor = textureColor.b * 63.0; + + highp vec2 quad1; + quad1.y = floor(floor(blueColor) / 8.0); + quad1.x = floor(blueColor) - (quad1.y * 8.0); + + highp vec2 quad2; + quad2.y = floor(ceil(blueColor) / 8.0); + quad2.x = ceil(blueColor) - (quad2.y * 8.0); + + highp vec2 texPos1; + texPos1.x = (quad1.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.r); + texPos1.y = (quad1.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.g); + + highp vec2 texPos2; + texPos2.x = (quad2.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.r); + texPos2.y = (quad2.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) 
* textureColor.g); + + lowp vec4 newColor1 = texture2D(inputImageTexture2, texPos1); + lowp vec4 newColor2 = texture2D(inputImageTexture2, texPos2); + + lowp vec4 newColor = mix(newColor1, newColor2, fract(blueColor)); + gl_FragColor = mix(textureColor, vec4(newColor.rgb, textureColor.w), intensity); + } +); +#else +NSString *const kGPUImageLookupFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; // TODO: This is not used + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; // lookup texture + + uniform float intensity; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + float blueColor = textureColor.b * 63.0; + + vec2 quad1; + quad1.y = floor(floor(blueColor) / 8.0); + quad1.x = floor(blueColor) - (quad1.y * 8.0); + + vec2 quad2; + quad2.y = floor(ceil(blueColor) / 8.0); + quad2.x = ceil(blueColor) - (quad2.y * 8.0); + + vec2 texPos1; + texPos1.x = (quad1.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.r); + texPos1.y = (quad1.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.g); + + vec2 texPos2; + texPos2.x = (quad2.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.r); + texPos2.y = (quad2.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.g); + + vec4 newColor1 = texture2D(inputImageTexture2, texPos1); + vec4 newColor2 = texture2D(inputImageTexture2, texPos2); + + vec4 newColor = mix(newColor1, newColor2, fract(blueColor)); + gl_FragColor = mix(textureColor, vec4(newColor.rgb, textureColor.w), intensity); + } +); +#endif + +@implementation GPUImageLookupFilter + +@synthesize intensity = _intensity; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageLookupFragmentShaderString])) + { + return nil; + } + + intensityUniform = [filterProgram uniformIndex:@"intensity"]; + self.intensity = 1.0f; + + return 
self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setIntensity:(CGFloat)intensity +{ + _intensity = intensity; + + [self setFloat:_intensity forUniform:intensityUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLowPassFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageLowPassFilter.h new file mode 100644 index 00000000..be5c397e --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLowPassFilter.h @@ -0,0 +1,14 @@ +#import "GPUImageFilterGroup.h" +#import "GPUImageBuffer.h" +#import "GPUImageDissolveBlendFilter.h" + +@interface GPUImageLowPassFilter : GPUImageFilterGroup +{ + GPUImageBuffer *bufferFilter; + GPUImageDissolveBlendFilter *dissolveBlendFilter; +} + +// This controls the degree by which the previous accumulated frames are blended with the current one. This ranges from 0.0 to 1.0, with a default of 0.5. +@property(readwrite, nonatomic) CGFloat filterStrength; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLowPassFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageLowPassFilter.m new file mode 100644 index 00000000..39ca08f6 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLowPassFilter.m @@ -0,0 +1,61 @@ +#import "GPUImageLowPassFilter.h" + +@implementation GPUImageLowPassFilter + +@synthesize filterStrength; + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + // Take in the frame and blend it with the previous one + dissolveBlendFilter = [[GPUImageDissolveBlendFilter alloc] init]; + [self addFilter:dissolveBlendFilter]; + + // Buffer the result to be fed back into the blend + bufferFilter = [[GPUImageBuffer alloc] init]; + [self addFilter:bufferFilter]; + + // Texture location 0 needs to be the original image for the dissolve blend + [bufferFilter addTarget:dissolveBlendFilter atTextureLocation:1]; + [dissolveBlendFilter addTarget:bufferFilter]; + + [dissolveBlendFilter disableSecondFrameCheck]; + + // To prevent double updating of this filter, disable updates from the sharp image 
side + // self.inputFilterToIgnoreForUpdates = unsharpMaskFilter; + + self.initialFilters = [NSArray arrayWithObject:dissolveBlendFilter]; + self.terminalFilter = dissolveBlendFilter; + + self.filterStrength = 0.5; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setFilterStrength:(CGFloat)newValue; +{ + dissolveBlendFilter.mix = newValue; +} + +- (CGFloat)filterStrength; +{ + return dissolveBlendFilter.mix; +} + +- (void)addTarget:(id)newTarget atTextureLocation:(NSInteger)textureLocation; +{ + [self.terminalFilter addTarget:newTarget atTextureLocation:textureLocation]; + //if use GPUImagePipline,will cause self.termainlFilter removeAllTargets,so need add bufferFilter back + if (self.terminalFilter == dissolveBlendFilter && ![self.terminalFilter.targets containsObject:bufferFilter]) { + [self.terminalFilter addTarget:bufferFilter atTextureLocation:1]; + } +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceRangeFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceRangeFilter.h new file mode 100644 index 00000000..5a310370 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceRangeFilter.h @@ -0,0 +1,12 @@ +#import "GPUImageFilter.h" + +@interface GPUImageLuminanceRangeFilter : GPUImageFilter +{ + GLint rangeReductionUniform; +} + +/** The degree to reduce the luminance range, from 0.0 to 1.0. Default is 0.6. 
+ */ +@property(readwrite, nonatomic) CGFloat rangeReductionFactor; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceRangeFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceRangeFilter.m new file mode 100644 index 00000000..5122c958 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceRangeFilter.m @@ -0,0 +1,76 @@ +#import "GPUImageLuminanceRangeFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageLuminanceRangeFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform lowp float rangeReduction; + + // Values from "Graphics Shaders: Theory and Practice" by Bailey and Cunningham + const mediump vec3 luminanceWeighting = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + mediump float luminance = dot(textureColor.rgb, luminanceWeighting); + mediump float luminanceRatio = ((0.5 - luminance) * rangeReduction); + + gl_FragColor = vec4((textureColor.rgb) + (luminanceRatio), textureColor.w); + } +); +#else +NSString *const kGPUImageLuminanceRangeFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float rangeReduction; + + // Values from "Graphics Shaders: Theory and Practice" by Bailey and Cunningham + const vec3 luminanceWeighting = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float luminance = dot(textureColor.rgb, luminanceWeighting); + float luminanceRatio = ((0.5 - luminance) * rangeReduction); + + gl_FragColor = vec4((textureColor.rgb) + (luminanceRatio), textureColor.w); + } +); +#endif + +@implementation GPUImageLuminanceRangeFilter + +@synthesize rangeReductionFactor = _rangeReductionFactor; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super 
initWithFragmentShaderFromString:kGPUImageLuminanceRangeFragmentShaderString])) + { + return nil; + } + + rangeReductionUniform = [filterProgram uniformIndex:@"rangeReduction"]; + self.rangeReductionFactor = 0.6; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setRangeReductionFactor:(CGFloat)newValue; +{ + _rangeReductionFactor = newValue; + + [self setFloat:_rangeReductionFactor forUniform:rangeReductionUniform program:filterProgram]; +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceThresholdFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceThresholdFilter.h new file mode 100755 index 00000000..0abb9a1e --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceThresholdFilter.h @@ -0,0 +1,14 @@ +#import "GPUImageFilter.h" + +/** Pixels with a luminance above the threshold will appear white, and those below will be black + */ +@interface GPUImageLuminanceThresholdFilter : GPUImageFilter +{ + GLint thresholdUniform; +} + +/** Anything above this luminance will be white, and anything below black. 
Ranges from 0.0 to 1.0, with 0.5 as the default + */ +@property(readwrite, nonatomic) CGFloat threshold; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceThresholdFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceThresholdFilter.m new file mode 100755 index 00000000..368b8fbe --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceThresholdFilter.m @@ -0,0 +1,74 @@ +#import "GPUImageLuminanceThresholdFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageLuminanceThresholdFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform highp float threshold; + + const highp vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + highp float luminance = dot(textureColor.rgb, W); + highp float thresholdResult = step(threshold, luminance); + + gl_FragColor = vec4(vec3(thresholdResult), textureColor.w); + } +); +#else +NSString *const kGPUImageLuminanceThresholdFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float threshold; + + const vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float luminance = dot(textureColor.rgb, W); + float thresholdResult = step(threshold, luminance); + + gl_FragColor = vec4(vec3(thresholdResult), textureColor.w); + } +); +#endif + +@implementation GPUImageLuminanceThresholdFilter + +@synthesize threshold = _threshold; + +#pragma mark - +#pragma mark Initialization + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageLuminanceThresholdFragmentShaderString])) + { + return nil; + } + + thresholdUniform = [filterProgram uniformIndex:@"threshold"]; + self.threshold = 0.5; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- 
(void)setThreshold:(CGFloat)newValue; +{ + _threshold = newValue; + + [self setFloat:_threshold forUniform:thresholdUniform program:filterProgram]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLuminosity.h b/LFLiveKit/Vendor/GPUImage/GPUImageLuminosity.h new file mode 100644 index 00000000..b2d2458f --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLuminosity.h @@ -0,0 +1,17 @@ +#import "GPUImageAverageColor.h" + +@interface GPUImageLuminosity : GPUImageAverageColor +{ + GLProgram *secondFilterProgram; + GLint secondFilterPositionAttribute, secondFilterTextureCoordinateAttribute; + GLint secondFilterInputTextureUniform, secondFilterInputTextureUniform2; + GLint secondFilterTexelWidthUniform, secondFilterTexelHeightUniform; +} + +// This block is called on the completion of color averaging for a frame +@property(nonatomic, copy) void(^luminosityProcessingFinishedBlock)(CGFloat luminosity, CMTime frameTime); + +- (void)extractLuminosityAtFrameTime:(CMTime)frameTime; +- (void)initializeSecondaryAttributes; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLuminosity.m b/LFLiveKit/Vendor/GPUImage/GPUImageLuminosity.m new file mode 100644 index 00000000..37f374ab --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLuminosity.m @@ -0,0 +1,329 @@ +#import "GPUImageLuminosity.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageInitialLuminosityFragmentShaderString = SHADER_STRING +( + precision highp float; + + uniform sampler2D inputImageTexture; + + varying highp vec2 outputTextureCoordinate; + + varying highp vec2 upperLeftInputTextureCoordinate; + varying highp vec2 upperRightInputTextureCoordinate; + varying highp vec2 lowerLeftInputTextureCoordinate; + varying highp vec2 lowerRightInputTextureCoordinate; + + const highp vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + highp float upperLeftLuminance = dot(texture2D(inputImageTexture, upperLeftInputTextureCoordinate).rgb, W); + highp float 
upperRightLuminance = dot(texture2D(inputImageTexture, upperRightInputTextureCoordinate).rgb, W); + highp float lowerLeftLuminance = dot(texture2D(inputImageTexture, lowerLeftInputTextureCoordinate).rgb, W); + highp float lowerRightLuminance = dot(texture2D(inputImageTexture, lowerRightInputTextureCoordinate).rgb, W); + + highp float luminosity = 0.25 * (upperLeftLuminance + upperRightLuminance + lowerLeftLuminance + lowerRightLuminance); + gl_FragColor = vec4(luminosity, luminosity, luminosity, 1.0); + } +); + +NSString *const kGPUImageLuminosityFragmentShaderString = SHADER_STRING +( + precision highp float; + + uniform sampler2D inputImageTexture; + + varying highp vec2 outputTextureCoordinate; + + varying highp vec2 upperLeftInputTextureCoordinate; + varying highp vec2 upperRightInputTextureCoordinate; + varying highp vec2 lowerLeftInputTextureCoordinate; + varying highp vec2 lowerRightInputTextureCoordinate; + + void main() + { + highp float upperLeftLuminance = texture2D(inputImageTexture, upperLeftInputTextureCoordinate).r; + highp float upperRightLuminance = texture2D(inputImageTexture, upperRightInputTextureCoordinate).r; + highp float lowerLeftLuminance = texture2D(inputImageTexture, lowerLeftInputTextureCoordinate).r; + highp float lowerRightLuminance = texture2D(inputImageTexture, lowerRightInputTextureCoordinate).r; + + highp float luminosity = 0.25 * (upperLeftLuminance + upperRightLuminance + lowerLeftLuminance + lowerRightLuminance); + gl_FragColor = vec4(luminosity, luminosity, luminosity, 1.0); + } +); +#else +NSString *const kGPUImageInitialLuminosityFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + varying vec2 outputTextureCoordinate; + + varying vec2 upperLeftInputTextureCoordinate; + varying vec2 upperRightInputTextureCoordinate; + varying vec2 lowerLeftInputTextureCoordinate; + varying vec2 lowerRightInputTextureCoordinate; + + const vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + float 
upperLeftLuminance = dot(texture2D(inputImageTexture, upperLeftInputTextureCoordinate).rgb, W); + float upperRightLuminance = dot(texture2D(inputImageTexture, upperRightInputTextureCoordinate).rgb, W); + float lowerLeftLuminance = dot(texture2D(inputImageTexture, lowerLeftInputTextureCoordinate).rgb, W); + float lowerRightLuminance = dot(texture2D(inputImageTexture, lowerRightInputTextureCoordinate).rgb, W); + + float luminosity = 0.25 * (upperLeftLuminance + upperRightLuminance + lowerLeftLuminance + lowerRightLuminance); + gl_FragColor = vec4(luminosity, luminosity, luminosity, 1.0); + } +); + +NSString *const kGPUImageLuminosityFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + varying vec2 outputTextureCoordinate; + + varying vec2 upperLeftInputTextureCoordinate; + varying vec2 upperRightInputTextureCoordinate; + varying vec2 lowerLeftInputTextureCoordinate; + varying vec2 lowerRightInputTextureCoordinate; + + void main() + { + float upperLeftLuminance = texture2D(inputImageTexture, upperLeftInputTextureCoordinate).r; + float upperRightLuminance = texture2D(inputImageTexture, upperRightInputTextureCoordinate).r; + float lowerLeftLuminance = texture2D(inputImageTexture, lowerLeftInputTextureCoordinate).r; + float lowerRightLuminance = texture2D(inputImageTexture, lowerRightInputTextureCoordinate).r; + + float luminosity = 0.25 * (upperLeftLuminance + upperRightLuminance + lowerLeftLuminance + lowerRightLuminance); + gl_FragColor = vec4(luminosity, luminosity, luminosity, 1.0); + } +); +#endif + +@implementation GPUImageLuminosity + +@synthesize luminosityProcessingFinishedBlock = _luminosityProcessingFinishedBlock; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithVertexShaderFromString:kGPUImageColorAveragingVertexShaderString fragmentShaderFromString:kGPUImageInitialLuminosityFragmentShaderString])) + { + return nil; + } + + texelWidthUniform = [filterProgram 
uniformIndex:@"texelWidth"]; + texelHeightUniform = [filterProgram uniformIndex:@"texelHeight"]; + + __unsafe_unretained GPUImageLuminosity *weakSelf = self; + [self setFrameProcessingCompletionBlock:^(GPUImageOutput *filter, CMTime frameTime) { + [weakSelf extractLuminosityAtFrameTime:frameTime]; + }]; + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + + secondFilterProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:kGPUImageColorAveragingVertexShaderString fragmentShaderString:kGPUImageLuminosityFragmentShaderString]; + + if (!secondFilterProgram.initialized) + { + [self initializeSecondaryAttributes]; + + if (![secondFilterProgram link]) + { + NSString *progLog = [secondFilterProgram programLog]; + NSLog(@"Program link log: %@", progLog); + NSString *fragLog = [secondFilterProgram fragmentShaderLog]; + NSLog(@"Fragment shader compile log: %@", fragLog); + NSString *vertLog = [secondFilterProgram vertexShaderLog]; + NSLog(@"Vertex shader compile log: %@", vertLog); + filterProgram = nil; + NSAssert(NO, @"Filter shader link failed"); + } + } + + secondFilterPositionAttribute = [secondFilterProgram attributeIndex:@"position"]; + secondFilterTextureCoordinateAttribute = [secondFilterProgram attributeIndex:@"inputTextureCoordinate"]; + secondFilterInputTextureUniform = [secondFilterProgram uniformIndex:@"inputImageTexture"]; // This does assume a name of "inputImageTexture" for the fragment shader + secondFilterInputTextureUniform2 = [secondFilterProgram uniformIndex:@"inputImageTexture2"]; // This does assume a name of "inputImageTexture2" for second input texture in the fragment shader + + secondFilterTexelWidthUniform = [secondFilterProgram uniformIndex:@"texelWidth"]; + secondFilterTexelHeightUniform = [secondFilterProgram uniformIndex:@"texelHeight"]; + + [GPUImageContext setActiveShaderProgram:secondFilterProgram]; + + glEnableVertexAttribArray(secondFilterPositionAttribute); + 
glEnableVertexAttribArray(secondFilterTextureCoordinateAttribute); + }); + + return self; +} + +- (void)initializeSecondaryAttributes; +{ + [secondFilterProgram addAttribute:@"position"]; + [secondFilterProgram addAttribute:@"inputTextureCoordinate"]; +} + +/* +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + if (self.preventRendering) + { + [firstInputFramebuffer unlock]; + return; + } + + // Do an initial render pass that both convert to luminance and reduces + [GPUImageContext setActiveShaderProgram:filterProgram]; + + glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + glVertexAttribPointer(filterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); + + GLuint currentFramebuffer = [[stageFramebuffers objectAtIndex:0] intValue]; + glBindFramebuffer(GL_FRAMEBUFFER, currentFramebuffer); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + CGSize currentStageSize = [[stageSizes objectAtIndex:0] CGSizeValue]; +#else + NSSize currentStageSize = [[stageSizes objectAtIndex:0] sizeValue]; +#endif + glViewport(0, 0, (int)currentStageSize.width, (int)currentStageSize.height); + + GLuint currentTexture = [firstInputFramebuffer texture]; + + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT); + + glActiveTexture(GL_TEXTURE2); + glBindTexture(GL_TEXTURE_2D, currentTexture); + + glUniform1i(filterInputTextureUniform, 2); + + glUniform1f(texelWidthUniform, 0.5 / currentStageSize.width); + glUniform1f(texelHeightUniform, 0.5 / currentStageSize.height); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + + currentTexture = [[stageTextures objectAtIndex:0] intValue]; + + // Just perform reductions from this point on + [GPUImageContext setActiveShaderProgram:secondFilterProgram]; + glVertexAttribPointer(secondFilterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + glVertexAttribPointer(secondFilterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, 
textureCoordinates); + + NSUInteger numberOfStageFramebuffers = [stageFramebuffers count]; + for (NSUInteger currentStage = 1; currentStage < numberOfStageFramebuffers; currentStage++) + { + currentFramebuffer = [[stageFramebuffers objectAtIndex:currentStage] intValue]; + glBindFramebuffer(GL_FRAMEBUFFER, currentFramebuffer); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + currentStageSize = [[stageSizes objectAtIndex:currentStage] CGSizeValue]; +#else + currentStageSize = [[stageSizes objectAtIndex:currentStage] sizeValue]; +#endif + glViewport(0, 0, (int)currentStageSize.width, (int)currentStageSize.height); + + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT); + + glActiveTexture(GL_TEXTURE2); + glBindTexture(GL_TEXTURE_2D, currentTexture); + + glUniform1i(secondFilterInputTextureUniform, 2); + + glUniform1f(secondFilterTexelWidthUniform, 0.5 / currentStageSize.width); + glUniform1f(secondFilterTexelHeightUniform, 0.5 / currentStageSize.height); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + + currentTexture = [[stageTextures objectAtIndex:currentStage] intValue]; + +// NSUInteger totalBytesForImage = (int)currentStageSize.width * (int)currentStageSize.height * 4; +// GLubyte *rawImagePixels2 = (GLubyte *)malloc(totalBytesForImage); +// glReadPixels(0, 0, (int)currentStageSize.width, (int)currentStageSize.height, GL_RGBA, GL_UNSIGNED_BYTE, rawImagePixels2); +// CGDataProviderRef dataProvider = CGDataProviderCreateWithData(NULL, rawImagePixels2, totalBytesForImage, NULL); +// CGColorSpaceRef defaultRGBColorSpace = CGColorSpaceCreateDeviceRGB(); +// +// CGFloat currentRedTotal = 0.0f, currentGreenTotal = 0.0f, currentBlueTotal = 0.0f, currentAlphaTotal = 0.0f; +// NSUInteger totalNumberOfPixels = totalBytesForImage / 4; +// +// for (NSUInteger currentPixel = 0; currentPixel < totalNumberOfPixels; currentPixel++) +// { +// currentRedTotal += (CGFloat)rawImagePixels2[(currentPixel * 4)] / 255.0f; +// currentGreenTotal += 
(CGFloat)rawImagePixels2[(currentPixel * 4) + 1] / 255.0f; +// currentBlueTotal += (CGFloat)rawImagePixels2[(currentPixel * 4 + 2)] / 255.0f; +// currentAlphaTotal += (CGFloat)rawImagePixels2[(currentPixel * 4) + 3] / 255.0f; +// } +// +// NSLog(@"Stage %d average image red: %f, green: %f, blue: %f, alpha: %f", currentStage, currentRedTotal / (CGFloat)totalNumberOfPixels, currentGreenTotal / (CGFloat)totalNumberOfPixels, currentBlueTotal / (CGFloat)totalNumberOfPixels, currentAlphaTotal / (CGFloat)totalNumberOfPixels); +// +// +// CGImageRef cgImageFromBytes = CGImageCreate((int)currentStageSize.width, (int)currentStageSize.height, 8, 32, 4 * (int)currentStageSize.width, defaultRGBColorSpace, kCGBitmapByteOrderDefault | kCGImageAlphaLast, dataProvider, NULL, NO, kCGRenderingIntentDefault); +// +// UIImage *imageToSave = [UIImage imageWithCGImage:cgImageFromBytes]; +// +// NSData *dataForPNGFile = UIImagePNGRepresentation(imageToSave); +// +// NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES); +// NSString *documentsDirectory = [paths objectAtIndex:0]; +// +// NSString *imageName = [NSString stringWithFormat:@"AverageLevel%d.png", currentStage]; +// NSError *error = nil; +// if (![dataForPNGFile writeToFile:[documentsDirectory stringByAppendingPathComponent:imageName] options:NSAtomicWrite error:&error]) +// { +// return; +// } + } + + [firstInputFramebuffer unlock]; +} + */ + +#pragma mark - +#pragma mark Callbacks + +- (void)extractLuminosityAtFrameTime:(CMTime)frameTime; +{ + runSynchronouslyOnVideoProcessingQueue(^{ + + // we need a normal color texture for this filter + NSAssert(self.outputTextureOptions.internalFormat == GL_RGBA, @"The output texture format for this filter must be GL_RGBA."); + NSAssert(self.outputTextureOptions.type == GL_UNSIGNED_BYTE, @"The type of the output texture of this filter must be GL_UNSIGNED_BYTE."); + + NSUInteger totalNumberOfPixels = round(finalStageSize.width * 
finalStageSize.height); + + if (rawImagePixels == NULL) + { + rawImagePixels = (GLubyte *)malloc(totalNumberOfPixels * 4); + } + + [GPUImageContext useImageProcessingContext]; + [outputFramebuffer activateFramebuffer]; + + glReadPixels(0, 0, (int)finalStageSize.width, (int)finalStageSize.height, GL_RGBA, GL_UNSIGNED_BYTE, rawImagePixels); + + NSUInteger luminanceTotal = 0; + NSUInteger byteIndex = 0; + for (NSUInteger currentPixel = 0; currentPixel < totalNumberOfPixels; currentPixel++) + { + luminanceTotal += rawImagePixels[byteIndex]; + byteIndex += 4; + } + + CGFloat normalizedLuminosityTotal = (CGFloat)luminanceTotal / (CGFloat)totalNumberOfPixels / 255.0; + + if (_luminosityProcessingFinishedBlock != NULL) + { + _luminosityProcessingFinishedBlock(normalizedLuminosityTotal, frameTime); + } + }); +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLuminosityBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageLuminosityBlendFilter.h new file mode 100644 index 00000000..03b5e4c9 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLuminosityBlendFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageLuminosityBlendFilter : GPUImageTwoInputFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageLuminosityBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageLuminosityBlendFilter.m new file mode 100644 index 00000000..7e399746 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageLuminosityBlendFilter.m @@ -0,0 +1,113 @@ +#import "GPUImageLuminosityBlendFilter.h" + +/** + * Luminosity blend mode based upon pseudo code from the PDF specification. 
+ */ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageLuminosityBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + highp float lum(lowp vec3 c) { + return dot(c, vec3(0.3, 0.59, 0.11)); + } + + lowp vec3 clipcolor(lowp vec3 c) { + highp float l = lum(c); + lowp float n = min(min(c.r, c.g), c.b); + lowp float x = max(max(c.r, c.g), c.b); + + if (n < 0.0) { + c.r = l + ((c.r - l) * l) / (l - n); + c.g = l + ((c.g - l) * l) / (l - n); + c.b = l + ((c.b - l) * l) / (l - n); + } + if (x > 1.0) { + c.r = l + ((c.r - l) * (1.0 - l)) / (x - l); + c.g = l + ((c.g - l) * (1.0 - l)) / (x - l); + c.b = l + ((c.b - l) * (1.0 - l)) / (x - l); + } + + return c; + } + + lowp vec3 setlum(lowp vec3 c, highp float l) { + highp float d = l - lum(c); + c = c + vec3(d); + return clipcolor(c); + } + + void main() + { + highp vec4 baseColor = texture2D(inputImageTexture, textureCoordinate); + highp vec4 overlayColor = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(baseColor.rgb * (1.0 - overlayColor.a) + setlum(baseColor.rgb, lum(overlayColor.rgb)) * overlayColor.a, baseColor.a); + } +); +#else +NSString *const kGPUImageLuminosityBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + float lum(vec3 c) { + return dot(c, vec3(0.3, 0.59, 0.11)); + } + + vec3 clipcolor(vec3 c) { + float l = lum(c); + float n = min(min(c.r, c.g), c.b); + float x = max(max(c.r, c.g), c.b); + + if (n < 0.0) { + c.r = l + ((c.r - l) * l) / (l - n); + c.g = l + ((c.g - l) * l) / (l - n); + c.b = l + ((c.b - l) * l) / (l - n); + } + if (x > 1.0) { + c.r = l + ((c.r - l) * (1.0 - l)) / (x - l); + c.g = l + ((c.g - l) * (1.0 - l)) / (x - l); + c.b = l + ((c.b - l) * 
(1.0 - l)) / (x - l); + } + + return c; + } + + vec3 setlum(vec3 c, float l) { + float d = l - lum(c); + c = c + vec3(d); + return clipcolor(c); + } + + void main() + { + vec4 baseColor = texture2D(inputImageTexture, textureCoordinate); + vec4 overlayColor = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(baseColor.rgb * (1.0 - overlayColor.a) + setlum(baseColor.rgb, lum(overlayColor.rgb)) * overlayColor.a, baseColor.a); + } +); +#endif + + +@implementation GPUImageLuminosityBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageLuminosityBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMaskFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageMaskFilter.h new file mode 100755 index 00000000..94cf0648 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMaskFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageMaskFilter : GPUImageTwoInputFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMaskFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageMaskFilter.m new file mode 100755 index 00000000..24503b3b --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMaskFilter.m @@ -0,0 +1,76 @@ +#import "GPUImageMaskFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageMaskShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + + //Averages mask's the RGB values, and scales that value by the mask's alpha + // + //The dot product should take fewer cycles than doing an average normally + // + //Typical/ideal case, R,G, and B will be the same, and Alpha will be 1.0 + lowp 
float newAlpha = dot(textureColor2.rgb, vec3(.33333334, .33333334, .33333334)) * textureColor2.a; + + gl_FragColor = vec4(textureColor.xyz, newAlpha); +// gl_FragColor = vec4(textureColor2); + } +); +#else +NSString *const kGPUImageMaskShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + + //Averages mask's the RGB values, and scales that value by the mask's alpha + // + //The dot product should take fewer cycles than doing an average normally + // + //Typical/ideal case, R,G, and B will be the same, and Alpha will be 1.0 + float newAlpha = dot(textureColor2.rgb, vec3(.33333334, .33333334, .33333334)) * textureColor2.a; + + gl_FragColor = vec4(textureColor.xyz, newAlpha); + // gl_FragColor = vec4(textureColor2); + } +); +#endif + +@implementation GPUImageMaskFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageMaskShaderString])) + { + return nil; + } + + return self; +} + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + [super renderToTextureWithVertices:vertices textureCoordinates:textureCoordinates]; + glDisable(GL_BLEND); +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMedianFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageMedianFilter.h new file mode 100644 index 00000000..80225789 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMedianFilter.h @@ -0,0 +1,5 @@ +#import "GPUImage3x3TextureSamplingFilter.h" + +@interface GPUImageMedianFilter : GPUImage3x3TextureSamplingFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMedianFilter.m 
b/LFLiveKit/Vendor/GPUImage/GPUImageMedianFilter.m new file mode 100644 index 00000000..78c11805 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMedianFilter.m @@ -0,0 +1,178 @@ +#import "GPUImageMedianFilter.h" + +/* + 3x3 median filter, adapted from "A Fast, Small-Radius GPU Median Filter" by Morgan McGuire in ShaderX6 + http://graphics.cs.williams.edu/papers/MedianShaderX6/ + + Morgan McGuire and Kyle Whitson + Williams College + + Register allocation tips by Victor Huang Xiaohuang + University of Illinois at Urbana-Champaign + + http://graphics.cs.williams.edu + + + Copyright (c) Morgan McGuire and Williams College, 2006 + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageMedianFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + +#define s2(a, b) temp = a; a = min(a, b); b = max(temp, b); +#define mn3(a, b, c) s2(a, b); s2(a, c); +#define mx3(a, b, c) s2(b, c); s2(a, c); + +#define mnmx3(a, b, c) mx3(a, b, c); s2(a, b); // 3 exchanges +#define mnmx4(a, b, c, d) s2(a, b); s2(c, d); s2(a, c); s2(b, d); // 4 exchanges +#define mnmx5(a, b, c, d, e) s2(a, b); s2(c, d); mn3(a, c, e); mx3(b, d, e); // 6 exchanges +#define mnmx6(a, b, c, d, e, f) s2(a, d); s2(b, e); s2(c, f); mn3(a, b, c); mx3(d, e, f); // 7 exchanges + + void main() + { + vec3 v[6]; + + v[0] = texture2D(inputImageTexture, bottomLeftTextureCoordinate).rgb; + v[1] = texture2D(inputImageTexture, topRightTextureCoordinate).rgb; + v[2] = texture2D(inputImageTexture, topLeftTextureCoordinate).rgb; + v[3] = texture2D(inputImageTexture, bottomRightTextureCoordinate).rgb; + v[4] = texture2D(inputImageTexture, leftTextureCoordinate).rgb; + v[5] = texture2D(inputImageTexture, rightTextureCoordinate).rgb; +// v[6] = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; +// v[7] = texture2D(inputImageTexture, topTextureCoordinate).rgb; + vec3 temp; + + mnmx6(v[0], v[1], v[2], v[3], v[4], v[5]); + + v[5] = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; + + mnmx5(v[1], v[2], v[3], v[4], v[5]); + + v[5] = texture2D(inputImageTexture, topTextureCoordinate).rgb; + + mnmx4(v[2], v[3], v[4], v[5]); + + v[5] = texture2D(inputImageTexture, textureCoordinate).rgb; + + mnmx3(v[3], 
v[4], v[5]); + + gl_FragColor = vec4(v[4], 1.0); + } +); +#else +NSString *const kGPUImageMedianFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + +#define s2(a, b) temp = a; a = min(a, b); b = max(temp, b); +#define mn3(a, b, c) s2(a, b); s2(a, c); +#define mx3(a, b, c) s2(b, c); s2(a, c); + +#define mnmx3(a, b, c) mx3(a, b, c); s2(a, b); // 3 exchanges +#define mnmx4(a, b, c, d) s2(a, b); s2(c, d); s2(a, c); s2(b, d); // 4 exchanges +#define mnmx5(a, b, c, d, e) s2(a, b); s2(c, d); mn3(a, c, e); mx3(b, d, e); // 6 exchanges +#define mnmx6(a, b, c, d, e, f) s2(a, d); s2(b, e); s2(c, f); mn3(a, b, c); mx3(d, e, f); // 7 exchanges + + void main() + { + vec3 v[6]; + + v[0] = texture2D(inputImageTexture, bottomLeftTextureCoordinate).rgb; + v[1] = texture2D(inputImageTexture, topRightTextureCoordinate).rgb; + v[2] = texture2D(inputImageTexture, topLeftTextureCoordinate).rgb; + v[3] = texture2D(inputImageTexture, bottomRightTextureCoordinate).rgb; + v[4] = texture2D(inputImageTexture, leftTextureCoordinate).rgb; + v[5] = texture2D(inputImageTexture, rightTextureCoordinate).rgb; + // v[6] = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; + // v[7] = texture2D(inputImageTexture, topTextureCoordinate).rgb; + vec3 temp; + + mnmx6(v[0], v[1], v[2], v[3], v[4], v[5]); + + v[5] = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; + + mnmx5(v[1], v[2], v[3], v[4], v[5]); + + v[5] = texture2D(inputImageTexture, topTextureCoordinate).rgb; + + mnmx4(v[2], v[3], v[4], v[5]); + + v[5] = texture2D(inputImageTexture, textureCoordinate).rgb; + + mnmx3(v[3], v[4], v[5]); + + 
gl_FragColor = vec4(v[4], 1.0); + } +); +#endif + +@implementation GPUImageMedianFilter + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageMedianFragmentShaderString])) + { + return nil; + } + + hasOverriddenImageSizeFactor = NO; + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMissEtikateFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageMissEtikateFilter.h new file mode 100755 index 00000000..de170647 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMissEtikateFilter.h @@ -0,0 +1,17 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImagePicture; + +/** A photo filter based on Photoshop action by Miss Etikate: + http://miss-etikate.deviantart.com/art/Photoshop-Action-15-120151961 + */ + +// Note: If you want to use this effect you have to add lookup_miss_etikate.png +// from Resources folder to your application bundle. + +@interface GPUImageMissEtikateFilter : GPUImageFilterGroup +{ + GPUImagePicture *lookupImageSource; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMissEtikateFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageMissEtikateFilter.m new file mode 100755 index 00000000..1810b707 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMissEtikateFilter.m @@ -0,0 +1,38 @@ +#import "GPUImageMissEtikateFilter.h" +#import "GPUImagePicture.h" +#import "GPUImageLookupFilter.h" + +@implementation GPUImageMissEtikateFilter + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + UIImage *image = [UIImage imageNamed:@"lookup_miss_etikate.png"]; +#else + NSImage *image = [NSImage imageNamed:@"lookup_miss_etikate.png"]; +#endif + + NSAssert(image, @"To use GPUImageMissEtikateFilter you need to add lookup_miss_etikate.png from GPUImage/framework/Resources to your application bundle."); + + lookupImageSource = [[GPUImagePicture alloc] initWithImage:image]; + GPUImageLookupFilter 
*lookupFilter = [[GPUImageLookupFilter alloc] init]; + [self addFilter:lookupFilter]; + + [lookupImageSource addTarget:lookupFilter atTextureLocation:1]; + [lookupImageSource processImage]; + + self.initialFilters = [NSArray arrayWithObjects:lookupFilter, nil]; + self.terminalFilter = lookupFilter; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMonochromeFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageMonochromeFilter.h new file mode 100644 index 00000000..66a0e773 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMonochromeFilter.h @@ -0,0 +1,13 @@ +#import "GPUImageFilter.h" + +@interface GPUImageMonochromeFilter : GPUImageFilter +{ + GLint intensityUniform, filterColorUniform; +} + +@property(readwrite, nonatomic) CGFloat intensity; +@property(readwrite, nonatomic) GPUVector4 color; + +- (void)setColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMonochromeFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageMonochromeFilter.m new file mode 100644 index 00000000..70cef872 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMonochromeFilter.m @@ -0,0 +1,115 @@ +#import "GPUImageMonochromeFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUMonochromeFragmentShaderString = SHADER_STRING +( + precision lowp float; + + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float intensity; + uniform vec3 filterColor; + + const mediump vec3 luminanceWeighting = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + //desat, then apply overlay blend + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float luminance = dot(textureColor.rgb, luminanceWeighting); + + lowp vec4 desat = vec4(vec3(luminance), 1.0); + + //overlay + lowp vec4 outputColor = vec4( + (desat.r < 0.5 ? 
(2.0 * desat.r * filterColor.r) : (1.0 - 2.0 * (1.0 - desat.r) * (1.0 - filterColor.r))), + (desat.g < 0.5 ? (2.0 * desat.g * filterColor.g) : (1.0 - 2.0 * (1.0 - desat.g) * (1.0 - filterColor.g))), + (desat.b < 0.5 ? (2.0 * desat.b * filterColor.b) : (1.0 - 2.0 * (1.0 - desat.b) * (1.0 - filterColor.b))), + 1.0 + ); + + //which is better, or are they equal? + gl_FragColor = vec4( mix(textureColor.rgb, outputColor.rgb, intensity), textureColor.a); + } +); +#else +NSString *const kGPUMonochromeFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float intensity; + uniform vec3 filterColor; + + const vec3 luminanceWeighting = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + //desat, then apply overlay blend + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float luminance = dot(textureColor.rgb, luminanceWeighting); + + vec4 desat = vec4(vec3(luminance), 1.0); + + //overlay + vec4 outputColor = vec4( + (desat.r < 0.5 ? (2.0 * desat.r * filterColor.r) : (1.0 - 2.0 * (1.0 - desat.r) * (1.0 - filterColor.r))), + (desat.g < 0.5 ? (2.0 * desat.g * filterColor.g) : (1.0 - 2.0 * (1.0 - desat.g) * (1.0 - filterColor.g))), + (desat.b < 0.5 ? (2.0 * desat.b * filterColor.b) : (1.0 - 2.0 * (1.0 - desat.b) * (1.0 - filterColor.b))), + 1.0 + ); + + //which is better, or are they equal? 
+ gl_FragColor = vec4( mix(textureColor.rgb, outputColor.rgb, intensity), textureColor.a); + } +); +#endif + +@implementation GPUImageMonochromeFilter + +@synthesize intensity = _intensity; +@synthesize color = _color; + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUMonochromeFragmentShaderString])) + { + return nil; + } + + intensityUniform = [filterProgram uniformIndex:@"intensity"]; + filterColorUniform = [filterProgram uniformIndex:@"filterColor"]; + + self.intensity = 1.0; + self.color = (GPUVector4){0.6f, 0.45f, 0.3f, 1.f}; + //self.color = [CIColor colorWithRed:0.6 green:0.45 blue:0.3 alpha:1.]; + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setColor:(GPUVector4)color; +{ + + _color = color; + + [self setColorRed:color.one green:color.two blue:color.three]; +} + +- (void)setColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; +{ + GPUVector3 filterColor = {redComponent, greenComponent, blueComponent}; + + [self setVec3:filterColor forUniform:filterColorUniform program:filterProgram]; +} + +- (void)setIntensity:(CGFloat)newValue; +{ + _intensity = newValue; + + [self setFloat:_intensity forUniform:intensityUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMosaicFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageMosaicFilter.h new file mode 100644 index 00000000..5f72d3c9 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMosaicFilter.h @@ -0,0 +1,22 @@ + +// This needs a little more work, it's rotating the input tileset and there are some artifacts (I think from GL_LINEAR interpolation), but it's working + +#import "GPUImageTwoInputFilter.h" +#import "GPUImagePicture.h" + +@interface GPUImageMosaicFilter : GPUImageTwoInputFilter { + GLint inputTileSizeUniform, numTilesUniform, displayTileSizeUniform, colorOnUniform; + GPUImagePicture *pic; +} + +// This filter takes an input tileset, the tiles must ascend in luminance +// It looks at 
the input image and replaces each display tile with an input tile +// according to the luminance of that tile. The idea was to replicate the ASCII +// video filters seen in other apps, but the tileset can be anything. +@property(readwrite, nonatomic) CGSize inputTileSize; +@property(readwrite, nonatomic) float numTiles; +@property(readwrite, nonatomic) CGSize displayTileSize; +@property(readwrite, nonatomic) BOOL colorOn; +@property(readwrite, nonatomic, copy) NSString *tileSet; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMosaicFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageMosaicFilter.m new file mode 100644 index 00000000..b017ca97 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMosaicFilter.m @@ -0,0 +1,188 @@ +// +// GPUImageMosaicFilter.m + + +#import "GPUImageMosaicFilter.h" +#import "GPUImagePicture.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageMosaicFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform vec2 inputTileSize; + uniform vec2 displayTileSize; + uniform float numTiles; + uniform int colorOn; + + void main() + { + vec2 xy = textureCoordinate; + xy = xy - mod(xy, displayTileSize); + + vec4 lumcoeff = vec4(0.299,0.587,0.114,0.0); + + vec4 inputColor = texture2D(inputImageTexture2, xy); + float lum = dot(inputColor,lumcoeff); + lum = 1.0 - lum; + + float stepsize = 1.0 / numTiles; + float lumStep = (lum - mod(lum, stepsize)) / stepsize; + + float rowStep = 1.0 / inputTileSize.x; + float x = mod(lumStep, rowStep); + float y = floor(lumStep / rowStep); + + vec2 startCoord = vec2(float(x) * inputTileSize.x, float(y) * inputTileSize.y); + vec2 finalCoord = startCoord + ((textureCoordinate - xy) * (inputTileSize / displayTileSize)); + + vec4 color = texture2D(inputImageTexture, finalCoord); + if (colorOn == 1) { + color = color * inputColor; + } + gl_FragColor = color; + 
+ } +); +#else +NSString *const kGPUImageMosaicFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform vec2 inputTileSize; + uniform vec2 displayTileSize; + uniform float numTiles; + uniform int colorOn; + + void main() + { + vec2 xy = textureCoordinate; + xy = xy - mod(xy, displayTileSize); + + vec4 lumcoeff = vec4(0.299,0.587,0.114,0.0); + + vec4 inputColor = texture2D(inputImageTexture2, xy); + float lum = dot(inputColor,lumcoeff); + lum = 1.0 - lum; + + float stepsize = 1.0 / numTiles; + float lumStep = (lum - mod(lum, stepsize)) / stepsize; + + float rowStep = 1.0 / inputTileSize.x; + float x = mod(lumStep, rowStep); + float y = floor(lumStep / rowStep); + + vec2 startCoord = vec2(float(x) * inputTileSize.x, float(y) * inputTileSize.y); + vec2 finalCoord = startCoord + ((textureCoordinate - xy) * (inputTileSize / displayTileSize)); + + vec4 color = texture2D(inputImageTexture, finalCoord); + if (colorOn == 1) { + color = color * inputColor; + } + gl_FragColor = color; + } +); +#endif + +@implementation GPUImageMosaicFilter + +@synthesize inputTileSize = _inputTileSize, numTiles = _numTiles, displayTileSize = _displayTileSize, colorOn = _colorOn; +@synthesize tileSet = _tileSet; + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageMosaicFragmentShaderString])) + { + return nil; + } + + inputTileSizeUniform = [filterProgram uniformIndex:@"inputTileSize"]; + displayTileSizeUniform = [filterProgram uniformIndex:@"displayTileSize"]; + numTilesUniform = [filterProgram uniformIndex:@"numTiles"]; + colorOnUniform = [filterProgram uniformIndex:@"colorOn"]; + + CGSize its = CGSizeMake(0.125, 0.125); + CGSize dts = CGSizeMake(0.025, 0.025); + [self setDisplayTileSize:dts]; + [self setInputTileSize:its]; + [self setNumTiles:64.0]; + [self setColorOn:YES]; + //[self setTileSet:@"squares.png"]; + return self; +} + +- 
(void)setColorOn:(BOOL)yes +{ + glUniform1i(colorOnUniform, yes); +} + +- (void)setNumTiles:(float)numTiles +{ + + _numTiles = numTiles; + [self setFloat:_numTiles forUniformName:@"numTiles"]; +} + +- (void)setInputTileSize:(CGSize)inputTileSize +{ + if (inputTileSize.width > 1.0) { + _inputTileSize.width = 1.0; + } + if (inputTileSize.height > 1.0) { + _inputTileSize.height = 1.0; + } + if (inputTileSize.width < 0.0) { + _inputTileSize.width = 0.0; + } + if (inputTileSize.height < 0.0) { + _inputTileSize.height = 0.0; + } + + + _inputTileSize = inputTileSize; + + [self setSize:_inputTileSize forUniform:inputTileSizeUniform program:filterProgram]; +} + +-(void)setDisplayTileSize:(CGSize)displayTileSize +{ + if (displayTileSize.width > 1.0) { + _displayTileSize.width = 1.0; + } + if (displayTileSize.height > 1.0) { + _displayTileSize.height = 1.0; + } + if (displayTileSize.width < 0.0) { + _displayTileSize.width = 0.0; + } + if (displayTileSize.height < 0.0) { + _displayTileSize.height = 0.0; + } + + + _displayTileSize = displayTileSize; + + [self setSize:_displayTileSize forUniform:displayTileSizeUniform program:filterProgram]; +} + +-(void)setTileSet:(NSString *)tileSet +{ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + UIImage *img = [UIImage imageNamed:tileSet]; +#else + NSImage *img = [NSImage imageNamed:tileSet]; +#endif + pic = [[GPUImagePicture alloc] initWithImage:img smoothlyScaleOutput:YES]; + [pic addTarget:self]; + [pic processImage]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMotionBlurFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageMotionBlurFilter.h new file mode 100644 index 00000000..dcca712f --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMotionBlurFilter.h @@ -0,0 +1,13 @@ +#import "GPUImageFilter.h" + +@interface GPUImageMotionBlurFilter : GPUImageFilter + +/** A multiplier for the blur size, ranging from 0.0 on up, with a default of 1.0 + */ +@property (readwrite, nonatomic) CGFloat blurSize; + +/** The angular direction of 
the blur, in degrees. 0 degrees by default + */ +@property (readwrite, nonatomic) CGFloat blurAngle; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMotionBlurFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageMotionBlurFilter.m new file mode 100644 index 00000000..5a2c20b6 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMotionBlurFilter.m @@ -0,0 +1,209 @@ +#import "GPUImageMotionBlurFilter.h" + +// Override vertex shader to remove dependent texture reads +NSString *const kGPUImageTiltedTexelSamplingVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + + uniform vec2 directionalTexelStep; + + varying vec2 textureCoordinate; + varying vec2 oneStepBackTextureCoordinate; + varying vec2 twoStepsBackTextureCoordinate; + varying vec2 threeStepsBackTextureCoordinate; + varying vec2 fourStepsBackTextureCoordinate; + varying vec2 oneStepForwardTextureCoordinate; + varying vec2 twoStepsForwardTextureCoordinate; + varying vec2 threeStepsForwardTextureCoordinate; + varying vec2 fourStepsForwardTextureCoordinate; + + void main() + { + gl_Position = position; + + textureCoordinate = inputTextureCoordinate.xy; + oneStepBackTextureCoordinate = inputTextureCoordinate.xy - directionalTexelStep; + twoStepsBackTextureCoordinate = inputTextureCoordinate.xy - 2.0 * directionalTexelStep; + threeStepsBackTextureCoordinate = inputTextureCoordinate.xy - 3.0 * directionalTexelStep; + fourStepsBackTextureCoordinate = inputTextureCoordinate.xy - 4.0 * directionalTexelStep; + oneStepForwardTextureCoordinate = inputTextureCoordinate.xy + directionalTexelStep; + twoStepsForwardTextureCoordinate = inputTextureCoordinate.xy + 2.0 * directionalTexelStep; + threeStepsForwardTextureCoordinate = inputTextureCoordinate.xy + 3.0 * directionalTexelStep; + fourStepsForwardTextureCoordinate = inputTextureCoordinate.xy + 4.0 * directionalTexelStep; + } +); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const 
kGPUImageMotionBlurFragmentShaderString = SHADER_STRING +( + precision highp float; + + uniform sampler2D inputImageTexture; + + varying vec2 textureCoordinate; + varying vec2 oneStepBackTextureCoordinate; + varying vec2 twoStepsBackTextureCoordinate; + varying vec2 threeStepsBackTextureCoordinate; + varying vec2 fourStepsBackTextureCoordinate; + varying vec2 oneStepForwardTextureCoordinate; + varying vec2 twoStepsForwardTextureCoordinate; + varying vec2 threeStepsForwardTextureCoordinate; + varying vec2 fourStepsForwardTextureCoordinate; + + void main() + { + // Box weights +// lowp vec4 fragmentColor = texture2D(inputImageTexture, textureCoordinate) * 0.1111111; +// fragmentColor += texture2D(inputImageTexture, oneStepBackTextureCoordinate) * 0.1111111; +// fragmentColor += texture2D(inputImageTexture, twoStepsBackTextureCoordinate) * 0.1111111; +// fragmentColor += texture2D(inputImageTexture, threeStepsBackTextureCoordinate) * 0.1111111; +// fragmentColor += texture2D(inputImageTexture, fourStepsBackTextureCoordinate) * 0.1111111; +// fragmentColor += texture2D(inputImageTexture, oneStepForwardTextureCoordinate) * 0.1111111; +// fragmentColor += texture2D(inputImageTexture, twoStepsForwardTextureCoordinate) * 0.1111111; +// fragmentColor += texture2D(inputImageTexture, threeStepsForwardTextureCoordinate) * 0.1111111; +// fragmentColor += texture2D(inputImageTexture, fourStepsForwardTextureCoordinate) * 0.1111111; + + lowp vec4 fragmentColor = texture2D(inputImageTexture, textureCoordinate) * 0.18; + fragmentColor += texture2D(inputImageTexture, oneStepBackTextureCoordinate) * 0.15; + fragmentColor += texture2D(inputImageTexture, twoStepsBackTextureCoordinate) * 0.12; + fragmentColor += texture2D(inputImageTexture, threeStepsBackTextureCoordinate) * 0.09; + fragmentColor += texture2D(inputImageTexture, fourStepsBackTextureCoordinate) * 0.05; + fragmentColor += texture2D(inputImageTexture, oneStepForwardTextureCoordinate) * 0.15; + fragmentColor += 
texture2D(inputImageTexture, twoStepsForwardTextureCoordinate) * 0.12; + fragmentColor += texture2D(inputImageTexture, threeStepsForwardTextureCoordinate) * 0.09; + fragmentColor += texture2D(inputImageTexture, fourStepsForwardTextureCoordinate) * 0.05; + + gl_FragColor = fragmentColor; + } +); +#else +NSString *const kGPUImageMotionBlurFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + varying vec2 textureCoordinate; + varying vec2 oneStepBackTextureCoordinate; + varying vec2 twoStepsBackTextureCoordinate; + varying vec2 threeStepsBackTextureCoordinate; + varying vec2 fourStepsBackTextureCoordinate; + varying vec2 oneStepForwardTextureCoordinate; + varying vec2 twoStepsForwardTextureCoordinate; + varying vec2 threeStepsForwardTextureCoordinate; + varying vec2 fourStepsForwardTextureCoordinate; + + void main() + { + // Box weights + // vec4 fragmentColor = texture2D(inputImageTexture, textureCoordinate) * 0.1111111; + // fragmentColor += texture2D(inputImageTexture, oneStepBackTextureCoordinate) * 0.1111111; + // fragmentColor += texture2D(inputImageTexture, twoStepsBackTextureCoordinate) * 0.1111111; + // fragmentColor += texture2D(inputImageTexture, threeStepsBackTextureCoordinate) * 0.1111111; + // fragmentColor += texture2D(inputImageTexture, fourStepsBackTextureCoordinate) * 0.1111111; + // fragmentColor += texture2D(inputImageTexture, oneStepForwardTextureCoordinate) * 0.1111111; + // fragmentColor += texture2D(inputImageTexture, twoStepsForwardTextureCoordinate) * 0.1111111; + // fragmentColor += texture2D(inputImageTexture, threeStepsForwardTextureCoordinate) * 0.1111111; + // fragmentColor += texture2D(inputImageTexture, fourStepsForwardTextureCoordinate) * 0.1111111; + + vec4 fragmentColor = texture2D(inputImageTexture, textureCoordinate) * 0.18; + fragmentColor += texture2D(inputImageTexture, oneStepBackTextureCoordinate) * 0.15; + fragmentColor += texture2D(inputImageTexture, twoStepsBackTextureCoordinate) * 0.12; + 
fragmentColor += texture2D(inputImageTexture, threeStepsBackTextureCoordinate) * 0.09; + fragmentColor += texture2D(inputImageTexture, fourStepsBackTextureCoordinate) * 0.05; + fragmentColor += texture2D(inputImageTexture, oneStepForwardTextureCoordinate) * 0.15; + fragmentColor += texture2D(inputImageTexture, twoStepsForwardTextureCoordinate) * 0.12; + fragmentColor += texture2D(inputImageTexture, threeStepsForwardTextureCoordinate) * 0.09; + fragmentColor += texture2D(inputImageTexture, fourStepsForwardTextureCoordinate) * 0.05; + + gl_FragColor = fragmentColor; + } +); +#endif + +@interface GPUImageMotionBlurFilter() +{ + GLint directionalTexelStepUniform; +} + +- (void)recalculateTexelOffsets; + +@end + +@implementation GPUImageMotionBlurFilter + +@synthesize blurSize = _blurSize; +@synthesize blurAngle = _blurAngle; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithVertexShaderFromString:kGPUImageTiltedTexelSamplingVertexShaderString fragmentShaderFromString:kGPUImageMotionBlurFragmentShaderString])) + { + return nil; + } + + directionalTexelStepUniform = [filterProgram uniformIndex:@"directionalTexelStep"]; + + self.blurSize = 2.5; + self.blurAngle = 0.0; + + return self; +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + CGSize oldInputSize = inputTextureSize; + [super setInputSize:newSize atIndex:textureIndex]; + + if (!CGSizeEqualToSize(oldInputSize, inputTextureSize) && (!CGSizeEqualToSize(newSize, CGSizeZero)) ) + { + [self recalculateTexelOffsets]; + } +} + +- (void)recalculateTexelOffsets; +{ + CGFloat aspectRatio = 1.0; + CGPoint texelOffsets; + + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + aspectRatio = (inputTextureSize.width / inputTextureSize.height); + texelOffsets.x = _blurSize * sin(_blurAngle * M_PI / 180.0) * aspectRatio / inputTextureSize.height; + texelOffsets.y = _blurSize * cos(_blurAngle * M_PI / 180.0) / inputTextureSize.height; + } 
+ else + { + aspectRatio = (inputTextureSize.height / inputTextureSize.width); + texelOffsets.x = _blurSize * cos(_blurAngle * M_PI / 180.0) * aspectRatio / inputTextureSize.width; + texelOffsets.y = _blurSize * sin(_blurAngle * M_PI / 180.0) / inputTextureSize.width; + } + + [self setPoint:texelOffsets forUniform:directionalTexelStepUniform program:filterProgram]; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + [super setInputRotation:newInputRotation atIndex:textureIndex]; + [self recalculateTexelOffsets]; +} + +- (void)setBlurAngle:(CGFloat)newValue; +{ + _blurAngle = newValue; + [self recalculateTexelOffsets]; +} + +- (void)setBlurSize:(CGFloat)newValue; +{ + _blurSize = newValue; + [self recalculateTexelOffsets]; +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMotionDetector.h b/LFLiveKit/Vendor/GPUImage/GPUImageMotionDetector.h new file mode 100644 index 00000000..01329145 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMotionDetector.h @@ -0,0 +1,18 @@ +#import "GPUImageFilterGroup.h" +#import "GPUImageLowPassFilter.h" +#import "GPUImageAverageColor.h" + +@interface GPUImageMotionDetector : GPUImageFilterGroup +{ + GPUImageLowPassFilter *lowPassFilter; + GPUImageTwoInputFilter *frameComparisonFilter; + GPUImageAverageColor *averageColor; +} + +// This controls the low pass filter strength used to compare the current frame with previous ones to detect motion. This ranges from 0.0 to 1.0, with a default of 0.5. +@property(readwrite, nonatomic) CGFloat lowPassFilterStrength; + +// For every frame, this will feed back the calculated centroid of the motion, as well as a relative intensity. 
+@property(nonatomic, copy) void(^motionDetectionBlock)(CGPoint motionCentroid, CGFloat motionIntensity, CMTime frameTime); + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMotionDetector.m b/LFLiveKit/Vendor/GPUImage/GPUImageMotionDetector.m new file mode 100644 index 00000000..0e204ad7 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMotionDetector.m @@ -0,0 +1,112 @@ +#import "GPUImageMotionDetector.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageMotionComparisonFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform highp float intensity; + + void main() + { + lowp vec3 currentImageColor = texture2D(inputImageTexture, textureCoordinate).rgb; + lowp vec3 lowPassImageColor = texture2D(inputImageTexture2, textureCoordinate2).rgb; + + mediump float colorDistance = distance(currentImageColor, lowPassImageColor); // * 0.57735 + lowp float movementThreshold = step(0.2, colorDistance); + + gl_FragColor = movementThreshold * vec4(textureCoordinate2.x, textureCoordinate2.y, 1.0, 1.0); + } +); +#else +NSString *const kGPUImageMotionComparisonFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform float intensity; + + void main() + { + vec3 currentImageColor = texture2D(inputImageTexture, textureCoordinate).rgb; + vec3 lowPassImageColor = texture2D(inputImageTexture2, textureCoordinate2).rgb; + + float colorDistance = distance(currentImageColor, lowPassImageColor); // * 0.57735 + float movementThreshold = step(0.2, colorDistance); + + gl_FragColor = movementThreshold * vec4(textureCoordinate2.x, textureCoordinate2.y, 1.0, 1.0); + } +); +#endif + + +@implementation GPUImageMotionDetector + +@synthesize lowPassFilterStrength, 
motionDetectionBlock; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + // Start with a low pass filter to define the component to be removed + lowPassFilter = [[GPUImageLowPassFilter alloc] init]; + [self addFilter:lowPassFilter]; + + // Take the difference of the current frame from the low pass filtered result to get the high pass + frameComparisonFilter = [[GPUImageTwoInputFilter alloc] initWithFragmentShaderFromString:kGPUImageMotionComparisonFragmentShaderString]; + [self addFilter:frameComparisonFilter]; + + // Texture location 0 needs to be the original image for the difference blend + [lowPassFilter addTarget:frameComparisonFilter atTextureLocation:1]; + + // End with the average color for the scene to determine the centroid + averageColor = [[GPUImageAverageColor alloc] init]; + + __unsafe_unretained GPUImageMotionDetector *weakSelf = self; + + [averageColor setColorAverageProcessingFinishedBlock:^(CGFloat redComponent, CGFloat greenComponent, CGFloat blueComponent, CGFloat alphaComponent, CMTime frameTime) { + if (weakSelf.motionDetectionBlock != NULL) + { + weakSelf.motionDetectionBlock(CGPointMake(redComponent / alphaComponent, greenComponent / alphaComponent), alphaComponent, frameTime); + } +// NSLog(@"Average X: %f, Y: %f total: %f", redComponent / alphaComponent, greenComponent / alphaComponent, alphaComponent); + }]; + + [frameComparisonFilter addTarget:averageColor]; + + self.initialFilters = [NSArray arrayWithObjects:lowPassFilter, frameComparisonFilter, nil]; + self.terminalFilter = frameComparisonFilter; + + self.lowPassFilterStrength = 0.5; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setLowPassFilterStrength:(CGFloat)newValue; +{ + lowPassFilter.filterStrength = newValue; +} + +- (CGFloat)lowPassFilterStrength; +{ + return lowPassFilter.filterStrength; +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMovie.h 
b/LFLiveKit/Vendor/GPUImage/GPUImageMovie.h new file mode 100755 index 00000000..f61e56ef --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMovie.h @@ -0,0 +1,61 @@ +#import +#import +#import "GPUImageContext.h" +#import "GPUImageOutput.h" + +/** Protocol for getting Movie played callback. + */ +@protocol GPUImageMovieDelegate + +- (void)didCompletePlayingMovie; +@end + +/** Source object for filtering movies + */ +@interface GPUImageMovie : GPUImageOutput + +@property (readwrite, retain) AVAsset *asset; +@property (readwrite, retain) AVPlayerItem *playerItem; +@property(readwrite, retain) NSURL *url; + +/** This enables the benchmarking mode, which logs out instantaneous and average frame times to the console + */ +@property(readwrite, nonatomic) BOOL runBenchmark; + +/** This determines whether to play back a movie as fast as the frames can be processed, or if the original speed of the movie should be respected. Defaults to NO. + */ +@property(readwrite, nonatomic) BOOL playAtActualSpeed; + +/** This determines whether the video should repeat (loop) at the end and restart from the beginning. Defaults to NO. + */ +@property(readwrite, nonatomic) BOOL shouldRepeat; + +/** This specifies the progress of the process on a scale from 0 to 1.0. A value of 0 means the process has not yet begun, A value of 1.0 means the conversaion is complete. + This property is not key-value observable. 
+ */ +@property(readonly, nonatomic) float progress; + +/** This is used to send the delete Movie did complete playing alert + */ +@property (readwrite, nonatomic, assign) id delegate; + +@property (readonly, nonatomic) AVAssetReader *assetReader; +@property (readonly, nonatomic) BOOL audioEncodingIsFinished; +@property (readonly, nonatomic) BOOL videoEncodingIsFinished; + +/// @name Initialization and teardown +- (id)initWithAsset:(AVAsset *)asset; +- (id)initWithPlayerItem:(AVPlayerItem *)playerItem; +- (id)initWithURL:(NSURL *)url; +- (void)yuvConversionSetup; + +/// @name Movie processing +- (void)enableSynchronizedEncodingUsingMovieWriter:(GPUImageMovieWriter *)movieWriter; +- (BOOL)readNextVideoFrameFromOutput:(AVAssetReaderOutput *)readerVideoTrackOutput; +- (BOOL)readNextAudioSampleFromOutput:(AVAssetReaderOutput *)readerAudioTrackOutput; +- (void)startProcessing; +- (void)endProcessing; +- (void)cancelProcessing; +- (void)processMovieFrame:(CMSampleBufferRef)movieSampleBuffer; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMovie.m b/LFLiveKit/Vendor/GPUImage/GPUImageMovie.m new file mode 100755 index 00000000..4da050f4 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMovie.m @@ -0,0 +1,876 @@ +#import "GPUImageMovie.h" +#import "GPUImageMovieWriter.h" +#import "GPUImageFilter.h" +#import "GPUImageColorConversion.h" + + +@interface GPUImageMovie () +{ + BOOL audioEncodingIsFinished, videoEncodingIsFinished; + GPUImageMovieWriter *synchronizedMovieWriter; + AVAssetReader *reader; + AVPlayerItemVideoOutput *playerItemOutput; +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + CADisplayLink *displayLink; +#else + CVDisplayLinkRef displayLink; +#endif + CMTime previousFrameTime, processingFrameTime; + CFAbsoluteTime previousActualFrameTime; + BOOL keepLooping; + + GLuint luminanceTexture, chrominanceTexture; + + GLProgram *yuvConversionProgram; + GLint yuvConversionPositionAttribute, yuvConversionTextureCoordinateAttribute; + GLint 
yuvConversionLuminanceTextureUniform, yuvConversionChrominanceTextureUniform; + GLint yuvConversionMatrixUniform; + const GLfloat *_preferredConversion; + + BOOL isFullYUVRange; + + int imageBufferWidth, imageBufferHeight; +} + +- (void)processAsset; + +@end + +@implementation GPUImageMovie + +@synthesize url = _url; +@synthesize asset = _asset; +@synthesize runBenchmark = _runBenchmark; +@synthesize playAtActualSpeed = _playAtActualSpeed; +@synthesize delegate = _delegate; +@synthesize shouldRepeat = _shouldRepeat; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithURL:(NSURL *)url; +{ + if (!(self = [super init])) + { + return nil; + } + + [self yuvConversionSetup]; + + self.url = url; + self.asset = nil; + + return self; +} + +- (id)initWithAsset:(AVAsset *)asset; +{ + if (!(self = [super init])) + { + return nil; + } + + [self yuvConversionSetup]; + + self.url = nil; + self.asset = asset; + + return self; +} + +- (id)initWithPlayerItem:(AVPlayerItem *)playerItem; +{ + if (!(self = [super init])) + { + return nil; + } + + [self yuvConversionSetup]; + + self.url = nil; + self.asset = nil; + self.playerItem = playerItem; + + return self; +} + +- (void)yuvConversionSetup; +{ + if ([GPUImageContext supportsFastTextureUpload]) + { + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + + _preferredConversion = kColorConversion709; + isFullYUVRange = YES; + yuvConversionProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:kGPUImageVertexShaderString fragmentShaderString:kGPUImageYUVFullRangeConversionForLAFragmentShaderString]; + + if (!yuvConversionProgram.initialized) + { + [yuvConversionProgram addAttribute:@"position"]; + [yuvConversionProgram addAttribute:@"inputTextureCoordinate"]; + + if (![yuvConversionProgram link]) + { + NSString *progLog = [yuvConversionProgram programLog]; + NSLog(@"Program link log: %@", progLog); + NSString *fragLog = [yuvConversionProgram 
fragmentShaderLog]; + NSLog(@"Fragment shader compile log: %@", fragLog); + NSString *vertLog = [yuvConversionProgram vertexShaderLog]; + NSLog(@"Vertex shader compile log: %@", vertLog); + yuvConversionProgram = nil; + NSAssert(NO, @"Filter shader link failed"); + } + } + + yuvConversionPositionAttribute = [yuvConversionProgram attributeIndex:@"position"]; + yuvConversionTextureCoordinateAttribute = [yuvConversionProgram attributeIndex:@"inputTextureCoordinate"]; + yuvConversionLuminanceTextureUniform = [yuvConversionProgram uniformIndex:@"luminanceTexture"]; + yuvConversionChrominanceTextureUniform = [yuvConversionProgram uniformIndex:@"chrominanceTexture"]; + yuvConversionMatrixUniform = [yuvConversionProgram uniformIndex:@"colorConversionMatrix"]; + + [GPUImageContext setActiveShaderProgram:yuvConversionProgram]; + + glEnableVertexAttribArray(yuvConversionPositionAttribute); + glEnableVertexAttribArray(yuvConversionTextureCoordinateAttribute); + }); + } +} + +- (void)dealloc +{ + [playerItemOutput setDelegate:nil queue:nil]; + + // Moved into endProcessing + //if (self.playerItem && (displayLink != nil)) + //{ + // [displayLink invalidate]; // remove from all run loops + // displayLink = nil; + //} +} + +#pragma mark - +#pragma mark Movie processing + +- (void)enableSynchronizedEncodingUsingMovieWriter:(GPUImageMovieWriter *)movieWriter; +{ + synchronizedMovieWriter = movieWriter; + movieWriter.encodingLiveVideo = NO; +} + +- (void)startProcessing +{ + if( self.playerItem ) { + [self processPlayerItem]; + return; + } + if(self.url == nil) + { + [self processAsset]; + return; + } + + if (_shouldRepeat) keepLooping = YES; + + previousFrameTime = kCMTimeZero; + previousActualFrameTime = CFAbsoluteTimeGetCurrent(); + + NSDictionary *inputOptions = [NSDictionary dictionaryWithObject:[NSNumber numberWithBool:YES] forKey:AVURLAssetPreferPreciseDurationAndTimingKey]; + AVURLAsset *inputAsset = [[AVURLAsset alloc] initWithURL:self.url options:inputOptions]; + + 
GPUImageMovie __block *blockSelf = self; + + [inputAsset loadValuesAsynchronouslyForKeys:[NSArray arrayWithObject:@"tracks"] completionHandler: ^{ + dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{ + NSError *error = nil; + AVKeyValueStatus tracksStatus = [inputAsset statusOfValueForKey:@"tracks" error:&error]; + if (tracksStatus != AVKeyValueStatusLoaded) + { + return; + } + blockSelf.asset = inputAsset; + [blockSelf processAsset]; + blockSelf = nil; + }); + }]; +} + +- (AVAssetReader*)createAssetReader +{ + NSError *error = nil; + AVAssetReader *assetReader = [AVAssetReader assetReaderWithAsset:self.asset error:&error]; + + NSMutableDictionary *outputSettings = [NSMutableDictionary dictionary]; + if ([GPUImageContext supportsFastTextureUpload]) { + [outputSettings setObject:@(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) forKey:(id)kCVPixelBufferPixelFormatTypeKey]; + isFullYUVRange = YES; + } + else { + [outputSettings setObject:@(kCVPixelFormatType_32BGRA) forKey:(id)kCVPixelBufferPixelFormatTypeKey]; + isFullYUVRange = NO; + } + + // Maybe set alwaysCopiesSampleData to NO on iOS 5.0 for faster video decoding + AVAssetReaderTrackOutput *readerVideoTrackOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:[[self.asset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0] outputSettings:outputSettings]; + readerVideoTrackOutput.alwaysCopiesSampleData = NO; + [assetReader addOutput:readerVideoTrackOutput]; + + NSArray *audioTracks = [self.asset tracksWithMediaType:AVMediaTypeAudio]; + BOOL shouldRecordAudioTrack = (([audioTracks count] > 0) && (self.audioEncodingTarget != nil) ); + AVAssetReaderTrackOutput *readerAudioTrackOutput = nil; + + if (shouldRecordAudioTrack) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [self.audioEncodingTarget setShouldInvalidateAudioSampleWhenDone:YES]; +#else +#warning Missing OSX implementation +#endif + + // This might need to be extended to handle movies with more than one 
audio track + AVAssetTrack* audioTrack = [audioTracks objectAtIndex:0]; + readerAudioTrackOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:audioTrack outputSettings:nil]; + readerAudioTrackOutput.alwaysCopiesSampleData = NO; + [assetReader addOutput:readerAudioTrackOutput]; + } + + return assetReader; +} + +- (void)processAsset +{ + reader = [self createAssetReader]; + + AVAssetReaderOutput *readerVideoTrackOutput = nil; + AVAssetReaderOutput *readerAudioTrackOutput = nil; + + audioEncodingIsFinished = YES; + for( AVAssetReaderOutput *output in reader.outputs ) { + if( [output.mediaType isEqualToString:AVMediaTypeAudio] ) { + audioEncodingIsFinished = NO; + readerAudioTrackOutput = output; + } + else if( [output.mediaType isEqualToString:AVMediaTypeVideo] ) { + readerVideoTrackOutput = output; + } + } + + if ([reader startReading] == NO) + { + NSLog(@"Error reading from file at URL: %@", self.url); + return; + } + + __unsafe_unretained GPUImageMovie *weakSelf = self; + + if (synchronizedMovieWriter != nil) + { + [synchronizedMovieWriter setVideoInputReadyCallback:^{ + BOOL success = [weakSelf readNextVideoFrameFromOutput:readerVideoTrackOutput]; +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + return success; +#endif + }]; + + [synchronizedMovieWriter setAudioInputReadyCallback:^{ + BOOL success = [weakSelf readNextAudioSampleFromOutput:readerAudioTrackOutput]; +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + return success; +#endif + }]; + + [synchronizedMovieWriter enableSynchronizationCallbacks]; + + } + else + { + while (reader.status == AVAssetReaderStatusReading && (!_shouldRepeat || keepLooping)) + { + [weakSelf readNextVideoFrameFromOutput:readerVideoTrackOutput]; + + if ( (readerAudioTrackOutput) && (!audioEncodingIsFinished) ) + { + [weakSelf readNextAudioSampleFromOutput:readerAudioTrackOutput]; + } + + } + + if (reader.status == AVAssetReaderStatusCompleted) { + + [reader cancelReading]; + + if (keepLooping) { + reader = nil; + 
dispatch_async(dispatch_get_main_queue(), ^{ + [self startProcessing]; + }); + } else { + [weakSelf endProcessing]; + } + + } + } +} + +- (void)processPlayerItem +{ + runSynchronouslyOnVideoProcessingQueue(^{ + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + displayLink = [CADisplayLink displayLinkWithTarget:self selector:@selector(displayLinkCallback:)]; + [displayLink addToRunLoop:[NSRunLoop currentRunLoop] forMode:NSRunLoopCommonModes]; + [displayLink setPaused:YES]; +#else + // Suggested implementation: use CVDisplayLink http://stackoverflow.com/questions/14158743/alternative-of-cadisplaylink-for-mac-os-x + CGDirectDisplayID displayID = CGMainDisplayID(); + CVReturn error = kCVReturnSuccess; + error = CVDisplayLinkCreateWithCGDisplay(displayID, &displayLink); + if (error) + { + NSLog(@"DisplayLink created with error:%d", error); + displayLink = NULL; + } + CVDisplayLinkSetOutputCallback(displayLink, renderCallback, (__bridge void *)self); + CVDisplayLinkStop(displayLink); +#endif + + dispatch_queue_t videoProcessingQueue = [GPUImageContext sharedContextQueue]; + NSMutableDictionary *pixBuffAttributes = [NSMutableDictionary dictionary]; + if ([GPUImageContext supportsFastTextureUpload]) { + [pixBuffAttributes setObject:@(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) forKey:(id)kCVPixelBufferPixelFormatTypeKey]; + } + else { + [pixBuffAttributes setObject:@(kCVPixelFormatType_32BGRA) forKey:(id)kCVPixelBufferPixelFormatTypeKey]; + } + playerItemOutput = [[AVPlayerItemVideoOutput alloc] initWithPixelBufferAttributes:pixBuffAttributes]; + [playerItemOutput setDelegate:self queue:videoProcessingQueue]; + + [_playerItem addOutput:playerItemOutput]; + [playerItemOutput requestNotificationOfMediaDataChangeWithAdvanceInterval:0.1]; + }); +} + +- (void)outputMediaDataWillChange:(AVPlayerItemOutput *)sender +{ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + // Restart display link. 
+ [displayLink setPaused:NO]; +#else + CVDisplayLinkStart(displayLink); +#endif +} + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +- (void)displayLinkCallback:(CADisplayLink *)sender +{ + /* + The callback gets called once every Vsync. + Using the display link's timestamp and duration we can compute the next time the screen will be refreshed, and copy the pixel buffer for that time + This pixel buffer can then be processed and later rendered on screen. + */ + // Calculate the nextVsync time which is when the screen will be refreshed next. + CFTimeInterval nextVSync = ([sender timestamp] + [sender duration]); + + CMTime outputItemTime = [playerItemOutput itemTimeForHostTime:nextVSync]; + + [self processPixelBufferAtTime:outputItemTime]; + +} +#else +static CVReturn renderCallback(CVDisplayLinkRef displayLink, + const CVTimeStamp *inNow, + const CVTimeStamp *inOutputTime, + CVOptionFlags flagsIn, + CVOptionFlags *flagsOut, + void *displayLinkContext) +{ + // Sample code taken from here https://developer.apple.com/library/mac/samplecode/AVGreenScreenPlayer/Listings/AVGreenScreenPlayer_GSPlayerView_m.html + + GPUImageMovie *self = (__bridge GPUImageMovie *)displayLinkContext; + AVPlayerItemVideoOutput *playerItemOutput = self->playerItemOutput; + + + // The displayLink calls back at every vsync (screen refresh) + // Compute itemTime for the next vsync + CMTime outputItemTime = [playerItemOutput itemTimeForCVTimeStamp:*inOutputTime]; + + [self processPixelBufferAtTime:outputItemTime]; + + return kCVReturnSuccess; +} +#endif + +- (void)processPixelBufferAtTime:(CMTime)outputItemTime { + if ([playerItemOutput hasNewPixelBufferForItemTime:outputItemTime]) { + __unsafe_unretained GPUImageMovie *weakSelf = self; + CVPixelBufferRef pixelBuffer = [playerItemOutput copyPixelBufferForItemTime:outputItemTime itemTimeForDisplay:NULL]; + if( pixelBuffer ) + runSynchronouslyOnVideoProcessingQueue(^{ + [weakSelf processMovieFrame:pixelBuffer withSampleTime:outputItemTime]; + 
CFRelease(pixelBuffer); + }); + } +} + +- (BOOL)readNextVideoFrameFromOutput:(AVAssetReaderOutput *)readerVideoTrackOutput; +{ + if (reader.status == AVAssetReaderStatusReading && ! videoEncodingIsFinished) + { + CMSampleBufferRef sampleBufferRef = [readerVideoTrackOutput copyNextSampleBuffer]; + if (sampleBufferRef) + { + //NSLog(@"read a video frame: %@", CFBridgingRelease(CMTimeCopyDescription(kCFAllocatorDefault, CMSampleBufferGetOutputPresentationTimeStamp(sampleBufferRef)))); + if (_playAtActualSpeed) + { + // Do this outside of the video processing queue to not slow that down while waiting + CMTime currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBufferRef); + CMTime differenceFromLastFrame = CMTimeSubtract(currentSampleTime, previousFrameTime); + CFAbsoluteTime currentActualTime = CFAbsoluteTimeGetCurrent(); + + CGFloat frameTimeDifference = CMTimeGetSeconds(differenceFromLastFrame); + CGFloat actualTimeDifference = currentActualTime - previousActualFrameTime; + + if (frameTimeDifference > actualTimeDifference) + { + usleep(1000000.0 * (frameTimeDifference - actualTimeDifference)); + } + + previousFrameTime = currentSampleTime; + previousActualFrameTime = CFAbsoluteTimeGetCurrent(); + } + + __unsafe_unretained GPUImageMovie *weakSelf = self; + runSynchronouslyOnVideoProcessingQueue(^{ + [weakSelf processMovieFrame:sampleBufferRef]; + CMSampleBufferInvalidate(sampleBufferRef); + CFRelease(sampleBufferRef); + }); + + return YES; + } + else + { + if (!keepLooping) { + videoEncodingIsFinished = YES; + if( videoEncodingIsFinished && audioEncodingIsFinished ) + [self endProcessing]; + } + } + } + else if (synchronizedMovieWriter != nil) + { + if (reader.status == AVAssetReaderStatusCompleted) + { + [self endProcessing]; + } + } + return NO; +} + +- (BOOL)readNextAudioSampleFromOutput:(AVAssetReaderOutput *)readerAudioTrackOutput; +{ + if (reader.status == AVAssetReaderStatusReading && ! 
audioEncodingIsFinished) + { + CMSampleBufferRef audioSampleBufferRef = [readerAudioTrackOutput copyNextSampleBuffer]; + if (audioSampleBufferRef) + { + //NSLog(@"read an audio frame: %@", CFBridgingRelease(CMTimeCopyDescription(kCFAllocatorDefault, CMSampleBufferGetOutputPresentationTimeStamp(audioSampleBufferRef)))); + [self.audioEncodingTarget processAudioBuffer:audioSampleBufferRef]; + CFRelease(audioSampleBufferRef); + return YES; + } + else + { + if (!keepLooping) { + audioEncodingIsFinished = YES; + if( videoEncodingIsFinished && audioEncodingIsFinished ) + [self endProcessing]; + } + } + } + else if (synchronizedMovieWriter != nil) + { + if (reader.status == AVAssetReaderStatusCompleted || reader.status == AVAssetReaderStatusFailed || + reader.status == AVAssetReaderStatusCancelled) + { + [self endProcessing]; + } + } + return NO; +} + +- (void)processMovieFrame:(CMSampleBufferRef)movieSampleBuffer; +{ +// CMTimeGetSeconds +// CMTimeSubtract + + CMTime currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(movieSampleBuffer); + CVImageBufferRef movieFrame = CMSampleBufferGetImageBuffer(movieSampleBuffer); + + processingFrameTime = currentSampleTime; + [self processMovieFrame:movieFrame withSampleTime:currentSampleTime]; +} + +- (float)progress +{ + if ( AVAssetReaderStatusReading == reader.status ) + { + float current = processingFrameTime.value * 1.0f / processingFrameTime.timescale; + float duration = self.asset.duration.value * 1.0f / self.asset.duration.timescale; + return current / duration; + } + else if ( AVAssetReaderStatusCompleted == reader.status ) + { + return 1.f; + } + else + { + return 0.f; + } +} + +- (void)processMovieFrame:(CVPixelBufferRef)movieFrame withSampleTime:(CMTime)currentSampleTime +{ + int bufferHeight = (int) CVPixelBufferGetHeight(movieFrame); + int bufferWidth = (int) CVPixelBufferGetWidth(movieFrame); + + CFTypeRef colorAttachments = CVBufferGetAttachment(movieFrame, kCVImageBufferYCbCrMatrixKey, NULL); + if 
(colorAttachments != NULL) + { + if(CFStringCompare(colorAttachments, kCVImageBufferYCbCrMatrix_ITU_R_601_4, 0) == kCFCompareEqualTo) + { + if (isFullYUVRange) + { + _preferredConversion = kColorConversion601FullRange; + } + else + { + _preferredConversion = kColorConversion601; + } + } + else + { + _preferredConversion = kColorConversion709; + } + } + else + { + if (isFullYUVRange) + { + _preferredConversion = kColorConversion601FullRange; + } + else + { + _preferredConversion = kColorConversion601; + } + + } + + CFAbsoluteTime startTime = CFAbsoluteTimeGetCurrent(); + + // Fix issue 1580 + [GPUImageContext useImageProcessingContext]; + + if ([GPUImageContext supportsFastTextureUpload]) + { + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + CVOpenGLESTextureRef luminanceTextureRef = NULL; + CVOpenGLESTextureRef chrominanceTextureRef = NULL; +#else + CVOpenGLTextureRef luminanceTextureRef = NULL; + CVOpenGLTextureRef chrominanceTextureRef = NULL; +#endif + + // if (captureAsYUV && [GPUImageContext deviceSupportsRedTextures]) + if (CVPixelBufferGetPlaneCount(movieFrame) > 0) // Check for YUV planar inputs to do RGB conversion + { + + // fix issue 2221 + CVPixelBufferLockBaseAddress(movieFrame,0); + + + if ( (imageBufferWidth != bufferWidth) && (imageBufferHeight != bufferHeight) ) + { + imageBufferWidth = bufferWidth; + imageBufferHeight = bufferHeight; + } + + CVReturn err; + // Y-plane + glActiveTexture(GL_TEXTURE4); + if ([GPUImageContext deviceSupportsRedTextures]) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], movieFrame, NULL, GL_TEXTURE_2D, GL_LUMINANCE, bufferWidth, bufferHeight, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0, &luminanceTextureRef); +#else + err = CVOpenGLTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], movieFrame, NULL, 
&luminanceTextureRef); +#endif + } + else + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], movieFrame, NULL, GL_TEXTURE_2D, GL_LUMINANCE, bufferWidth, bufferHeight, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0, &luminanceTextureRef); +#else + err = CVOpenGLTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], movieFrame, NULL, &luminanceTextureRef); +#endif + } + if (err) + { + NSLog(@"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err); + } + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + luminanceTexture = CVOpenGLESTextureGetName(luminanceTextureRef); +#else + luminanceTexture = CVOpenGLTextureGetName(luminanceTextureRef); +#endif + glBindTexture(GL_TEXTURE_2D, luminanceTexture); + glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + + // UV-plane + glActiveTexture(GL_TEXTURE5); + if ([GPUImageContext deviceSupportsRedTextures]) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], movieFrame, NULL, GL_TEXTURE_2D, GL_LUMINANCE_ALPHA, bufferWidth/2, bufferHeight/2, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, 1, &chrominanceTextureRef); +#else + err = CVOpenGLTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], movieFrame, NULL, &chrominanceTextureRef); +#endif + } + else + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], movieFrame, NULL, GL_TEXTURE_2D, GL_LUMINANCE_ALPHA, bufferWidth/2, 
bufferHeight/2, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, 1, &chrominanceTextureRef); +#else + err = CVOpenGLTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], movieFrame, NULL, &chrominanceTextureRef); +#endif + } + if (err) + { + NSLog(@"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err); + } + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + chrominanceTexture = CVOpenGLESTextureGetName(chrominanceTextureRef); +#else + chrominanceTexture = CVOpenGLTextureGetName(chrominanceTextureRef); +#endif + glBindTexture(GL_TEXTURE_2D, chrominanceTexture); + glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + +// if (!allTargetsWantMonochromeData) +// { + [self convertYUVToRGBOutput]; +// } + + for (id currentTarget in targets) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + NSInteger targetTextureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + [currentTarget setInputSize:CGSizeMake(bufferWidth, bufferHeight) atIndex:targetTextureIndex]; + [currentTarget setInputFramebuffer:outputFramebuffer atIndex:targetTextureIndex]; + } + + [outputFramebuffer unlock]; + + for (id currentTarget in targets) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + NSInteger targetTextureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + [currentTarget newFrameReadyAtTime:currentSampleTime atIndex:targetTextureIndex]; + } + + CVPixelBufferUnlockBaseAddress(movieFrame, 0); + CFRelease(luminanceTextureRef); + CFRelease(chrominanceTextureRef); + } + else + { + // TODO: Mesh this with the new framebuffer cache +// CVPixelBufferLockBaseAddress(movieFrame, 0); +// +// CVReturn err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, coreVideoTextureCache, movieFrame, NULL, GL_TEXTURE_2D, GL_RGBA, bufferWidth, 
bufferHeight, GL_BGRA, GL_UNSIGNED_BYTE, 0, &texture); +// +// if (!texture || err) { +// NSLog(@"Movie CVOpenGLESTextureCacheCreateTextureFromImage failed (error: %d)", err); +// NSAssert(NO, @"Camera failure"); +// return; +// } +// +// outputTexture = CVOpenGLESTextureGetName(texture); +// // glBindTexture(CVOpenGLESTextureGetTarget(texture), outputTexture); +// glBindTexture(GL_TEXTURE_2D, outputTexture); +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); +// +// for (id currentTarget in targets) +// { +// NSInteger indexOfObject = [targets indexOfObject:currentTarget]; +// NSInteger targetTextureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; +// +// [currentTarget setInputSize:CGSizeMake(bufferWidth, bufferHeight) atIndex:targetTextureIndex]; +// [currentTarget setInputTexture:outputTexture atIndex:targetTextureIndex]; +// +// [currentTarget newFrameReadyAtTime:currentSampleTime atIndex:targetTextureIndex]; +// } +// +// CVPixelBufferUnlockBaseAddress(movieFrame, 0); +// CVOpenGLESTextureCacheFlush(coreVideoTextureCache, 0); +// CFRelease(texture); +// +// outputTexture = 0; + } + } + else + { + // Upload to texture + CVPixelBufferLockBaseAddress(movieFrame, 0); + + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:CGSizeMake(bufferWidth, bufferHeight) textureOptions:self.outputTextureOptions onlyTexture:YES]; + + glBindTexture(GL_TEXTURE_2D, [outputFramebuffer texture]); + // Using BGRA extension to pull in video frame data directly + glTexImage2D(GL_TEXTURE_2D, + 0, + self.outputTextureOptions.internalFormat, + bufferWidth, + bufferHeight, + 0, + self.outputTextureOptions.format, + self.outputTextureOptions.type, + CVPixelBufferGetBaseAddress(movieFrame)); + + for 
(id currentTarget in targets) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + NSInteger targetTextureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + [currentTarget setInputSize:CGSizeMake(bufferWidth, bufferHeight) atIndex:targetTextureIndex]; + [currentTarget setInputFramebuffer:outputFramebuffer atIndex:targetTextureIndex]; + } + + [outputFramebuffer unlock]; + + for (id currentTarget in targets) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + NSInteger targetTextureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + [currentTarget newFrameReadyAtTime:currentSampleTime atIndex:targetTextureIndex]; + } + CVPixelBufferUnlockBaseAddress(movieFrame, 0); + } + + if (_runBenchmark) + { + CFAbsoluteTime currentFrameTime = (CFAbsoluteTimeGetCurrent() - startTime); + NSLog(@"Current frame time : %f ms", 1000.0 * currentFrameTime); + } +} + +- (void)endProcessing; +{ + keepLooping = NO; +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [displayLink setPaused:YES]; +#else + CVDisplayLinkStop(displayLink); +#endif + + for (id currentTarget in targets) + { + [currentTarget endProcessing]; + } + + if (synchronizedMovieWriter != nil) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [synchronizedMovieWriter setVideoInputReadyCallback:^{return NO;}]; + [synchronizedMovieWriter setAudioInputReadyCallback:^{return NO;}]; +#else + // I'm not sure about this, meybe setting a nil will be more appropriate then an empty block + [synchronizedMovieWriter setVideoInputReadyCallback:^{}]; + [synchronizedMovieWriter setAudioInputReadyCallback:^{}]; +#endif + } + + if (self.playerItem && (displayLink != nil)) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [displayLink invalidate]; // remove from all run loops + displayLink = nil; +#else + CVDisplayLinkStop(displayLink); + displayLink = NULL; +#endif + } + + if ([self.delegate 
respondsToSelector:@selector(didCompletePlayingMovie)]) { + [self.delegate didCompletePlayingMovie]; + } + self.delegate = nil; +} + +- (void)cancelProcessing +{ + if (reader) { + [reader cancelReading]; + } + [self endProcessing]; +} + +- (void)convertYUVToRGBOutput; +{ + [GPUImageContext setActiveShaderProgram:yuvConversionProgram]; + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:CGSizeMake(imageBufferWidth, imageBufferHeight) onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + static const GLfloat squareVertices[] = { + -1.0f, -1.0f, + 1.0f, -1.0f, + -1.0f, 1.0f, + 1.0f, 1.0f, + }; + + static const GLfloat textureCoordinates[] = { + 0.0f, 0.0f, + 1.0f, 0.0f, + 0.0f, 1.0f, + 1.0f, 1.0f, + }; + + glActiveTexture(GL_TEXTURE4); + glBindTexture(GL_TEXTURE_2D, luminanceTexture); + glUniform1i(yuvConversionLuminanceTextureUniform, 4); + + glActiveTexture(GL_TEXTURE5); + glBindTexture(GL_TEXTURE_2D, chrominanceTexture); + glUniform1i(yuvConversionChrominanceTextureUniform, 5); + + glUniformMatrix3fv(yuvConversionMatrixUniform, 1, GL_FALSE, _preferredConversion); + + glVertexAttribPointer(yuvConversionPositionAttribute, 2, GL_FLOAT, 0, 0, squareVertices); + glVertexAttribPointer(yuvConversionTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); +} + +- (AVAssetReader*)assetReader { + return reader; +} + +- (BOOL)audioEncodingIsFinished { + return audioEncodingIsFinished; +} + +- (BOOL)videoEncodingIsFinished { + return videoEncodingIsFinished; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMovieComposition.h b/LFLiveKit/Vendor/GPUImage/GPUImageMovieComposition.h new file mode 100644 index 00000000..00e43814 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMovieComposition.h @@ -0,0 +1,21 @@ +// +// GPUImageMovieComposition.h +// Givit +// +// Created by 
Sean Meiners on 2013/01/25. +// +// + +#import "GPUImageMovie.h" + +@interface GPUImageMovieComposition : GPUImageMovie + +@property (readwrite, retain) AVComposition *compositon; +@property (readwrite, retain) AVVideoComposition *videoComposition; +@property (readwrite, retain) AVAudioMix *audioMix; + +- (id)initWithComposition:(AVComposition*)compositon + andVideoComposition:(AVVideoComposition*)videoComposition + andAudioMix:(AVAudioMix*)audioMix; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMovieComposition.m b/LFLiveKit/Vendor/GPUImage/GPUImageMovieComposition.m new file mode 100644 index 00000000..6138fffe --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMovieComposition.m @@ -0,0 +1,70 @@ +// +// GPUImageMovieComposition.m +// Givit +// +// Created by Sean Meiners on 2013/01/25. +// +// + +#import "GPUImageMovieComposition.h" +#import "GPUImageMovieWriter.h" + +@implementation GPUImageMovieComposition + +@synthesize compositon = _compositon; +@synthesize videoComposition = _videoComposition; +@synthesize audioMix = _audioMix; + +- (id)initWithComposition:(AVComposition*)compositon + andVideoComposition:(AVVideoComposition*)videoComposition + andAudioMix:(AVAudioMix*)audioMix { + if (!(self = [super init])) + { + return nil; + } + + [self yuvConversionSetup]; + + self.compositon = compositon; + self.videoComposition = videoComposition; + self.audioMix = audioMix; + + return self; +} + +- (AVAssetReader*)createAssetReader + { + //NSLog(@"creating reader from composition: %@, video: %@, audio: %@ with duration: %@", _compositon, _videoComposition, _audioMix, CFBridgingRelease(CMTimeCopyDescription(kCFAllocatorDefault, _compositon.duration))); + + NSError *error = nil; + AVAssetReader *assetReader = [AVAssetReader assetReaderWithAsset:self.compositon error:&error]; + + NSDictionary *outputSettings = @{(id)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)}; + AVAssetReaderVideoCompositionOutput *readerVideoOutput = 
[AVAssetReaderVideoCompositionOutput assetReaderVideoCompositionOutputWithVideoTracks:[_compositon tracksWithMediaType:AVMediaTypeVideo] + videoSettings:outputSettings]; +#if ! TARGET_IPHONE_SIMULATOR + if( [_videoComposition isKindOfClass:[AVMutableVideoComposition class]] ) + [(AVMutableVideoComposition*)_videoComposition setRenderScale:1.0]; +#endif + readerVideoOutput.videoComposition = self.videoComposition; + readerVideoOutput.alwaysCopiesSampleData = NO; + [assetReader addOutput:readerVideoOutput]; + + NSArray *audioTracks = [_compositon tracksWithMediaType:AVMediaTypeAudio]; + BOOL shouldRecordAudioTrack = (([audioTracks count] > 0) && (self.audioEncodingTarget != nil) ); + AVAssetReaderAudioMixOutput *readerAudioOutput = nil; + + if (shouldRecordAudioTrack) + { + [self.audioEncodingTarget setShouldInvalidateAudioSampleWhenDone:YES]; + + readerAudioOutput = [AVAssetReaderAudioMixOutput assetReaderAudioMixOutputWithAudioTracks:audioTracks audioSettings:nil]; + readerAudioOutput.audioMix = self.audioMix; + readerAudioOutput.alwaysCopiesSampleData = NO; + [assetReader addOutput:readerAudioOutput]; + } + + return assetReader; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMultiplyBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageMultiplyBlendFilter.h new file mode 100755 index 00000000..5ebc28bb --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMultiplyBlendFilter.h @@ -0,0 +1,7 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageMultiplyBlendFilter : GPUImageTwoInputFilter +{ +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageMultiplyBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageMultiplyBlendFilter.m new file mode 100755 index 00000000..ed647072 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageMultiplyBlendFilter.m @@ -0,0 +1,52 @@ +#import "GPUImageMultiplyBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageMultiplyBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 
textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + lowp vec4 base = texture2D(inputImageTexture, textureCoordinate); + lowp vec4 overlayer = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = overlayer * base + overlayer * (1.0 - base.a) + base * (1.0 - overlayer.a); + } +); +#else +NSString *const kGPUImageMultiplyBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 base = texture2D(inputImageTexture, textureCoordinate); + vec4 overlayer = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = overlayer * base + overlayer * (1.0 - base.a) + base * (1.0 - overlayer.a); + } +); +#endif + +@implementation GPUImageMultiplyBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageMultiplyBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageNobleCornerDetectionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageNobleCornerDetectionFilter.h new file mode 100644 index 00000000..963fd66a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageNobleCornerDetectionFilter.h @@ -0,0 +1,12 @@ +#import "GPUImageHarrisCornerDetectionFilter.h" + +/** Noble corner detector + + This is the Noble variant on the Harris detector, from + Alison Noble, "Descriptions of Image Surfaces", PhD thesis, Department of Engineering Science, Oxford University 1989, p45. 
+*/ + + +@interface GPUImageNobleCornerDetectionFilter : GPUImageHarrisCornerDetectionFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageNobleCornerDetectionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageNobleCornerDetectionFilter.m new file mode 100644 index 00000000..aa6b3041 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageNobleCornerDetectionFilter.m @@ -0,0 +1,74 @@ +#import "GPUImageNobleCornerDetectionFilter.h" + +@implementation GPUImageNobleCornerDetectionFilter + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageNobleCornerDetectionFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform lowp float sensitivity; + + void main() + { + mediump vec3 derivativeElements = texture2D(inputImageTexture, textureCoordinate).rgb; + + mediump float derivativeSum = derivativeElements.x + derivativeElements.y; + + // R = (Ix^2 * Iy^2 - Ixy * Ixy) / (Ix^2 + Iy^2) + mediump float zElement = (derivativeElements.z * 2.0) - 1.0; + // mediump float harrisIntensity = (derivativeElements.x * derivativeElements.y - (derivativeElements.z * derivativeElements.z)) / (derivativeSum); + mediump float cornerness = (derivativeElements.x * derivativeElements.y - (zElement * zElement)) / (derivativeSum); + + // Original Harris detector + // R = Ix^2 * Iy^2 - Ixy * Ixy - k * (Ix^2 + Iy^2)^2 + // highp float harrisIntensity = derivativeElements.x * derivativeElements.y - (derivativeElements.z * derivativeElements.z) - harrisConstant * derivativeSum * derivativeSum; + + // gl_FragColor = vec4(vec3(harrisIntensity * 7.0), 1.0); + gl_FragColor = vec4(vec3(cornerness * sensitivity), 1.0); + } +); +#else +NSString *const kGPUImageNobleCornerDetectionFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float sensitivity; + + void main() + { + vec3 derivativeElements = texture2D(inputImageTexture, 
textureCoordinate).rgb; + + float derivativeSum = derivativeElements.x + derivativeElements.y; + + // R = (Ix^2 * Iy^2 - Ixy * Ixy) / (Ix^2 + Iy^2) + float zElement = (derivativeElements.z * 2.0) - 1.0; + // mediump float harrisIntensity = (derivativeElements.x * derivativeElements.y - (derivativeElements.z * derivativeElements.z)) / (derivativeSum); + float cornerness = (derivativeElements.x * derivativeElements.y - (zElement * zElement)) / (derivativeSum); + + // Original Harris detector + // R = Ix^2 * Iy^2 - Ixy * Ixy - k * (Ix^2 + Iy^2)^2 + // highp float harrisIntensity = derivativeElements.x * derivativeElements.y - (derivativeElements.z * derivativeElements.z) - harrisConstant * derivativeSum * derivativeSum; + + // gl_FragColor = vec4(vec3(harrisIntensity * 7.0), 1.0); + gl_FragColor = vec4(vec3(cornerness * sensitivity), 1.0); + } +); +#endif + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithCornerDetectionFragmentShader:kGPUImageNobleCornerDetectionFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageNonMaximumSuppressionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageNonMaximumSuppressionFilter.h new file mode 100644 index 00000000..fd8fe6d6 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageNonMaximumSuppressionFilter.h @@ -0,0 +1,5 @@ +#import "GPUImage3x3TextureSamplingFilter.h" + +@interface GPUImageNonMaximumSuppressionFilter : GPUImage3x3TextureSamplingFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageNonMaximumSuppressionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageNonMaximumSuppressionFilter.m new file mode 100644 index 00000000..eaf7ce56 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageNonMaximumSuppressionFilter.m @@ -0,0 +1,107 @@ +#import "GPUImageNonMaximumSuppressionFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageNonMaximumSuppressionFragmentShaderString = 
SHADER_STRING +( + uniform sampler2D inputImageTexture; + + varying highp vec2 textureCoordinate; + varying highp vec2 leftTextureCoordinate; + varying highp vec2 rightTextureCoordinate; + + varying highp vec2 topTextureCoordinate; + varying highp vec2 topLeftTextureCoordinate; + varying highp vec2 topRightTextureCoordinate; + + varying highp vec2 bottomTextureCoordinate; + varying highp vec2 bottomLeftTextureCoordinate; + varying highp vec2 bottomRightTextureCoordinate; + + void main() + { + lowp float bottomColor = texture2D(inputImageTexture, bottomTextureCoordinate).r; + lowp float bottomLeftColor = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + lowp float bottomRightColor = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + lowp vec4 centerColor = texture2D(inputImageTexture, textureCoordinate); + lowp float leftColor = texture2D(inputImageTexture, leftTextureCoordinate).r; + lowp float rightColor = texture2D(inputImageTexture, rightTextureCoordinate).r; + lowp float topColor = texture2D(inputImageTexture, topTextureCoordinate).r; + lowp float topRightColor = texture2D(inputImageTexture, topRightTextureCoordinate).r; + lowp float topLeftColor = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + + // Use a tiebreaker for pixels to the left and immediately above this one + lowp float multiplier = 1.0 - step(centerColor.r, topColor); + multiplier = multiplier * (1.0 - step(centerColor.r, topLeftColor)); + multiplier = multiplier * (1.0 - step(centerColor.r, leftColor)); + multiplier = multiplier * (1.0 - step(centerColor.r, bottomLeftColor)); + + lowp float maxValue = max(centerColor.r, bottomColor); + maxValue = max(maxValue, bottomRightColor); + maxValue = max(maxValue, rightColor); + maxValue = max(maxValue, topRightColor); + + gl_FragColor = vec4((centerColor.rgb * step(maxValue, centerColor.r) * multiplier), 1.0); + } +); +#else +NSString *const kGPUImageNonMaximumSuppressionFragmentShaderString = SHADER_STRING +( + 
uniform sampler2D inputImageTexture; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + void main() + { + float bottomColor = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float bottomLeftColor = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float bottomRightColor = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + vec4 centerColor = texture2D(inputImageTexture, textureCoordinate); + float leftColor = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightColor = texture2D(inputImageTexture, rightTextureCoordinate).r; + float topColor = texture2D(inputImageTexture, topTextureCoordinate).r; + float topRightColor = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftColor = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + + // Use a tiebreaker for pixels to the left and immediately above this one + float multiplier = 1.0 - step(centerColor.r, topColor); + multiplier = multiplier * (1.0 - step(centerColor.r, topLeftColor)); + multiplier = multiplier * (1.0 - step(centerColor.r, leftColor)); + multiplier = multiplier * (1.0 - step(centerColor.r, bottomLeftColor)); + + float maxValue = max(centerColor.r, bottomColor); + maxValue = max(maxValue, bottomRightColor); + maxValue = max(maxValue, rightColor); + maxValue = max(maxValue, topRightColor); + + gl_FragColor = vec4((centerColor.rgb * step(maxValue, centerColor.r) * multiplier), 1.0); + } +); +#endif + +@implementation GPUImageNonMaximumSuppressionFilter + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super 
initWithFragmentShaderFromString:kGPUImageNonMaximumSuppressionFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageNormalBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageNormalBlendFilter.h new file mode 100644 index 00000000..ce5e22b4 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageNormalBlendFilter.h @@ -0,0 +1,8 @@ +// Created by Jorge Garcia on 9/5/12. +// + +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageNormalBlendFilter : GPUImageTwoInputFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageNormalBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageNormalBlendFilter.m new file mode 100644 index 00000000..f5b5069d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageNormalBlendFilter.m @@ -0,0 +1,96 @@ +// Created by Jorge Garcia on 9/5/12. + +#import "GPUImageNormalBlendFilter.h" +/* + This equation is a simplification of the general blending equation. It assumes the destination color is opaque, and therefore drops the destination color's alpha term. + + D = C1 * C1a + C2 * C2a * (1 - C1a) + where D is the resultant color, C1 is the color of the first element, C1a is the alpha of the first element, C2 is the second element color, C2a is the alpha of the second element. The destination alpha is calculated with: + + Da = C1a + C2a * (1 - C1a) + The resultant color is premultiplied with the alpha. To restore the color to the unmultiplied values, just divide by Da, the resultant alpha. 
+ + http://stackoverflow.com/questions/1724946/blend-mode-on-a-transparent-and-semi-transparent-background + + For some reason Photoshop behaves + D = C1 + C2 * C2a * (1 - C1a) + */ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageNormalBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + lowp vec4 c2 = texture2D(inputImageTexture, textureCoordinate); + lowp vec4 c1 = texture2D(inputImageTexture2, textureCoordinate2); + + lowp vec4 outputColor; + +// outputColor.r = c1.r + c2.r * c2.a * (1.0 - c1.a); +// outputColor.g = c1.g + c2.g * c2.a * (1.0 - c1.a); +// outputColor.b = c1.b + c2.b * c2.a * (1.0 - c1.a); +// outputColor.a = c1.a + c2.a * (1.0 - c1.a); + + lowp float a = c1.a + c2.a * (1.0 - c1.a); + lowp float alphaDivisor = a + step(a, 0.0); // Protect against a divide-by-zero blacking out things in the output + + outputColor.r = (c1.r * c1.a + c2.r * c2.a * (1.0 - c1.a))/alphaDivisor; + outputColor.g = (c1.g * c1.a + c2.g * c2.a * (1.0 - c1.a))/alphaDivisor; + outputColor.b = (c1.b * c1.a + c2.b * c2.a * (1.0 - c1.a))/alphaDivisor; + outputColor.a = a; + + gl_FragColor = outputColor; + } +); +#else +NSString *const kGPUImageNormalBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 c2 = texture2D(inputImageTexture, textureCoordinate); + vec4 c1 = texture2D(inputImageTexture2, textureCoordinate2); + + vec4 outputColor; + + // outputColor.r = c1.r + c2.r * c2.a * (1.0 - c1.a); + // outputColor.g = c1.g + c2.g * c2.a * (1.0 - c1.a); + // outputColor.b = c1.b + c2.b * c2.a * (1.0 - c1.a); + // outputColor.a = c1.a + c2.a * (1.0 - c1.a); + + float a = c1.a + c2.a * (1.0 - c1.a); + float alphaDivisor 
= a + step(a, 0.0); // Protect against a divide-by-zero blacking out things in the output + + outputColor.r = (c1.r * c1.a + c2.r * c2.a * (1.0 - c1.a))/alphaDivisor; + outputColor.g = (c1.g * c1.a + c2.g * c2.a * (1.0 - c1.a))/alphaDivisor; + outputColor.b = (c1.b * c1.a + c2.b * c2.a * (1.0 - c1.a))/alphaDivisor; + outputColor.a = a; + + gl_FragColor = outputColor; + } +); +#endif + +@implementation GPUImageNormalBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageNormalBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageOpacityFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageOpacityFilter.h new file mode 100644 index 00000000..826749fb --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageOpacityFilter.h @@ -0,0 +1,11 @@ +#import "GPUImageFilter.h" + +@interface GPUImageOpacityFilter : GPUImageFilter +{ + GLint opacityUniform; +} + +// Opacity ranges from 0.0 to 1.0, with 1.0 as the normal setting +@property(readwrite, nonatomic) CGFloat opacity; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageOpacityFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageOpacityFilter.m new file mode 100644 index 00000000..b74acb60 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageOpacityFilter.m @@ -0,0 +1,65 @@ +#import "GPUImageOpacityFilter.h" + +@implementation GPUImageOpacityFilter + +@synthesize opacity = _opacity; + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageOpacityFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform lowp float opacity; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4(textureColor.rgb, textureColor.a * opacity); + } +); +#else +NSString *const kGPUImageOpacityFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + 
uniform sampler2D inputImageTexture; + uniform float opacity; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = vec4(textureColor.rgb, textureColor.a * opacity); + } +); +#endif + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageOpacityFragmentShaderString])) + { + return nil; + } + + opacityUniform = [filterProgram uniformIndex:@"opacity"]; + self.opacity = 1.0; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setOpacity:(CGFloat)newValue; +{ + _opacity = newValue; + + [self setFloat:_opacity forUniform:opacityUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageOpeningFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageOpeningFilter.h new file mode 100644 index 00000000..3e4f7545 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageOpeningFilter.h @@ -0,0 +1,19 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImageErosionFilter; +@class GPUImageDilationFilter; + +// A filter that first performs an erosion on the red channel of an image, followed by a dilation of the same radius. +// This helps to filter out smaller bright elements. 
+ +@interface GPUImageOpeningFilter : GPUImageFilterGroup +{ + GPUImageErosionFilter *erosionFilter; + GPUImageDilationFilter *dilationFilter; +} + +@property(readwrite, nonatomic) CGFloat verticalTexelSpacing, horizontalTexelSpacing; + +- (id)initWithRadius:(NSUInteger)radius; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageOpeningFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageOpeningFilter.m new file mode 100644 index 00000000..4e7a5653 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageOpeningFilter.m @@ -0,0 +1,57 @@ +#import "GPUImageOpeningFilter.h" +#import "GPUImageErosionFilter.h" +#import "GPUImageDilationFilter.h" + +@implementation GPUImageOpeningFilter + +@synthesize verticalTexelSpacing = _verticalTexelSpacing; +@synthesize horizontalTexelSpacing = _horizontalTexelSpacing; + +- (id)init; +{ + if (!(self = [self initWithRadius:1])) + { + return nil; + } + + return self; +} + +- (id)initWithRadius:(NSUInteger)radius; +{ + if (!(self = [super init])) + { + return nil; + } + + // First pass: erosion + erosionFilter = [[GPUImageErosionFilter alloc] initWithRadius:radius]; + [self addFilter:erosionFilter]; + + // Second pass: dilation + dilationFilter = [[GPUImageDilationFilter alloc] initWithRadius:radius]; + [self addFilter:dilationFilter]; + + [erosionFilter addTarget:dilationFilter]; + + self.initialFilters = [NSArray arrayWithObjects:erosionFilter, nil]; + self.terminalFilter = dilationFilter; + + return self; +} + +- (void)setVerticalTexelSpacing:(CGFloat)newValue; +{ + _verticalTexelSpacing = newValue; + erosionFilter.verticalTexelSpacing = newValue; + dilationFilter.verticalTexelSpacing = newValue; +} + +- (void)setHorizontalTexelSpacing:(CGFloat)newValue; +{ + _horizontalTexelSpacing = newValue; + erosionFilter.horizontalTexelSpacing = newValue; + dilationFilter.horizontalTexelSpacing = newValue; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageOutput.h b/LFLiveKit/Vendor/GPUImage/GPUImageOutput.h new file mode 100755 index 
00000000..c10121a9 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageOutput.h @@ -0,0 +1,128 @@ +#import "GPUImageContext.h" +#import "GPUImageFramebuffer.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +#import <UIKit/UIKit.h> +#else +// For now, just redefine this on the Mac +typedef NS_ENUM(NSInteger, UIImageOrientation) { + UIImageOrientationUp, // default orientation + UIImageOrientationDown, // 180 deg rotation + UIImageOrientationLeft, // 90 deg CCW + UIImageOrientationRight, // 90 deg CW + UIImageOrientationUpMirrored, // as above but image mirrored along other axis. horizontal flip + UIImageOrientationDownMirrored, // horizontal flip + UIImageOrientationLeftMirrored, // vertical flip + UIImageOrientationRightMirrored, // vertical flip +}; +#endif + +dispatch_queue_attr_t GPUImageDefaultQueueAttribute(void); +void runOnMainQueueWithoutDeadlocking(void (^block)(void)); +void runSynchronouslyOnVideoProcessingQueue(void (^block)(void)); +void runAsynchronouslyOnVideoProcessingQueue(void (^block)(void)); +void runSynchronouslyOnContextQueue(GPUImageContext *context, void (^block)(void)); +void runAsynchronouslyOnContextQueue(GPUImageContext *context, void (^block)(void)); +void reportAvailableMemoryForGPUImage(NSString *tag); + +@class GPUImageMovieWriter; + +/** GPUImage's base source object + + Images or frames of video are uploaded from source objects, which are subclasses of GPUImageOutput. These include: + + - GPUImageVideoCamera (for live video from an iOS camera) + - GPUImageStillCamera (for taking photos with the camera) + - GPUImagePicture (for still images) + - GPUImageMovie (for movies) + + Source objects upload still image frames to OpenGL ES as textures, then hand those textures off to the next objects in the processing chain. 
+ */ +@interface GPUImageOutput : NSObject +{ + GPUImageFramebuffer *outputFramebuffer; + + NSMutableArray *targets, *targetTextureIndices; + + CGSize inputTextureSize, cachedMaximumOutputSize, forcedMaximumSize; + + BOOL overrideInputSize; + + BOOL allTargetsWantMonochromeData; + BOOL usingNextFrameForImageCapture; +} + +@property(readwrite, nonatomic) BOOL shouldSmoothlyScaleOutput; +@property(readwrite, nonatomic) BOOL shouldIgnoreUpdatesToThisTarget; +@property(readwrite, nonatomic, retain) GPUImageMovieWriter *audioEncodingTarget; +@property(readwrite, nonatomic, unsafe_unretained) id targetToIgnoreForUpdates; +@property(nonatomic, copy) void(^frameProcessingCompletionBlock)(GPUImageOutput*, CMTime); +@property(nonatomic) BOOL enabled; +@property(readwrite, nonatomic) GPUTextureOptions outputTextureOptions; + +/// @name Managing targets +- (void)setInputFramebufferForTarget:(id)target atIndex:(NSInteger)inputTextureIndex; +- (GPUImageFramebuffer *)framebufferForOutput; +- (void)removeOutputFramebuffer; +- (void)notifyTargetsAboutNewOutputTexture; + +/** Returns an array of the current targets. + */ +- (NSArray*)targets; + +/** Adds a target to receive notifications when new frames are available. + + The target will be asked for its next available texture. + + See [GPUImageInput newFrameReadyAtTime:] + + @param newTarget Target to be added + */ +- (void)addTarget:(id)newTarget; + +/** Adds a target to receive notifications when new frames are available. + + See [GPUImageInput newFrameReadyAtTime:] + + @param newTarget Target to be added + */ +- (void)addTarget:(id)newTarget atTextureLocation:(NSInteger)textureLocation; + +/** Removes a target. The target will no longer receive notifications when new frames are available. + + @param targetToRemove Target to be removed + */ +- (void)removeTarget:(id)targetToRemove; + +/** Removes all targets. 
+ */ +- (void)removeAllTargets; + +/// @name Manage the output texture + +- (void)forceProcessingAtSize:(CGSize)frameSize; +- (void)forceProcessingAtSizeRespectingAspectRatio:(CGSize)frameSize; + +/// @name Still image processing + +- (void)useNextFrameForImageCapture; +- (CGImageRef)newCGImageFromCurrentlyProcessedOutput; +- (CGImageRef)newCGImageByFilteringCGImage:(CGImageRef)imageToFilter; + +// Platform-specific image output methods +// If you're trying to use these methods, remember that you need to set -useNextFrameForImageCapture before running -processImage or running video and calling any of these methods, or you will get a nil image +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +- (UIImage *)imageFromCurrentFramebuffer; +- (UIImage *)imageFromCurrentFramebufferWithOrientation:(UIImageOrientation)imageOrientation; +- (UIImage *)imageByFilteringImage:(UIImage *)imageToFilter; +- (CGImageRef)newCGImageByFilteringImage:(UIImage *)imageToFilter; +#else +- (NSImage *)imageFromCurrentFramebuffer; +- (NSImage *)imageFromCurrentFramebufferWithOrientation:(UIImageOrientation)imageOrientation; +- (NSImage *)imageByFilteringImage:(NSImage *)imageToFilter; +- (CGImageRef)newCGImageByFilteringImage:(NSImage *)imageToFilter; +#endif + +- (BOOL)providesMonochromeOutput; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageOutput.m b/LFLiveKit/Vendor/GPUImage/GPUImageOutput.m new file mode 100755 index 00000000..d9bdaefa --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageOutput.m @@ -0,0 +1,439 @@ +#import "GPUImageOutput.h" +#import "GPUImageMovieWriter.h" +#import "GPUImagePicture.h" +#import <mach/mach.h> + +dispatch_queue_attr_t GPUImageDefaultQueueAttribute(void) +{ +#if TARGET_OS_IPHONE + if ([[[UIDevice currentDevice] systemVersion] compare:@"9.0" options:NSNumericSearch] != NSOrderedAscending) + { + return dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, QOS_CLASS_DEFAULT, 0); + } +#endif + return nil; +} + +void runOnMainQueueWithoutDeadlocking(void 
(^block)(void)) +{ + if ([NSThread isMainThread]) + { + block(); + } + else + { + dispatch_sync(dispatch_get_main_queue(), block); + } +} + +void runSynchronouslyOnVideoProcessingQueue(void (^block)(void)) +{ + dispatch_queue_t videoProcessingQueue = [GPUImageContext sharedContextQueue]; +#if !OS_OBJECT_USE_OBJC +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + if (dispatch_get_current_queue() == videoProcessingQueue) +#pragma clang diagnostic pop +#else + if (dispatch_get_specific([GPUImageContext contextKey])) +#endif + { + block(); + }else + { + dispatch_sync(videoProcessingQueue, block); + } +} + +void runAsynchronouslyOnVideoProcessingQueue(void (^block)(void)) +{ + dispatch_queue_t videoProcessingQueue = [GPUImageContext sharedContextQueue]; + +#if !OS_OBJECT_USE_OBJC +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + if (dispatch_get_current_queue() == videoProcessingQueue) +#pragma clang diagnostic pop +#else + if (dispatch_get_specific([GPUImageContext contextKey])) +#endif + { + block(); + }else + { + dispatch_async(videoProcessingQueue, block); + } +} + +void runSynchronouslyOnContextQueue(GPUImageContext *context, void (^block)(void)) +{ + dispatch_queue_t videoProcessingQueue = [context contextQueue]; +#if !OS_OBJECT_USE_OBJC +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + if (dispatch_get_current_queue() == videoProcessingQueue) +#pragma clang diagnostic pop +#else + if (dispatch_get_specific([GPUImageContext contextKey])) +#endif + { + block(); + }else + { + dispatch_sync(videoProcessingQueue, block); + } +} + +void runAsynchronouslyOnContextQueue(GPUImageContext *context, void (^block)(void)) +{ + dispatch_queue_t videoProcessingQueue = [context contextQueue]; + +#if !OS_OBJECT_USE_OBJC +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + if (dispatch_get_current_queue() == 
videoProcessingQueue) +#pragma clang diagnostic pop +#else + if (dispatch_get_specific([GPUImageContext contextKey])) +#endif + { + block(); + }else + { + dispatch_async(videoProcessingQueue, block); + } +} + +void reportAvailableMemoryForGPUImage(NSString *tag) +{ + if (!tag) + tag = @"Default"; + + struct task_basic_info info; + + mach_msg_type_number_t size = sizeof(info); + + kern_return_t kerr = task_info(mach_task_self(), + + TASK_BASIC_INFO, + + (task_info_t)&info, + + &size); + if( kerr == KERN_SUCCESS ) { + NSLog(@"%@ - Memory used: %u", tag, (unsigned int)info.resident_size); //in bytes + } else { + NSLog(@"%@ - Error: %s", tag, mach_error_string(kerr)); + } +} + +@implementation GPUImageOutput + +@synthesize shouldSmoothlyScaleOutput = _shouldSmoothlyScaleOutput; +@synthesize shouldIgnoreUpdatesToThisTarget = _shouldIgnoreUpdatesToThisTarget; +@synthesize audioEncodingTarget = _audioEncodingTarget; +@synthesize targetToIgnoreForUpdates = _targetToIgnoreForUpdates; +@synthesize frameProcessingCompletionBlock = _frameProcessingCompletionBlock; +@synthesize enabled = _enabled; +@synthesize outputTextureOptions = _outputTextureOptions; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + targets = [[NSMutableArray alloc] init]; + targetTextureIndices = [[NSMutableArray alloc] init]; + _enabled = YES; + allTargetsWantMonochromeData = YES; + usingNextFrameForImageCapture = NO; + + // set default texture options + _outputTextureOptions.minFilter = GL_LINEAR; + _outputTextureOptions.magFilter = GL_LINEAR; + _outputTextureOptions.wrapS = GL_CLAMP_TO_EDGE; + _outputTextureOptions.wrapT = GL_CLAMP_TO_EDGE; + _outputTextureOptions.internalFormat = GL_RGBA; + _outputTextureOptions.format = GL_BGRA; + _outputTextureOptions.type = GL_UNSIGNED_BYTE; + + return self; +} + +- (void)dealloc +{ + [self removeAllTargets]; +} + +#pragma mark - +#pragma mark Managing targets + +- 
(void)setInputFramebufferForTarget:(id)target atIndex:(NSInteger)inputTextureIndex; +{ + [target setInputFramebuffer:[self framebufferForOutput] atIndex:inputTextureIndex]; +} + +- (GPUImageFramebuffer *)framebufferForOutput; +{ + return outputFramebuffer; +} + +- (void)removeOutputFramebuffer; +{ + outputFramebuffer = nil; +} + +- (void)notifyTargetsAboutNewOutputTexture; +{ + for (id currentTarget in targets) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + NSInteger textureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + + [self setInputFramebufferForTarget:currentTarget atIndex:textureIndex]; + } +} + +- (NSArray*)targets; +{ + return [NSArray arrayWithArray:targets]; +} + +- (void)addTarget:(id)newTarget; +{ + NSInteger nextAvailableTextureIndex = [newTarget nextAvailableTextureIndex]; + [self addTarget:newTarget atTextureLocation:nextAvailableTextureIndex]; + + if ([newTarget shouldIgnoreUpdatesToThisTarget]) + { + _targetToIgnoreForUpdates = newTarget; + } +} + +- (void)addTarget:(id)newTarget atTextureLocation:(NSInteger)textureLocation; +{ + if([targets containsObject:newTarget]) + { + return; + } + + cachedMaximumOutputSize = CGSizeZero; + runSynchronouslyOnVideoProcessingQueue(^{ + [self setInputFramebufferForTarget:newTarget atIndex:textureLocation]; + [targets addObject:newTarget]; + [targetTextureIndices addObject:[NSNumber numberWithInteger:textureLocation]]; + + allTargetsWantMonochromeData = allTargetsWantMonochromeData && [newTarget wantsMonochromeInput]; + }); +} + +- (void)removeTarget:(id)targetToRemove; +{ + if(![targets containsObject:targetToRemove]) + { + return; + } + + if (_targetToIgnoreForUpdates == targetToRemove) + { + _targetToIgnoreForUpdates = nil; + } + + cachedMaximumOutputSize = CGSizeZero; + + NSInteger indexOfObject = [targets indexOfObject:targetToRemove]; + NSInteger textureIndexOfTarget = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + + 
runSynchronouslyOnVideoProcessingQueue(^{ + [targetToRemove setInputSize:CGSizeZero atIndex:textureIndexOfTarget]; + [targetToRemove setInputRotation:kGPUImageNoRotation atIndex:textureIndexOfTarget]; + + [targetTextureIndices removeObjectAtIndex:indexOfObject]; + [targets removeObject:targetToRemove]; + [targetToRemove endProcessing]; + }); +} + +- (void)removeAllTargets; +{ + cachedMaximumOutputSize = CGSizeZero; + runSynchronouslyOnVideoProcessingQueue(^{ + for (id targetToRemove in targets) + { + NSInteger indexOfObject = [targets indexOfObject:targetToRemove]; + NSInteger textureIndexOfTarget = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + + [targetToRemove setInputSize:CGSizeZero atIndex:textureIndexOfTarget]; + [targetToRemove setInputRotation:kGPUImageNoRotation atIndex:textureIndexOfTarget]; + } + [targets removeAllObjects]; + [targetTextureIndices removeAllObjects]; + + allTargetsWantMonochromeData = YES; + }); +} + +#pragma mark - +#pragma mark Manage the output texture + +- (void)forceProcessingAtSize:(CGSize)frameSize; +{ + +} + +- (void)forceProcessingAtSizeRespectingAspectRatio:(CGSize)frameSize; +{ +} + +#pragma mark - +#pragma mark Still image processing + +- (void)useNextFrameForImageCapture; +{ + +} + +- (CGImageRef)newCGImageFromCurrentlyProcessedOutput; +{ + return nil; +} + +- (CGImageRef)newCGImageByFilteringCGImage:(CGImageRef)imageToFilter; +{ + GPUImagePicture *stillImageSource = [[GPUImagePicture alloc] initWithCGImage:imageToFilter]; + + [self useNextFrameForImageCapture]; + [stillImageSource addTarget:(id)self]; + [stillImageSource processImage]; + + CGImageRef processedImage = [self newCGImageFromCurrentlyProcessedOutput]; + + [stillImageSource removeTarget:(id)self]; + return processedImage; +} + +- (BOOL)providesMonochromeOutput; +{ + return NO; +} + +#pragma mark - +#pragma mark Platform-specific image output methods + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + +- (UIImage 
*)imageFromCurrentFramebuffer; +{ + UIDeviceOrientation deviceOrientation = [[UIDevice currentDevice] orientation]; + UIImageOrientation imageOrientation = UIImageOrientationLeft; + switch (deviceOrientation) + { + case UIDeviceOrientationPortrait: + imageOrientation = UIImageOrientationUp; + break; + case UIDeviceOrientationPortraitUpsideDown: + imageOrientation = UIImageOrientationDown; + break; + case UIDeviceOrientationLandscapeLeft: + imageOrientation = UIImageOrientationLeft; + break; + case UIDeviceOrientationLandscapeRight: + imageOrientation = UIImageOrientationRight; + break; + default: + imageOrientation = UIImageOrientationUp; + break; + } + + return [self imageFromCurrentFramebufferWithOrientation:imageOrientation]; +} + +- (UIImage *)imageFromCurrentFramebufferWithOrientation:(UIImageOrientation)imageOrientation; +{ + CGImageRef cgImageFromBytes = [self newCGImageFromCurrentlyProcessedOutput]; + UIImage *finalImage = [UIImage imageWithCGImage:cgImageFromBytes scale:1.0 orientation:imageOrientation]; + CGImageRelease(cgImageFromBytes); + + return finalImage; +} + +- (UIImage *)imageByFilteringImage:(UIImage *)imageToFilter; +{ + CGImageRef image = [self newCGImageByFilteringCGImage:[imageToFilter CGImage]]; + UIImage *processedImage = [UIImage imageWithCGImage:image scale:[imageToFilter scale] orientation:[imageToFilter imageOrientation]]; + CGImageRelease(image); + return processedImage; +} + +- (CGImageRef)newCGImageByFilteringImage:(UIImage *)imageToFilter +{ + return [self newCGImageByFilteringCGImage:[imageToFilter CGImage]]; +} + +#else + +- (NSImage *)imageFromCurrentFramebuffer; +{ + return [self imageFromCurrentFramebufferWithOrientation:UIImageOrientationLeft]; +} + +- (NSImage *)imageFromCurrentFramebufferWithOrientation:(UIImageOrientation)imageOrientation; +{ + CGImageRef cgImageFromBytes = [self newCGImageFromCurrentlyProcessedOutput]; + NSImage *finalImage = [[NSImage alloc] initWithCGImage:cgImageFromBytes size:NSZeroSize]; + 
CGImageRelease(cgImageFromBytes); + + return finalImage; +} + +- (NSImage *)imageByFilteringImage:(NSImage *)imageToFilter; +{ + CGImageRef image = [self newCGImageByFilteringCGImage:[imageToFilter CGImageForProposedRect:NULL context:[NSGraphicsContext currentContext] hints:nil]]; + NSImage *processedImage = [[NSImage alloc] initWithCGImage:image size:NSZeroSize]; + CGImageRelease(image); + return processedImage; +} + +- (CGImageRef)newCGImageByFilteringImage:(NSImage *)imageToFilter +{ + return [self newCGImageByFilteringCGImage:[imageToFilter CGImageForProposedRect:NULL context:[NSGraphicsContext currentContext] hints:nil]]; +} + +#endif + +#pragma mark - +#pragma mark Accessors + +- (void)setAudioEncodingTarget:(GPUImageMovieWriter *)newValue; +{ + _audioEncodingTarget = newValue; + if( ! _audioEncodingTarget.hasAudioTrack ) + { + _audioEncodingTarget.hasAudioTrack = YES; + } +} + +-(void)setOutputTextureOptions:(GPUTextureOptions)outputTextureOptions +{ + _outputTextureOptions = outputTextureOptions; + + if( outputFramebuffer.texture ) + { + glBindTexture(GL_TEXTURE_2D, outputFramebuffer.texture); + //_outputTextureOptions.format + //_outputTextureOptions.internalFormat + //_outputTextureOptions.magFilter + //_outputTextureOptions.minFilter + //_outputTextureOptions.type + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, _outputTextureOptions.wrapS); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, _outputTextureOptions.wrapT); + glBindTexture(GL_TEXTURE_2D, 0); + } +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageOverlayBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageOverlayBlendFilter.h new file mode 100755 index 00000000..57eb8402 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageOverlayBlendFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageOverlayBlendFilter : GPUImageTwoInputFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageOverlayBlendFilter.m 
b/LFLiveKit/Vendor/GPUImage/GPUImageOverlayBlendFilter.m new file mode 100755 index 00000000..c8c5185d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageOverlayBlendFilter.m @@ -0,0 +1,94 @@ +#import "GPUImageOverlayBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageOverlayBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + mediump vec4 base = texture2D(inputImageTexture, textureCoordinate); + mediump vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); + + mediump float ra; + if (2.0 * base.r < base.a) { + ra = 2.0 * overlay.r * base.r + overlay.r * (1.0 - base.a) + base.r * (1.0 - overlay.a); + } else { + ra = overlay.a * base.a - 2.0 * (base.a - base.r) * (overlay.a - overlay.r) + overlay.r * (1.0 - base.a) + base.r * (1.0 - overlay.a); + } + + mediump float ga; + if (2.0 * base.g < base.a) { + ga = 2.0 * overlay.g * base.g + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + } else { + ga = overlay.a * base.a - 2.0 * (base.a - base.g) * (overlay.a - overlay.g) + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + } + + mediump float ba; + if (2.0 * base.b < base.a) { + ba = 2.0 * overlay.b * base.b + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + } else { + ba = overlay.a * base.a - 2.0 * (base.a - base.b) * (overlay.a - overlay.b) + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + } + + gl_FragColor = vec4(ra, ga, ba, 1.0); + } +); +#else +NSString *const kGPUImageOverlayBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 base = texture2D(inputImageTexture, textureCoordinate); + vec4 overlay = texture2D(inputImageTexture2, 
textureCoordinate2); + + float ra; + if (2.0 * base.r < base.a) { + ra = 2.0 * overlay.r * base.r + overlay.r * (1.0 - base.a) + base.r * (1.0 - overlay.a); + } else { + ra = overlay.a * base.a - 2.0 * (base.a - base.r) * (overlay.a - overlay.r) + overlay.r * (1.0 - base.a) + base.r * (1.0 - overlay.a); + } + + float ga; + if (2.0 * base.g < base.a) { + ga = 2.0 * overlay.g * base.g + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + } else { + ga = overlay.a * base.a - 2.0 * (base.a - base.g) * (overlay.a - overlay.g) + overlay.g * (1.0 - base.a) + base.g * (1.0 - overlay.a); + } + + float ba; + if (2.0 * base.b < base.a) { + ba = 2.0 * overlay.b * base.b + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + } else { + ba = overlay.a * base.a - 2.0 * (base.a - base.b) * (overlay.a - overlay.b) + overlay.b * (1.0 - base.a) + base.b * (1.0 - overlay.a); + } + + gl_FragColor = vec4(ra, ga, ba, 1.0); + } +); +#endif + +@implementation GPUImageOverlayBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageOverlayBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageParallelCoordinateLineTransformFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageParallelCoordinateLineTransformFilter.h new file mode 100644 index 00000000..aa8f3f47 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageParallelCoordinateLineTransformFilter.h @@ -0,0 +1,16 @@ +#import "GPUImageFilter.h" + +// This is an accumulator that uses a Hough transform in parallel coordinate space to identify probable lines in a scene. +// +// It is entirely based on the work of the Graph@FIT research group at the Brno University of Technology and their publications: +// M. Dubská, J. Havel, and A. Herout. Real-Time Detection of Lines using Parallel Coordinates and OpenGL. Proceedings of SCCG 2011, Bratislava, SK, p. 7. +// M. Dubská, J. Havel, and A. Herout. 
PClines — Line detection using parallel coordinates. 2011 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), p. 1489- 1494. + +@interface GPUImageParallelCoordinateLineTransformFilter : GPUImageFilter +{ + GLubyte *rawImagePixels; + GLfloat *lineCoordinates; + unsigned int maxLinePairsToRender, linePairsToRender; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageParallelCoordinateLineTransformFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageParallelCoordinateLineTransformFilter.m new file mode 100644 index 00000000..0a2f6e49 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageParallelCoordinateLineTransformFilter.m @@ -0,0 +1,266 @@ +#import "GPUImageParallelCoordinateLineTransformFilter.h" + +NSString *const kGPUImageHoughAccumulationVertexShaderString = SHADER_STRING +( + attribute vec4 position; + + void main() + { + gl_Position = position; + } +); + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageHoughAccumulationFragmentShaderString = SHADER_STRING +( + const lowp float scalingFactor = 1.0 / 256.0; + + void main() + { + gl_FragColor = vec4(0.004, 0.004, 0.004, 1.0); + } +); + +// highp - 16-bit, floating point range: -2^62 to 2^62, integer range: -2^16 to 2^16 +// NOTE: See below for where I'm tacking on the required extension as a prefix +NSString *const kGPUImageHoughAccumulationFBOReadFragmentShaderString = SHADER_STRING +( + const lowp float scalingFactor = 0.004; +// const lowp float scalingFactor = 0.1; + + void main() + { + mediump vec4 fragmentData = gl_LastFragData[0]; + + fragmentData.r = fragmentData.r + scalingFactor; + fragmentData.g = scalingFactor * floor(fragmentData.r) + fragmentData.g; + fragmentData.b = scalingFactor * floor(fragmentData.g) + fragmentData.b; + fragmentData.a = scalingFactor * floor(fragmentData.b) + fragmentData.a; + + fragmentData = fract(fragmentData); + + gl_FragColor = vec4(fragmentData.rgb, 1.0); + } +); + +#else +NSString *const 
kGPUImageHoughAccumulationFragmentShaderString = SHADER_STRING +( + const float scalingFactor = 1.0 / 256.0; + + void main() + { + gl_FragColor = vec4(0.004, 0.004, 0.004, 1.0); + } +); + +NSString *const kGPUImageHoughAccumulationFBOReadFragmentShaderString = SHADER_STRING +( + const float scalingFactor = 1.0 / 256.0; + + void main() + { + // gl_FragColor = vec4(scalingFactor, scalingFactor, scalingFactor, 1.0); + gl_FragColor = vec4(0.004, 0.004, 0.004, 1.0); + } +); +#endif + +@interface GPUImageParallelCoordinateLineTransformFilter() +// Rendering +- (void)generateLineCoordinates; + +@end + +@implementation GPUImageParallelCoordinateLineTransformFilter + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + NSString *fragmentShaderToUse = nil; + + if ([GPUImageContext deviceSupportsFramebufferReads]) + { + fragmentShaderToUse = [NSString stringWithFormat:@"#extension GL_EXT_shader_framebuffer_fetch : require\n %@",kGPUImageHoughAccumulationFBOReadFragmentShaderString]; + } + else + { + fragmentShaderToUse = kGPUImageHoughAccumulationFragmentShaderString; + } + + if (!(self = [super initWithVertexShaderFromString:kGPUImageHoughAccumulationVertexShaderString fragmentShaderFromString:fragmentShaderToUse])) + { + return nil; + } + + + return self; +} + +// TODO: have this be regenerated on change of image size +- (void)dealloc; +{ + free(rawImagePixels); + free(lineCoordinates); +} + +- (void)initializeAttributes; +{ + [filterProgram addAttribute:@"position"]; +} + +#pragma mark - +#pragma mark Rendering + +#define MAXLINESCALINGFACTOR 4 + +- (void)generateLineCoordinates; +{ + unsigned int imageByteSize = inputTextureSize.width * inputTextureSize.height * 4; + rawImagePixels = (GLubyte *)malloc(imageByteSize); + + maxLinePairsToRender = (inputTextureSize.width * inputTextureSize.height) / MAXLINESCALINGFACTOR; + lineCoordinates = calloc(maxLinePairsToRender * 8, sizeof(GLfloat)); +} + +- (void)newFrameReadyAtTime:(CMTime)frameTime 
atIndex:(NSInteger)textureIndex; +{ + if (lineCoordinates == NULL) + { + [self generateLineCoordinates]; + } + + [self renderToTextureWithVertices:NULL textureCoordinates:NULL]; + + [self informTargetsAboutNewFrameAtTime:frameTime]; +} + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + // we need a normal color texture for this filter + NSAssert(self.outputTextureOptions.internalFormat == GL_RGBA, @"The output texture format for this filter must be GL_RGBA."); + NSAssert(self.outputTextureOptions.type == GL_UNSIGNED_BYTE, @"The type of the output texture of this filter must be GL_UNSIGNED_BYTE."); + + if (self.preventRendering) + { + [firstInputFramebuffer unlock]; + return; + } + + // Grab the edge points from the previous frame and create the parallel coordinate lines for them + // This would be a great place to have a working histogram pyramid implementation + + [GPUImageContext useImageProcessingContext]; + [firstInputFramebuffer activateFramebuffer]; + + glFinish(); + glReadPixels(0, 0, inputTextureSize.width, inputTextureSize.height, GL_RGBA, GL_UNSIGNED_BYTE, rawImagePixels); + + CGFloat xAspectMultiplier = 1.0, yAspectMultiplier = 1.0; + +// if (inputTextureSize.width > inputTextureSize.height) +// { +// yAspectMultiplier = inputTextureSize.height / inputTextureSize.width; +// } +// else +// { +// xAspectMultiplier = inputTextureSize.width / inputTextureSize.height; +// } + +// CFAbsoluteTime startTime = CFAbsoluteTimeGetCurrent(); + + unsigned int imageByteSize = inputTextureSize.width * inputTextureSize.height * 4; + unsigned int imageWidth = inputTextureSize.width * 4; + + linePairsToRender = 0; + unsigned int currentByte = 0; + unsigned int lineStorageIndex = 0; + unsigned int maxLineStorageIndex = maxLinePairsToRender * 8 - 8; + + GLfloat minY = 100, maxY = -100, minX = 100, maxX = -100; + while (currentByte < imageByteSize) + { + GLubyte colorByte = rawImagePixels[currentByte]; 
+ + if (colorByte > 0) + { + unsigned int xCoordinate = currentByte % imageWidth; + unsigned int yCoordinate = currentByte / imageWidth; + + CGFloat normalizedXCoordinate = (-1.0 + 2.0 * (CGFloat)(xCoordinate / 4) / inputTextureSize.width) * xAspectMultiplier; + CGFloat normalizedYCoordinate = (-1.0 + 2.0 * (CGFloat)(yCoordinate) / inputTextureSize.height) * yAspectMultiplier; + + minY = MIN(minY, normalizedYCoordinate); + maxY = MAX(maxY, normalizedYCoordinate); + minX = MIN(minX, normalizedXCoordinate); + maxX = MAX(maxX, normalizedXCoordinate); + +// NSLog(@"Parallel line coordinates: (%f, %f) - (%f, %f) - (%f, %f)", -1.0, -normalizedYCoordinate, 0.0, normalizedXCoordinate, 1.0, normalizedYCoordinate); + // T space coordinates, (-d, -y) to (0, x) + lineCoordinates[lineStorageIndex++] = -1.0; + lineCoordinates[lineStorageIndex++] = -normalizedYCoordinate; + lineCoordinates[lineStorageIndex++] = 0.0; + lineCoordinates[lineStorageIndex++] = normalizedXCoordinate; + + // S space coordinates, (0, x) to (d, y) + lineCoordinates[lineStorageIndex++] = 0.0; + lineCoordinates[lineStorageIndex++] = normalizedXCoordinate; + lineCoordinates[lineStorageIndex++] = 1.0; + lineCoordinates[lineStorageIndex++] = normalizedYCoordinate; + + linePairsToRender++; + + linePairsToRender = MIN(linePairsToRender, maxLinePairsToRender); + lineStorageIndex = MIN(lineStorageIndex, maxLineStorageIndex); + } + currentByte +=8; + } + +// NSLog(@"Line pairs to render: %d out of max: %d", linePairsToRender, maxLinePairsToRender); + +// CFAbsoluteTime currentFrameTime = (CFAbsoluteTimeGetCurrent() - startTime); +// NSLog(@"Line generation processing time : %f ms", 1000.0 * currentFrameTime); + + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + + if (usingNextFrameForImageCapture) + { + [outputFramebuffer lock]; + } + + [GPUImageContext 
setActiveShaderProgram:filterProgram]; + [self setUniformsForProgramAtIndex:0]; + + glClearColor(0.0, 0.0, 0.0, 1.0); + glClear(GL_COLOR_BUFFER_BIT); + + if (![GPUImageContext deviceSupportsFramebufferReads]) + { + glBlendEquation(GL_FUNC_ADD); + glBlendFunc(GL_ONE, GL_ONE); + glEnable(GL_BLEND); + } + else + { + } + + glLineWidth(1); + + glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, lineCoordinates); + glDrawArrays(GL_LINES, 0, (linePairsToRender * 4)); + + if (![GPUImageContext deviceSupportsFramebufferReads]) + { + glDisable(GL_BLEND); + } + [firstInputFramebuffer unlock]; + if (usingNextFrameForImageCapture) + { + dispatch_semaphore_signal(imageCaptureSemaphore); + } +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePerlinNoiseFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImagePerlinNoiseFilter.h new file mode 100644 index 00000000..922f4d30 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePerlinNoiseFilter.h @@ -0,0 +1,13 @@ +#import "GPUImageFilter.h" + +@interface GPUImagePerlinNoiseFilter : GPUImageFilter +{ + GLint scaleUniform, colorStartUniform, colorFinishUniform; +} + +@property (readwrite, nonatomic) GPUVector4 colorStart; +@property (readwrite, nonatomic) GPUVector4 colorFinish; + +@property (readwrite, nonatomic) float scale; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePerlinNoiseFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImagePerlinNoiseFilter.m new file mode 100644 index 00000000..9ca7cbad --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePerlinNoiseFilter.m @@ -0,0 +1,239 @@ +#import "GPUImagePerlinNoiseFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImagePerlinNoiseFragmentShaderString = SHADER_STRING +( + precision highp float; + varying highp vec2 textureCoordinate; + uniform float scale; + + uniform vec4 colorStart; + uniform vec4 colorFinish; + + // + // Description : Array and textureless GLSL 2D/3D/4D simplex + // noise functions. 
+ // Author : Ian McEwan, Ashima Arts. + // Maintainer : ijm + // Lastmod : 20110822 (ijm) + // License : Copyright (C) 2011 Ashima Arts. All rights reserved. + // Distributed under the MIT License. See LICENSE file. + // https://github.com/ashima/webgl-noise + // + + vec4 mod289(vec4 x) +{ + return x - floor(x * (1.0 / 289.0)) * 289.0; +} + + vec4 permute(vec4 x) +{ + return mod289(((x*34.0)+1.0)*x); +} + + vec4 taylorInvSqrt(vec4 r) +{ + return 1.79284291400159 - 0.85373472095314 * r; +} + + vec2 fade(vec2 t) { + return t*t*t*(t*(t*6.0-15.0)+10.0); + } + + // Classic Perlin noise + float cnoise(vec2 P) +{ + vec4 Pi = floor(P.xyxy) + vec4(0.0, 0.0, 1.0, 1.0); + vec4 Pf = fract(P.xyxy) - vec4(0.0, 0.0, 1.0, 1.0); + Pi = mod289(Pi); // To avoid truncation effects in permutation + vec4 ix = Pi.xzxz; + vec4 iy = Pi.yyww; + vec4 fx = Pf.xzxz; + vec4 fy = Pf.yyww; + + vec4 i = permute(permute(ix) + iy); + + vec4 gx = fract(i * (1.0 / 41.0)) * 2.0 - 1.0 ; + vec4 gy = abs(gx) - 0.5 ; + vec4 tx = floor(gx + 0.5); + gx = gx - tx; + + vec2 g00 = vec2(gx.x,gy.x); + vec2 g10 = vec2(gx.y,gy.y); + vec2 g01 = vec2(gx.z,gy.z); + vec2 g11 = vec2(gx.w,gy.w); + + vec4 norm = taylorInvSqrt(vec4(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11))); + g00 *= norm.x; + g01 *= norm.y; + g10 *= norm.z; + g11 *= norm.w; + + float n00 = dot(g00, vec2(fx.x, fy.x)); + float n10 = dot(g10, vec2(fx.y, fy.y)); + float n01 = dot(g01, vec2(fx.z, fy.z)); + float n11 = dot(g11, vec2(fx.w, fy.w)); + + vec2 fade_xy = fade(Pf.xy); + vec2 n_x = mix(vec2(n00, n01), vec2(n10, n11), fade_xy.x); + float n_xy = mix(n_x.x, n_x.y, fade_xy.y); + return 2.3 * n_xy; +} + + + void main() + { + + float n1 = (cnoise(textureCoordinate * scale) + 1.0) / 2.0; + + vec4 colorDiff = colorFinish - colorStart; + vec4 color = colorStart + colorDiff * n1; + + gl_FragColor = color; + } +); +#else +NSString *const kGPUImagePerlinNoiseFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + uniform 
float scale; + + uniform vec4 colorStart; + uniform vec4 colorFinish; + + // + // Description : Array and textureless GLSL 2D/3D/4D simplex + // noise functions. + // Author : Ian McEwan, Ashima Arts. + // Maintainer : ijm + // Lastmod : 20110822 (ijm) + // License : Copyright (C) 2011 Ashima Arts. All rights reserved. + // Distributed under the MIT License. See LICENSE file. + // https://github.com/ashima/webgl-noise + // + + vec4 mod289(vec4 x) +{ + return x - floor(x * (1.0 / 289.0)) * 289.0; +} + + vec4 permute(vec4 x) +{ + return mod289(((x*34.0)+1.0)*x); +} + + vec4 taylorInvSqrt(vec4 r) +{ + return 1.79284291400159 - 0.85373472095314 * r; +} + + vec2 fade(vec2 t) { + return t*t*t*(t*(t*6.0-15.0)+10.0); + } + + // Classic Perlin noise + float cnoise(vec2 P) +{ + vec4 Pi = floor(P.xyxy) + vec4(0.0, 0.0, 1.0, 1.0); + vec4 Pf = fract(P.xyxy) - vec4(0.0, 0.0, 1.0, 1.0); + Pi = mod289(Pi); // To avoid truncation effects in permutation + vec4 ix = Pi.xzxz; + vec4 iy = Pi.yyww; + vec4 fx = Pf.xzxz; + vec4 fy = Pf.yyww; + + vec4 i = permute(permute(ix) + iy); + + vec4 gx = fract(i * (1.0 / 41.0)) * 2.0 - 1.0 ; + vec4 gy = abs(gx) - 0.5 ; + vec4 tx = floor(gx + 0.5); + gx = gx - tx; + + vec2 g00 = vec2(gx.x,gy.x); + vec2 g10 = vec2(gx.y,gy.y); + vec2 g01 = vec2(gx.z,gy.z); + vec2 g11 = vec2(gx.w,gy.w); + + vec4 norm = taylorInvSqrt(vec4(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11))); + g00 *= norm.x; + g01 *= norm.y; + g10 *= norm.z; + g11 *= norm.w; + + float n00 = dot(g00, vec2(fx.x, fy.x)); + float n10 = dot(g10, vec2(fx.y, fy.y)); + float n01 = dot(g01, vec2(fx.z, fy.z)); + float n11 = dot(g11, vec2(fx.w, fy.w)); + + vec2 fade_xy = fade(Pf.xy); + vec2 n_x = mix(vec2(n00, n01), vec2(n10, n11), fade_xy.x); + float n_xy = mix(n_x.x, n_x.y, fade_xy.y); + return 2.3 * n_xy; + } + + void main() + { + + float n1 = (cnoise(textureCoordinate * scale) + 1.0) / 2.0; + + vec4 colorDiff = colorFinish - colorStart; + vec4 color = colorStart + colorDiff * n1; + + 
gl_FragColor = color; + } +); +#endif + + +@implementation GPUImagePerlinNoiseFilter + +@synthesize scale = _scale, colorStart = _colorStart, colorFinish = _colorFinish; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImagePerlinNoiseFragmentShaderString])) + { + return nil; + } + + scaleUniform = [filterProgram uniformIndex:@"scale"]; + + colorStartUniform = [filterProgram uniformIndex:@"colorStart"]; + colorFinishUniform = [filterProgram uniformIndex:@"colorFinish"]; + + [self setScale:8.0]; + + [self setColorStart:(GPUVector4){0.0, 0.0, 0.0, 1.0}]; + [self setColorFinish:(GPUVector4){1.0, 1.0, 1.0, 1.0}]; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setScale:(float)scale +{ + _scale = scale; + + [self setFloat:_scale forUniform:scaleUniform program:filterProgram]; +} + +- (void)setColorStart:(GPUVector4)colorStart +{ + _colorStart = colorStart; + + [self setVec4:_colorStart forUniform:colorStartUniform program:filterProgram]; +} + +- (void)setColorFinish:(GPUVector4)colorFinish +{ + _colorFinish = colorFinish; + + [self setVec4:_colorFinish forUniform:colorFinishUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePinchDistortionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImagePinchDistortionFilter.h new file mode 100755 index 00000000..994774fd --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePinchDistortionFilter.h @@ -0,0 +1,20 @@ +#import "GPUImageFilter.h" + +/** Creates a pinch distortion of the image + */ +@interface GPUImagePinchDistortionFilter : GPUImageFilter +{ + GLint aspectRatioUniform, radiusUniform, centerUniform, scaleUniform; +} + +/** The center about which to apply the distortion, with a default of (0.5, 0.5) + */ +@property(readwrite, nonatomic) CGPoint center; +/** The radius of the distortion, ranging from 0.0 to 2.0, with a default of 1.0 + */ +@property(readwrite, nonatomic) CGFloat 
radius; +/** The amount of distortion to apply, from -2.0 to 2.0, with a default of 0.5 + */ +@property(readwrite, nonatomic) CGFloat scale; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePinchDistortionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImagePinchDistortionFilter.m new file mode 100755 index 00000000..76d79096 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePinchDistortionFilter.m @@ -0,0 +1,176 @@ +#import "GPUImagePinchDistortionFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImagePinchDistortionFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp float aspectRatio; + uniform highp vec2 center; + uniform highp float radius; + uniform highp float scale; + + void main() + { + highp vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + highp float dist = distance(center, textureCoordinateToUse); + textureCoordinateToUse = textureCoordinate; + + if (dist < radius) + { + textureCoordinateToUse -= center; + highp float percent = 1.0 + ((0.5 - dist) / 0.5) * scale; + textureCoordinateToUse = textureCoordinateToUse * percent; + textureCoordinateToUse += center; + + gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse ); + } + else + { + gl_FragColor = texture2D(inputImageTexture, textureCoordinate ); + } + } +); +#else +NSString *const kGPUImagePinchDistortionFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform float aspectRatio; + uniform vec2 center; + uniform float radius; + uniform float scale; + + void main() + { + vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + float dist = distance(center, textureCoordinateToUse); + textureCoordinateToUse = textureCoordinate; + + if (dist < radius) + { + 
textureCoordinateToUse -= center; + float percent = 1.0 + ((0.5 - dist) / 0.5) * scale; + textureCoordinateToUse = textureCoordinateToUse * percent; + textureCoordinateToUse += center; + + gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse ); + } + else + { + gl_FragColor = texture2D(inputImageTexture, textureCoordinate ); + } + } +); +#endif + +@interface GPUImagePinchDistortionFilter () + +- (void)adjustAspectRatio; + +@property (readwrite, nonatomic) CGFloat aspectRatio; + +@end + +@implementation GPUImagePinchDistortionFilter + +@synthesize aspectRatio = _aspectRatio; +@synthesize center = _center; +@synthesize radius = _radius; +@synthesize scale = _scale; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImagePinchDistortionFragmentShaderString])) + { + return nil; + } + + aspectRatioUniform = [filterProgram uniformIndex:@"aspectRatio"]; + radiusUniform = [filterProgram uniformIndex:@"radius"]; + scaleUniform = [filterProgram uniformIndex:@"scale"]; + centerUniform = [filterProgram uniformIndex:@"center"]; + + self.radius = 1.0; + self.scale = 0.5; + self.center = CGPointMake(0.5, 0.5); + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)adjustAspectRatio; +{ + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + [self setAspectRatio:(inputTextureSize.width / inputTextureSize.height)]; + } + else + { + [self setAspectRatio:(inputTextureSize.height / inputTextureSize.width)]; + } +} + +- (void)forceProcessingAtSize:(CGSize)frameSize; +{ + [super forceProcessingAtSize:frameSize]; + [self adjustAspectRatio]; +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + CGSize oldInputSize = inputTextureSize; + [super setInputSize:newSize atIndex:textureIndex]; + + if ( (!CGSizeEqualToSize(oldInputSize, inputTextureSize)) && (!CGSizeEqualToSize(newSize, CGSizeZero)) ) + { + [self adjustAspectRatio]; + } +} + +- 
(void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + [super setInputRotation:newInputRotation atIndex:textureIndex]; + [self setCenter:self.center]; + [self adjustAspectRatio]; +} + +- (void)setAspectRatio:(CGFloat)newValue; +{ + _aspectRatio = newValue; + + [self setFloat:_aspectRatio forUniform:aspectRatioUniform program:filterProgram]; +} + +- (void)setRadius:(CGFloat)newValue; +{ + _radius = newValue; + + [self setFloat:_radius forUniform:radiusUniform program:filterProgram]; +} + +- (void)setScale:(CGFloat)newValue; +{ + _scale = newValue; + + [self setFloat:_scale forUniform:scaleUniform program:filterProgram]; +} + +- (void)setCenter:(CGPoint)newValue; +{ + _center = newValue; + + CGPoint rotatedPoint = [self rotatedPoint:_center forRotation:inputRotation]; + [self setPoint:rotatedPoint forUniform:centerUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePixellateFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImagePixellateFilter.h new file mode 100755 index 00000000..d0f6ae04 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePixellateFilter.h @@ -0,0 +1,12 @@ +#import "GPUImageFilter.h" + +@interface GPUImagePixellateFilter : GPUImageFilter +{ + GLint fractionalWidthOfAPixelUniform, aspectRatioUniform; +} + +// The fractional width of the image to use as a size for the pixels in the resulting image. Values below one pixel width in the source image are ignored. 
+@property(readwrite, nonatomic) CGFloat fractionalWidthOfAPixel; + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePixellateFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImagePixellateFilter.m new file mode 100755 index 00000000..88430d09 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePixellateFilter.m @@ -0,0 +1,151 @@ +#import "GPUImagePixellateFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImagePixellationFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp float fractionalWidthOfPixel; + uniform highp float aspectRatio; + + void main() + { + highp vec2 sampleDivisor = vec2(fractionalWidthOfPixel, fractionalWidthOfPixel / aspectRatio); + + highp vec2 samplePos = textureCoordinate - mod(textureCoordinate, sampleDivisor) + 0.5 * sampleDivisor; + gl_FragColor = texture2D(inputImageTexture, samplePos ); + } +); +#else +NSString *const kGPUImagePixellationFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform float fractionalWidthOfPixel; + uniform float aspectRatio; + + void main() + { + vec2 sampleDivisor = vec2(fractionalWidthOfPixel, fractionalWidthOfPixel / aspectRatio); + + vec2 samplePos = textureCoordinate - mod(textureCoordinate, sampleDivisor) + 0.5 * sampleDivisor; + gl_FragColor = texture2D(inputImageTexture, samplePos ); + } +); +#endif + +@interface GPUImagePixellateFilter () + +@property (readwrite, nonatomic) CGFloat aspectRatio; + +- (void)adjustAspectRatio; + +@end + +@implementation GPUImagePixellateFilter + +@synthesize fractionalWidthOfAPixel = _fractionalWidthOfAPixel; +@synthesize aspectRatio = _aspectRatio; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithFragmentShaderFromString:kGPUImagePixellationFragmentShaderString])) + { + return nil; + } + + return self; +} + +- 
(id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [super initWithFragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + fractionalWidthOfAPixelUniform = [filterProgram uniformIndex:@"fractionalWidthOfPixel"]; + aspectRatioUniform = [filterProgram uniformIndex:@"aspectRatio"]; + + self.fractionalWidthOfAPixel = 0.05; + + return self; +} + +- (void)adjustAspectRatio; +{ + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + [self setAspectRatio:(inputTextureSize.width / inputTextureSize.height)]; + } + else + { + [self setAspectRatio:(inputTextureSize.height / inputTextureSize.width)]; + } +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + [super setInputRotation:newInputRotation atIndex:textureIndex]; + [self adjustAspectRatio]; +} + +- (void)forceProcessingAtSize:(CGSize)frameSize; +{ + [super forceProcessingAtSize:frameSize]; + [self adjustAspectRatio]; +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + CGSize oldInputSize = inputTextureSize; + [super setInputSize:newSize atIndex:textureIndex]; + + if ( (!CGSizeEqualToSize(oldInputSize, inputTextureSize)) && (!CGSizeEqualToSize(newSize, CGSizeZero)) ) + { + [self adjustAspectRatio]; + } +} + +#pragma mark - +#pragma mark Accessors + +- (void)setFractionalWidthOfAPixel:(CGFloat)newValue; +{ + CGFloat singlePixelSpacing; + if (inputTextureSize.width != 0.0) + { + singlePixelSpacing = 1.0 / inputTextureSize.width; + } + else + { + singlePixelSpacing = 1.0 / 2048.0; + } + + if (newValue < singlePixelSpacing) + { + _fractionalWidthOfAPixel = singlePixelSpacing; + } + else + { + _fractionalWidthOfAPixel = newValue; + } + + [self setFloat:_fractionalWidthOfAPixel forUniform:fractionalWidthOfAPixelUniform program:filterProgram]; +} + +- (void)setAspectRatio:(CGFloat)newValue; +{ + _aspectRatio = newValue; + + [self setFloat:_aspectRatio forUniform:aspectRatioUniform 
program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePixellatePositionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImagePixellatePositionFilter.h new file mode 100755 index 00000000..9d304c93 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePixellatePositionFilter.h @@ -0,0 +1,17 @@ +#import "GPUImageFilter.h" + +@interface GPUImagePixellatePositionFilter : GPUImageFilter +{ + GLint fractionalWidthOfAPixelUniform, aspectRatioUniform, centerUniform, radiusUniform; +} + +// The fractional width of the image to use as a size for the pixels in the resulting image. Values below one pixel width in the source image are ignored. +@property(readwrite, nonatomic) CGFloat fractionalWidthOfAPixel; + +// the center point to start pixelation in texture coordinates, default 0.5, 0.5 +@property(readwrite, nonatomic) CGPoint center; + +// the radius (0.0 - 1.0) in which to pixelate, default 1.0 +@property(readwrite, nonatomic) CGFloat radius; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePixellatePositionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImagePixellatePositionFilter.m new file mode 100755 index 00000000..f1bd09cc --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePixellatePositionFilter.m @@ -0,0 +1,194 @@ +#import "GPUImagePixellatePositionFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImagePixellationPositionFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp float fractionalWidthOfPixel; + uniform highp float aspectRatio; + uniform lowp vec2 pixelateCenter; + uniform highp float pixelateRadius; + + void main() + { + highp vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + highp float dist = distance(pixelateCenter, textureCoordinateToUse); + + if (dist < pixelateRadius) + { + highp vec2 sampleDivisor = vec2(fractionalWidthOfPixel, 
fractionalWidthOfPixel / aspectRatio); + highp vec2 samplePos = textureCoordinate - mod(textureCoordinate, sampleDivisor) + 0.5 * sampleDivisor; + gl_FragColor = texture2D(inputImageTexture, samplePos ); + } + else + { + gl_FragColor = texture2D(inputImageTexture, textureCoordinate ); + } + } +); +#else +NSString *const kGPUImagePixellationPositionFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform float fractionalWidthOfPixel; + uniform float aspectRatio; + uniform vec2 pixelateCenter; + uniform float pixelateRadius; + + void main() + { + vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + float dist = distance(pixelateCenter, textureCoordinateToUse); + + if (dist < pixelateRadius) + { + vec2 sampleDivisor = vec2(fractionalWidthOfPixel, fractionalWidthOfPixel / aspectRatio); + vec2 samplePos = textureCoordinate - mod(textureCoordinate, sampleDivisor) + 0.5 * sampleDivisor; + gl_FragColor = texture2D(inputImageTexture, samplePos ); + } + else + { + gl_FragColor = texture2D(inputImageTexture, textureCoordinate ); + } + } +); +#endif + +@interface GPUImagePixellatePositionFilter () + +- (void)adjustAspectRatio; + +@property (readwrite, nonatomic) CGFloat aspectRatio; + +@end + +@implementation GPUImagePixellatePositionFilter + +@synthesize fractionalWidthOfAPixel = _fractionalWidthOfAPixel; +@synthesize aspectRatio = _aspectRatio; +@synthesize center = _center; +@synthesize radius = _radius; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithFragmentShaderFromString:kGPUImagePixellationPositionFragmentShaderString])) + { + return nil; + } + + return self; +} + +- (id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [super initWithFragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + fractionalWidthOfAPixelUniform 
= [filterProgram uniformIndex:@"fractionalWidthOfPixel"]; + aspectRatioUniform = [filterProgram uniformIndex:@"aspectRatio"]; + centerUniform = [filterProgram uniformIndex:@"pixelateCenter"]; + radiusUniform = [filterProgram uniformIndex:@"pixelateRadius"]; + + self.fractionalWidthOfAPixel = 0.05; + self.center = CGPointMake(0.5f, 0.5f); + self.radius = 0.25f; + + return self; +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + CGSize oldInputSize = inputTextureSize; + [super setInputSize:newSize atIndex:textureIndex]; + + if ( (!CGSizeEqualToSize(oldInputSize, inputTextureSize)) && (!CGSizeEqualToSize(newSize, CGSizeZero)) ) + { + [self adjustAspectRatio]; + } +} + +#pragma mark - +#pragma mark Accessors + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + [super setInputRotation:newInputRotation atIndex:textureIndex]; + [self setCenter:self.center]; + [self adjustAspectRatio]; +} + +- (void)adjustAspectRatio; +{ + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + [self setAspectRatio:(inputTextureSize.width / inputTextureSize.height)]; + } + else + { + [self setAspectRatio:(inputTextureSize.height / inputTextureSize.width)]; + } +} + +- (void)forceProcessingAtSize:(CGSize)frameSize; +{ + [super forceProcessingAtSize:frameSize]; + [self adjustAspectRatio]; +} + +- (void)setFractionalWidthOfAPixel:(CGFloat)newValue; +{ + CGFloat singlePixelSpacing; + if (inputTextureSize.width != 0.0) + { + singlePixelSpacing = 1.0 / inputTextureSize.width; + } + else + { + singlePixelSpacing = 1.0 / 2048.0; + } + + if (newValue < singlePixelSpacing) + { + _fractionalWidthOfAPixel = singlePixelSpacing; + } + else + { + _fractionalWidthOfAPixel = newValue; + } + + [self setFloat:_fractionalWidthOfAPixel forUniform:fractionalWidthOfAPixelUniform program:filterProgram]; +} + +- (void)setAspectRatio:(CGFloat)newValue; +{ + _aspectRatio = newValue; + + [self setFloat:_aspectRatio 
forUniform:aspectRatioUniform program:filterProgram]; +} + +- (void)setCenter:(CGPoint)center +{ + _center = center; + CGPoint rotatedPoint = [self rotatedPoint:center forRotation:inputRotation]; + [self setPoint:rotatedPoint forUniform:centerUniform program:filterProgram]; +} + +- (void)setRadius:(CGFloat)radius +{ + _radius = radius; + + [self setFloat:_radius forUniform:radiusUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePoissonBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImagePoissonBlendFilter.h new file mode 100644 index 00000000..58eff225 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePoissonBlendFilter.h @@ -0,0 +1,18 @@ +#import "GPUImageTwoInputCrossTextureSamplingFilter.h" +#import "GPUImageFilterGroup.h" + +@interface GPUImagePoissonBlendFilter : GPUImageTwoInputCrossTextureSamplingFilter +{ + GLint mixUniform; + + GPUImageFramebuffer *secondOutputFramebuffer; +} + +// Mix ranges from 0.0 (only image 1) to 1.0 (only image 2 gradients), with 1.0 as the normal level +@property(readwrite, nonatomic) CGFloat mix; + +// The number of times to propagate the gradients. +// Crank this up to 100 or even 1000 if you want to get anywhere near convergence. Yes, this will be slow. 
+@property(readwrite, nonatomic) NSUInteger numIterations; + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePoissonBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImagePoissonBlendFilter.m new file mode 100644 index 00000000..0167e024 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePoissonBlendFilter.m @@ -0,0 +1,175 @@ +#import "GPUImagePoissonBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImagePoissonBlendFragmentShaderString = SHADER_STRING +( + precision mediump float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + varying vec2 topTextureCoordinate; + varying vec2 bottomTextureCoordinate; + + varying vec2 textureCoordinate2; + varying vec2 leftTextureCoordinate2; + varying vec2 rightTextureCoordinate2; + varying vec2 topTextureCoordinate2; + varying vec2 bottomTextureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform lowp float mixturePercent; + + void main() + { + vec4 centerColor = texture2D(inputImageTexture, textureCoordinate); + vec3 bottomColor = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; + vec3 leftColor = texture2D(inputImageTexture, leftTextureCoordinate).rgb; + vec3 rightColor = texture2D(inputImageTexture, rightTextureCoordinate).rgb; + vec3 topColor = texture2D(inputImageTexture, topTextureCoordinate).rgb; + + vec4 centerColor2 = texture2D(inputImageTexture2, textureCoordinate2); + vec3 bottomColor2 = texture2D(inputImageTexture2, bottomTextureCoordinate2).rgb; + vec3 leftColor2 = texture2D(inputImageTexture2, leftTextureCoordinate2).rgb; + vec3 rightColor2 = texture2D(inputImageTexture2, rightTextureCoordinate2).rgb; + vec3 topColor2 = texture2D(inputImageTexture2, topTextureCoordinate2).rgb; + + vec3 meanColor = (bottomColor + leftColor + rightColor + topColor) / 4.0; + vec3 diffColor = centerColor.rgb - meanColor; + + vec3 meanColor2 
= (bottomColor2 + leftColor2 + rightColor2 + topColor2) / 4.0; + vec3 diffColor2 = centerColor2.rgb - meanColor2; + + vec3 gradColor = (meanColor + diffColor2); + + gl_FragColor = vec4(mix(centerColor.rgb, gradColor, centerColor2.a * mixturePercent), centerColor.a); + } +); +#else +NSString *const kGPUImagePoissonBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + varying vec2 topTextureCoordinate; + varying vec2 bottomTextureCoordinate; + + varying vec2 textureCoordinate2; + varying vec2 leftTextureCoordinate2; + varying vec2 rightTextureCoordinate2; + varying vec2 topTextureCoordinate2; + varying vec2 bottomTextureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform float mixturePercent; + + void main() + { + vec4 centerColor = texture2D(inputImageTexture, textureCoordinate); + vec3 bottomColor = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; + vec3 leftColor = texture2D(inputImageTexture, leftTextureCoordinate).rgb; + vec3 rightColor = texture2D(inputImageTexture, rightTextureCoordinate).rgb; + vec3 topColor = texture2D(inputImageTexture, topTextureCoordinate).rgb; + + vec4 centerColor2 = texture2D(inputImageTexture2, textureCoordinate2); + vec3 bottomColor2 = texture2D(inputImageTexture2, bottomTextureCoordinate2).rgb; + vec3 leftColor2 = texture2D(inputImageTexture2, leftTextureCoordinate2).rgb; + vec3 rightColor2 = texture2D(inputImageTexture2, rightTextureCoordinate2).rgb; + vec3 topColor2 = texture2D(inputImageTexture2, topTextureCoordinate2).rgb; + + vec3 meanColor = (bottomColor + leftColor + rightColor + topColor) / 4.0; + vec3 diffColor = centerColor.rgb - meanColor; + + vec3 meanColor2 = (bottomColor2 + leftColor2 + rightColor2 + topColor2) / 4.0; + vec3 diffColor2 = centerColor2.rgb - meanColor2; + + vec3 gradColor = (meanColor + diffColor2); + + gl_FragColor = 
vec4(mix(centerColor.rgb, gradColor, centerColor2.a * mixturePercent), centerColor.a); + } +); +#endif + +@implementation GPUImagePoissonBlendFilter + +@synthesize mix = _mix; +@synthesize numIterations = _numIterations; + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImagePoissonBlendFragmentShaderString])) + { + return nil; + } + + mixUniform = [filterProgram uniformIndex:@"mixturePercent"]; + self.mix = 0.5; + + self.numIterations = 10; + + return self; +} + +- (void)setMix:(CGFloat)newValue; +{ + _mix = newValue; + + [self setFloat:_mix forUniform:mixUniform program:filterProgram]; +} + +//- (void)setOutputFBO; +//{ +// if (self.numIterations % 2 == 1) { +// [self setSecondFilterFBO]; +// } else { +// [self setFilterFBO]; +// } +//} + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + // Run the first stage of the two-pass filter + [GPUImageContext setActiveShaderProgram:filterProgram]; + + [super renderToTextureWithVertices:vertices textureCoordinates:textureCoordinates]; + + for (int pass = 1; pass < self.numIterations; pass++) { + + if (pass % 2 == 0) { + + [GPUImageContext setActiveShaderProgram:filterProgram]; + + // TODO: This will over-unlock the incoming framebuffer + [super renderToTextureWithVertices:vertices textureCoordinates:[[self class] textureCoordinatesForRotation:kGPUImageNoRotation]]; + } else { + // Run the second stage of the two-pass filter + secondOutputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [secondOutputFramebuffer activateFramebuffer]; + + [GPUImageContext setActiveShaderProgram:filterProgram]; + + glClearColor(backgroundColorRed, backgroundColorGreen, backgroundColorBlue, backgroundColorAlpha); + glClear(GL_COLOR_BUFFER_BIT); + + glActiveTexture(GL_TEXTURE2); + glBindTexture(GL_TEXTURE_2D, [outputFramebuffer texture]); 
+ glUniform1i(filterInputTextureUniform, 2); + + glActiveTexture(GL_TEXTURE3); + glBindTexture(GL_TEXTURE_2D, [secondInputFramebuffer texture]); + glUniform1i(filterInputTextureUniform2, 3); + + glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + glVertexAttribPointer(filterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, [[self class] textureCoordinatesForRotation:kGPUImageNoRotation]); + glVertexAttribPointer(filterSecondTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, [[self class] textureCoordinatesForRotation:inputRotation2]); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + } + } +} + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePolarPixellateFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImagePolarPixellateFilter.h new file mode 100755 index 00000000..3de6a4d3 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePolarPixellateFilter.h @@ -0,0 +1,13 @@ +#import "GPUImageFilter.h" + +@interface GPUImagePolarPixellateFilter : GPUImageFilter { + GLint centerUniform, pixelSizeUniform; +} + +// The center about which to apply the distortion, with a default of (0.5, 0.5) +@property(readwrite, nonatomic) CGPoint center; +// The amount of distortion to apply, from (-2.0, -2.0) to (2.0, 2.0), with a default of (0.05, 0.05) +@property(readwrite, nonatomic) CGSize pixelSize; + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePolarPixellateFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImagePolarPixellateFilter.m new file mode 100755 index 00000000..5677db48 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePolarPixellateFilter.m @@ -0,0 +1,128 @@ +#import "GPUImagePolarPixellateFilter.h" + +// @fattjake based on vid by toneburst + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImagePolarPixellateFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp vec2 center; + uniform highp vec2 pixelSize; + + + void main() + 
{ + highp vec2 normCoord = 2.0 * textureCoordinate - 1.0; + highp vec2 normCenter = 2.0 * center - 1.0; + + normCoord -= normCenter; + + highp float r = length(normCoord); // to polar coords + highp float phi = atan(normCoord.y, normCoord.x); // to polar coords + + r = r - mod(r, pixelSize.x) + 0.03; + phi = phi - mod(phi, pixelSize.y); + + normCoord.x = r * cos(phi); + normCoord.y = r * sin(phi); + + normCoord += normCenter; + + mediump vec2 textureCoordinateToUse = normCoord / 2.0 + 0.5; + + gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse ); + + } +); +#else +NSString *const kGPUImagePolarPixellateFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform vec2 center; + uniform vec2 pixelSize; + + + void main() + { + vec2 normCoord = 2.0 * textureCoordinate - 1.0; + vec2 normCenter = 2.0 * center - 1.0; + + normCoord -= normCenter; + + float r = length(normCoord); // to polar coords + float phi = atan(normCoord.y, normCoord.x); // to polar coords + + r = r - mod(r, pixelSize.x) + 0.03; + phi = phi - mod(phi, pixelSize.y); + + normCoord.x = r * cos(phi); + normCoord.y = r * sin(phi); + + normCoord += normCenter; + + vec2 textureCoordinateToUse = normCoord / 2.0 + 0.5; + + gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse ); + + } +); +#endif + + +@implementation GPUImagePolarPixellateFilter + +@synthesize center = _center; + +@synthesize pixelSize = _pixelSize; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImagePolarPixellateFragmentShaderString])) + { + return nil; + } + + pixelSizeUniform = [filterProgram uniformIndex:@"pixelSize"]; + centerUniform = [filterProgram uniformIndex:@"center"]; + + + self.pixelSize = CGSizeMake(0.05, 0.05); + self.center = CGPointMake(0.5, 0.5); + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- 
(void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + [super setInputRotation:newInputRotation atIndex:textureIndex]; + [self setCenter:self.center]; +} + +- (void)setPixelSize:(CGSize)pixelSize +{ + _pixelSize = pixelSize; + + [self setSize:_pixelSize forUniform:pixelSizeUniform program:filterProgram]; +} + +- (void)setCenter:(CGPoint)newValue; +{ + _center = newValue; + + CGPoint rotatedPoint = [self rotatedPoint:_center forRotation:inputRotation]; + [self setPoint:rotatedPoint forUniform:centerUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePolkaDotFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImagePolkaDotFilter.h new file mode 100644 index 00000000..369b7737 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePolkaDotFilter.h @@ -0,0 +1,10 @@ +#import "GPUImagePixellateFilter.h" + +@interface GPUImagePolkaDotFilter : GPUImagePixellateFilter +{ + GLint dotScalingUniform; +} + +@property(readwrite, nonatomic) CGFloat dotScaling; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePolkaDotFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImagePolkaDotFilter.m new file mode 100644 index 00000000..a439a043 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePolkaDotFilter.m @@ -0,0 +1,85 @@ +#import "GPUImagePolkaDotFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImagePolkaDotFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp float fractionalWidthOfPixel; + uniform highp float aspectRatio; + uniform highp float dotScaling; + + void main() + { + highp vec2 sampleDivisor = vec2(fractionalWidthOfPixel, fractionalWidthOfPixel / aspectRatio); + + highp vec2 samplePos = textureCoordinate - mod(textureCoordinate, sampleDivisor) + 0.5 * sampleDivisor; + highp vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * 
aspectRatio)); + highp vec2 adjustedSamplePos = vec2(samplePos.x, (samplePos.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + highp float distanceFromSamplePoint = distance(adjustedSamplePos, textureCoordinateToUse); + lowp float checkForPresenceWithinDot = step(distanceFromSamplePoint, (fractionalWidthOfPixel * 0.5) * dotScaling); + + lowp vec4 inputColor = texture2D(inputImageTexture, samplePos); + + gl_FragColor = vec4(inputColor.rgb * checkForPresenceWithinDot, inputColor.a); + } +); +#else +NSString *const kGPUImagePolkaDotFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform float fractionalWidthOfPixel; + uniform float aspectRatio; + uniform float dotScaling; + + void main() + { + vec2 sampleDivisor = vec2(fractionalWidthOfPixel, fractionalWidthOfPixel / aspectRatio); + + vec2 samplePos = textureCoordinate - mod(textureCoordinate, sampleDivisor) + 0.5 * sampleDivisor; + vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + vec2 adjustedSamplePos = vec2(samplePos.x, (samplePos.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + float distanceFromSamplePoint = distance(adjustedSamplePos, textureCoordinateToUse); + float checkForPresenceWithinDot = step(distanceFromSamplePoint, (fractionalWidthOfPixel * 0.5) * dotScaling); + + vec4 inputColor = texture2D(inputImageTexture, samplePos); + + gl_FragColor = vec4(inputColor.rgb * checkForPresenceWithinDot, inputColor.a); + } +); +#endif + +@implementation GPUImagePolkaDotFilter + +@synthesize dotScaling = _dotScaling; + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImagePolkaDotFragmentShaderString])) + { + return nil; + } + + dotScalingUniform = [filterProgram uniformIndex:@"dotScaling"]; + + self.dotScaling = 0.90; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setDotScaling:(CGFloat)newValue; +{ + _dotScaling = newValue; + + 
[self setFloat:_dotScaling forUniform:dotScalingUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePosterizeFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImagePosterizeFilter.h new file mode 100755 index 00000000..6f655b3e --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePosterizeFilter.h @@ -0,0 +1,14 @@ +#import "GPUImageFilter.h" + +/** This reduces the color dynamic range into the number of steps specified, leading to a cartoon-like simple shading of the image. + */ +@interface GPUImagePosterizeFilter : GPUImageFilter +{ + GLint colorLevelsUniform; +} + +/** The number of color levels to reduce the image space to. This ranges from 1 to 256, with a default of 10. + */ +@property(readwrite, nonatomic) NSUInteger colorLevels; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePosterizeFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImagePosterizeFilter.m new file mode 100755 index 00000000..a438cea5 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePosterizeFilter.m @@ -0,0 +1,66 @@ +#import "GPUImagePosterizeFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImagePosterizeFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform highp float colorLevels; + + void main() + { + highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = floor((textureColor * colorLevels) + vec4(0.5)) / colorLevels; + } +); +#else +NSString *const kGPUImagePosterizeFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float colorLevels; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + gl_FragColor = floor((textureColor * colorLevels) + vec4(0.5)) / colorLevels; + } +); +#endif + +@implementation GPUImagePosterizeFilter + +@synthesize colorLevels = _colorLevels; + +#pragma mark - +#pragma mark 
Initialization + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImagePosterizeFragmentShaderString])) + { + return nil; + } + + colorLevelsUniform = [filterProgram uniformIndex:@"colorLevels"]; + self.colorLevels = 10; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setColorLevels:(NSUInteger)newValue; +{ + _colorLevels = newValue; + + [self setFloat:_colorLevels forUniform:colorLevelsUniform program:filterProgram]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePrewittEdgeDetectionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImagePrewittEdgeDetectionFilter.h new file mode 100755 index 00000000..141f8c5f --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePrewittEdgeDetectionFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageSobelEdgeDetectionFilter.h" + +@interface GPUImagePrewittEdgeDetectionFilter : GPUImageSobelEdgeDetectionFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImagePrewittEdgeDetectionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImagePrewittEdgeDetectionFilter.m new file mode 100755 index 00000000..a9906930 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImagePrewittEdgeDetectionFilter.m @@ -0,0 +1,97 @@ +#import "GPUImagePrewittEdgeDetectionFilter.h" + +@implementation GPUImagePrewittEdgeDetectionFilter + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImagePrewittFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float edgeStrength; + + void main() + { + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + 
float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float h = -topLeftIntensity - topIntensity - topRightIntensity + bottomLeftIntensity + bottomIntensity + bottomRightIntensity; + float v = -bottomLeftIntensity - leftIntensity - topLeftIntensity + bottomRightIntensity + rightIntensity + topRightIntensity; + + float mag = length(vec2(h, v)) * edgeStrength; + + gl_FragColor = vec4(vec3(mag), 1.0); + } +); +#else +NSString *const kGPUImagePrewittFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float edgeStrength; + + void main() + { + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float 
bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float h = -topLeftIntensity - topIntensity - topRightIntensity + bottomLeftIntensity + bottomIntensity + bottomRightIntensity; + float v = -bottomLeftIntensity - leftIntensity - topLeftIntensity + bottomRightIntensity + rightIntensity + topRightIntensity; + + float mag = length(vec2(h, v)) * edgeStrength; + + gl_FragColor = vec4(vec3(mag), 1.0); + } +); +#endif + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithFragmentShaderFromString:kGPUImagePrewittFragmentShaderString])) + { + return nil; + } + + self.edgeStrength = 1.0; + + return self; +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRGBClosingFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageRGBClosingFilter.h new file mode 100644 index 00000000..08d13f88 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageRGBClosingFilter.h @@ -0,0 +1,18 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImageRGBErosionFilter; +@class GPUImageRGBDilationFilter; + +// A filter that first performs a dilation on each color channel of an image, followed by an erosion of the same radius. +// This helps to filter out smaller dark elements. 
+ +@interface GPUImageRGBClosingFilter : GPUImageFilterGroup +{ + GPUImageRGBErosionFilter *erosionFilter; + GPUImageRGBDilationFilter *dilationFilter; +} + +- (id)initWithRadius:(NSUInteger)radius; + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRGBClosingFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageRGBClosingFilter.m new file mode 100644 index 00000000..c5bb1c8d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageRGBClosingFilter.m @@ -0,0 +1,41 @@ +#import "GPUImageRGBClosingFilter.h" +#import "GPUImageRGBErosionFilter.h" +#import "GPUImageRGBDilationFilter.h" + +@implementation GPUImageRGBClosingFilter + +- (id)init; +{ + if (!(self = [self initWithRadius:1])) + { + return nil; + } + + return self; +} + +- (id)initWithRadius:(NSUInteger)radius; +{ + if (!(self = [super init])) + { + return nil; + } + + // First pass: dilation + dilationFilter = [[GPUImageRGBDilationFilter alloc] initWithRadius:radius]; + [self addFilter:dilationFilter]; + + // Second pass: erosion + erosionFilter = [[GPUImageRGBErosionFilter alloc] initWithRadius:radius]; + [self addFilter:erosionFilter]; + + [dilationFilter addTarget:erosionFilter]; + + self.initialFilters = [NSArray arrayWithObjects:dilationFilter, nil]; + self.terminalFilter = erosionFilter; + + return self; +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRGBDilationFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageRGBDilationFilter.h new file mode 100644 index 00000000..68276f84 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageRGBDilationFilter.h @@ -0,0 +1,11 @@ +#import "GPUImageTwoPassTextureSamplingFilter.h" + +// For each pixel, this sets it to the maximum value of each color channel in a rectangular neighborhood extending out dilationRadius pixels from the center. +// This extends out brighter colors, and can be used for abstraction of color images. 
+ +@interface GPUImageRGBDilationFilter : GPUImageTwoPassTextureSamplingFilter + +// Acceptable values for dilationRadius, which sets the distance in pixels to sample out from the center, are 1, 2, 3, and 4. +- (id)initWithRadius:(NSUInteger)dilationRadius; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRGBDilationFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageRGBDilationFilter.m new file mode 100644 index 00000000..9702c783 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageRGBDilationFilter.m @@ -0,0 +1,306 @@ +#import "GPUImageRGBDilationFilter.h" +#import "GPUImageDilationFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageRGBDilationRadiusOneFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + lowp vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate); + lowp vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate); + lowp vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate); + + lowp vec4 maxValue = max(centerIntensity, oneStepPositiveIntensity); + + gl_FragColor = max(maxValue, oneStepNegativeIntensity); + } +); + +NSString *const kGPUImageRGBDilationRadiusTwoFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + lowp vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate); + lowp vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate); 
+ lowp vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate); + lowp vec4 twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate); + lowp vec4 twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate); + + lowp vec4 maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = max(maxValue, oneStepNegativeIntensity); + maxValue = max(maxValue, twoStepsPositiveIntensity); + maxValue = max(maxValue, twoStepsNegativeIntensity); + + gl_FragColor = max(maxValue, twoStepsNegativeIntensity); + } +); + +NSString *const kGPUImageRGBDilationRadiusThreeFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + lowp vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate); + lowp vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate); + lowp vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate); + lowp vec4 twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate); + lowp vec4 twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate); + lowp vec4 threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate); + lowp vec4 threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate); + + lowp vec4 maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = max(maxValue, oneStepNegativeIntensity); + maxValue = 
max(maxValue, twoStepsPositiveIntensity); + maxValue = max(maxValue, twoStepsNegativeIntensity); + maxValue = max(maxValue, threeStepsPositiveIntensity); + + gl_FragColor = max(maxValue, threeStepsNegativeIntensity); + } +); + +NSString *const kGPUImageRGBDilationRadiusFourFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + varying vec2 fourStepsPositiveTextureCoordinate; + varying vec2 fourStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + lowp vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate); + lowp vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate); + lowp vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate); + lowp vec4 twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate); + lowp vec4 twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate); + lowp vec4 threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate); + lowp vec4 threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate); + lowp vec4 fourStepsPositiveIntensity = texture2D(inputImageTexture, fourStepsPositiveTextureCoordinate); + lowp vec4 fourStepsNegativeIntensity = texture2D(inputImageTexture, fourStepsNegativeTextureCoordinate); + + lowp vec4 maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = max(maxValue, oneStepNegativeIntensity); + maxValue = max(maxValue, twoStepsPositiveIntensity); + maxValue = max(maxValue, 
twoStepsNegativeIntensity); + maxValue = max(maxValue, threeStepsPositiveIntensity); + maxValue = max(maxValue, threeStepsNegativeIntensity); + maxValue = max(maxValue, fourStepsPositiveIntensity); + + gl_FragColor = max(maxValue, fourStepsNegativeIntensity); + } +); +#else +NSString *const kGPUImageRGBDilationRadiusOneFragmentShaderString = SHADER_STRING +( + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate); + vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate); + vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate); + + vec4 maxValue = max(centerIntensity, oneStepPositiveIntensity); + + gl_FragColor = max(maxValue, oneStepNegativeIntensity); + } + ); + +NSString *const kGPUImageRGBDilationRadiusTwoFragmentShaderString = SHADER_STRING +( + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate); + vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate); + vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate); + vec4 twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate); + vec4 twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate); + + vec4 maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = max(maxValue, oneStepNegativeIntensity); + maxValue = max(maxValue, 
twoStepsPositiveIntensity); + maxValue = max(maxValue, twoStepsNegativeIntensity); + + gl_FragColor = max(maxValue, twoStepsNegativeIntensity); + } + ); + +NSString *const kGPUImageRGBDilationRadiusThreeFragmentShaderString = SHADER_STRING +( + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 twoStepsNegativeTextureCoordinate; + varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate); + vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate); + vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate); + vec4 twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate); + vec4 twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate); + vec4 threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate); + vec4 threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate); + + vec4 maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = max(maxValue, oneStepNegativeIntensity); + maxValue = max(maxValue, twoStepsPositiveIntensity); + maxValue = max(maxValue, twoStepsNegativeIntensity); + maxValue = max(maxValue, threeStepsPositiveIntensity); + + gl_FragColor = max(maxValue, threeStepsNegativeIntensity); + } +); + +NSString *const kGPUImageRGBDilationRadiusFourFragmentShaderString = SHADER_STRING +( + varying vec2 centerTextureCoordinate; + varying vec2 oneStepPositiveTextureCoordinate; + varying vec2 oneStepNegativeTextureCoordinate; + varying vec2 twoStepsPositiveTextureCoordinate; + varying vec2 
twoStepsNegativeTextureCoordinate; + varying vec2 threeStepsPositiveTextureCoordinate; + varying vec2 threeStepsNegativeTextureCoordinate; + varying vec2 fourStepsPositiveTextureCoordinate; + varying vec2 fourStepsNegativeTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate); + vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate); + vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate); + vec4 twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate); + vec4 twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate); + vec4 threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate); + vec4 threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate); + vec4 fourStepsPositiveIntensity = texture2D(inputImageTexture, fourStepsPositiveTextureCoordinate); + vec4 fourStepsNegativeIntensity = texture2D(inputImageTexture, fourStepsNegativeTextureCoordinate); + + vec4 maxValue = max(centerIntensity, oneStepPositiveIntensity); + maxValue = max(maxValue, oneStepNegativeIntensity); + maxValue = max(maxValue, twoStepsPositiveIntensity); + maxValue = max(maxValue, twoStepsNegativeIntensity); + maxValue = max(maxValue, threeStepsPositiveIntensity); + maxValue = max(maxValue, threeStepsNegativeIntensity); + maxValue = max(maxValue, fourStepsPositiveIntensity); + + gl_FragColor = max(maxValue, fourStepsNegativeIntensity); + } +); +#endif + +@implementation GPUImageRGBDilationFilter + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithRadius:(NSUInteger)dilationRadius; +{ + NSString *fragmentShaderForThisRadius = nil; + NSString *vertexShaderForThisRadius = nil; + + switch (dilationRadius) + { + case 0: + case 1: + { + 
vertexShaderForThisRadius = kGPUImageDilationRadiusOneVertexShaderString; + fragmentShaderForThisRadius = kGPUImageRGBDilationRadiusOneFragmentShaderString; + }; break; + case 2: + { + vertexShaderForThisRadius = kGPUImageDilationRadiusTwoVertexShaderString; + fragmentShaderForThisRadius = kGPUImageRGBDilationRadiusTwoFragmentShaderString; + }; break; + case 3: + { + vertexShaderForThisRadius = kGPUImageDilationRadiusThreeVertexShaderString; + fragmentShaderForThisRadius = kGPUImageRGBDilationRadiusThreeFragmentShaderString; + }; break; + case 4: + { + vertexShaderForThisRadius = kGPUImageDilationRadiusFourVertexShaderString; + fragmentShaderForThisRadius = kGPUImageRGBDilationRadiusFourFragmentShaderString; + }; break; + default: + { + vertexShaderForThisRadius = kGPUImageDilationRadiusFourVertexShaderString; + fragmentShaderForThisRadius = kGPUImageRGBDilationRadiusFourFragmentShaderString; + }; break; + } + + if (!(self = [super initWithFirstStageVertexShaderFromString:vertexShaderForThisRadius firstStageFragmentShaderFromString:fragmentShaderForThisRadius secondStageVertexShaderFromString:vertexShaderForThisRadius secondStageFragmentShaderFromString:fragmentShaderForThisRadius])) + { + return nil; + } + + return self; +} + +- (id)init; +{ + if (!(self = [self initWithRadius:1])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRGBErosionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageRGBErosionFilter.h new file mode 100644 index 00000000..5979cb7e --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageRGBErosionFilter.h @@ -0,0 +1,11 @@ +#import "GPUImageTwoPassTextureSamplingFilter.h" + +// For each pixel, this sets it to the minimum value of each color channel in a rectangular neighborhood extending out dilationRadius pixels from the center. +// This extends out dark features, and can be used for abstraction of color images. 
+
+@interface GPUImageRGBErosionFilter : GPUImageTwoPassTextureSamplingFilter
+
+// Acceptable values for erosionRadius, which sets the distance in pixels to sample out from the center, are 1, 2, 3, and 4.
+- (id)initWithRadius:(NSUInteger)erosionRadius;
+
+@end
diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRGBErosionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageRGBErosionFilter.m
new file mode 100644
index 00000000..91e5f33d
--- /dev/null
+++ b/LFLiveKit/Vendor/GPUImage/GPUImageRGBErosionFilter.m
@@ -0,0 +1,304 @@
+#import "GPUImageRGBErosionFilter.h"
+#import "GPUImageDilationFilter.h"
+
+// Fragment shaders for one axis of a separable RGB erosion: each takes the
+// per-channel minimum of the center texel and up to four sample pairs along the
+// pass direction. The first branch (iOS/simulator) carries GLSL ES precision
+// qualifiers; the #else branch is the desktop GL variant without them.
+#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
+NSString *const kGPUImageRGBErosionRadiusOneFragmentShaderString = SHADER_STRING
+(
+     precision highp float;
+
+     varying vec2 centerTextureCoordinate;
+     varying vec2 oneStepPositiveTextureCoordinate;
+     varying vec2 oneStepNegativeTextureCoordinate;
+
+     uniform sampler2D inputImageTexture;
+
+     void main()
+     {
+         lowp vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate);
+         lowp vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate);
+         lowp vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate);
+
+         lowp vec4 minValue = min(centerIntensity, oneStepPositiveIntensity);
+
+         gl_FragColor = min(minValue, oneStepNegativeIntensity);
+     }
+);
+
+NSString *const kGPUImageRGBErosionRadiusTwoFragmentShaderString = SHADER_STRING
+(
+     precision highp float;
+
+     varying vec2 centerTextureCoordinate;
+     varying vec2 oneStepPositiveTextureCoordinate;
+     varying vec2 oneStepNegativeTextureCoordinate;
+     varying vec2 twoStepsPositiveTextureCoordinate;
+     varying vec2 twoStepsNegativeTextureCoordinate;
+
+     uniform sampler2D inputImageTexture;
+
+     void main()
+     {
+         lowp vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate);
+         lowp vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate);
+         lowp vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate);
+         lowp vec4 twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate);
+         lowp vec4 twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate);
+
+         lowp vec4 minValue = min(centerIntensity, oneStepPositiveIntensity);
+         minValue = min(minValue, oneStepNegativeIntensity);
+         minValue = min(minValue, twoStepsPositiveIntensity);
+
+         gl_FragColor = min(minValue, twoStepsNegativeIntensity);
+     }
+ );
+
+NSString *const kGPUImageRGBErosionRadiusThreeFragmentShaderString = SHADER_STRING
+(
+     precision highp float;
+
+     varying vec2 centerTextureCoordinate;
+     varying vec2 oneStepPositiveTextureCoordinate;
+     varying vec2 oneStepNegativeTextureCoordinate;
+     varying vec2 twoStepsPositiveTextureCoordinate;
+     varying vec2 twoStepsNegativeTextureCoordinate;
+     varying vec2 threeStepsPositiveTextureCoordinate;
+     varying vec2 threeStepsNegativeTextureCoordinate;
+
+     uniform sampler2D inputImageTexture;
+
+     void main()
+     {
+         lowp vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate);
+         lowp vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate);
+         lowp vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate);
+         lowp vec4 twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate);
+         lowp vec4 twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate);
+         lowp vec4 threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate);
+         lowp vec4 threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate);
+
+         lowp vec4 minValue = min(centerIntensity, oneStepPositiveIntensity);
+         minValue = min(minValue, oneStepNegativeIntensity);
+         minValue = min(minValue, twoStepsPositiveIntensity);
+         minValue = min(minValue, twoStepsNegativeIntensity);
+         minValue = min(minValue, threeStepsPositiveIntensity);
+
+         gl_FragColor = min(minValue, threeStepsNegativeIntensity);
+     }
+ );
+
+NSString *const kGPUImageRGBErosionRadiusFourFragmentShaderString = SHADER_STRING
+(
+     precision highp float;
+
+     varying vec2 centerTextureCoordinate;
+     varying vec2 oneStepPositiveTextureCoordinate;
+     varying vec2 oneStepNegativeTextureCoordinate;
+     varying vec2 twoStepsPositiveTextureCoordinate;
+     varying vec2 twoStepsNegativeTextureCoordinate;
+     varying vec2 threeStepsPositiveTextureCoordinate;
+     varying vec2 threeStepsNegativeTextureCoordinate;
+     varying vec2 fourStepsPositiveTextureCoordinate;
+     varying vec2 fourStepsNegativeTextureCoordinate;
+
+     uniform sampler2D inputImageTexture;
+
+     void main()
+     {
+         lowp vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate);
+         lowp vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate);
+         lowp vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate);
+         lowp vec4 twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate);
+         lowp vec4 twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate);
+         lowp vec4 threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate);
+         lowp vec4 threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate);
+         lowp vec4 fourStepsPositiveIntensity = texture2D(inputImageTexture, fourStepsPositiveTextureCoordinate);
+         lowp vec4 fourStepsNegativeIntensity = texture2D(inputImageTexture, fourStepsNegativeTextureCoordinate);
+
+         lowp vec4 minValue = min(centerIntensity, oneStepPositiveIntensity);
+         minValue = min(minValue, oneStepNegativeIntensity);
+         minValue = min(minValue, twoStepsPositiveIntensity);
+         minValue = min(minValue, twoStepsNegativeIntensity);
+         minValue = min(minValue, threeStepsPositiveIntensity);
+         minValue = min(minValue, threeStepsNegativeIntensity);
+         minValue = min(minValue, fourStepsPositiveIntensity);
+
+         gl_FragColor = min(minValue, fourStepsNegativeIntensity);
+     }
+);
+#else
+NSString *const kGPUImageRGBErosionRadiusOneFragmentShaderString = SHADER_STRING
+(
+     varying vec2 centerTextureCoordinate;
+     varying vec2 oneStepPositiveTextureCoordinate;
+     varying vec2 oneStepNegativeTextureCoordinate;
+
+     uniform sampler2D inputImageTexture;
+
+     void main()
+     {
+         vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate);
+         vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate);
+         vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate);
+
+         vec4 minValue = min(centerIntensity, oneStepPositiveIntensity);
+
+         gl_FragColor = min(minValue, oneStepNegativeIntensity);
+     }
+);
+
+NSString *const kGPUImageRGBErosionRadiusTwoFragmentShaderString = SHADER_STRING
+(
+     varying vec2 centerTextureCoordinate;
+     varying vec2 oneStepPositiveTextureCoordinate;
+     varying vec2 oneStepNegativeTextureCoordinate;
+     varying vec2 twoStepsPositiveTextureCoordinate;
+     varying vec2 twoStepsNegativeTextureCoordinate;
+
+     uniform sampler2D inputImageTexture;
+
+     void main()
+     {
+         vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate);
+         vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate);
+         vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate);
+         vec4 twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate);
+         vec4 twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate);
+
+         vec4 minValue = min(centerIntensity, oneStepPositiveIntensity);
+         minValue = min(minValue, oneStepNegativeIntensity);
+         minValue = min(minValue, twoStepsPositiveIntensity);
+
+         gl_FragColor = min(minValue, twoStepsNegativeIntensity);
+     }
+);
+
+NSString *const kGPUImageRGBErosionRadiusThreeFragmentShaderString = SHADER_STRING
+(
+     varying vec2 centerTextureCoordinate;
+     varying vec2 oneStepPositiveTextureCoordinate;
+     varying vec2 oneStepNegativeTextureCoordinate;
+     varying vec2 twoStepsPositiveTextureCoordinate;
+     varying vec2 twoStepsNegativeTextureCoordinate;
+     varying vec2 threeStepsPositiveTextureCoordinate;
+     varying vec2 threeStepsNegativeTextureCoordinate;
+
+     uniform sampler2D inputImageTexture;
+
+     void main()
+     {
+         vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate);
+         vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate);
+         vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate);
+         vec4 twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate);
+         vec4 twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate);
+         vec4 threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate);
+         vec4 threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate);
+
+         vec4 minValue = min(centerIntensity, oneStepPositiveIntensity);
+         minValue = min(minValue, oneStepNegativeIntensity);
+         minValue = min(minValue, twoStepsPositiveIntensity);
+         minValue = min(minValue, twoStepsNegativeIntensity);
+         minValue = min(minValue, threeStepsPositiveIntensity);
+
+         gl_FragColor = min(minValue, threeStepsNegativeIntensity);
+     }
+);
+
+NSString *const kGPUImageRGBErosionRadiusFourFragmentShaderString = SHADER_STRING
+(
+     varying vec2 centerTextureCoordinate;
+     varying vec2 oneStepPositiveTextureCoordinate;
+     varying vec2 oneStepNegativeTextureCoordinate;
+     varying vec2 twoStepsPositiveTextureCoordinate;
+     varying vec2 twoStepsNegativeTextureCoordinate;
+     varying vec2 threeStepsPositiveTextureCoordinate;
+     varying vec2 threeStepsNegativeTextureCoordinate;
+     varying vec2 fourStepsPositiveTextureCoordinate;
+     varying vec2 fourStepsNegativeTextureCoordinate;
+
+     uniform sampler2D inputImageTexture;
+
+     void main()
+     {
+         vec4 centerIntensity = texture2D(inputImageTexture, centerTextureCoordinate);
+         vec4 oneStepPositiveIntensity = texture2D(inputImageTexture, oneStepPositiveTextureCoordinate);
+         vec4 oneStepNegativeIntensity = texture2D(inputImageTexture, oneStepNegativeTextureCoordinate);
+         vec4 twoStepsPositiveIntensity = texture2D(inputImageTexture, twoStepsPositiveTextureCoordinate);
+         vec4 twoStepsNegativeIntensity = texture2D(inputImageTexture, twoStepsNegativeTextureCoordinate);
+         vec4 threeStepsPositiveIntensity = texture2D(inputImageTexture, threeStepsPositiveTextureCoordinate);
+         vec4 threeStepsNegativeIntensity = texture2D(inputImageTexture, threeStepsNegativeTextureCoordinate);
+         vec4 fourStepsPositiveIntensity = texture2D(inputImageTexture, fourStepsPositiveTextureCoordinate);
+         vec4 fourStepsNegativeIntensity = texture2D(inputImageTexture, fourStepsNegativeTextureCoordinate);
+
+         vec4 minValue = min(centerIntensity, oneStepPositiveIntensity);
+         minValue = min(minValue, oneStepNegativeIntensity);
+         minValue = min(minValue, twoStepsPositiveIntensity);
+         minValue = min(minValue, twoStepsNegativeIntensity);
+         minValue = min(minValue, threeStepsPositiveIntensity);
+         minValue = min(minValue, threeStepsNegativeIntensity);
+         minValue = min(minValue, fourStepsPositiveIntensity);
+
+         gl_FragColor = min(minValue, fourStepsNegativeIntensity);
+     }
+);
+#endif
+
+// Separable (two-pass) RGB erosion: the per-channel dual of
+// GPUImageRGBDilationFilter, using min() instead of max().
+@implementation GPUImageRGBErosionFilter
+
+#pragma mark -
+#pragma mark Initialization and teardown
+
+// Designated initializer. erosionRadius selects a precompiled shader pair;
+// 0 is treated as 1, and any value above 4 falls back to the radius-4 shaders.
+// The vertex shaders are reused from GPUImageDilationFilter (they only compute
+// sampling offsets, which are identical for erosion and dilation).
+- (id)initWithRadius:(NSUInteger)erosionRadius;
+{
+    NSString *fragmentShaderForThisRadius = nil;
+    NSString *vertexShaderForThisRadius = nil;
+
+    switch (erosionRadius)
+    {
+        case 0:
+        case 1:
+        {
+            vertexShaderForThisRadius = kGPUImageDilationRadiusOneVertexShaderString;
+            fragmentShaderForThisRadius = kGPUImageRGBErosionRadiusOneFragmentShaderString;
+        }; break;
+        case 2:
+        {
+            vertexShaderForThisRadius = kGPUImageDilationRadiusTwoVertexShaderString;
+            fragmentShaderForThisRadius = kGPUImageRGBErosionRadiusTwoFragmentShaderString;
+        }; break;
+        case 3:
+        {
+            vertexShaderForThisRadius = kGPUImageDilationRadiusThreeVertexShaderString;
+            fragmentShaderForThisRadius = kGPUImageRGBErosionRadiusThreeFragmentShaderString;
+        }; break;
+        case 4:
+        {
+            vertexShaderForThisRadius = kGPUImageDilationRadiusFourVertexShaderString;
+            fragmentShaderForThisRadius = kGPUImageRGBErosionRadiusFourFragmentShaderString;
+        }; break;
+        default:
+        {
+            vertexShaderForThisRadius = kGPUImageDilationRadiusFourVertexShaderString;
+            fragmentShaderForThisRadius = kGPUImageRGBErosionRadiusFourFragmentShaderString;
+        }; break;
+    }
+
+    // The same shader pair is used for both stages (horizontal and vertical pass).
+    if (!(self = [super initWithFirstStageVertexShaderFromString:vertexShaderForThisRadius firstStageFragmentShaderFromString:fragmentShaderForThisRadius secondStageVertexShaderFromString:vertexShaderForThisRadius secondStageFragmentShaderFromString:fragmentShaderForThisRadius]))
+    {
+        return nil;
+    }
+
+    return self;
+}
+
+// Convenience initializer: radius-1 erosion.
+- (id)init;
+{
+    if (!(self = [self initWithRadius:1]))
+    {
+        return nil;
+    }
+
+    return self;
+}
+
+@end
diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRGBFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageRGBFilter.h
new file mode 100755
index 00000000..18966b1b
--- /dev/null
+++ b/LFLiveKit/Vendor/GPUImage/GPUImageRGBFilter.h
@@ -0,0 +1,15 @@
+#import "GPUImageFilter.h"
+
+@interface GPUImageRGBFilter : GPUImageFilter
+{
+    GLint redUniform;
+    GLint greenUniform;
+    GLint blueUniform;
+}
+
+// Normalized values by which each color channel is multiplied. The range is from 0.0 up, with 1.0 as the default.
+@property (readwrite, nonatomic) CGFloat red;
+@property (readwrite, nonatomic) CGFloat green;
+@property (readwrite, nonatomic) CGFloat blue;
+
+@end
diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRGBFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageRGBFilter.m
new file mode 100755
index 00000000..7a2e5681
--- /dev/null
+++ b/LFLiveKit/Vendor/GPUImage/GPUImageRGBFilter.m
@@ -0,0 +1,89 @@
+#import "GPUImageRGBFilter.h"
+
+// Per-channel gain shader: multiplies r/g/b by independent uniforms, leaving
+// alpha untouched. iOS branch uses GLSL ES precision qualifiers; #else is the
+// desktop GL variant.
+#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
+NSString *const kGPUImageRGBFragmentShaderString = SHADER_STRING
+(
+     varying highp vec2 textureCoordinate;
+
+     uniform sampler2D inputImageTexture;
+     uniform highp float redAdjustment;
+     uniform highp float greenAdjustment;
+     uniform highp float blueAdjustment;
+
+     void main()
+     {
+         highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
+
+         gl_FragColor = vec4(textureColor.r * redAdjustment, textureColor.g * greenAdjustment, textureColor.b * blueAdjustment, textureColor.a);
+     }
+);
+#else
+NSString *const kGPUImageRGBFragmentShaderString = SHADER_STRING
+(
+     varying vec2 textureCoordinate;
+
+     uniform sampler2D inputImageTexture;
+     uniform float redAdjustment;
+     uniform float greenAdjustment;
+     uniform float blueAdjustment;
+
+     void main()
+     {
+         vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
+
+         gl_FragColor = vec4(textureColor.r * redAdjustment, textureColor.g * greenAdjustment, textureColor.b * blueAdjustment, textureColor.a);
+     }
+ );
+#endif
+
+@implementation GPUImageRGBFilter
+
+@synthesize red = _red, blue = _blue, green = _green;
+
+#pragma mark -
+#pragma mark Initialization and teardown
+
+// Compiles the RGB-gain shader, caches the three uniform locations, and seeds
+// each channel multiplier at the identity value 1.0 (via the custom setters
+// below, which also push the value to the GL program).
+- (id)init;
+{
+    if (!(self = [super initWithFragmentShaderFromString:kGPUImageRGBFragmentShaderString]))
+    {
+        return nil;
+    }
+
+    redUniform = [filterProgram uniformIndex:@"redAdjustment"];
+    self.red = 1.0;
+
+    greenUniform = [filterProgram uniformIndex:@"greenAdjustment"];
+    self.green = 1.0;
+
+    blueUniform = [filterProgram uniformIndex:@"blueAdjustment"];
+    self.blue = 1.0;
+
+    return self;
+}
+
+#pragma mark -
+#pragma mark Accessors
+
+// Each setter stores the value and immediately uploads it to the shader uniform.
+- (void)setRed:(CGFloat)newValue;
+{
+    _red = newValue;
+
+    [self setFloat:_red forUniform:redUniform program:filterProgram];
+}
+
+- (void)setGreen:(CGFloat)newValue;
+{
+    _green = newValue;
+
+    [self setFloat:_green forUniform:greenUniform program:filterProgram];
+}
+
+- (void)setBlue:(CGFloat)newValue;
+{
+    _blue = newValue;
+
+    [self setFloat:_blue forUniform:blueUniform program:filterProgram];
+}
+
+@end
\ No newline at end of file
diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRGBOpeningFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageRGBOpeningFilter.h
new file mode 100644
index 00000000..dbec75fb
--- /dev/null
+++ b/LFLiveKit/Vendor/GPUImage/GPUImageRGBOpeningFilter.h
@@ -0,0 +1,17 @@
+#import "GPUImageFilterGroup.h"
+
+@class GPUImageRGBErosionFilter;
+@class GPUImageRGBDilationFilter;
+
+// A filter that first performs an erosion on each color channel of an image, followed by a dilation of the same radius.
+// This helps to filter out smaller bright elements.
+
+@interface GPUImageRGBOpeningFilter : GPUImageFilterGroup
+{
+    GPUImageRGBErosionFilter *erosionFilter;
+    GPUImageRGBDilationFilter *dilationFilter;
+}
+
+- (id)initWithRadius:(NSUInteger)radius;
+
+@end
diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRGBOpeningFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageRGBOpeningFilter.m
new file mode 100644
index 00000000..9d53021e
--- /dev/null
+++ b/LFLiveKit/Vendor/GPUImage/GPUImageRGBOpeningFilter.m
@@ -0,0 +1,41 @@
+#import "GPUImageRGBOpeningFilter.h"
+#import "GPUImageRGBErosionFilter.h"
+#import "GPUImageRGBDilationFilter.h"
+
+// Morphological "opening" on RGB channels: erosion chained into a dilation of
+// the same radius, assembled as a two-filter GPUImageFilterGroup.
+@implementation GPUImageRGBOpeningFilter
+
+// Convenience initializer: radius-1 opening.
+- (id)init;
+{
+    if (!(self = [self initWithRadius:1]))
+    {
+        return nil;
+    }
+
+    return self;
+}
+
+// Designated initializer. radius is forwarded to both sub-filters; input is
+// routed to the erosion stage and the dilation stage is the group's output.
+- (id)initWithRadius:(NSUInteger)radius;
+{
+    if (!(self = [super init]))
+    {
+        return nil;
+    }
+
+    // First pass: erosion
+    erosionFilter = [[GPUImageRGBErosionFilter alloc] initWithRadius:radius];
+    [self addFilter:erosionFilter];
+
+    // Second pass: dilation
+    dilationFilter = [[GPUImageRGBDilationFilter alloc] initWithRadius:radius];
+    [self addFilter:dilationFilter];
+
+    [erosionFilter addTarget:dilationFilter];
+
+    self.initialFilters = [NSArray arrayWithObjects:erosionFilter, nil];
+    self.terminalFilter = dilationFilter;
+
+    return self;
+}
+
+
+@end
diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRawDataInput.h b/LFLiveKit/Vendor/GPUImage/GPUImageRawDataInput.h
new file mode 100644
index 00000000..6ec4720f
--- /dev/null
+++ b/LFLiveKit/Vendor/GPUImage/GPUImageRawDataInput.h
@@ -0,0 +1,43 @@
+#import "GPUImageOutput.h"
+
+// The bytes passed into this input are not copied or retained, but you are free to deallocate them after they are used by this filter.
+// The bytes are uploaded and stored within a texture, so nothing is kept locally.
+// The default format for input bytes is GPUPixelFormatBGRA, unless specified with pixelFormat:
+// The default type for input bytes is GPUPixelTypeUByte, unless specified with pixelType:
+
+typedef enum {
+    GPUPixelFormatBGRA = GL_BGRA,
+    GPUPixelFormatRGBA = GL_RGBA,
+    GPUPixelFormatRGB = GL_RGB,
+    GPUPixelFormatLuminance = GL_LUMINANCE
+} GPUPixelFormat;
+
+typedef enum {
+    GPUPixelTypeUByte = GL_UNSIGNED_BYTE,
+    GPUPixelTypeFloat = GL_FLOAT
+} GPUPixelType;
+
+@interface GPUImageRawDataInput : GPUImageOutput
+{
+    CGSize uploadedImageSize;
+
+    dispatch_semaphore_t dataUpdateSemaphore;
+}
+
+// Initialization and teardown
+- (id)initWithBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize;
+- (id)initWithBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize pixelFormat:(GPUPixelFormat)pixelFormat;
+- (id)initWithBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize pixelFormat:(GPUPixelFormat)pixelFormat type:(GPUPixelType)pixelType;
+
+/** Input data pixel format
+ */
+@property (readwrite, nonatomic) GPUPixelFormat pixelFormat;
+@property (readwrite, nonatomic) GPUPixelType pixelType;
+
+// Image rendering
+- (void)updateDataFromBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize;
+- (void)processData;
+- (void)processDataForTimestamp:(CMTime)frameTime;
+- (CGSize)outputImageSize;
+
+@end
diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRawDataInput.m b/LFLiveKit/Vendor/GPUImage/GPUImageRawDataInput.m
new file mode 100644
index 00000000..cfa3b128
--- /dev/null
+++ b/LFLiveKit/Vendor/GPUImage/GPUImageRawDataInput.m
@@ -0,0 +1,139 @@
+#import "GPUImageRawDataInput.h"
+
+@interface GPUImageRawDataInput()
+- (void)uploadBytes:(GLubyte *)bytesToUpload;
+@end
+
+@implementation GPUImageRawDataInput
+
+@synthesize pixelFormat = _pixelFormat;
+@synthesize pixelType = _pixelType;
+
+#pragma mark -
+#pragma mark Initialization and teardown
+
+// Convenience: BGRA bytes, unsigned-byte components.
+- (id)initWithBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize;
+{
+    if (!(self = [self initWithBytes:bytesToUpload size:imageSize pixelFormat:GPUPixelFormatBGRA type:GPUPixelTypeUByte]))
+    {
+        return nil;
+    }
+
+    return self;
+}
+
+// Convenience: caller-specified pixel format, unsigned-byte components.
+- (id)initWithBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize pixelFormat:(GPUPixelFormat)pixelFormat;
+{
+    if (!(self = [self initWithBytes:bytesToUpload size:imageSize pixelFormat:pixelFormat type:GPUPixelTypeUByte]))
+    {
+        return nil;
+    }
+
+    return self;
+}
+
+// Designated initializer: records the image size and pixel layout, then uploads
+// the bytes to a texture immediately. The semaphore (count 1) serializes later
+// processData calls so a frame in flight is never re-entered.
+- (id)initWithBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize pixelFormat:(GPUPixelFormat)pixelFormat type:(GPUPixelType)pixelType;
+{
+    if (!(self = [super init]))
+    {
+        return nil;
+    }
+
+    dataUpdateSemaphore = dispatch_semaphore_create(1);
+
+    uploadedImageSize = imageSize;
+    self.pixelFormat = pixelFormat;
+    self.pixelType = pixelType;
+
+    [self uploadBytes:bytesToUpload];
+
+    return self;
+}
+
+// ARC forbids explicit message send of 'release'; since iOS 6 even for dispatch_release() calls: stripping it out in that case is required.
+- (void)dealloc;
+{
+#if !OS_OBJECT_USE_OBJC
+    if (dataUpdateSemaphore != NULL)
+    {
+        dispatch_release(dataUpdateSemaphore);
+    }
+#endif
+}
+
+#pragma mark -
+#pragma mark Image rendering
+
+// Copies the caller's bytes into a cached texture-only framebuffer.
+// NOTE(review): glTexImage2D is passed _pixelFormat as both internalformat and
+// format; OpenGL ES restricts which combinations are legal — the upstream TODO
+// below acknowledges this. Confirm before relying on non-RGBA formats.
+- (void)uploadBytes:(GLubyte *)bytesToUpload;
+{
+    [GPUImageContext useImageProcessingContext];
+
+    // TODO: This probably isn't right, and will need to be corrected
+    outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:uploadedImageSize textureOptions:self.outputTextureOptions onlyTexture:YES];
+
+    glBindTexture(GL_TEXTURE_2D, [outputFramebuffer texture]);
+    glTexImage2D(GL_TEXTURE_2D, 0, _pixelFormat, (int)uploadedImageSize.width, (int)uploadedImageSize.height, 0, (GLint)_pixelFormat, (GLenum)_pixelType, bytesToUpload);
+}
+
+// Replaces the stored texture contents (and size) with a new byte buffer.
+- (void)updateDataFromBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize;
+{
+    uploadedImageSize = imageSize;
+
+    [self uploadBytes:bytesToUpload];
+}
+
+// Pushes the current texture to all targets with an invalid timestamp.
+// A non-blocking semaphore wait drops this call entirely if the previous
+// frame is still being processed.
+- (void)processData;
+{
+    if (dispatch_semaphore_wait(dataUpdateSemaphore, DISPATCH_TIME_NOW) != 0)
+    {
+        return;
+    }
+
+    runAsynchronouslyOnVideoProcessingQueue(^{
+
+        CGSize pixelSizeOfImage = [self outputImageSize];
+
+        for (id currentTarget in targets)
+        {
+            NSInteger indexOfObject = [targets indexOfObject:currentTarget];
+            NSInteger textureIndexOfTarget = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue];
+
+            [currentTarget setInputSize:pixelSizeOfImage atIndex:textureIndexOfTarget];
+            [currentTarget setInputFramebuffer:outputFramebuffer atIndex:textureIndexOfTarget];
+            [currentTarget newFrameReadyAtTime:kCMTimeInvalid atIndex:textureIndexOfTarget];
+        }
+
+        dispatch_semaphore_signal(dataUpdateSemaphore);
+    });
+}
+
+// Same as processData, but stamps the frame with the supplied time.
+// NOTE(review): unlike processData, this variant never calls
+// setInputFramebuffer:atIndex: on its targets — looks like an upstream
+// omission; confirm against GPUImage before depending on this path.
+- (void)processDataForTimestamp:(CMTime)frameTime;
+{
+    if (dispatch_semaphore_wait(dataUpdateSemaphore, DISPATCH_TIME_NOW) != 0)
+    {
+        return;
+    }
+
+    runAsynchronouslyOnVideoProcessingQueue(^{
+
+        CGSize pixelSizeOfImage = [self outputImageSize];
+
+        for (id currentTarget in targets)
+        {
+            NSInteger indexOfObject = [targets indexOfObject:currentTarget];
+            NSInteger textureIndexOfTarget = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue];
+
+            [currentTarget setInputSize:pixelSizeOfImage atIndex:textureIndexOfTarget];
+            [currentTarget newFrameReadyAtTime:frameTime atIndex:textureIndexOfTarget];
+        }
+
+        dispatch_semaphore_signal(dataUpdateSemaphore);
+    });
+}
+
+// Size of the most recently uploaded image, in pixels.
+- (CGSize)outputImageSize;
+{
+    return uploadedImageSize;
+}
+
+@end
diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRawDataOutput.h b/LFLiveKit/Vendor/GPUImage/GPUImageRawDataOutput.h
new file mode 100755
index 00000000..5a4538c1
--- /dev/null
+++ b/LFLiveKit/Vendor/GPUImage/GPUImageRawDataOutput.h
@@ -0,0 +1,44 @@
+// NOTE(review): the header name on the first #import below, and the protocol
+// list on the two @interface lines, appear to have been lost in text
+// extraction (angle-bracketed tokens stripped). Upstream GPUImage declares
+// #import <Foundation/Foundation.h> and NSObject <GPUImageInput> here —
+// confirm against the original file.
+#import 
+#import "GPUImageContext.h"
+
+struct GPUByteColorVector {
+    GLubyte red;
+    GLubyte green;
+    GLubyte blue;
+    GLubyte alpha;
+};
+typedef struct GPUByteColorVector GPUByteColorVector;
+
+@protocol GPUImageRawDataProcessor;
+
+#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
+@interface GPUImageRawDataOutput : NSObject {
+    CGSize imageSize;
+    GPUImageRotationMode inputRotation;
+    BOOL outputBGRA;
+}
+#else
+@interface GPUImageRawDataOutput : NSObject {
+    CGSize imageSize;
+    GPUImageRotationMode inputRotation;
+    BOOL outputBGRA;
+}
+#endif
+
+@property(readonly) GLubyte *rawBytesForImage;
+@property(nonatomic, copy) void(^newFrameAvailableBlock)(void);
+@property(nonatomic) BOOL enabled;
+
+// Initialization and teardown
+- (id)initWithImageSize:(CGSize)newImageSize resultsInBGRAFormat:(BOOL)resultsInBGRAFormat;
+
+// Data access
+- (GPUByteColorVector)colorAtLocation:(CGPoint)locationInImage;
+- (NSUInteger)bytesPerRowInOutput;
+
+- (void)setImageSize:(CGSize)newImageSize;
+
+- (void)lockFramebufferForReading;
+- (void)unlockFramebufferAfterReading;
+
+@end
diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageRawDataOutput.m b/LFLiveKit/Vendor/GPUImage/GPUImageRawDataOutput.m
new file mode 100755
index 00000000..18101e2c
--- /dev/null
+++ b/LFLiveKit/Vendor/GPUImage/GPUImageRawDataOutput.m
@@ -0,0 +1,307 @@
+#import "GPUImageRawDataOutput.h"
+
+#import "GPUImageContext.h"
+#import "GLProgram.h"
+#import "GPUImageFilter.h"
+#import "GPUImageMovieWriter.h"
+
+@interface GPUImageRawDataOutput ()
+{
+    GPUImageFramebuffer *firstInputFramebuffer, *outputFramebuffer, *retainedFramebuffer;
+
+    BOOL hasReadFromTheCurrentFrame;
+
+    GLProgram *dataProgram;
+    GLint dataPositionAttribute, dataTextureCoordinateAttribute;
+    GLint dataInputTextureUniform;
+
+    GLubyte *_rawBytesForImage;
+
+    BOOL lockNextFramebuffer;
+}
+
+// Frame rendering
+- (void)renderAtInternalSize;
+
+@end
+
+// Sink that makes the pixels of its input chain available to the CPU as a raw
+// byte buffer (rawBytesForImage), optionally swizzled to BGRA.
+@implementation GPUImageRawDataOutput
+
+@synthesize rawBytesForImage = _rawBytesForImage;
+@synthesize newFrameAvailableBlock = _newFrameAvailableBlock;
+@synthesize enabled;
+
+#pragma mark -
+#pragma mark Initialization and teardown
+
+// Builds the readback program. A color-swizzling shader is chosen exactly when
+// the requested byte order differs from what the readback path produces
+// natively (BGRA wanted without fast texture upload, or RGBA wanted with it).
+- (id)initWithImageSize:(CGSize)newImageSize resultsInBGRAFormat:(BOOL)resultsInBGRAFormat;
+{
+    if (!(self = [super init]))
+    {
+        return nil;
+    }
+
+    self.enabled = YES;
+    lockNextFramebuffer = NO;
+    outputBGRA = resultsInBGRAFormat;
+    imageSize = newImageSize;
+    hasReadFromTheCurrentFrame = NO;
+    _rawBytesForImage = NULL;
+    inputRotation = kGPUImageNoRotation;
+
+    [GPUImageContext useImageProcessingContext];
+    if ( (outputBGRA && ![GPUImageContext supportsFastTextureUpload]) || (!outputBGRA && [GPUImageContext supportsFastTextureUpload]) )
+    {
+        dataProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:kGPUImageVertexShaderString fragmentShaderString:kGPUImageColorSwizzlingFragmentShaderString];
+    }
+    else
+    {
+        dataProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:kGPUImageVertexShaderString fragmentShaderString:kGPUImagePassthroughFragmentShaderString];
+    }
+
+    if (!dataProgram.initialized)
+    {
+        [dataProgram addAttribute:@"position"];
+        [dataProgram addAttribute:@"inputTextureCoordinate"];
+
+        if (![dataProgram link])
+        {
+            NSString *progLog = [dataProgram programLog];
+            NSLog(@"Program link log: %@", progLog);
+            NSString *fragLog = [dataProgram fragmentShaderLog];
+            NSLog(@"Fragment shader compile log: %@", fragLog);
+            NSString *vertLog = [dataProgram vertexShaderLog];
+            NSLog(@"Vertex shader compile log: %@", vertLog);
+            dataProgram = nil;
+            NSAssert(NO, @"Filter shader link failed");
+        }
+    }
+
+    dataPositionAttribute = [dataProgram attributeIndex:@"position"];
+    dataTextureCoordinateAttribute = [dataProgram attributeIndex:@"inputTextureCoordinate"];
+    dataInputTextureUniform = [dataProgram uniformIndex:@"inputImageTexture"];
+
+    return self;
+}
+
+// The CPU-side buffer is only malloc'd on the slow (glReadPixels) path; on the
+// fast-texture-upload path rawBytesForImage points into a framebuffer, so it
+// must not be freed here.
+- (void)dealloc
+{
+    if (_rawBytesForImage != NULL && (![GPUImageContext supportsFastTextureUpload]))
+    {
+        free(_rawBytesForImage);
+        _rawBytesForImage = NULL;
+    }
+}
+
+#pragma mark -
+#pragma mark Data access
+
+// Draws the input framebuffer through the readback program into a framebuffer
+// of the requested output size, optionally retaining/locking it for direct
+// byte access (see lockFramebufferForReading).
+- (void)renderAtInternalSize;
+{
+    [GPUImageContext setActiveShaderProgram:dataProgram];
+
+    outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:imageSize onlyTexture:NO];
+    [outputFramebuffer activateFramebuffer];
+
+    if(lockNextFramebuffer)
+    {
+        retainedFramebuffer = outputFramebuffer;
+        [retainedFramebuffer lock];
+        [retainedFramebuffer lockForReading];
+        lockNextFramebuffer = NO;
+    }
+
+    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
+    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+
+    static const GLfloat squareVertices[] = {
+        -1.0f, -1.0f,
+        1.0f, -1.0f,
+        -1.0f,  1.0f,
+        1.0f,  1.0f,
+    };
+
+    static const GLfloat textureCoordinates[] = {
+        0.0f, 0.0f,
+        1.0f, 0.0f,
+        0.0f, 1.0f,
+        1.0f, 1.0f,
+    };
+
+    glActiveTexture(GL_TEXTURE4);
+    glBindTexture(GL_TEXTURE_2D, [firstInputFramebuffer texture]);
+    glUniform1i(dataInputTextureUniform, 4);
+
+    glVertexAttribPointer(dataPositionAttribute, 2, GL_FLOAT, 0, 0, squareVertices);
+    glVertexAttribPointer(dataTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates);
+
+    glEnableVertexAttribArray(dataPositionAttribute);
+    glEnableVertexAttribArray(dataTextureCoordinateAttribute);
+
+    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
+    [firstInputFramebuffer unlock];
+}
+
+// Reads a single pixel from the CPU buffer. The y coordinate is flipped
+// (imageSize.height - y) and both coordinates are clamped to the image bounds.
+// When outputBGRA is set, red and blue are swapped so the struct fields stay
+// semantically correct.
+- (GPUByteColorVector)colorAtLocation:(CGPoint)locationInImage;
+{
+    GPUByteColorVector *imageColorBytes = (GPUByteColorVector *)self.rawBytesForImage;
+//    NSLog(@"Row start");
+//    for (unsigned int currentXPosition = 0; currentXPosition < (imageSize.width * 2.0); currentXPosition++)
+//    {
+//        GPUByteColorVector byteAtPosition = imageColorBytes[currentXPosition];
+//        NSLog(@"%d - %d, %d, %d", currentXPosition, byteAtPosition.red, byteAtPosition.green, byteAtPosition.blue);
+//    }
+//    NSLog(@"Row end");
+
+//    GPUByteColorVector byteAtOne = imageColorBytes[1];
+//    GPUByteColorVector byteAtWidth = imageColorBytes[(int)imageSize.width - 3];
+//    GPUByteColorVector byteAtHeight = imageColorBytes[(int)(imageSize.height - 1) * (int)imageSize.width];
+//    NSLog(@"Byte 1: %d, %d, %d, byte 2: %d, %d, %d, byte 3: %d, %d, %d", byteAtOne.red, byteAtOne.green, byteAtOne.blue, byteAtWidth.red, byteAtWidth.green, byteAtWidth.blue, byteAtHeight.red, byteAtHeight.green, byteAtHeight.blue);
+
+    CGPoint locationToPickFrom = CGPointZero;
+    locationToPickFrom.x = MIN(MAX(locationInImage.x, 0.0), (imageSize.width - 1.0));
+    locationToPickFrom.y = MIN(MAX((imageSize.height - locationInImage.y), 0.0), (imageSize.height - 1.0));
+
+    if (outputBGRA)
+    {
+        GPUByteColorVector flippedColor = imageColorBytes[(int)(round((locationToPickFrom.y * imageSize.width) + locationToPickFrom.x))];
+        GLubyte temporaryRed = flippedColor.red;
+
+        flippedColor.red = flippedColor.blue;
+        flippedColor.blue = temporaryRed;
+
+        return flippedColor;
+    }
+    else
+    {
+        return imageColorBytes[(int)(round((locationToPickFrom.y * imageSize.width) + locationToPickFrom.x))];
+    }
+}
+
+#pragma mark -
+#pragma mark GPUImageInput protocol
+
+// Marks the cached bytes stale and notifies the client callback, if any.
+- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex;
+{
+    hasReadFromTheCurrentFrame = NO;
+
+    if (_newFrameAvailableBlock != NULL)
+    {
+        _newFrameAvailableBlock();
+    }
+}
+
+- (NSInteger)nextAvailableTextureIndex;
+{
+    return 0;
+}
+
+- (void)setInputFramebuffer:(GPUImageFramebuffer *)newInputFramebuffer atIndex:(NSInteger)textureIndex;
+{
+    firstInputFramebuffer = newInputFramebuffer;
+    [firstInputFramebuffer lock];
+}
+
+- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex;
+{
+    inputRotation = newInputRotation;
+}
+
+- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex;
+{
+}
+
+- (CGSize)maximumOutputSize;
+{
+    return imageSize;
+}
+
+- (void)endProcessing;
+{
+}
+
+- (BOOL)shouldIgnoreUpdatesToThisTarget;
+{
+    return NO;
+}
+
+- (BOOL)wantsMonochromeInput;
+{
+    return NO;
+}
+
+- (void)setCurrentlyReceivingMonochromeInput:(BOOL)newValue;
+{
+
+}
+
+#pragma mark -
+#pragma mark Accessors
+
+// Lazily renders the current frame and returns its pixels. On the fast path
+// the pointer aliases the framebuffer's byte buffer; otherwise the frame is
+// glReadPixels'd into a lazily-allocated RGBA buffer. The result is cached
+// until newFrameReadyAtTime: invalidates it.
+- (GLubyte *)rawBytesForImage;
+{
+    if ( (_rawBytesForImage == NULL) && (![GPUImageContext supportsFastTextureUpload]) )
+    {
+        _rawBytesForImage = (GLubyte *) calloc(imageSize.width * imageSize.height * 4, sizeof(GLubyte));
+        hasReadFromTheCurrentFrame = NO;
+    }
+
+    if (hasReadFromTheCurrentFrame)
+    {
+        return _rawBytesForImage;
+    }
+    else
+    {
+        runSynchronouslyOnVideoProcessingQueue(^{
+            // Note: the fast texture caches speed up 640x480 frame reads from 9.6 ms to 3.1 ms on iPhone 4S
+
+            [GPUImageContext useImageProcessingContext];
+            [self renderAtInternalSize];
+
+            if ([GPUImageContext supportsFastTextureUpload])
+            {
+                glFinish();
+                _rawBytesForImage = [outputFramebuffer byteBuffer];
+            }
+            else
+            {
+                glReadPixels(0, 0, imageSize.width, imageSize.height, GL_RGBA, GL_UNSIGNED_BYTE, _rawBytesForImage);
+                // GL_EXT_read_format_bgra
+                // glReadPixels(0, 0, imageSize.width, imageSize.height, GL_BGRA_EXT, GL_UNSIGNED_BYTE, _rawBytesForImage);
+            }
+
+            hasReadFromTheCurrentFrame = YES;
+
+        });
+
+        return _rawBytesForImage;
+    }
+}
+
+// Row stride of the locked framebuffer; only meaningful between
+// lockFramebufferForReading and unlockFramebufferAfterReading.
+- (NSUInteger)bytesPerRowInOutput;
+{
+    return [retainedFramebuffer bytesPerRow];
+}
+
+// Changes the readback size; discards the CPU-side buffer (slow path only) so
+// it is reallocated at the new size on the next read.
+- (void)setImageSize:(CGSize)newImageSize {
+    imageSize = newImageSize;
+    if (_rawBytesForImage != NULL && (![GPUImageContext supportsFastTextureUpload]))
+    {
+        free(_rawBytesForImage);
+        _rawBytesForImage = NULL;
+    }
+}
+
+// Requests that the next rendered framebuffer be retained and mapped for
+// reading; pair with unlockFramebufferAfterReading.
+- (void)lockFramebufferForReading;
+{
+    lockNextFramebuffer = YES;
+}
+
+- (void)unlockFramebufferAfterReading;
+{
+    [retainedFramebuffer unlockAfterReading];
+    [retainedFramebuffer unlock];
+    retainedFramebuffer = nil;
+}
+
+@end
diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSaturationBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSaturationBlendFilter.h
new file mode 100644
index 00000000..767892a5
--- /dev/null
+++ b/LFLiveKit/Vendor/GPUImage/GPUImageSaturationBlendFilter.h
@@ -0,0 +1,5 @@
+#import "GPUImageTwoInputFilter.h"
+
+@interface GPUImageSaturationBlendFilter : GPUImageTwoInputFilter
+
+@end
diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSaturationBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSaturationBlendFilter.m
new file mode 100644
index 00000000..da37f6aa
--- /dev/null
+++ b/LFLiveKit/Vendor/GPUImage/GPUImageSaturationBlendFilter.m
@@
-0,0 +1,213 @@ +#import "GPUImageSaturationBlendFilter.h" + +/** + * Saturation blend mode based upon pseudo code from the PDF specification. + */ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageSaturationBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + highp float lum(lowp vec3 c) { + return dot(c, vec3(0.3, 0.59, 0.11)); + } + + lowp vec3 clipcolor(lowp vec3 c) { + highp float l = lum(c); + lowp float n = min(min(c.r, c.g), c.b); + lowp float x = max(max(c.r, c.g), c.b); + + if (n < 0.0) { + c.r = l + ((c.r - l) * l) / (l - n); + c.g = l + ((c.g - l) * l) / (l - n); + c.b = l + ((c.b - l) * l) / (l - n); + } + if (x > 1.0) { + c.r = l + ((c.r - l) * (1.0 - l)) / (x - l); + c.g = l + ((c.g - l) * (1.0 - l)) / (x - l); + c.b = l + ((c.b - l) * (1.0 - l)) / (x - l); + } + + return c; + } + + lowp vec3 setlum(lowp vec3 c, highp float l) { + highp float d = l - lum(c); + c = c + vec3(d); + return clipcolor(c); + } + + highp float sat(lowp vec3 c) { + lowp float n = min(min(c.r, c.g), c.b); + lowp float x = max(max(c.r, c.g), c.b); + return x - n; + } + + lowp float mid(lowp float cmin, lowp float cmid, lowp float cmax, highp float s) { + return ((cmid - cmin) * s) / (cmax - cmin); + } + + lowp vec3 setsat(lowp vec3 c, highp float s) { + if (c.r > c.g) { + if (c.r > c.b) { + if (c.g > c.b) { + /* g is mid, b is min */ + c.g = mid(c.b, c.g, c.r, s); + c.b = 0.0; + } else { + /* b is mid, g is min */ + c.b = mid(c.g, c.b, c.r, s); + c.g = 0.0; + } + c.r = s; + } else { + /* b is max, r is mid, g is min */ + c.r = mid(c.g, c.r, c.b, s); + c.b = s; + c.r = 0.0; + } + } else if (c.r > c.b) { + /* g is max, r is mid, b is min */ + c.r = mid(c.b, c.r, c.g, s); + c.g = s; + c.b = 0.0; + } else if (c.g > c.b) { + /* g is max, b is mid, r is min */ + c.b = mid(c.r, c.b, c.g, s); + c.g = s; + 
c.r = 0.0; + } else if (c.b > c.g) { + /* b is max, g is mid, r is min */ + c.g = mid(c.r, c.g, c.b, s); + c.b = s; + c.r = 0.0; + } else { + c = vec3(0.0); + } + return c; + } + + void main() + { + highp vec4 baseColor = texture2D(inputImageTexture, textureCoordinate); + highp vec4 overlayColor = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(baseColor.rgb * (1.0 - overlayColor.a) + setlum(setsat(baseColor.rgb, sat(overlayColor.rgb)), lum(baseColor.rgb)) * overlayColor.a, baseColor.a); + } +); +#else +NSString *const kGPUImageSaturationBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + float lum(vec3 c) { + return dot(c, vec3(0.3, 0.59, 0.11)); + } + + vec3 clipcolor(vec3 c) { + float l = lum(c); + float n = min(min(c.r, c.g), c.b); + float x = max(max(c.r, c.g), c.b); + + if (n < 0.0) { + c.r = l + ((c.r - l) * l) / (l - n); + c.g = l + ((c.g - l) * l) / (l - n); + c.b = l + ((c.b - l) * l) / (l - n); + } + if (x > 1.0) { + c.r = l + ((c.r - l) * (1.0 - l)) / (x - l); + c.g = l + ((c.g - l) * (1.0 - l)) / (x - l); + c.b = l + ((c.b - l) * (1.0 - l)) / (x - l); + } + + return c; + } + + vec3 setlum(vec3 c, float l) { + float d = l - lum(c); + c = c + vec3(d); + return clipcolor(c); + } + + float sat(vec3 c) { + float n = min(min(c.r, c.g), c.b); + float x = max(max(c.r, c.g), c.b); + return x - n; + } + + float mid(float cmin, float cmid, float cmax, float s) { + return ((cmid - cmin) * s) / (cmax - cmin); + } + + vec3 setsat(vec3 c, float s) { + if (c.r > c.g) { + if (c.r > c.b) { + if (c.g > c.b) { + /* g is mid, b is min */ + c.g = mid(c.b, c.g, c.r, s); + c.b = 0.0; + } else { + /* b is mid, g is min */ + c.b = mid(c.g, c.b, c.r, s); + c.g = 0.0; + } + c.r = s; + } else { + /* b is max, r is mid, g is min */ + c.r = mid(c.g, c.r, c.b, s); + c.b = s; + c.r = 0.0; + } + } else if (c.r > 
c.b) { + /* g is max, r is mid, b is min */ + c.r = mid(c.b, c.r, c.g, s); + c.g = s; + c.b = 0.0; + } else if (c.g > c.b) { + /* g is max, b is mid, r is min */ + c.b = mid(c.r, c.b, c.g, s); + c.g = s; + c.r = 0.0; + } else if (c.b > c.g) { + /* b is max, g is mid, r is min */ + c.g = mid(c.r, c.g, c.b, s); + c.b = s; + c.r = 0.0; + } else { + c = vec3(0.0); + } + return c; + } + + void main() + { + vec4 baseColor = texture2D(inputImageTexture, textureCoordinate); + vec4 overlayColor = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(baseColor.rgb * (1.0 - overlayColor.a) + setlum(setsat(baseColor.rgb, sat(overlayColor.rgb)), lum(baseColor.rgb)) * overlayColor.a, baseColor.a); + } +); +#endif + + +@implementation GPUImageSaturationBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageSaturationBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSaturationFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSaturationFilter.h new file mode 100755 index 00000000..1c6ff5bd --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSaturationFilter.h @@ -0,0 +1,14 @@ +#import "GPUImageFilter.h" + +/** Adjusts the saturation of an image + */ +@interface GPUImageSaturationFilter : GPUImageFilter +{ + GLint saturationUniform; +} + +/** Saturation ranges from 0.0 (fully desaturated) to 2.0 (max saturation), with 1.0 as the normal level + */ +@property(readwrite, nonatomic) CGFloat saturation; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSaturationFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSaturationFilter.m new file mode 100755 index 00000000..fc373d4a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSaturationFilter.m @@ -0,0 +1,78 @@ +#import "GPUImageSaturationFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageSaturationFragmentShaderString = SHADER_STRING +( + varying highp vec2 
textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform lowp float saturation; + + // Values from "Graphics Shaders: Theory and Practice" by Bailey and Cunningham + const mediump vec3 luminanceWeighting = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + lowp float luminance = dot(textureColor.rgb, luminanceWeighting); + lowp vec3 greyScaleColor = vec3(luminance); + + gl_FragColor = vec4(mix(greyScaleColor, textureColor.rgb, saturation), textureColor.w); + + } +); +#else +NSString *const kGPUImageSaturationFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float saturation; + + // Values from "Graphics Shaders: Theory and Practice" by Bailey and Cunningham + const vec3 luminanceWeighting = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float luminance = dot(textureColor.rgb, luminanceWeighting); + vec3 greyScaleColor = vec3(luminance); + + gl_FragColor = vec4(mix(greyScaleColor, textureColor.rgb, saturation), textureColor.w); + + } + ); +#endif + +@implementation GPUImageSaturationFilter + +@synthesize saturation = _saturation; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageSaturationFragmentShaderString])) + { + return nil; + } + + saturationUniform = [filterProgram uniformIndex:@"saturation"]; + self.saturation = 1.0; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setSaturation:(CGFloat)newValue; +{ + _saturation = newValue; + + [self setFloat:_saturation forUniform:saturationUniform program:filterProgram]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageScreenBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageScreenBlendFilter.h new file mode 100755 index 00000000..2df3abf3 --- /dev/null +++ 
b/LFLiveKit/Vendor/GPUImage/GPUImageScreenBlendFilter.h @@ -0,0 +1,7 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageScreenBlendFilter : GPUImageTwoInputFilter +{ +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageScreenBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageScreenBlendFilter.m new file mode 100755 index 00000000..d871e7db --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageScreenBlendFilter.m @@ -0,0 +1,52 @@ +#import "GPUImageScreenBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageScreenBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + mediump vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + mediump vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + mediump vec4 whiteColor = vec4(1.0); + gl_FragColor = whiteColor - ((whiteColor - textureColor2) * (whiteColor - textureColor)); + } +); +#else +NSString *const kGPUImageScreenBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + vec4 whiteColor = vec4(1.0); + gl_FragColor = whiteColor - ((whiteColor - textureColor2) * (whiteColor - textureColor)); + } +); +#endif + +@implementation GPUImageScreenBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageScreenBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSepiaFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSepiaFilter.h new file mode 100755 index 
00000000..a45164fe --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSepiaFilter.h @@ -0,0 +1,6 @@ +#import "GPUImageColorMatrixFilter.h" + +/// Simple sepia tone filter +@interface GPUImageSepiaFilter : GPUImageColorMatrixFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSepiaFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSepiaFilter.m new file mode 100755 index 00000000..71668d63 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSepiaFilter.m @@ -0,0 +1,24 @@ +#import "GPUImageSepiaFilter.h" + +@implementation GPUImageSepiaFilter + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + self.intensity = 1.0; + self.colorMatrix = (GPUMatrix4x4){ + {0.3588, 0.7044, 0.1368, 0.0}, + {0.2990, 0.5870, 0.1140, 0.0}, + {0.2392, 0.4696, 0.0912 ,0.0}, + {0,0,0,1.0}, + }; + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSharpenFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSharpenFilter.h new file mode 100755 index 00000000..739df503 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSharpenFilter.h @@ -0,0 +1,12 @@ +#import "GPUImageFilter.h" + +@interface GPUImageSharpenFilter : GPUImageFilter +{ + GLint sharpnessUniform; + GLint imageWidthFactorUniform, imageHeightFactorUniform; +} + +// Sharpness ranges from -4.0 to 4.0, with 0.0 as the normal level +@property(readwrite, nonatomic) CGFloat sharpness; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSharpenFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSharpenFilter.m new file mode 100755 index 00000000..6d7367a9 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSharpenFilter.m @@ -0,0 +1,147 @@ +#import "GPUImageSharpenFilter.h" + +NSString *const kGPUImageSharpenVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + + uniform float imageWidthFactor; + uniform float imageHeightFactor; + uniform float sharpness; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 
rightTextureCoordinate; + varying vec2 topTextureCoordinate; + varying vec2 bottomTextureCoordinate; + + varying float centerMultiplier; + varying float edgeMultiplier; + + void main() + { + gl_Position = position; + + vec2 widthStep = vec2(imageWidthFactor, 0.0); + vec2 heightStep = vec2(0.0, imageHeightFactor); + + textureCoordinate = inputTextureCoordinate.xy; + leftTextureCoordinate = inputTextureCoordinate.xy - widthStep; + rightTextureCoordinate = inputTextureCoordinate.xy + widthStep; + topTextureCoordinate = inputTextureCoordinate.xy + heightStep; + bottomTextureCoordinate = inputTextureCoordinate.xy - heightStep; + + centerMultiplier = 1.0 + 4.0 * sharpness; + edgeMultiplier = sharpness; + } +); + + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageSharpenFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying highp vec2 textureCoordinate; + varying highp vec2 leftTextureCoordinate; + varying highp vec2 rightTextureCoordinate; + varying highp vec2 topTextureCoordinate; + varying highp vec2 bottomTextureCoordinate; + + varying highp float centerMultiplier; + varying highp float edgeMultiplier; + + uniform sampler2D inputImageTexture; + + void main() + { + mediump vec3 textureColor = texture2D(inputImageTexture, textureCoordinate).rgb; + mediump vec3 leftTextureColor = texture2D(inputImageTexture, leftTextureCoordinate).rgb; + mediump vec3 rightTextureColor = texture2D(inputImageTexture, rightTextureCoordinate).rgb; + mediump vec3 topTextureColor = texture2D(inputImageTexture, topTextureCoordinate).rgb; + mediump vec3 bottomTextureColor = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; + + gl_FragColor = vec4((textureColor * centerMultiplier - (leftTextureColor * edgeMultiplier + rightTextureColor * edgeMultiplier + topTextureColor * edgeMultiplier + bottomTextureColor * edgeMultiplier)), texture2D(inputImageTexture, bottomTextureCoordinate).w); + } +); +#else +NSString *const 
kGPUImageSharpenFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + varying vec2 topTextureCoordinate; + varying vec2 bottomTextureCoordinate; + + varying float centerMultiplier; + varying float edgeMultiplier; + + uniform sampler2D inputImageTexture; + + void main() + { + vec3 textureColor = texture2D(inputImageTexture, textureCoordinate).rgb; + vec3 leftTextureColor = texture2D(inputImageTexture, leftTextureCoordinate).rgb; + vec3 rightTextureColor = texture2D(inputImageTexture, rightTextureCoordinate).rgb; + vec3 topTextureColor = texture2D(inputImageTexture, topTextureCoordinate).rgb; + vec3 bottomTextureColor = texture2D(inputImageTexture, bottomTextureCoordinate).rgb; + + gl_FragColor = vec4((textureColor * centerMultiplier - (leftTextureColor * edgeMultiplier + rightTextureColor * edgeMultiplier + topTextureColor * edgeMultiplier + bottomTextureColor * edgeMultiplier)), texture2D(inputImageTexture, bottomTextureCoordinate).w); + } +); +#endif + + +@implementation GPUImageSharpenFilter + +@synthesize sharpness = _sharpness; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithVertexShaderFromString:kGPUImageSharpenVertexShaderString fragmentShaderFromString:kGPUImageSharpenFragmentShaderString])) + { + return nil; + } + + sharpnessUniform = [filterProgram uniformIndex:@"sharpness"]; + self.sharpness = 0.0; + + imageWidthFactorUniform = [filterProgram uniformIndex:@"imageWidthFactor"]; + imageHeightFactorUniform = [filterProgram uniformIndex:@"imageHeightFactor"]; + + return self; +} + +- (void)setupFilterForSize:(CGSize)filterFrameSize; +{ + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:filterProgram]; + + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + glUniform1f(imageWidthFactorUniform, 1.0 / filterFrameSize.height); + 
glUniform1f(imageHeightFactorUniform, 1.0 / filterFrameSize.width); + } + else + { + glUniform1f(imageWidthFactorUniform, 1.0 / filterFrameSize.width); + glUniform1f(imageHeightFactorUniform, 1.0 / filterFrameSize.height); + } + }); +} + +#pragma mark - +#pragma mark Accessors + +- (void)setSharpness:(CGFloat)newValue; +{ + _sharpness = newValue; + + [self setFloat:_sharpness forUniform:sharpnessUniform program:filterProgram]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageShiTomasiFeatureDetectionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageShiTomasiFeatureDetectionFilter.h new file mode 100644 index 00000000..b16ebc01 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageShiTomasiFeatureDetectionFilter.h @@ -0,0 +1,13 @@ +#import "GPUImageHarrisCornerDetectionFilter.h" + +/** Shi-Tomasi feature detector + + This is the Shi-Tomasi feature detector, as described in + J. Shi and C. Tomasi. Good features to track. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 593-600, June 1994. 
+ */ + +@interface GPUImageShiTomasiFeatureDetectionFilter : GPUImageHarrisCornerDetectionFilter + +// Compared to the Harris corner detector, the default sensitivity value for this detector is set to 1.5 + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageShiTomasiFeatureDetectionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageShiTomasiFeatureDetectionFilter.m new file mode 100644 index 00000000..e58cbb4f --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageShiTomasiFeatureDetectionFilter.m @@ -0,0 +1,65 @@ +#import "GPUImageShiTomasiFeatureDetectionFilter.h" + +@implementation GPUImageShiTomasiFeatureDetectionFilter + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageShiTomasiCornerDetectionFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform lowp float sensitivity; + + void main() + { + mediump vec3 derivativeElements = texture2D(inputImageTexture, textureCoordinate).rgb; + + mediump float derivativeDifference = derivativeElements.x - derivativeElements.y; + mediump float zElement = (derivativeElements.z * 2.0) - 1.0; + + // R = Ix^2 + Iy^2 - sqrt( (Ix^2 - Iy^2)^2 + 4 * Ixy * Ixy) + mediump float cornerness = derivativeElements.x + derivativeElements.y - sqrt(derivativeDifference * derivativeDifference + 4.0 * zElement * zElement); + + gl_FragColor = vec4(vec3(cornerness * sensitivity), 1.0); + } +); +#else +NSString *const kGPUImageShiTomasiCornerDetectionFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float sensitivity; + + void main() + { + vec3 derivativeElements = texture2D(inputImageTexture, textureCoordinate).rgb; + + float derivativeDifference = derivativeElements.x - derivativeElements.y; + float zElement = (derivativeElements.z * 2.0) - 1.0; + + // R = Ix^2 + Iy^2 - sqrt( (Ix^2 - Iy^2)^2 + 4 * Ixy * Ixy) + float cornerness = derivativeElements.x + derivativeElements.y - 
sqrt(derivativeDifference * derivativeDifference + 4.0 * zElement * zElement); + + gl_FragColor = vec4(vec3(cornerness * sensitivity), 1.0); + } +); +#endif + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithCornerDetectionFragmentShader:kGPUImageShiTomasiCornerDetectionFragmentShaderString])) + { + return nil; + } + + self.sensitivity = 1.5; + + return self; +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSingleComponentGaussianBlurFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSingleComponentGaussianBlurFilter.h new file mode 100644 index 00000000..934b1e3a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSingleComponentGaussianBlurFilter.h @@ -0,0 +1,7 @@ +#import "GPUImageGaussianBlurFilter.h" + +// This filter merely performs the standard Gaussian blur on the red color channel (assuming a luminance image) + +@interface GPUImageSingleComponentGaussianBlurFilter : GPUImageGaussianBlurFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSingleComponentGaussianBlurFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSingleComponentGaussianBlurFilter.m new file mode 100644 index 00000000..4ff0d91d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSingleComponentGaussianBlurFilter.m @@ -0,0 +1,189 @@ +#import "GPUImageSingleComponentGaussianBlurFilter.h" + +@implementation GPUImageSingleComponentGaussianBlurFilter + ++ (NSString *)vertexShaderForOptimizedBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; +{ + if (blurRadius < 1) + { + return kGPUImageVertexShaderString; + } + + // First, generate the normal Gaussian weights for a given sigma + GLfloat *standardGaussianWeights = calloc(blurRadius + 1, sizeof(GLfloat)); + GLfloat sumOfWeights = 0.0; + for (NSUInteger currentGaussianWeightIndex = 0; currentGaussianWeightIndex < blurRadius + 1; currentGaussianWeightIndex++) + { + standardGaussianWeights[currentGaussianWeightIndex] = (1.0 / sqrt(2.0 * M_PI * pow(sigma, 2.0))) * 
exp(-pow(currentGaussianWeightIndex, 2.0) / (2.0 * pow(sigma, 2.0))); + + if (currentGaussianWeightIndex == 0) + { + sumOfWeights += standardGaussianWeights[currentGaussianWeightIndex]; + } + else + { + sumOfWeights += 2.0 * standardGaussianWeights[currentGaussianWeightIndex]; + } + } + + // Next, normalize these weights to prevent the clipping of the Gaussian curve at the end of the discrete samples from reducing luminance + for (NSUInteger currentGaussianWeightIndex = 0; currentGaussianWeightIndex < blurRadius + 1; currentGaussianWeightIndex++) + { + standardGaussianWeights[currentGaussianWeightIndex] = standardGaussianWeights[currentGaussianWeightIndex] / sumOfWeights; + } + + // From these weights we calculate the offsets to read interpolated values from + NSUInteger numberOfOptimizedOffsets = MIN(blurRadius / 2 + (blurRadius % 2), 7); + GLfloat *optimizedGaussianOffsets = calloc(numberOfOptimizedOffsets, sizeof(GLfloat)); + + for (NSUInteger currentOptimizedOffset = 0; currentOptimizedOffset < numberOfOptimizedOffsets; currentOptimizedOffset++) + { + GLfloat firstWeight = standardGaussianWeights[currentOptimizedOffset*2 + 1]; + GLfloat secondWeight = standardGaussianWeights[currentOptimizedOffset*2 + 2]; + + GLfloat optimizedWeight = firstWeight + secondWeight; + + optimizedGaussianOffsets[currentOptimizedOffset] = (firstWeight * (currentOptimizedOffset*2 + 1) + secondWeight * (currentOptimizedOffset*2 + 2)) / optimizedWeight; + } + + NSMutableString *shaderString = [[NSMutableString alloc] init]; + // Header + [shaderString appendFormat:@"\ + attribute vec4 position;\n\ + attribute vec4 inputTextureCoordinate;\n\ + \n\ + uniform float texelWidthOffset;\n\ + uniform float texelHeightOffset;\n\ + \n\ + varying vec2 blurCoordinates[%lu];\n\ + \n\ + void main()\n\ + {\n\ + gl_Position = position;\n\ + \n\ + vec2 singleStepOffset = vec2(texelWidthOffset, texelHeightOffset);\n", (unsigned long)(1 + (numberOfOptimizedOffsets * 2))]; + + // Inner offset loop + 
[shaderString appendString:@"blurCoordinates[0] = inputTextureCoordinate.xy;\n"]; + for (NSUInteger currentOptimizedOffset = 0; currentOptimizedOffset < numberOfOptimizedOffsets; currentOptimizedOffset++) + { + [shaderString appendFormat:@"\ + blurCoordinates[%lu] = inputTextureCoordinate.xy + singleStepOffset * %f;\n\ + blurCoordinates[%lu] = inputTextureCoordinate.xy - singleStepOffset * %f;\n", (unsigned long)((currentOptimizedOffset * 2) + 1), optimizedGaussianOffsets[currentOptimizedOffset], (unsigned long)((currentOptimizedOffset * 2) + 2), optimizedGaussianOffsets[currentOptimizedOffset]]; + } + + // Footer + [shaderString appendString:@"}\n"]; + + free(optimizedGaussianOffsets); + free(standardGaussianWeights); + return shaderString; +} + ++ (NSString *)fragmentShaderForOptimizedBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; +{ + if (blurRadius < 1) + { + return kGPUImagePassthroughFragmentShaderString; + } + + // First, generate the normal Gaussian weights for a given sigma + GLfloat *standardGaussianWeights = calloc(blurRadius + 1, sizeof(GLfloat)); + GLfloat sumOfWeights = 0.0; + for (NSUInteger currentGaussianWeightIndex = 0; currentGaussianWeightIndex < blurRadius + 1; currentGaussianWeightIndex++) + { + standardGaussianWeights[currentGaussianWeightIndex] = (1.0 / sqrt(2.0 * M_PI * pow(sigma, 2.0))) * exp(-pow(currentGaussianWeightIndex, 2.0) / (2.0 * pow(sigma, 2.0))); + + if (currentGaussianWeightIndex == 0) + { + sumOfWeights += standardGaussianWeights[currentGaussianWeightIndex]; + } + else + { + sumOfWeights += 2.0 * standardGaussianWeights[currentGaussianWeightIndex]; + } + } + + // Next, normalize these weights to prevent the clipping of the Gaussian curve at the end of the discrete samples from reducing luminance + for (NSUInteger currentGaussianWeightIndex = 0; currentGaussianWeightIndex < blurRadius + 1; currentGaussianWeightIndex++) + { + standardGaussianWeights[currentGaussianWeightIndex] = 
standardGaussianWeights[currentGaussianWeightIndex] / sumOfWeights; + } + + // From these weights we calculate the offsets to read interpolated values from + NSUInteger numberOfOptimizedOffsets = MIN(blurRadius / 2 + (blurRadius % 2), 7); + NSUInteger trueNumberOfOptimizedOffsets = blurRadius / 2 + (blurRadius % 2); + + NSMutableString *shaderString = [[NSMutableString alloc] init]; + + // Header +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [shaderString appendFormat:@"\ + uniform sampler2D inputImageTexture;\n\ + uniform highp float texelWidthOffset;\n\ + uniform highp float texelHeightOffset;\n\ + \n\ + varying highp vec2 blurCoordinates[%lu];\n\ + \n\ + void main()\n\ + {\n\ + lowp float sum = 0.0;\n", (unsigned long)(1 + (numberOfOptimizedOffsets * 2)) ]; +#else + [shaderString appendFormat:@"\ + uniform sampler2D inputImageTexture;\n\ + uniform float texelWidthOffset;\n\ + uniform float texelHeightOffset;\n\ + \n\ + varying vec2 blurCoordinates[%lu];\n\ + \n\ + void main()\n\ + {\n\ + float sum = 0.0;\n", 1 + (numberOfOptimizedOffsets * 2) ]; +#endif + + // Inner texture loop + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[0]).r * %f;\n", standardGaussianWeights[0]]; + + for (NSUInteger currentBlurCoordinateIndex = 0; currentBlurCoordinateIndex < numberOfOptimizedOffsets; currentBlurCoordinateIndex++) + { + GLfloat firstWeight = standardGaussianWeights[currentBlurCoordinateIndex * 2 + 1]; + GLfloat secondWeight = standardGaussianWeights[currentBlurCoordinateIndex * 2 + 2]; + GLfloat optimizedWeight = firstWeight + secondWeight; + + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[%lu]).r * %f;\n", (unsigned long)((currentBlurCoordinateIndex * 2) + 1), optimizedWeight]; + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[%lu]).r * %f;\n", (unsigned long)((currentBlurCoordinateIndex * 2) + 2), optimizedWeight]; + } + + // If the number of required samples 
exceeds the amount we can pass in via varyings, we have to do dependent texture reads in the fragment shader + if (trueNumberOfOptimizedOffsets > numberOfOptimizedOffsets) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [shaderString appendString:@"highp vec2 singleStepOffset = vec2(texelWidthOffset, texelHeightOffset);\n"]; +#else + [shaderString appendString:@"highp vec2 singleStepOffset = vec2(texelWidthOffset, texelHeightOffset);\n"]; +#endif + + for (NSUInteger currentOverlowTextureRead = numberOfOptimizedOffsets; currentOverlowTextureRead < trueNumberOfOptimizedOffsets; currentOverlowTextureRead++) + { + GLfloat firstWeight = standardGaussianWeights[currentOverlowTextureRead * 2 + 1]; + GLfloat secondWeight = standardGaussianWeights[currentOverlowTextureRead * 2 + 2]; + + GLfloat optimizedWeight = firstWeight + secondWeight; + GLfloat optimizedOffset = (firstWeight * (currentOverlowTextureRead * 2 + 1) + secondWeight * (currentOverlowTextureRead * 2 + 2)) / optimizedWeight; + + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[0] + singleStepOffset * %f).r * %f;\n", optimizedOffset, optimizedWeight]; + [shaderString appendFormat:@"sum += texture2D(inputImageTexture, blurCoordinates[0] - singleStepOffset * %f).r * %f;\n", optimizedOffset, optimizedWeight]; + } + } + + // Footer + [shaderString appendString:@"\ + gl_FragColor = vec4(sum, sum, sum, 1.0);\n\ + }\n"]; + + free(standardGaussianWeights); + return shaderString; +} + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSketchFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSketchFilter.h new file mode 100755 index 00000000..598145ae --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSketchFilter.h @@ -0,0 +1,11 @@ +#import "GPUImageSobelEdgeDetectionFilter.h" + +/** Converts video to look like a sketch. + + This is just the Sobel edge detection filter with the colors inverted. 
+ */ +@interface GPUImageSketchFilter : GPUImageSobelEdgeDetectionFilter +{ +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSketchFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSketchFilter.m new file mode 100755 index 00000000..3cda2203 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSketchFilter.m @@ -0,0 +1,98 @@ +#import "GPUImageSketchFilter.h" + +@implementation GPUImageSketchFilter + +// Invert the colorspace for a sketch +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageSketchFragmentShaderString = SHADER_STRING +( + precision mediump float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform float edgeStrength; + + uniform sampler2D inputImageTexture; + + void main() + { + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity; + float v = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + 
topRightIntensity; + + float mag = 1.0 - (length(vec2(h, v)) * edgeStrength); + + gl_FragColor = vec4(vec3(mag), 1.0); + } +); +#else +NSString *const kGPUImageSketchFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform float edgeStrength; + + uniform sampler2D inputImageTexture; + + void main() + { + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity; + float v = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity; + + float mag = 1.0 - (length(vec2(h, v)) * edgeStrength); + + gl_FragColor = vec4(vec3(mag), 1.0); + } +); +#endif + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithFragmentShaderFromString:kGPUImageSketchFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git 
a/LFLiveKit/Vendor/GPUImage/GPUImageSkinToneFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSkinToneFilter.h new file mode 100644 index 00000000..0dd6f866 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSkinToneFilter.h @@ -0,0 +1,47 @@ +// +// GPUImageSkinToneFilter.h +// +// +// Created by github.com/r3mus on 8/14/15. +// +// + +#import "GPUImageTwoInputFilter.h" + +typedef NS_ENUM(NSUInteger, GPUImageSkinToneUpperColor) { + GPUImageSkinToneUpperColorGreen, + GPUImageSkinToneUpperColorOrange +}; + +extern NSString *const kGPUImageSkinToneFragmentShaderString; + +@interface GPUImageSkinToneFilter : GPUImageFilter +{ + GLint skinToneAdjustUniform; + GLint skinHueUniform; + GLint skinHueThresholdUniform; + GLint maxHueShiftUniform; + GLint maxSaturationShiftUniform; + GLint upperSkinToneColorUniform; +} + +// The amount of effect to apply, between -1.0 (pink) and +1.0 (orange OR green). Default is 0.0. +@property (nonatomic, readwrite) CGFloat skinToneAdjust; + +// The initial hue of skin to adjust. Default is 0.05 (a common skin red). +@property (nonatomic, readwrite) CGFloat skinHue; + +// The bell curve "breadth" of the skin hue adjustment (i.e. how different from the original skinHue will the modifications effect). +// Default is 40.0 +@property (nonatomic, readwrite) CGFloat skinHueThreshold; + +// The maximum amount of hue shift allowed in the adjustments that affect hue (pink, green). Default = 0.25. +@property (nonatomic, readwrite) CGFloat maxHueShift; + +// The maximum amount of saturation shift allowed in the adjustments that affect saturation (orange). Default = 0.4. 
+@property (nonatomic, readwrite) CGFloat maxSaturationShift; + +// Defines whether the upper range (> 0.0) will change the skin tone to green (hue) or orange (saturation) +@property (nonatomic, readwrite) GPUImageSkinToneUpperColor upperSkinToneColor; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSkinToneFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSkinToneFilter.m new file mode 100644 index 00000000..0db9ac7c --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSkinToneFilter.m @@ -0,0 +1,246 @@ +// +// GPUImageSkinToneFilter.m +// +// +// Created by github.com/r3mus on 8/13/15. +// +// + +#import "GPUImageSkinToneFilter.h" + +@implementation GPUImageSkinToneFilter + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageSkinToneFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + // [-1;1] <=> [pink;orange] + uniform highp float skinToneAdjust; // will make reds more pink + + // Other parameters + uniform mediump float skinHue; + uniform mediump float skinHueThreshold; + uniform mediump float maxHueShift; + uniform mediump float maxSaturationShift; + uniform int upperSkinToneColor; + + // RGB <-> HSV conversion, thanks to http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl + highp vec3 rgb2hsv(highp vec3 c) +{ + highp vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0); + highp vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g)); + highp vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r)); + + highp float d = q.x - min(q.w, q.y); + highp float e = 1.0e-10; + return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x); +} + + // HSV <-> RGB conversion, thanks to http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl + highp vec3 hsv2rgb(highp vec3 c) +{ + highp vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0); + highp vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www); + return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y); 
+} + + // Main + void main () +{ + + // Sample the input pixel + highp vec4 colorRGB = texture2D(inputImageTexture, textureCoordinate); + + // Convert color to HSV, extract hue + highp vec3 colorHSV = rgb2hsv(colorRGB.rgb); + highp float hue = colorHSV.x; + + // check how far from skin hue + highp float dist = hue - skinHue; + if (dist > 0.5) + dist -= 1.0; + if (dist < -0.5) + dist += 1.0; + dist = abs(dist)/0.5; // normalized to [0,1] + + // Apply Gaussian like filter + highp float weight = exp(-dist*dist*skinHueThreshold); + weight = clamp(weight, 0.0, 1.0); + + // Using pink/green, so only adjust hue + if (upperSkinToneColor == 0) { + colorHSV.x += skinToneAdjust * weight * maxHueShift; + // Using pink/orange, so adjust hue < 0 and saturation > 0 + } else if (upperSkinToneColor == 1) { + // We want more orange, so increase saturation + if (skinToneAdjust > 0.0) + colorHSV.y += skinToneAdjust * weight * maxSaturationShift; + // we want more pinks, so decrease hue + else + colorHSV.x += skinToneAdjust * weight * maxHueShift; + } + + // final color + highp vec3 finalColorRGB = hsv2rgb(colorHSV.rgb); + + // display + gl_FragColor = vec4(finalColorRGB, 1.0); +} +); +#else +NSString *const kGPUImageSkinToneFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + // [-1;1] <=> [pink;orange] + uniform float skinToneAdjust; // will make reds more pink + + // Other parameters + uniform float skinHue; + uniform float skinHueThreshold; + uniform float maxHueShift; + uniform float maxSaturationShift; + uniform int upperSkinToneColor; + + // RGB <-> HSV conversion, thanks to http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl + highp vec3 rgb2hsv(highp vec3 c) +{ + vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0); + vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g)); + vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r)); + + float d = q.x - min(q.w, q.y); + float e = 1.0e-10; + return 
vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x); +} + + // HSV <-> RGB conversion, thanks to http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl + highp vec3 hsv2rgb(highp vec3 c) +{ + vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0); + vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www); + return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y); +} + + // Main + void main () +{ + + // Sample the input pixel + vec4 colorRGB = texture2D(inputImageTexture, textureCoordinate); + + // Convert color to HSV, extract hue + vec3 colorHSV = rgb2hsv(colorRGB.rgb); + float hue = colorHSV.x; + + // check how far from skin hue + float dist = hue - skinHue; + if (dist > 0.5) + dist -= 1.0; + if (dist < -0.5) + dist += 1.0; + dist = abs(dist)/0.5; // normalized to [0,1] + + // Apply Gaussian like filter + float weight = exp(-dist*dist*skinHueThreshold); + weight = clamp(weight, 0.0, 1.0); + + // Using pink/green, so only adjust hue + if (upperSkinToneColor == 0) { + colorHSV.x += skinToneAdjust * weight * maxHueShift; + // Using pink/orange, so adjust hue < 0 and saturation > 0 + } else if (upperSkinToneColor == 1) { + // We want more orange, so increase saturation + if (skinToneAdjust > 0.0) + colorHSV.y += skinToneAdjust * weight * maxSaturationShift; + // we want more pinks, so decrease hue + else + colorHSV.x += skinToneAdjust * weight * maxHueShift; + } + + // final color + vec3 finalColorRGB = hsv2rgb(colorHSV.rgb); + + // display + gl_FragColor = vec4(finalColorRGB, 1.0); +} + ); +#endif + +#pragma mark - +#pragma mark Initialization and teardown +@synthesize skinToneAdjust; +@synthesize skinHue; +@synthesize skinHueThreshold; +@synthesize maxHueShift; +@synthesize maxSaturationShift; +@synthesize upperSkinToneColor; + +- (id)init +{ + if(! 
(self = [super initWithFragmentShaderFromString:kGPUImageSkinToneFragmentShaderString]) ) + { + return nil; + } + + skinToneAdjustUniform = [filterProgram uniformIndex:@"skinToneAdjust"]; + skinHueUniform = [filterProgram uniformIndex:@"skinHue"]; + skinHueThresholdUniform = [filterProgram uniformIndex:@"skinHueThreshold"]; + maxHueShiftUniform = [filterProgram uniformIndex:@"maxHueShift"]; + maxSaturationShiftUniform = [filterProgram uniformIndex:@"maxSaturationShift"]; + upperSkinToneColorUniform = [filterProgram uniformIndex:@"upperSkinToneColor"]; + + self.skinHue = 0.05; + self.skinHueThreshold = 40.0; + self.maxHueShift = 0.25; + self.maxSaturationShift = 0.4; + self.upperSkinToneColor = GPUImageSkinToneUpperColorGreen; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setSkinToneAdjust:(CGFloat)newValue +{ + skinToneAdjust = newValue; + [self setFloat:newValue forUniform:skinToneAdjustUniform program:filterProgram]; +} + +- (void)setSkinHue:(CGFloat)newValue +{ + skinHue = newValue; + [self setFloat:newValue forUniform:skinHueUniform program:filterProgram]; +} + +- (void)setSkinHueThreshold:(CGFloat)newValue +{ + skinHueThreshold = newValue; + [self setFloat:newValue forUniform:skinHueThresholdUniform program:filterProgram]; +} + +- (void)setMaxHueShift:(CGFloat)newValue +{ + maxHueShift = newValue; + [self setFloat:newValue forUniform:maxHueShiftUniform program:filterProgram]; +} + +- (void)setMaxSaturationShift:(CGFloat)newValue +{ + maxSaturationShift = newValue; + [self setFloat:newValue forUniform:maxSaturationShiftUniform program:filterProgram]; +} + +- (void)setUpperSkinToneColor:(GPUImageSkinToneUpperColor)newValue +{ + upperSkinToneColor = newValue; + [self setInteger:newValue forUniform:upperSkinToneColorUniform program:filterProgram]; +} + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSmoothToonFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSmoothToonFilter.h new file mode 100755 index 
00000000..f89caac5 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSmoothToonFilter.h @@ -0,0 +1,28 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImageGaussianBlurFilter; +@class GPUImageToonFilter; + +/** This uses a similar process as the GPUImageToonFilter, only it precedes the toon effect with a Gaussian blur to smooth out noise. + */ +@interface GPUImageSmoothToonFilter : GPUImageFilterGroup +{ + GPUImageGaussianBlurFilter *blurFilter; + GPUImageToonFilter *toonFilter; +} + +/// The image width and height factors tweak the appearance of the edges. By default, they match the filter size in pixels +@property(readwrite, nonatomic) CGFloat texelWidth; +/// The image width and height factors tweak the appearance of the edges. By default, they match the filter size in pixels +@property(readwrite, nonatomic) CGFloat texelHeight; + +/// The radius of the underlying Gaussian blur. The default is 2.0. +@property (readwrite, nonatomic) CGFloat blurRadiusInPixels; + +/// The threshold at which to apply the edges, default of 0.2 +@property(readwrite, nonatomic) CGFloat threshold; + +/// The levels of quantization for the posterization of colors within the scene, with a default of 10.0 +@property(readwrite, nonatomic) CGFloat quantizationLevels; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSmoothToonFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSmoothToonFilter.m new file mode 100755 index 00000000..03828f48 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSmoothToonFilter.m @@ -0,0 +1,94 @@ +#import "GPUImageSmoothToonFilter.h" +#import "GPUImageGaussianBlurFilter.h" +#import "GPUImageToonFilter.h" + +@implementation GPUImageSmoothToonFilter + +@synthesize threshold; +@synthesize blurRadiusInPixels; +@synthesize quantizationLevels; +@synthesize texelWidth; +@synthesize texelHeight; + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + // First pass: apply a variable Gaussian blur + blurFilter = [[GPUImageGaussianBlurFilter alloc] 
init]; + [self addFilter:blurFilter]; + + // Second pass: run the Sobel edge detection on this blurred image, along with a posterization effect + toonFilter = [[GPUImageToonFilter alloc] init]; + [self addFilter:toonFilter]; + + // Texture location 0 needs to be the sharp image for both the blur and the second stage processing + [blurFilter addTarget:toonFilter]; + + self.initialFilters = [NSArray arrayWithObject:blurFilter]; + self.terminalFilter = toonFilter; + + self.blurRadiusInPixels = 2.0; + self.threshold = 0.2; + self.quantizationLevels = 10.0; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setBlurRadiusInPixels:(CGFloat)newValue; +{ + blurFilter.blurRadiusInPixels = newValue; +} + +- (CGFloat)blurRadiusInPixels; +{ + return blurFilter.blurRadiusInPixels; +} + +- (void)setTexelWidth:(CGFloat)newValue; +{ + toonFilter.texelWidth = newValue; +} + +- (CGFloat)texelWidth; +{ + return toonFilter.texelWidth; +} + +- (void)setTexelHeight:(CGFloat)newValue; +{ + toonFilter.texelHeight = newValue; +} + +- (CGFloat)texelHeight; +{ + return toonFilter.texelHeight; +} + +- (void)setThreshold:(CGFloat)newValue; +{ + toonFilter.threshold = newValue; +} + +- (CGFloat)threshold; +{ + return toonFilter.threshold; +} + +- (void)setQuantizationLevels:(CGFloat)newValue; +{ + toonFilter.quantizationLevels = newValue; +} + +- (CGFloat)quantizationLevels; +{ + return toonFilter.quantizationLevels; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSobelEdgeDetectionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSobelEdgeDetectionFilter.h new file mode 100755 index 00000000..d6b2c13a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSobelEdgeDetectionFilter.h @@ -0,0 +1,16 @@ +#import "GPUImageTwoPassFilter.h" + +@interface GPUImageSobelEdgeDetectionFilter : GPUImageTwoPassFilter +{ + GLint texelWidthUniform, texelHeightUniform, edgeStrengthUniform; + BOOL hasOverriddenImageSizeFactor; +} + +// The texel width and height factors tweak the 
appearance of the edges. By default, they match the inverse of the filter size in pixels +@property(readwrite, nonatomic) CGFloat texelWidth; +@property(readwrite, nonatomic) CGFloat texelHeight; + +// The filter strength property affects the dynamic range of the filter. High values can make edges more visible, but can lead to saturation. Default of 1.0. +@property(readwrite, nonatomic) CGFloat edgeStrength; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSobelEdgeDetectionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSobelEdgeDetectionFilter.m new file mode 100755 index 00000000..e193f027 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSobelEdgeDetectionFilter.m @@ -0,0 +1,188 @@ +#import "GPUImageSobelEdgeDetectionFilter.h" +#import "GPUImageGrayscaleFilter.h" +#import "GPUImage3x3ConvolutionFilter.h" + +// Code from "Graphics Shaders: Theory and Practice" by M. Bailey and S. Cunningham +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageSobelEdgeDetectionFragmentShaderString = SHADER_STRING +( + precision mediump float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float edgeStrength; + + void main() + { + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = 
texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity; + float v = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity; + + float mag = length(vec2(h, v)) * edgeStrength; + + gl_FragColor = vec4(vec3(mag), 1.0); + } +); +#else +NSString *const kGPUImageSobelEdgeDetectionFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float edgeStrength; + + void main() + { + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity; + float v = -bottomLeftIntensity - 2.0 
* leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity; + + float mag = length(vec2(h, v)) * edgeStrength; + + gl_FragColor = vec4(vec3(mag), 1.0); + } +); +#endif + +@implementation GPUImageSobelEdgeDetectionFilter + +@synthesize texelWidth = _texelWidth; +@synthesize texelHeight = _texelHeight; +@synthesize edgeStrength = _edgeStrength; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithFragmentShaderFromString:kGPUImageSobelEdgeDetectionFragmentShaderString])) + { + return nil; + } + + return self; +} + +- (id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +{ + // Do a luminance pass first to reduce the calculations performed at each fragment in the edge detection phase + + if (!(self = [super initWithFirstStageVertexShaderFromString:kGPUImageVertexShaderString firstStageFragmentShaderFromString:kGPUImageLuminanceFragmentShaderString secondStageVertexShaderFromString:kGPUImageNearbyTexelSamplingVertexShaderString secondStageFragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + hasOverriddenImageSizeFactor = NO; + + texelWidthUniform = [secondFilterProgram uniformIndex:@"texelWidth"]; + texelHeightUniform = [secondFilterProgram uniformIndex:@"texelHeight"]; + edgeStrengthUniform = [secondFilterProgram uniformIndex:@"edgeStrength"]; + + self.edgeStrength = 1.0; + return self; +} + +- (void)setupFilterForSize:(CGSize)filterFrameSize; +{ + if (!hasOverriddenImageSizeFactor) + { + _texelWidth = 1.0 / filterFrameSize.width; + _texelHeight = 1.0 / filterFrameSize.height; + + runSynchronouslyOnVideoProcessingQueue(^{ + GLProgram *previousProgram = [GPUImageContext sharedImageProcessingContext].currentShaderProgram; + [GPUImageContext setActiveShaderProgram:secondFilterProgram]; + glUniform1f(texelWidthUniform, _texelWidth); + glUniform1f(texelHeightUniform, _texelHeight); + [GPUImageContext 
setActiveShaderProgram:previousProgram]; + }); + } +} + +- (void)setUniformsForProgramAtIndex:(NSUInteger)programIndex; +{ + [super setUniformsForProgramAtIndex:programIndex]; + + if (programIndex == 1) + { + glUniform1f(texelWidthUniform, _texelWidth); + glUniform1f(texelHeightUniform, _texelHeight); + } +} + +- (BOOL)wantsMonochromeInput; +{ +// return YES; + return NO; +} + +- (BOOL)providesMonochromeOutput; +{ +// return YES; + return NO; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setTexelWidth:(CGFloat)newValue; +{ + hasOverriddenImageSizeFactor = YES; + _texelWidth = newValue; + + [self setFloat:_texelWidth forUniform:texelWidthUniform program:secondFilterProgram]; +} + +- (void)setTexelHeight:(CGFloat)newValue; +{ + hasOverriddenImageSizeFactor = YES; + _texelHeight = newValue; + + [self setFloat:_texelHeight forUniform:texelHeightUniform program:secondFilterProgram]; +} + +- (void)setEdgeStrength:(CGFloat)newValue; +{ + _edgeStrength = newValue; + + [self setFloat:_edgeStrength forUniform:edgeStrengthUniform program:secondFilterProgram]; +} + + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSoftEleganceFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSoftEleganceFilter.h new file mode 100755 index 00000000..596e1567 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSoftEleganceFilter.h @@ -0,0 +1,19 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImagePicture; + +/** A photo filter based on Soft Elegance Photoshop action + http://h-d-stock.deviantart.com/art/H-D-A-soft-elegance-70107603 + */ + +// Note: If you want to use this effect you have to add +// lookup_soft_elegance_1.png and lookup_soft_elegance_2.png +// from Resources folder to your application bundle. 
+ +@interface GPUImageSoftEleganceFilter : GPUImageFilterGroup +{ + GPUImagePicture *lookupImageSource1; + GPUImagePicture *lookupImageSource2; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSoftEleganceFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSoftEleganceFilter.m new file mode 100755 index 00000000..e1d4e02d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSoftEleganceFilter.m @@ -0,0 +1,62 @@ +#import "GPUImageSoftEleganceFilter.h" +#import "GPUImagePicture.h" +#import "GPUImageLookupFilter.h" +#import "GPUImageGaussianBlurFilter.h" +#import "GPUImageAlphaBlendFilter.h" + +@implementation GPUImageSoftEleganceFilter + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + UIImage *image1 = [UIImage imageNamed:@"lookup_soft_elegance_1.png"]; + UIImage *image2 = [UIImage imageNamed:@"lookup_soft_elegance_2.png"]; +#else + NSImage *image1 = [NSImage imageNamed:@"lookup_soft_elegance_1.png"]; + NSImage *image2 = [NSImage imageNamed:@"lookup_soft_elegance_2.png"]; +#endif + + NSAssert(image1 && image2, + @"To use GPUImageSoftEleganceFilter you need to add lookup_soft_elegance_1.png and lookup_soft_elegance_2.png from GPUImage/framework/Resources to your application bundle."); + + lookupImageSource1 = [[GPUImagePicture alloc] initWithImage:image1]; + GPUImageLookupFilter *lookupFilter1 = [[GPUImageLookupFilter alloc] init]; + [self addFilter:lookupFilter1]; + + [lookupImageSource1 addTarget:lookupFilter1 atTextureLocation:1]; + [lookupImageSource1 processImage]; + + GPUImageGaussianBlurFilter *gaussianBlur = [[GPUImageGaussianBlurFilter alloc] init]; + gaussianBlur.blurRadiusInPixels = 10.0; + [lookupFilter1 addTarget:gaussianBlur]; + [self addFilter:gaussianBlur]; + + GPUImageAlphaBlendFilter *alphaBlend = [[GPUImageAlphaBlendFilter alloc] init]; + alphaBlend.mix = 0.14; + [lookupFilter1 addTarget:alphaBlend]; + [gaussianBlur addTarget:alphaBlend]; + [self addFilter:alphaBlend]; + + 
lookupImageSource2 = [[GPUImagePicture alloc] initWithImage:image2]; + + GPUImageLookupFilter *lookupFilter2 = [[GPUImageLookupFilter alloc] init]; + [alphaBlend addTarget:lookupFilter2]; + [lookupImageSource2 addTarget:lookupFilter2]; + [lookupImageSource2 processImage]; + [self addFilter:lookupFilter2]; + + self.initialFilters = [NSArray arrayWithObjects:lookupFilter1, nil]; + self.terminalFilter = lookupFilter2; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSoftLightBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSoftLightBlendFilter.h new file mode 100755 index 00000000..13fc877c --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSoftLightBlendFilter.h @@ -0,0 +1,7 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageSoftLightBlendFilter : GPUImageTwoInputFilter +{ +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSoftLightBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSoftLightBlendFilter.m new file mode 100755 index 00000000..368bce0a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSoftLightBlendFilter.m @@ -0,0 +1,54 @@ +#import "GPUImageSoftLightBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageSoftLightBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + mediump vec4 base = texture2D(inputImageTexture, textureCoordinate); + mediump vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); + + lowp float alphaDivisor = base.a + step(base.a, 0.0); // Protect against a divide-by-zero blacking out things in the output + gl_FragColor = base * (overlay.a * (base / alphaDivisor) + (2.0 * overlay * (1.0 - (base / alphaDivisor)))) + overlay * (1.0 - base.a) + base * (1.0 - overlay.a); + } +); +#else +NSString *const 
kGPUImageSoftLightBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 base = texture2D(inputImageTexture, textureCoordinate); + vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); + + float alphaDivisor = base.a + step(base.a, 0.0); // Protect against a divide-by-zero blacking out things in the output + gl_FragColor = base * (overlay.a * (base / alphaDivisor) + (2.0 * overlay * (1.0 - (base / alphaDivisor)))) + overlay * (1.0 - base.a) + base * (1.0 - overlay.a); + } +); +#endif + +@implementation GPUImageSoftLightBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageSoftLightBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSolarizeFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSolarizeFilter.h new file mode 100644 index 00000000..ba01c15a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSolarizeFilter.h @@ -0,0 +1,14 @@ +#import "GPUImageFilter.h" + +/** Pixels with a luminance above the threshold will invert their color + */ +@interface GPUImageSolarizeFilter : GPUImageFilter +{ + GLint thresholdUniform; +} + +/** Anything above this luminance will be inverted, and anything below normal. 
Ranges from 0.0 to 1.0, with 0.5 as the default + */ +@property(readwrite, nonatomic) CGFloat threshold; + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSolarizeFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSolarizeFilter.m new file mode 100644 index 00000000..616cb5b2 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSolarizeFilter.m @@ -0,0 +1,76 @@ +#import "GPUImageSolarizeFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageSolarizeFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform highp float threshold; + + const highp vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + highp float luminance = dot(textureColor.rgb, W); + highp float thresholdResult = step(luminance, threshold); + highp vec3 finalColor = abs(thresholdResult - textureColor.rgb); + + gl_FragColor = vec4(finalColor, textureColor.w); + } +); +#else +NSString *const kGPUImageSolarizeFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float threshold; + + const vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + float luminance = dot(textureColor.rgb, W); + float thresholdResult = step(luminance, threshold); + vec3 finalColor = abs(thresholdResult - textureColor.rgb); + + gl_FragColor = vec4(vec3(finalColor), textureColor.w); + } +); +#endif + +@implementation GPUImageSolarizeFilter; + +@synthesize threshold = _threshold; + +#pragma mark - +#pragma mark Initialization + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageSolarizeFragmentShaderString])) + { + return nil; + } + + thresholdUniform = [filterProgram uniformIndex:@"threshold"]; + self.threshold = 0.5; + + return self; +} + 
+#pragma mark - +#pragma mark Accessors + +- (void)setThreshold:(CGFloat)newValue; +{ + _threshold = newValue; + + [self setFloat:_threshold forUniform:thresholdUniform program:filterProgram]; +} + + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSolidColorGenerator.h b/LFLiveKit/Vendor/GPUImage/GPUImageSolidColorGenerator.h new file mode 100644 index 00000000..58b1383e --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSolidColorGenerator.h @@ -0,0 +1,19 @@ +#import "GPUImageFilter.h" + +// This outputs an image with a constant color. You need to use -forceProcessingAtSize: in order to set the output image +// dimensions, or this won't work correctly + + +@interface GPUImageSolidColorGenerator : GPUImageFilter +{ + GLint colorUniform; + GLint useExistingAlphaUniform; +} + +// This color dictates what the output image will be filled with +@property(readwrite, nonatomic) GPUVector4 color; +@property(readwrite, nonatomic, assign) BOOL useExistingAlpha; // whether to use the alpha of the existing image or not, default is NO + +- (void)setColorRed:(CGFloat)redComponent green:(CGFloat)greenComponent blue:(CGFloat)blueComponent alpha:(CGFloat)alphaComponent; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSolidColorGenerator.m b/LFLiveKit/Vendor/GPUImage/GPUImageSolidColorGenerator.m new file mode 100644 index 00000000..9b555ce0 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSolidColorGenerator.m @@ -0,0 +1,123 @@ +#import "GPUImageSolidColorGenerator.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUSolidColorFragmentShaderString = SHADER_STRING +( + precision lowp float; + + varying highp vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform vec4 color; + uniform float useExistingAlpha; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + gl_FragColor = vec4(color.rgb, max(textureColor.a, 1.0 - useExistingAlpha)); + } + ); +#else 
+NSString *const kGPUSolidColorFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + uniform sampler2D inputImageTexture; + uniform vec4 color; + uniform float useExistingAlpha; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + gl_FragColor = vec4(color.rgb, max(textureColor.a, 1.0 - useExistingAlpha)); + } + ); +#endif + +@implementation GPUImageSolidColorGenerator + +@synthesize color = _color; + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUSolidColorFragmentShaderString])) + { + return nil; + } + + colorUniform = [filterProgram uniformIndex:@"color"]; + useExistingAlphaUniform = [filterProgram uniformIndex:@"useExistingAlpha"]; + + _color = (GPUVector4){0.0f, 0.0f, 0.5f, 1.0f}; + self.useExistingAlpha = NO; + + return self; +} + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + if (self.preventRendering) + { + return; + } + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:filterProgram]; + + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + + glClearColor(_color.one, _color.two, _color.three, _color.four); + glClear(GL_COLOR_BUFFER_BIT); + }); +} + + +#pragma mark - +#pragma mark Accessors + +- (void)forceProcessingAtSize:(CGSize)frameSize; +{ + [super forceProcessingAtSize:frameSize]; + + if (!CGSizeEqualToSize(inputTextureSize, CGSizeZero)) + { + [self newFrameReadyAtTime:kCMTimeIndefinite atIndex:0]; + } +} + +- (void)addTarget:(id)newTarget atTextureLocation:(NSInteger)textureLocation; +{ + [super addTarget:newTarget atTextureLocation:textureLocation]; + + if (!CGSizeEqualToSize(inputTextureSize, CGSizeZero)) + { + [newTarget setInputSize:inputTextureSize atIndex:textureLocation]; + [newTarget 
newFrameReadyAtTime:kCMTimeIndefinite atIndex:textureLocation]; + } +} + +- (void)setColor:(GPUVector4)newValue; +{ + [self setColorRed:newValue.one green:newValue.two blue:newValue.three alpha:newValue.four]; +} + +- (void)setColorRed:(CGFloat)redComponent green:(CGFloat)greenComponent blue:(CGFloat)blueComponent alpha:(CGFloat)alphaComponent; +{ + _color.one = (GLfloat)redComponent; + _color.two = (GLfloat)greenComponent; + _color.three = (GLfloat)blueComponent; + _color.four = (GLfloat)alphaComponent; + +// [self setVec4:_color forUniform:colorUniform program:filterProgram]; + runAsynchronouslyOnVideoProcessingQueue(^{ + [self newFrameReadyAtTime:kCMTimeIndefinite atIndex:0]; + }); +} + +- (void)setUseExistingAlpha:(BOOL)useExistingAlpha; +{ + _useExistingAlpha = useExistingAlpha; + + [self setInteger:(useExistingAlpha ? 1 : 0) forUniform:useExistingAlphaUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSourceOverBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSourceOverBlendFilter.h new file mode 100644 index 00000000..29e30635 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSourceOverBlendFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageSourceOverBlendFilter : GPUImageTwoInputFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSourceOverBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSourceOverBlendFilter.m new file mode 100644 index 00000000..432adc4b --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSourceOverBlendFilter.m @@ -0,0 +1,51 @@ +#import "GPUImageSourceOverBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageSourceOverBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, 
textureCoordinate); + lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate); + + gl_FragColor = mix(textureColor, textureColor2, textureColor2.a); + } +); +#else +NSString *const kGPUImageSourceOverBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate); + + gl_FragColor = mix(textureColor, textureColor2, textureColor2.a); + } + ); +#endif + +@implementation GPUImageSourceOverBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageSourceOverBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSphereRefractionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSphereRefractionFilter.h new file mode 100644 index 00000000..cbbd2afa --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSphereRefractionFilter.h @@ -0,0 +1,15 @@ +#import "GPUImageFilter.h" + +@interface GPUImageSphereRefractionFilter : GPUImageFilter +{ + GLint radiusUniform, centerUniform, aspectRatioUniform, refractiveIndexUniform; +} + +/// The center about which to apply the distortion, with a default of (0.5, 0.5) +@property(readwrite, nonatomic) CGPoint center; +/// The radius of the distortion, ranging from 0.0 to 1.0, with a default of 0.25 +@property(readwrite, nonatomic) CGFloat radius; +/// The index of refraction for the sphere, with a default of 0.71 +@property(readwrite, nonatomic) CGFloat refractiveIndex; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSphereRefractionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSphereRefractionFilter.m new file mode 100644 index 00000000..b0f54042 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSphereRefractionFilter.m @@ 
-0,0 +1,179 @@ +#import "GPUImageSphereRefractionFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageSphereRefractionFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp vec2 center; + uniform highp float radius; + uniform highp float aspectRatio; + uniform highp float refractiveIndex; + + void main() + { + highp vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + highp float distanceFromCenter = distance(center, textureCoordinateToUse); + lowp float checkForPresenceWithinSphere = step(distanceFromCenter, radius); + + distanceFromCenter = distanceFromCenter / radius; + + highp float normalizedDepth = radius * sqrt(1.0 - distanceFromCenter * distanceFromCenter); + highp vec3 sphereNormal = normalize(vec3(textureCoordinateToUse - center, normalizedDepth)); + + highp vec3 refractedVector = refract(vec3(0.0, 0.0, -1.0), sphereNormal, refractiveIndex); + + gl_FragColor = texture2D(inputImageTexture, (refractedVector.xy + 1.0) * 0.5) * checkForPresenceWithinSphere; + } +); +#else +NSString *const kGPUImageSphereRefractionFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform vec2 center; + uniform float radius; + uniform float aspectRatio; + uniform float refractiveIndex; + + void main() + { + vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio)); + float distanceFromCenter = distance(center, textureCoordinateToUse); + float checkForPresenceWithinSphere = step(distanceFromCenter, radius); + + distanceFromCenter = distanceFromCenter / radius; + + float normalizedDepth = radius * sqrt(1.0 - distanceFromCenter * distanceFromCenter); + vec3 sphereNormal = normalize(vec3(textureCoordinateToUse - center, normalizedDepth)); + + vec3 refractedVector = 
refract(vec3(0.0, 0.0, -1.0), sphereNormal, refractiveIndex); + + gl_FragColor = texture2D(inputImageTexture, (refractedVector.xy + 1.0) * 0.5) * checkForPresenceWithinSphere; + } +); +#endif + +@interface GPUImageSphereRefractionFilter () + +- (void)adjustAspectRatio; + +@property (readwrite, nonatomic) CGFloat aspectRatio; + +@end + + +@implementation GPUImageSphereRefractionFilter + +@synthesize center = _center; +@synthesize radius = _radius; +@synthesize aspectRatio = _aspectRatio; +@synthesize refractiveIndex = _refractiveIndex; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithFragmentShaderFromString:kGPUImageSphereRefractionFragmentShaderString])) + { + return nil; + } + + return self; +} + +- (id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [super initWithFragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + radiusUniform = [filterProgram uniformIndex:@"radius"]; + aspectRatioUniform = [filterProgram uniformIndex:@"aspectRatio"]; + centerUniform = [filterProgram uniformIndex:@"center"]; + refractiveIndexUniform = [filterProgram uniformIndex:@"refractiveIndex"]; + + self.radius = 0.25; + self.center = CGPointMake(0.5, 0.5); + self.refractiveIndex = 0.71; + + [self setBackgroundColorRed:0.0 green:0.0 blue:0.0 alpha:0.0]; + + return self; +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + CGSize oldInputSize = inputTextureSize; + [super setInputSize:newSize atIndex:textureIndex]; + + if (!CGSizeEqualToSize(oldInputSize, inputTextureSize) && (!CGSizeEqualToSize(newSize, CGSizeZero)) ) + { + [self adjustAspectRatio]; + } +} + +#pragma mark - +#pragma mark Accessors + +- (void)adjustAspectRatio; +{ + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + [self setAspectRatio:(inputTextureSize.width / inputTextureSize.height)]; + } + else + { + [self setAspectRatio:(inputTextureSize.height / 
inputTextureSize.width)]; + } +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + [super setInputRotation:newInputRotation atIndex:textureIndex]; + [self setCenter:self.center]; + [self adjustAspectRatio]; +} + +- (void)forceProcessingAtSize:(CGSize)frameSize; +{ + [super forceProcessingAtSize:frameSize]; + [self adjustAspectRatio]; +} + +- (void)setRadius:(CGFloat)newValue; +{ + _radius = newValue; + + [self setFloat:_radius forUniform:radiusUniform program:filterProgram]; +} + +- (void)setCenter:(CGPoint)newValue; +{ + _center = newValue; + + CGPoint rotatedPoint = [self rotatedPoint:_center forRotation:inputRotation]; + [self setPoint:rotatedPoint forUniform:centerUniform program:filterProgram]; +} + +- (void)setAspectRatio:(CGFloat)newValue; +{ + _aspectRatio = newValue; + + [self setFloat:_aspectRatio forUniform:aspectRatioUniform program:filterProgram]; +} + +- (void)setRefractiveIndex:(CGFloat)newValue; +{ + _refractiveIndex = newValue; + + [self setFloat:_refractiveIndex forUniform:refractiveIndexUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageStillCamera.h b/LFLiveKit/Vendor/GPUImage/GPUImageStillCamera.h new file mode 100755 index 00000000..e4db59b2 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageStillCamera.h @@ -0,0 +1,24 @@ +#import "GPUImageVideoCamera.h" + +void stillImageDataReleaseCallback(void *releaseRefCon, const void *baseAddress); +void GPUImageCreateResizedSampleBuffer(CVPixelBufferRef cameraFrame, CGSize finalSize, CMSampleBufferRef *sampleBuffer); + +@interface GPUImageStillCamera : GPUImageVideoCamera + +/** The JPEG compression quality to use when capturing a photo as a JPEG. 
+ */ +@property CGFloat jpegCompressionQuality; + +// Only reliably set inside the context of the completion handler of one of the capture methods +@property (readonly) NSDictionary *currentCaptureMetadata; + +// Photography controls +- (void)capturePhotoAsSampleBufferWithCompletionHandler:(void (^)(CMSampleBufferRef imageSampleBuffer, NSError *error))block; +- (void)capturePhotoAsImageProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(UIImage *processedImage, NSError *error))block; +- (void)capturePhotoAsImageProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withOrientation:(UIImageOrientation)orientation withCompletionHandler:(void (^)(UIImage *processedImage, NSError *error))block; +- (void)capturePhotoAsJPEGProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(NSData *processedJPEG, NSError *error))block; +- (void)capturePhotoAsJPEGProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withOrientation:(UIImageOrientation)orientation withCompletionHandler:(void (^)(NSData *processedJPEG, NSError *error))block; +- (void)capturePhotoAsPNGProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(NSData *processedPNG, NSError *error))block; +- (void)capturePhotoAsPNGProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withOrientation:(UIImageOrientation)orientation withCompletionHandler:(void (^)(NSData *processedPNG, NSError *error))block; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageStillCamera.m b/LFLiveKit/Vendor/GPUImage/GPUImageStillCamera.m new file mode 100755 index 00000000..447f79f6 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageStillCamera.m @@ -0,0 +1,338 @@ +// 2448x3264 pixel image = 31,961,088 bytes for uncompressed RGBA + +#import "GPUImageStillCamera.h" + +void stillImageDataReleaseCallback(void *releaseRefCon, const void *baseAddress) +{ + free((void *)baseAddress); +} + +void 
GPUImageCreateResizedSampleBuffer(CVPixelBufferRef cameraFrame, CGSize finalSize, CMSampleBufferRef *sampleBuffer) +{ + // CVPixelBufferCreateWithPlanarBytes for YUV input + + CGSize originalSize = CGSizeMake(CVPixelBufferGetWidth(cameraFrame), CVPixelBufferGetHeight(cameraFrame)); + + CVPixelBufferLockBaseAddress(cameraFrame, 0); + GLubyte *sourceImageBytes = CVPixelBufferGetBaseAddress(cameraFrame); + CGDataProviderRef dataProvider = CGDataProviderCreateWithData(NULL, sourceImageBytes, CVPixelBufferGetBytesPerRow(cameraFrame) * originalSize.height, NULL); + CGColorSpaceRef genericRGBColorspace = CGColorSpaceCreateDeviceRGB(); + CGImageRef cgImageFromBytes = CGImageCreate((int)originalSize.width, (int)originalSize.height, 8, 32, CVPixelBufferGetBytesPerRow(cameraFrame), genericRGBColorspace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst, dataProvider, NULL, NO, kCGRenderingIntentDefault); + + GLubyte *imageData = (GLubyte *) calloc(1, (int)finalSize.width * (int)finalSize.height * 4); + + CGContextRef imageContext = CGBitmapContextCreate(imageData, (int)finalSize.width, (int)finalSize.height, 8, (int)finalSize.width * 4, genericRGBColorspace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst); + CGContextDrawImage(imageContext, CGRectMake(0.0, 0.0, finalSize.width, finalSize.height), cgImageFromBytes); + CGImageRelease(cgImageFromBytes); + CGContextRelease(imageContext); + CGColorSpaceRelease(genericRGBColorspace); + CGDataProviderRelease(dataProvider); + + CVPixelBufferRef pixel_buffer = NULL; + CVPixelBufferCreateWithBytes(kCFAllocatorDefault, finalSize.width, finalSize.height, kCVPixelFormatType_32BGRA, imageData, finalSize.width * 4, stillImageDataReleaseCallback, NULL, NULL, &pixel_buffer); + CMVideoFormatDescriptionRef videoInfo = NULL; + CMVideoFormatDescriptionCreateForImageBuffer(NULL, pixel_buffer, &videoInfo); + + CMTime frameTime = CMTimeMake(1, 30); + CMSampleTimingInfo timing = {frameTime, frameTime, kCMTimeInvalid}; + + 
CMSampleBufferCreateForImageBuffer(kCFAllocatorDefault, pixel_buffer, YES, NULL, NULL, videoInfo, &timing, sampleBuffer); + CVPixelBufferUnlockBaseAddress(cameraFrame, 0); + CFRelease(videoInfo); + CVPixelBufferRelease(pixel_buffer); +} + +@interface GPUImageStillCamera () +{ + AVCaptureStillImageOutput *photoOutput; +} + +// Methods calling this are responsible for calling dispatch_semaphore_signal(frameRenderingSemaphore) somewhere inside the block +- (void)capturePhotoProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withImageOnGPUHandler:(void (^)(NSError *error))block; + +@end + +@implementation GPUImageStillCamera { + BOOL requiresFrontCameraTextureCacheCorruptionWorkaround; +} + +@synthesize currentCaptureMetadata = _currentCaptureMetadata; +@synthesize jpegCompressionQuality = _jpegCompressionQuality; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithSessionPreset:(NSString *)sessionPreset cameraPosition:(AVCaptureDevicePosition)cameraPosition; +{ + if (!(self = [super initWithSessionPreset:sessionPreset cameraPosition:cameraPosition])) + { + return nil; + } + + /* Detect iOS version < 6 which require a texture cache corruption workaround */ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + requiresFrontCameraTextureCacheCorruptionWorkaround = [[[UIDevice currentDevice] systemVersion] compare:@"6.0" options:NSNumericSearch] == NSOrderedAscending; +#pragma clang diagnostic pop + + [self.captureSession beginConfiguration]; + + photoOutput = [[AVCaptureStillImageOutput alloc] init]; + + // Having a still photo input set to BGRA and video to YUV doesn't work well, so since I don't have YUV resizing for iPhone 4 yet, kick back to BGRA for that device +// if (captureAsYUV && [GPUImageContext supportsFastTextureUpload]) + if (captureAsYUV && [GPUImageContext deviceSupportsRedTextures]) + { + BOOL supportsFullYUVRange = NO; + NSArray *supportedPixelFormats = 
photoOutput.availableImageDataCVPixelFormatTypes; + for (NSNumber *currentPixelFormat in supportedPixelFormats) + { + if ([currentPixelFormat intValue] == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) + { + supportsFullYUVRange = YES; + } + } + + if (supportsFullYUVRange) + { + [photoOutput setOutputSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange] forKey:(id)kCVPixelBufferPixelFormatTypeKey]]; + } + else + { + [photoOutput setOutputSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange] forKey:(id)kCVPixelBufferPixelFormatTypeKey]]; + } + } + else + { + captureAsYUV = NO; + [photoOutput setOutputSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey]]; + [videoOutput setVideoSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey]]; + } + + [self.captureSession addOutput:photoOutput]; + + [self.captureSession commitConfiguration]; + + self.jpegCompressionQuality = 0.8; + + return self; +} + +- (id)init; +{ + if (!(self = [self initWithSessionPreset:AVCaptureSessionPresetPhoto cameraPosition:AVCaptureDevicePositionBack])) + { + return nil; + } + return self; +} + +- (void)removeInputsAndOutputs; +{ + [self.captureSession removeOutput:photoOutput]; + [super removeInputsAndOutputs]; +} + +#pragma mark - +#pragma mark Photography controls + +- (void)capturePhotoAsSampleBufferWithCompletionHandler:(void (^)(CMSampleBufferRef imageSampleBuffer, NSError *error))block +{ + NSLog(@"If you want to use the method capturePhotoAsSampleBufferWithCompletionHandler:, you must comment out the line in GPUImageStillCamera.m in the method initWithSessionPreset:cameraPosition: which sets the CVPixelBufferPixelFormatTypeKey, as well as uncomment the rest of the method 
capturePhotoAsSampleBufferWithCompletionHandler:. However, if you do this you cannot use any of the photo capture methods to take a photo if you also supply a filter."); + + /*dispatch_semaphore_wait(frameRenderingSemaphore, DISPATCH_TIME_FOREVER); + + [photoOutput captureStillImageAsynchronouslyFromConnection:[[photoOutput connections] objectAtIndex:0] completionHandler:^(CMSampleBufferRef imageSampleBuffer, NSError *error) { + block(imageSampleBuffer, error); + }]; + + dispatch_semaphore_signal(frameRenderingSemaphore); + + */ + + return; +} + +- (void)capturePhotoAsImageProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(UIImage *processedImage, NSError *error))block; +{ + [self capturePhotoProcessedUpToFilter:finalFilterInChain withImageOnGPUHandler:^(NSError *error) { + UIImage *filteredPhoto = nil; + + if(!error){ + filteredPhoto = [finalFilterInChain imageFromCurrentFramebuffer]; + } + dispatch_semaphore_signal(frameRenderingSemaphore); + + block(filteredPhoto, error); + }]; +} + +- (void)capturePhotoAsImageProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withOrientation:(UIImageOrientation)orientation withCompletionHandler:(void (^)(UIImage *processedImage, NSError *error))block { + [self capturePhotoProcessedUpToFilter:finalFilterInChain withImageOnGPUHandler:^(NSError *error) { + UIImage *filteredPhoto = nil; + + if(!error) { + filteredPhoto = [finalFilterInChain imageFromCurrentFramebufferWithOrientation:orientation]; + } + dispatch_semaphore_signal(frameRenderingSemaphore); + + block(filteredPhoto, error); + }]; +} + +- (void)capturePhotoAsJPEGProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(NSData *processedJPEG, NSError *error))block; +{ +// reportAvailableMemoryForGPUImage(@"Before Capture"); + + [self capturePhotoProcessedUpToFilter:finalFilterInChain withImageOnGPUHandler:^(NSError *error) { + NSData *dataForJPEGFile = nil; + + if(!error){ + @autoreleasepool { + 
UIImage *filteredPhoto = [finalFilterInChain imageFromCurrentFramebuffer]; + dispatch_semaphore_signal(frameRenderingSemaphore); +// reportAvailableMemoryForGPUImage(@"After UIImage generation"); + + dataForJPEGFile = UIImageJPEGRepresentation(filteredPhoto,self.jpegCompressionQuality); +// reportAvailableMemoryForGPUImage(@"After JPEG generation"); + } + +// reportAvailableMemoryForGPUImage(@"After autorelease pool"); + }else{ + dispatch_semaphore_signal(frameRenderingSemaphore); + } + + block(dataForJPEGFile, error); + }]; +} + +- (void)capturePhotoAsJPEGProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withOrientation:(UIImageOrientation)orientation withCompletionHandler:(void (^)(NSData *processedImage, NSError *error))block { + [self capturePhotoProcessedUpToFilter:finalFilterInChain withImageOnGPUHandler:^(NSError *error) { + NSData *dataForJPEGFile = nil; + + if(!error) { + @autoreleasepool { + UIImage *filteredPhoto = [finalFilterInChain imageFromCurrentFramebufferWithOrientation:orientation]; + dispatch_semaphore_signal(frameRenderingSemaphore); + + dataForJPEGFile = UIImageJPEGRepresentation(filteredPhoto, self.jpegCompressionQuality); + } + } else { + dispatch_semaphore_signal(frameRenderingSemaphore); + } + + block(dataForJPEGFile, error); + }]; +} + +- (void)capturePhotoAsPNGProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(NSData *processedPNG, NSError *error))block; +{ + + [self capturePhotoProcessedUpToFilter:finalFilterInChain withImageOnGPUHandler:^(NSError *error) { + NSData *dataForPNGFile = nil; + + if(!error){ + @autoreleasepool { + UIImage *filteredPhoto = [finalFilterInChain imageFromCurrentFramebuffer]; + dispatch_semaphore_signal(frameRenderingSemaphore); + dataForPNGFile = UIImagePNGRepresentation(filteredPhoto); + } + }else{ + dispatch_semaphore_signal(frameRenderingSemaphore); + } + + block(dataForPNGFile, error); + }]; + + return; +} + +- 
(void)capturePhotoAsPNGProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withOrientation:(UIImageOrientation)orientation withCompletionHandler:(void (^)(NSData *processedPNG, NSError *error))block; +{ + + [self capturePhotoProcessedUpToFilter:finalFilterInChain withImageOnGPUHandler:^(NSError *error) { + NSData *dataForPNGFile = nil; + + if(!error){ + @autoreleasepool { + UIImage *filteredPhoto = [finalFilterInChain imageFromCurrentFramebufferWithOrientation:orientation]; + dispatch_semaphore_signal(frameRenderingSemaphore); + dataForPNGFile = UIImagePNGRepresentation(filteredPhoto); + } + }else{ + dispatch_semaphore_signal(frameRenderingSemaphore); + } + + block(dataForPNGFile, error); + }]; + + return; +} + +#pragma mark - Private Methods + +- (void)capturePhotoProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withImageOnGPUHandler:(void (^)(NSError *error))block +{ + dispatch_semaphore_wait(frameRenderingSemaphore, DISPATCH_TIME_FOREVER); + + if(photoOutput.isCapturingStillImage){ + block([NSError errorWithDomain:AVFoundationErrorDomain code:AVErrorMaximumStillImageCaptureRequestsExceeded userInfo:nil]); + return; + } + + [photoOutput captureStillImageAsynchronouslyFromConnection:[[photoOutput connections] objectAtIndex:0] completionHandler:^(CMSampleBufferRef imageSampleBuffer, NSError *error) { + if(imageSampleBuffer == NULL){ + block(error); + return; + } + + // For now, resize photos to fix within the max texture size of the GPU + CVImageBufferRef cameraFrame = CMSampleBufferGetImageBuffer(imageSampleBuffer); + + CGSize sizeOfPhoto = CGSizeMake(CVPixelBufferGetWidth(cameraFrame), CVPixelBufferGetHeight(cameraFrame)); + CGSize scaledImageSizeToFitOnGPU = [GPUImageContext sizeThatFitsWithinATextureForSize:sizeOfPhoto]; + if (!CGSizeEqualToSize(sizeOfPhoto, scaledImageSizeToFitOnGPU)) + { + CMSampleBufferRef sampleBuffer = NULL; + + if (CVPixelBufferGetPlaneCount(cameraFrame) > 0) + { + NSAssert(NO, @"Error: no downsampling for YUV input in the 
framework yet"); + } + else + { + GPUImageCreateResizedSampleBuffer(cameraFrame, scaledImageSizeToFitOnGPU, &sampleBuffer); + } + + dispatch_semaphore_signal(frameRenderingSemaphore); + [finalFilterInChain useNextFrameForImageCapture]; + [self captureOutput:photoOutput didOutputSampleBuffer:sampleBuffer fromConnection:[[photoOutput connections] objectAtIndex:0]]; + dispatch_semaphore_wait(frameRenderingSemaphore, DISPATCH_TIME_FOREVER); + if (sampleBuffer != NULL) + CFRelease(sampleBuffer); + } + else + { + // This is a workaround for the corrupt images that are sometimes returned when taking a photo with the front camera and using the iOS 5.0 texture caches + AVCaptureDevicePosition currentCameraPosition = [[videoInput device] position]; + if ( (currentCameraPosition != AVCaptureDevicePositionFront) || (![GPUImageContext supportsFastTextureUpload]) || !requiresFrontCameraTextureCacheCorruptionWorkaround) + { + dispatch_semaphore_signal(frameRenderingSemaphore); + [finalFilterInChain useNextFrameForImageCapture]; + [self captureOutput:photoOutput didOutputSampleBuffer:imageSampleBuffer fromConnection:[[photoOutput connections] objectAtIndex:0]]; + dispatch_semaphore_wait(frameRenderingSemaphore, DISPATCH_TIME_FOREVER); + } + } + + CFDictionaryRef metadata = CMCopyDictionaryOfAttachments(NULL, imageSampleBuffer, kCMAttachmentMode_ShouldPropagate); + _currentCaptureMetadata = (__bridge_transfer NSDictionary *)metadata; + + block(nil); + + _currentCaptureMetadata = nil; + }]; +} + + + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageStretchDistortionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageStretchDistortionFilter.h new file mode 100755 index 00000000..07803095 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageStretchDistortionFilter.h @@ -0,0 +1,13 @@ +#import "GPUImageFilter.h" + +/** Creates a stretch distortion of the image + */ +@interface GPUImageStretchDistortionFilter : GPUImageFilter { + GLint centerUniform; +} + +/** The center about which to 
apply the distortion, with a default of (0.5, 0.5) + */ +@property(readwrite, nonatomic) CGPoint center; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageStretchDistortionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageStretchDistortionFilter.m new file mode 100755 index 00000000..d38cac34 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageStretchDistortionFilter.m @@ -0,0 +1,99 @@ +#import "GPUImageStretchDistortionFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageStretchDistortionFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp vec2 center; + + void main() + { + highp vec2 normCoord = 2.0 * textureCoordinate - 1.0; + highp vec2 normCenter = 2.0 * center - 1.0; + + normCoord -= normCenter; + mediump vec2 s = sign(normCoord); + normCoord = abs(normCoord); + normCoord = 0.5 * normCoord + 0.5 * smoothstep(0.25, 0.5, normCoord) * normCoord; + normCoord = s * normCoord; + + normCoord += normCenter; + + mediump vec2 textureCoordinateToUse = normCoord / 2.0 + 0.5; + + + gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse ); + + } +); +#else +NSString *const kGPUImageStretchDistortionFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform vec2 center; + + void main() + { + vec2 normCoord = 2.0 * textureCoordinate - 1.0; + vec2 normCenter = 2.0 * center - 1.0; + + normCoord -= normCenter; + vec2 s = sign(normCoord); + normCoord = abs(normCoord); + normCoord = 0.5 * normCoord + 0.5 * smoothstep(0.25, 0.5, normCoord) * normCoord; + normCoord = s * normCoord; + + normCoord += normCenter; + + vec2 textureCoordinateToUse = normCoord / 2.0 + 0.5; + + gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse); + } +); +#endif + +@implementation GPUImageStretchDistortionFilter + +@synthesize center = _center; + +#pragma mark - +#pragma mark Initialization 
and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageStretchDistortionFragmentShaderString])) + { + return nil; + } + + centerUniform = [filterProgram uniformIndex:@"center"]; + + self.center = CGPointMake(0.5, 0.5); + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + [super setInputRotation:newInputRotation atIndex:textureIndex]; + [self setCenter:self.center]; +} + +- (void)setCenter:(CGPoint)newValue; +{ + _center = newValue; + + CGPoint rotatedPoint = [self rotatedPoint:_center forRotation:inputRotation]; + [self setPoint:rotatedPoint forUniform:centerUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSubtractBlendFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSubtractBlendFilter.h new file mode 100755 index 00000000..8dee8215 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSubtractBlendFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageSubtractBlendFilter : GPUImageTwoInputFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSubtractBlendFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSubtractBlendFilter.m new file mode 100755 index 00000000..8938baea --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSubtractBlendFilter.m @@ -0,0 +1,52 @@ +#import "GPUImageSubtractBlendFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageSubtractBlendFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(textureColor.rgb - textureColor2.rgb, textureColor.a); + } +); 
+#else +NSString *const kGPUImageSubtractBlendFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); + + gl_FragColor = vec4(textureColor.rgb - textureColor2.rgb, textureColor.a); + } +); +#endif + +@implementation GPUImageSubtractBlendFilter + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageSubtractBlendFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSwirlFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageSwirlFilter.h new file mode 100755 index 00000000..ed7d0122 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSwirlFilter.h @@ -0,0 +1,17 @@ +#import "GPUImageFilter.h" + +/** Creates a swirl distortion on the image + */ +@interface GPUImageSwirlFilter : GPUImageFilter +{ + GLint radiusUniform, centerUniform, angleUniform; +} + +/// The center about which to apply the distortion, with a default of (0.5, 0.5) +@property(readwrite, nonatomic) CGPoint center; +/// The radius of the distortion, ranging from 0.0 to 1.0, with a default of 0.5 +@property(readwrite, nonatomic) CGFloat radius; +/// The amount of distortion to apply, with a minimum of 0.0 and a default of 1.0 +@property(readwrite, nonatomic) CGFloat angle; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageSwirlFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageSwirlFilter.m new file mode 100755 index 00000000..5462bc65 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageSwirlFilter.m @@ -0,0 +1,123 @@ +#import "GPUImageSwirlFilter.h" + +// Adapted from the shader example here: http://www.geeks3d.com/20110428/shader-library-swirl-post-processing-filter-in-glsl/ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 
+NSString *const kGPUImageSwirlFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp vec2 center; + uniform highp float radius; + uniform highp float angle; + + void main() + { + highp vec2 textureCoordinateToUse = textureCoordinate; + highp float dist = distance(center, textureCoordinate); + if (dist < radius) + { + textureCoordinateToUse -= center; + highp float percent = (radius - dist) / radius; + highp float theta = percent * percent * angle * 8.0; + highp float s = sin(theta); + highp float c = cos(theta); + textureCoordinateToUse = vec2(dot(textureCoordinateToUse, vec2(c, -s)), dot(textureCoordinateToUse, vec2(s, c))); + textureCoordinateToUse += center; + } + + gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse ); + + } +); +#else +NSString *const kGPUImageSwirlFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform vec2 center; + uniform float radius; + uniform float angle; + + void main() + { + vec2 textureCoordinateToUse = textureCoordinate; + float dist = distance(center, textureCoordinate); + if (dist < radius) + { + textureCoordinateToUse -= center; + float percent = (radius - dist) / radius; + float theta = percent * percent * angle * 8.0; + float s = sin(theta); + float c = cos(theta); + textureCoordinateToUse = vec2(dot(textureCoordinateToUse, vec2(c, -s)), dot(textureCoordinateToUse, vec2(s, c))); + textureCoordinateToUse += center; + } + + gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse ); + } +); +#endif + +@implementation GPUImageSwirlFilter + +@synthesize center = _center; +@synthesize radius = _radius; +@synthesize angle = _angle; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageSwirlFragmentShaderString])) + { + return nil; + } + + radiusUniform = [filterProgram 
uniformIndex:@"radius"]; + angleUniform = [filterProgram uniformIndex:@"angle"]; + centerUniform = [filterProgram uniformIndex:@"center"]; + + self.radius = 0.5; + self.angle = 1.0; + self.center = CGPointMake(0.5, 0.5); + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + [super setInputRotation:newInputRotation atIndex:textureIndex]; + [self setCenter:self.center]; +} + +- (void)setRadius:(CGFloat)newValue; +{ + _radius = newValue; + + [self setFloat:_radius forUniform:radiusUniform program:filterProgram]; +} + +- (void)setAngle:(CGFloat)newValue; +{ + _angle = newValue; + + [self setFloat:_angle forUniform:angleUniform program:filterProgram]; +} + +- (void)setCenter:(CGPoint)newValue; +{ + _center = newValue; + + CGPoint rotatedPoint = [self rotatedPoint:_center forRotation:inputRotation]; + [self setPoint:rotatedPoint forUniform:centerUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTextureInput.h b/LFLiveKit/Vendor/GPUImage/GPUImageTextureInput.h new file mode 100755 index 00000000..8190305d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTextureInput.h @@ -0,0 +1,14 @@ +#import "GPUImageOutput.h" + +@interface GPUImageTextureInput : GPUImageOutput +{ + CGSize textureSize; +} + +// Initialization and teardown +- (id)initWithTexture:(GLuint)newInputTexture size:(CGSize)newTextureSize; + +// Image rendering +- (void)processTextureWithFrameTime:(CMTime)frameTime; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTextureInput.m b/LFLiveKit/Vendor/GPUImage/GPUImageTextureInput.m new file mode 100755 index 00000000..ad3ca1d1 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTextureInput.m @@ -0,0 +1,46 @@ +#import "GPUImageTextureInput.h" + +@implementation GPUImageTextureInput + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithTexture:(GLuint)newInputTexture 
size:(CGSize)newTextureSize; +{ + if (!(self = [super init])) + { + return nil; + } + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + }); + + textureSize = newTextureSize; + + runSynchronouslyOnVideoProcessingQueue(^{ + outputFramebuffer = [[GPUImageFramebuffer alloc] initWithSize:newTextureSize overriddenTexture:newInputTexture]; + }); + + return self; +} + +#pragma mark - +#pragma mark Image rendering + +- (void)processTextureWithFrameTime:(CMTime)frameTime; +{ + runAsynchronouslyOnVideoProcessingQueue(^{ + for (id currentTarget in targets) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + NSInteger targetTextureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + + [currentTarget setInputSize:textureSize atIndex:targetTextureIndex]; + [currentTarget setInputFramebuffer:outputFramebuffer atIndex:targetTextureIndex]; + [currentTarget newFrameReadyAtTime:frameTime atIndex:targetTextureIndex]; + } + }); +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTextureOutput.h b/LFLiveKit/Vendor/GPUImage/GPUImageTextureOutput.h new file mode 100755 index 00000000..05e1f36e --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTextureOutput.h @@ -0,0 +1,21 @@ +#import +#import "GPUImageContext.h" + +@protocol GPUImageTextureOutputDelegate; + +@interface GPUImageTextureOutput : NSObject +{ + GPUImageFramebuffer *firstInputFramebuffer; +} + +@property(readwrite, unsafe_unretained, nonatomic) id delegate; +@property(readonly) GLuint texture; +@property(nonatomic) BOOL enabled; + +- (void)doneWithTexture; + +@end + +@protocol GPUImageTextureOutputDelegate +- (void)newFrameReadyFromTextureOutput:(GPUImageTextureOutput *)callbackTextureOutput; +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTextureOutput.m b/LFLiveKit/Vendor/GPUImage/GPUImageTextureOutput.m new file mode 100755 index 00000000..1e1f24f2 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTextureOutput.m @@ -0,0 
+1,83 @@ +#import "GPUImageTextureOutput.h" + +@implementation GPUImageTextureOutput + +@synthesize delegate = _delegate; +@synthesize texture = _texture; +@synthesize enabled; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + self.enabled = YES; + + return self; +} + +- (void)doneWithTexture; +{ + [firstInputFramebuffer unlock]; +} + +#pragma mark - +#pragma mark GPUImageInput protocol + +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +{ + [_delegate newFrameReadyFromTextureOutput:self]; +} + +- (NSInteger)nextAvailableTextureIndex; +{ + return 0; +} + +// TODO: Deal with the fact that the texture changes regularly as a result of the caching +- (void)setInputFramebuffer:(GPUImageFramebuffer *)newInputFramebuffer atIndex:(NSInteger)textureIndex; +{ + firstInputFramebuffer = newInputFramebuffer; + [firstInputFramebuffer lock]; + + _texture = [firstInputFramebuffer texture]; +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ +} + +- (CGSize)maximumOutputSize; +{ + return CGSizeZero; +} + +- (void)endProcessing +{ +} + +- (BOOL)shouldIgnoreUpdatesToThisTarget; +{ + return NO; +} + +- (BOOL)wantsMonochromeInput; +{ + return NO; +} + +- (void)setCurrentlyReceivingMonochromeInput:(BOOL)newValue; +{ + +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageThreeInputFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageThreeInputFilter.h new file mode 100644 index 00000000..5ecd53e0 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageThreeInputFilter.h @@ -0,0 +1,21 @@ +#import "GPUImageTwoInputFilter.h" + +extern NSString *const kGPUImageThreeInputTextureVertexShaderString; + +@interface GPUImageThreeInputFilter : GPUImageTwoInputFilter +{ + GPUImageFramebuffer *thirdInputFramebuffer; + + GLint 
filterThirdTextureCoordinateAttribute; + GLint filterInputTextureUniform3; + GPUImageRotationMode inputRotation3; + GLuint filterSourceTexture3; + CMTime thirdFrameTime; + + BOOL hasSetSecondTexture, hasReceivedThirdFrame, thirdFrameWasVideo; + BOOL thirdFrameCheckDisabled; +} + +- (void)disableThirdFrameCheck; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageThreeInputFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageThreeInputFilter.m new file mode 100644 index 00000000..2f4f113f --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageThreeInputFilter.m @@ -0,0 +1,328 @@ +#import "GPUImageThreeInputFilter.h" + + +NSString *const kGPUImageThreeInputTextureVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + attribute vec4 inputTextureCoordinate2; + attribute vec4 inputTextureCoordinate3; + + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + varying vec2 textureCoordinate3; + + void main() + { + gl_Position = position; + textureCoordinate = inputTextureCoordinate.xy; + textureCoordinate2 = inputTextureCoordinate2.xy; + textureCoordinate3 = inputTextureCoordinate3.xy; + } +); + +@implementation GPUImageThreeInputFilter + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [self initWithVertexShaderFromString:kGPUImageThreeInputTextureVertexShaderString fragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + return self; +} + +- (id)initWithVertexShaderFromString:(NSString *)vertexShaderString fragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [super initWithVertexShaderFromString:vertexShaderString fragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + inputRotation3 = kGPUImageNoRotation; + + hasSetSecondTexture = NO; + + hasReceivedThirdFrame = NO; + thirdFrameWasVideo = NO; + thirdFrameCheckDisabled = NO; + + thirdFrameTime = 
kCMTimeInvalid; + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + filterThirdTextureCoordinateAttribute = [filterProgram attributeIndex:@"inputTextureCoordinate3"]; + + filterInputTextureUniform3 = [filterProgram uniformIndex:@"inputImageTexture3"]; // This does assume a name of "inputImageTexture3" for the third input texture in the fragment shader + glEnableVertexAttribArray(filterThirdTextureCoordinateAttribute); + }); + + return self; +} + +- (void)initializeAttributes; +{ + [super initializeAttributes]; + [filterProgram addAttribute:@"inputTextureCoordinate3"]; +} + +- (void)disableThirdFrameCheck; +{ + thirdFrameCheckDisabled = YES; +} + +#pragma mark - +#pragma mark Rendering + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + if (self.preventRendering) + { + [firstInputFramebuffer unlock]; + [secondInputFramebuffer unlock]; + [thirdInputFramebuffer unlock]; + return; + } + + [GPUImageContext setActiveShaderProgram:filterProgram]; + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + if (usingNextFrameForImageCapture) + { + [outputFramebuffer lock]; + } + + [self setUniformsForProgramAtIndex:0]; + + glClearColor(backgroundColorRed, backgroundColorGreen, backgroundColorBlue, backgroundColorAlpha); + glClear(GL_COLOR_BUFFER_BIT); + + glActiveTexture(GL_TEXTURE2); + glBindTexture(GL_TEXTURE_2D, [firstInputFramebuffer texture]); + glUniform1i(filterInputTextureUniform, 2); + + glActiveTexture(GL_TEXTURE3); + glBindTexture(GL_TEXTURE_2D, [secondInputFramebuffer texture]); + glUniform1i(filterInputTextureUniform2, 3); + + glActiveTexture(GL_TEXTURE4); + glBindTexture(GL_TEXTURE_2D, [thirdInputFramebuffer texture]); + glUniform1i(filterInputTextureUniform3, 4); + + 
glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + glVertexAttribPointer(filterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); + glVertexAttribPointer(filterSecondTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, [[self class] textureCoordinatesForRotation:inputRotation2]); + glVertexAttribPointer(filterThirdTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, [[self class] textureCoordinatesForRotation:inputRotation3]); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + [firstInputFramebuffer unlock]; + [secondInputFramebuffer unlock]; + [thirdInputFramebuffer unlock]; + if (usingNextFrameForImageCapture) + { + dispatch_semaphore_signal(imageCaptureSemaphore); + } +} + +#pragma mark - +#pragma mark GPUImageInput + +- (NSInteger)nextAvailableTextureIndex; +{ + if (hasSetSecondTexture) + { + return 2; + } + else if (hasSetFirstTexture) + { + return 1; + } + else + { + return 0; + } +} + +- (void)setInputFramebuffer:(GPUImageFramebuffer *)newInputFramebuffer atIndex:(NSInteger)textureIndex; +{ + if (textureIndex == 0) + { + firstInputFramebuffer = newInputFramebuffer; + hasSetFirstTexture = YES; + [firstInputFramebuffer lock]; + } + else if (textureIndex == 1) + { + secondInputFramebuffer = newInputFramebuffer; + hasSetSecondTexture = YES; + [secondInputFramebuffer lock]; + } + else + { + thirdInputFramebuffer = newInputFramebuffer; + [thirdInputFramebuffer lock]; + } +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + if (textureIndex == 0) + { + [super setInputSize:newSize atIndex:textureIndex]; + + if (CGSizeEqualToSize(newSize, CGSizeZero)) + { + hasSetFirstTexture = NO; + } + } + else if (textureIndex == 1) + { + if (CGSizeEqualToSize(newSize, CGSizeZero)) + { + hasSetSecondTexture = NO; + } + } +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + if (textureIndex == 0) + { + inputRotation = newInputRotation; + } + else if (textureIndex == 1) + { 
+ inputRotation2 = newInputRotation; + } + else + { + inputRotation3 = newInputRotation; + } +} + +- (CGSize)rotatedSize:(CGSize)sizeToRotate forIndex:(NSInteger)textureIndex; +{ + CGSize rotatedSize = sizeToRotate; + + GPUImageRotationMode rotationToCheck; + if (textureIndex == 0) + { + rotationToCheck = inputRotation; + } + else if (textureIndex == 1) + { + rotationToCheck = inputRotation2; + } + else + { + rotationToCheck = inputRotation3; + } + + if (GPUImageRotationSwapsWidthAndHeight(rotationToCheck)) + { + rotatedSize.width = sizeToRotate.height; + rotatedSize.height = sizeToRotate.width; + } + + return rotatedSize; +} + +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +{ + // You can set up infinite update loops, so this helps to short circuit them + if (hasReceivedFirstFrame && hasReceivedSecondFrame && hasReceivedThirdFrame) + { + return; + } + + BOOL updatedMovieFrameOppositeStillImage = NO; + + if (textureIndex == 0) + { + hasReceivedFirstFrame = YES; + firstFrameTime = frameTime; + if (secondFrameCheckDisabled) + { + hasReceivedSecondFrame = YES; + } + if (thirdFrameCheckDisabled) + { + hasReceivedThirdFrame = YES; + } + + if (!CMTIME_IS_INDEFINITE(frameTime)) + { + if CMTIME_IS_INDEFINITE(secondFrameTime) + { + updatedMovieFrameOppositeStillImage = YES; + } + } + } + else if (textureIndex == 1) + { + hasReceivedSecondFrame = YES; + secondFrameTime = frameTime; + if (firstFrameCheckDisabled) + { + hasReceivedFirstFrame = YES; + } + if (thirdFrameCheckDisabled) + { + hasReceivedThirdFrame = YES; + } + + if (!CMTIME_IS_INDEFINITE(frameTime)) + { + if CMTIME_IS_INDEFINITE(firstFrameTime) + { + updatedMovieFrameOppositeStillImage = YES; + } + } + } + else + { + hasReceivedThirdFrame = YES; + thirdFrameTime = frameTime; + if (firstFrameCheckDisabled) + { + hasReceivedFirstFrame = YES; + } + if (secondFrameCheckDisabled) + { + hasReceivedSecondFrame = YES; + } + + if (!CMTIME_IS_INDEFINITE(frameTime)) + { + if 
CMTIME_IS_INDEFINITE(firstFrameTime) + { + updatedMovieFrameOppositeStillImage = YES; + } + } + } + + // || (hasReceivedFirstFrame && secondFrameCheckDisabled) || (hasReceivedSecondFrame && firstFrameCheckDisabled) + if ((hasReceivedFirstFrame && hasReceivedSecondFrame && hasReceivedThirdFrame) || updatedMovieFrameOppositeStillImage) + { + static const GLfloat imageVertices[] = { + -1.0f, -1.0f, + 1.0f, -1.0f, + -1.0f, 1.0f, + 1.0f, 1.0f, + }; + + [self renderToTextureWithVertices:imageVertices textureCoordinates:[[self class] textureCoordinatesForRotation:inputRotation]]; + + [self informTargetsAboutNewFrameAtTime:frameTime]; + + hasReceivedFirstFrame = NO; + hasReceivedSecondFrame = NO; + hasReceivedThirdFrame = NO; + } +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageThresholdEdgeDetectionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageThresholdEdgeDetectionFilter.h new file mode 100755 index 00000000..2036030c --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageThresholdEdgeDetectionFilter.h @@ -0,0 +1,12 @@ +#import "GPUImageSobelEdgeDetectionFilter.h" + +@interface GPUImageThresholdEdgeDetectionFilter : GPUImageSobelEdgeDetectionFilter +{ + GLint thresholdUniform; +} + +/** Any edge above this threshold will be black, and anything below white. 
Ranges from 0.0 to 1.0, with 0.8 as the default + */ +@property(readwrite, nonatomic) CGFloat threshold; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageThresholdEdgeDetectionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageThresholdEdgeDetectionFilter.m new file mode 100755 index 00000000..553c600f --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageThresholdEdgeDetectionFilter.m @@ -0,0 +1,145 @@ +#import "GPUImageThresholdEdgeDetectionFilter.h" + +@implementation GPUImageThresholdEdgeDetectionFilter + +// Invert the colorspace for a sketch +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageThresholdEdgeDetectionFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + uniform lowp float threshold; + + uniform float edgeStrength; + + void main() + { +// float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; +// float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; +// float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; +// float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float centerIntensity = texture2D(inputImageTexture, textureCoordinate).r; +// float h = -topLeftIntensity 
- 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity; +// float v = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity; +// float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + leftIntensity + 2.0 * centerIntensity + rightIntensity; +// float v = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomIntensity + 2.0 * centerIntensity + topIntensity; + float h = (centerIntensity - topIntensity) + (bottomIntensity - centerIntensity); + float v = (centerIntensity - leftIntensity) + (rightIntensity - centerIntensity); +// float h = (centerIntensity - topIntensity); +// float j = (topIntensity - centerIntensity); +// h = max(h,j); +// j = abs(h); +// float v = (centerIntensity - leftIntensity); + + float mag = length(vec2(h, v)) * edgeStrength; + mag = step(threshold, mag); + +// float mag = abs(h); + +// gl_FragColor = vec4(h, h, h, 1.0); +// gl_FragColor = vec4(texture2D(inputImageTexture, textureCoordinate)); +// gl_FragColor = vec4(h, centerIntensity, j, 1.0); + gl_FragColor = vec4(mag, mag, mag, 1.0); + } +); +#else +NSString *const kGPUImageThresholdEdgeDetectionFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float threshold; + + uniform float edgeStrength; + + void main() + { + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = 
texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity; + h = max(0.0, h); + float v = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity; + v = max(0.0, v); + + float mag = length(vec2(h, v)) * edgeStrength; + mag = step(threshold, mag); + + gl_FragColor = vec4(vec3(mag), 1.0); + } +); +#endif + +#pragma mark - +#pragma mark Initialization and teardown + +@synthesize threshold = _threshold; + +- (id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [super initWithFragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + thresholdUniform = [secondFilterProgram uniformIndex:@"threshold"]; + self.threshold = 0.25; + self.edgeStrength = 1.0; + + return self; +} + + +- (id)init; +{ + if (!(self = [self initWithFragmentShaderFromString:kGPUImageThresholdEdgeDetectionFragmentShaderString])) + { + return nil; + } + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setThreshold:(CGFloat)newValue; +{ + _threshold = newValue; + + [self setFloat:_threshold forUniform:thresholdUniform program:secondFilterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageThresholdSketchFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageThresholdSketchFilter.h new file mode 100644 index 00000000..fda58979 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageThresholdSketchFilter.h @@ -0,0 
+1,5 @@ +#import "GPUImageThresholdEdgeDetectionFilter.h" + +@interface GPUImageThresholdSketchFilter : GPUImageThresholdEdgeDetectionFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageThresholdSketchFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageThresholdSketchFilter.m new file mode 100644 index 00000000..d24e9dea --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageThresholdSketchFilter.m @@ -0,0 +1,103 @@ +#import "GPUImageThresholdSketchFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageThresholdSketchFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + uniform lowp float threshold; + uniform float edgeStrength; + + const highp vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity; + float v = 
-bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity; + + float mag = (length(vec2(h, v)) * edgeStrength); + mag = step(threshold, mag); + mag = 1.0 - mag; + + gl_FragColor = vec4(vec3(mag), 1.0); + } +); +#else +NSString *const kGPUImageThresholdSketchFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + uniform float threshold; + uniform float edgeStrength; + + const vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity; + float v = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity; + + float mag = 1.0 - length(vec2(h, v) * edgeStrength); + mag = step(threshold, mag); + + gl_FragColor = vec4(vec3(mag), 1.0); + } +); 
+#endif + +@implementation GPUImageThresholdSketchFilter + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithFragmentShaderFromString:kGPUImageThresholdSketchFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageThresholdedNonMaximumSuppressionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageThresholdedNonMaximumSuppressionFilter.h new file mode 100644 index 00000000..9c6e5d72 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageThresholdedNonMaximumSuppressionFilter.h @@ -0,0 +1,14 @@ +#import "GPUImage3x3TextureSamplingFilter.h" + +@interface GPUImageThresholdedNonMaximumSuppressionFilter : GPUImage3x3TextureSamplingFilter +{ + GLint thresholdUniform; +} + +/** Any local maximum above this threshold will be white, and anything below black. Ranges from 0.0 to 1.0, with 0.8 as the default + */ +@property(readwrite, nonatomic) CGFloat threshold; + +- (id)initWithPackedColorspace:(BOOL)inputUsesPackedColorspace; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageThresholdedNonMaximumSuppressionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageThresholdedNonMaximumSuppressionFilter.m new file mode 100644 index 00000000..439d3119 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageThresholdedNonMaximumSuppressionFilter.m @@ -0,0 +1,297 @@ +#import "GPUImageThresholdedNonMaximumSuppressionFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageThresholdedNonMaximumSuppressionFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + varying highp vec2 textureCoordinate; + varying highp vec2 leftTextureCoordinate; + varying highp vec2 rightTextureCoordinate; + + varying highp vec2 topTextureCoordinate; + varying highp vec2 topLeftTextureCoordinate; + varying highp vec2 topRightTextureCoordinate; + + varying highp vec2 bottomTextureCoordinate; + varying highp vec2 bottomLeftTextureCoordinate; + 
varying highp vec2 bottomRightTextureCoordinate; + + uniform lowp float threshold; + + void main() + { + lowp float bottomColor = texture2D(inputImageTexture, bottomTextureCoordinate).r; + lowp float bottomLeftColor = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + lowp float bottomRightColor = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + lowp vec4 centerColor = texture2D(inputImageTexture, textureCoordinate); + lowp float leftColor = texture2D(inputImageTexture, leftTextureCoordinate).r; + lowp float rightColor = texture2D(inputImageTexture, rightTextureCoordinate).r; + lowp float topColor = texture2D(inputImageTexture, topTextureCoordinate).r; + lowp float topRightColor = texture2D(inputImageTexture, topRightTextureCoordinate).r; + lowp float topLeftColor = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + + // Use a tiebreaker for pixels to the left and immediately above this one + lowp float multiplier = 1.0 - step(centerColor.r, topColor); + multiplier = multiplier * (1.0 - step(centerColor.r, topLeftColor)); + multiplier = multiplier * (1.0 - step(centerColor.r, leftColor)); + multiplier = multiplier * (1.0 - step(centerColor.r, bottomLeftColor)); + + lowp float maxValue = max(centerColor.r, bottomColor); + maxValue = max(maxValue, bottomRightColor); + maxValue = max(maxValue, rightColor); + maxValue = max(maxValue, topRightColor); + + lowp float finalValue = centerColor.r * step(maxValue, centerColor.r) * multiplier; + finalValue = step(threshold, finalValue); + + gl_FragColor = vec4(finalValue, finalValue, finalValue, 1.0); +// +// gl_FragColor = vec4((centerColor.rgb * step(maxValue, step(threshold, centerColor.r)) * multiplier), 1.0); + } +); + +NSString *const kGPUImageThresholdedNonMaximumSuppressionPackedColorspaceFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + varying highp vec2 textureCoordinate; + varying highp vec2 leftTextureCoordinate; + varying highp vec2 
rightTextureCoordinate; + + varying highp vec2 topTextureCoordinate; + varying highp vec2 topLeftTextureCoordinate; + varying highp vec2 topRightTextureCoordinate; + + varying highp vec2 bottomTextureCoordinate; + varying highp vec2 bottomLeftTextureCoordinate; + varying highp vec2 bottomRightTextureCoordinate; + + uniform lowp float threshold; + uniform highp float texelWidth; + uniform highp float texelHeight; + + highp float encodedIntensity(highp vec3 sourceColor) + { + return (sourceColor.b * 256.0 * 256.0 + sourceColor.g * 256.0 + sourceColor.r); + } + + void main() + { + highp float bottomColor = encodedIntensity(texture2D(inputImageTexture, bottomTextureCoordinate).rgb); + highp float bottomLeftColor = encodedIntensity(texture2D(inputImageTexture, bottomLeftTextureCoordinate).rgb); + highp float bottomRightColor = encodedIntensity(texture2D(inputImageTexture, bottomRightTextureCoordinate).rgb); + highp float centerColor = encodedIntensity(texture2D(inputImageTexture, textureCoordinate).rgb); + highp float leftColor = encodedIntensity(texture2D(inputImageTexture, leftTextureCoordinate).rgb); + highp float rightColor = encodedIntensity(texture2D(inputImageTexture, rightTextureCoordinate).rgb); + highp float topColor = encodedIntensity(texture2D(inputImageTexture, topTextureCoordinate).rgb); + highp float topRightColor = encodedIntensity(texture2D(inputImageTexture, topRightTextureCoordinate).rgb); + highp float topLeftColor = encodedIntensity(texture2D(inputImageTexture, topLeftTextureCoordinate).rgb); + + highp float secondStageColor1 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(-2.0 * texelWidth, -2.0 * texelHeight)).rgb); + highp float secondStageColor2 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(-2.0 * texelWidth, -1.0 * texelHeight)).rgb); + highp float secondStageColor3 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(-2.0 * texelWidth, 0.0)).rgb); + highp float 
secondStageColor4 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(-2.0 * texelWidth, 1.0 * texelHeight)).rgb); + highp float secondStageColor5 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(-2.0 * texelWidth, 2.0 * texelHeight)).rgb); + highp float secondStageColor6 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(-1.0 * texelWidth, 2.0 * texelHeight)).rgb); + highp float secondStageColor7 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(0.0, 2.0 * texelHeight)).rgb); + highp float secondStageColor8 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(1.0 * texelWidth, 2.0 * texelHeight)).rgb); + + highp float thirdStageColor1 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(-1.0 * texelWidth, -2.0 * texelHeight)).rgb); + highp float thirdStageColor2 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(0.0, -2.0 * texelHeight)).rgb); + highp float thirdStageColor3 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(1.0 * texelWidth, -2.0 * texelHeight)).rgb); + highp float thirdStageColor4 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(2.0 * texelWidth, -2.0 * texelHeight)).rgb); + highp float thirdStageColor5 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(2.0 * texelWidth, -1.0 * texelHeight)).rgb); + highp float thirdStageColor6 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(2.0 * texelWidth, 0.0)).rgb); + highp float thirdStageColor7 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(2.0 * texelWidth, 1.0 * texelHeight)).rgb); + highp float thirdStageColor8 = encodedIntensity(texture2D(inputImageTexture, textureCoordinate + vec2(2.0 * texelWidth, 2.0 * texelHeight)).rgb); + + // Use a tiebreaker for pixels to the left and immediately above this one + highp float multiplier = 1.0 - 
step(centerColor, topColor); + multiplier = multiplier * (1.0 - step(centerColor, topLeftColor)); + multiplier = multiplier * (1.0 - step(centerColor, leftColor)); + multiplier = multiplier * (1.0 - step(centerColor, bottomLeftColor)); + + multiplier = multiplier * (1.0 - step(centerColor, secondStageColor1)); + multiplier = multiplier * (1.0 - step(centerColor, secondStageColor2)); + multiplier = multiplier * (1.0 - step(centerColor, secondStageColor3)); + multiplier = multiplier * (1.0 - step(centerColor, secondStageColor4)); + multiplier = multiplier * (1.0 - step(centerColor, secondStageColor5)); + multiplier = multiplier * (1.0 - step(centerColor, secondStageColor6)); + multiplier = multiplier * (1.0 - step(centerColor, secondStageColor7)); + multiplier = multiplier * (1.0 - step(centerColor, secondStageColor8)); + + highp float maxValue = max(centerColor, bottomColor); + maxValue = max(maxValue, bottomRightColor); + maxValue = max(maxValue, rightColor); + maxValue = max(maxValue, topRightColor); + + maxValue = max(maxValue, thirdStageColor1); + maxValue = max(maxValue, thirdStageColor2); + maxValue = max(maxValue, thirdStageColor3); + maxValue = max(maxValue, thirdStageColor4); + maxValue = max(maxValue, thirdStageColor5); + maxValue = max(maxValue, thirdStageColor6); + maxValue = max(maxValue, thirdStageColor7); + maxValue = max(maxValue, thirdStageColor8); + + highp float midValue = centerColor * step(maxValue, centerColor) * multiplier; + highp float finalValue = step(threshold, midValue); + + gl_FragColor = vec4(finalValue * centerColor, topLeftColor, topRightColor, topColor); + } +); +#else +NSString *const kGPUImageThresholdedNonMaximumSuppressionFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 
topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform float threshold; + + void main() + { + float bottomColor = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float bottomLeftColor = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float bottomRightColor = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + vec4 centerColor = texture2D(inputImageTexture, textureCoordinate); + float leftColor = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightColor = texture2D(inputImageTexture, rightTextureCoordinate).r; + float topColor = texture2D(inputImageTexture, topTextureCoordinate).r; + float topRightColor = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftColor = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + + // Use a tiebreaker for pixels to the left and immediately above this one + float multiplier = 1.0 - step(centerColor.r, topColor); + multiplier = multiplier * (1.0 - step(centerColor.r, topLeftColor)); + multiplier = multiplier * (1.0 - step(centerColor.r, leftColor)); + multiplier = multiplier * (1.0 - step(centerColor.r, bottomLeftColor)); + + float maxValue = max(centerColor.r, bottomColor); + maxValue = max(maxValue, bottomRightColor); + maxValue = max(maxValue, rightColor); + maxValue = max(maxValue, topRightColor); + + float finalValue = centerColor.r * step(maxValue, centerColor.r) * multiplier; + finalValue = step(threshold, finalValue); + + gl_FragColor = vec4(finalValue, finalValue, finalValue, 1.0); + // + // gl_FragColor = vec4((centerColor.rgb * step(maxValue, step(threshold, centerColor.r)) * multiplier), 1.0); + } +); + +NSString *const kGPUImageThresholdedNonMaximumSuppressionPackedColorspaceFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; 
+ varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform float threshold; + + void main() + { + float bottomColor = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float bottomLeftColor = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float bottomRightColor = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + vec4 centerColor = texture2D(inputImageTexture, textureCoordinate); + float leftColor = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightColor = texture2D(inputImageTexture, rightTextureCoordinate).r; + float topColor = texture2D(inputImageTexture, topTextureCoordinate).r; + float topRightColor = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftColor = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + + // Use a tiebreaker for pixels to the left and immediately above this one + float multiplier = 1.0 - step(centerColor.r, topColor); + multiplier = multiplier * (1.0 - step(centerColor.r, topLeftColor)); + multiplier = multiplier * (1.0 - step(centerColor.r, leftColor)); + multiplier = multiplier * (1.0 - step(centerColor.r, bottomLeftColor)); + + float maxValue = max(centerColor.r, bottomColor); + maxValue = max(maxValue, bottomRightColor); + maxValue = max(maxValue, rightColor); + maxValue = max(maxValue, topRightColor); + + float finalValue = centerColor.r * step(maxValue, centerColor.r) * multiplier; + finalValue = step(threshold, finalValue); + + gl_FragColor = vec4(finalValue, finalValue, finalValue, 1.0); + // + // gl_FragColor = vec4((centerColor.rgb * step(maxValue, step(threshold, centerColor.r)) * multiplier), 1.0); + } + ); +#endif + +@implementation GPUImageThresholdedNonMaximumSuppressionFilter + +@synthesize 
threshold = _threshold; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithPackedColorspace:NO])) + { + return nil; + } + + return self; +} + +- (id)initWithPackedColorspace:(BOOL)inputUsesPackedColorspace; +{ + NSString *shaderString; + if (inputUsesPackedColorspace) + { + shaderString = kGPUImageThresholdedNonMaximumSuppressionPackedColorspaceFragmentShaderString; + } + else + { + shaderString = kGPUImageThresholdedNonMaximumSuppressionFragmentShaderString; + } + + + if (!(self = [super initWithFragmentShaderFromString:shaderString])) + { + return nil; + } + + thresholdUniform = [filterProgram uniformIndex:@"threshold"]; + self.threshold = 0.9; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setThreshold:(CGFloat)newValue; +{ + _threshold = newValue; + + [self setFloat:_threshold forUniform:thresholdUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTiltShiftFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageTiltShiftFilter.h new file mode 100755 index 00000000..e41adee7 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTiltShiftFilter.h @@ -0,0 +1,24 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImageGaussianBlurFilter; + +/// A simulated tilt shift lens effect +@interface GPUImageTiltShiftFilter : GPUImageFilterGroup +{ + GPUImageGaussianBlurFilter *blurFilter; + GPUImageFilter *tiltShiftFilter; +} + +/// The radius of the underlying blur, in pixels. This is 7.0 by default. 
+@property(readwrite, nonatomic) CGFloat blurRadiusInPixels; + +/// The normalized location of the top of the in-focus area in the image, this value should be lower than bottomFocusLevel, default 0.4 +@property(readwrite, nonatomic) CGFloat topFocusLevel; + +/// The normalized location of the bottom of the in-focus area in the image, this value should be higher than topFocusLevel, default 0.6 +@property(readwrite, nonatomic) CGFloat bottomFocusLevel; + +/// The rate at which the image gets blurry away from the in-focus region, default 0.2 +@property(readwrite, nonatomic) CGFloat focusFallOffRate; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTiltShiftFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageTiltShiftFilter.m new file mode 100755 index 00000000..e2f632b1 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTiltShiftFilter.m @@ -0,0 +1,126 @@ +#import "GPUImageTiltShiftFilter.h" +#import "GPUImageFilter.h" +#import "GPUImageTwoInputFilter.h" +#import "GPUImageGaussianBlurFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageTiltShiftFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform highp float topFocusLevel; + uniform highp float bottomFocusLevel; + uniform highp float focusFallOffRate; + + void main() + { + lowp vec4 sharpImageColor = texture2D(inputImageTexture, textureCoordinate); + lowp vec4 blurredImageColor = texture2D(inputImageTexture2, textureCoordinate2); + + lowp float blurIntensity = 1.0 - smoothstep(topFocusLevel - focusFallOffRate, topFocusLevel, textureCoordinate2.y); + blurIntensity += smoothstep(bottomFocusLevel, bottomFocusLevel + focusFallOffRate, textureCoordinate2.y); + + gl_FragColor = mix(sharpImageColor, blurredImageColor, blurIntensity); + } +); +#else +NSString *const kGPUImageTiltShiftFragmentShaderString = SHADER_STRING +( + varying 
vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform float topFocusLevel; + uniform float bottomFocusLevel; + uniform float focusFallOffRate; + + void main() + { + vec4 sharpImageColor = texture2D(inputImageTexture, textureCoordinate); + vec4 blurredImageColor = texture2D(inputImageTexture2, textureCoordinate2); + + float blurIntensity = 1.0 - smoothstep(topFocusLevel - focusFallOffRate, topFocusLevel, textureCoordinate2.y); + blurIntensity += smoothstep(bottomFocusLevel, bottomFocusLevel + focusFallOffRate, textureCoordinate2.y); + + gl_FragColor = mix(sharpImageColor, blurredImageColor, blurIntensity); + } +); +#endif + +@implementation GPUImageTiltShiftFilter + +@synthesize blurRadiusInPixels; +@synthesize topFocusLevel = _topFocusLevel; +@synthesize bottomFocusLevel = _bottomFocusLevel; +@synthesize focusFallOffRate = _focusFallOffRate; + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + // First pass: apply a variable Gaussian blur + blurFilter = [[GPUImageGaussianBlurFilter alloc] init]; + [self addFilter:blurFilter]; + + // Second pass: combine the blurred image with the original sharp one + tiltShiftFilter = [[GPUImageTwoInputFilter alloc] initWithFragmentShaderFromString:kGPUImageTiltShiftFragmentShaderString]; + [self addFilter:tiltShiftFilter]; + + // Texture location 0 needs to be the sharp image for both the blur and the second stage processing + [blurFilter addTarget:tiltShiftFilter atTextureLocation:1]; + + // To prevent double updating of this filter, disable updates from the sharp image side +// self.inputFilterToIgnoreForUpdates = tiltShiftFilter; + + self.initialFilters = [NSArray arrayWithObjects:blurFilter, tiltShiftFilter, nil]; + self.terminalFilter = tiltShiftFilter; + + self.topFocusLevel = 0.4; + self.bottomFocusLevel = 0.6; + self.focusFallOffRate = 0.2; + self.blurRadiusInPixels = 7.0; + + return self; +} + +#pragma 
mark - +#pragma mark Accessors + +- (void)setBlurRadiusInPixels:(CGFloat)newValue; +{ + blurFilter.blurRadiusInPixels = newValue; +} + +- (CGFloat)blurRadiusInPixels; +{ + return blurFilter.blurRadiusInPixels; +} + +- (void)setTopFocusLevel:(CGFloat)newValue; +{ + _topFocusLevel = newValue; + [tiltShiftFilter setFloat:newValue forUniformName:@"topFocusLevel"]; +} + +- (void)setBottomFocusLevel:(CGFloat)newValue; +{ + _bottomFocusLevel = newValue; + [tiltShiftFilter setFloat:newValue forUniformName:@"bottomFocusLevel"]; +} + +- (void)setFocusFallOffRate:(CGFloat)newValue; +{ + _focusFallOffRate = newValue; + [tiltShiftFilter setFloat:newValue forUniformName:@"focusFallOffRate"]; +} + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageToneCurveFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageToneCurveFilter.h new file mode 100755 index 00000000..ff4ae92e --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageToneCurveFilter.h @@ -0,0 +1,30 @@ +#import "GPUImageFilter.h" + +@interface GPUImageToneCurveFilter : GPUImageFilter + +@property(readwrite, nonatomic, copy) NSArray *redControlPoints; +@property(readwrite, nonatomic, copy) NSArray *greenControlPoints; +@property(readwrite, nonatomic, copy) NSArray *blueControlPoints; +@property(readwrite, nonatomic, copy) NSArray *rgbCompositeControlPoints; + +// Initialization and teardown +- (id)initWithACVData:(NSData*)data; + +- (id)initWithACV:(NSString*)curveFilename; +- (id)initWithACVURL:(NSURL*)curveFileURL; + +// This lets you set all three red, green, and blue tone curves at once. +// NOTE: Deprecated this function because this effect can be accomplished +// using the rgbComposite channel rather then setting all 3 R, G, and B channels. 
+- (void)setRGBControlPoints:(NSArray *)points DEPRECATED_ATTRIBUTE; + +- (void)setPointsWithACV:(NSString*)curveFilename; +- (void)setPointsWithACVURL:(NSURL*)curveFileURL; + +// Curve calculation +- (NSMutableArray *)getPreparedSplineCurve:(NSArray *)points; +- (NSMutableArray *)splineCurve:(NSArray *)points; +- (NSMutableArray *)secondDerivative:(NSArray *)cgPoints; +- (void)updateToneCurveTexture; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageToneCurveFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageToneCurveFilter.m new file mode 100644 index 00000000..18a717e0 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageToneCurveFilter.m @@ -0,0 +1,621 @@ +#import "GPUImageToneCurveFilter.h" + +#pragma mark - +#pragma mark GPUImageACVFile Helper + +// GPUImageACVFile +// +// ACV File format Parser +// Please refer to http://www.adobe.com/devnet-apps/photoshop/fileformatashtml/PhotoshopFileFormats.htm#50577411_pgfId-1056330 +// + +@interface GPUImageACVFile : NSObject{ + short version; + short totalCurves; + + NSArray *rgbCompositeCurvePoints; + NSArray *redCurvePoints; + NSArray *greenCurvePoints; + NSArray *blueCurvePoints; +} + +@property(strong,nonatomic) NSArray *rgbCompositeCurvePoints; +@property(strong,nonatomic) NSArray *redCurvePoints; +@property(strong,nonatomic) NSArray *greenCurvePoints; +@property(strong,nonatomic) NSArray *blueCurvePoints; + +- (id) initWithACVFileData:(NSData*)data; + + +unsigned short int16WithBytes(Byte* bytes); +@end + +@implementation GPUImageACVFile + +@synthesize rgbCompositeCurvePoints, redCurvePoints, greenCurvePoints, blueCurvePoints; + +- (id) initWithACVFileData:(NSData *)data { + self = [super init]; + if (self != nil) + { + if (data.length == 0) + { + NSLog(@"failed to init ACVFile with data:%@", data); + + return self; + } + + Byte* rawBytes = (Byte*) [data bytes]; + version = int16WithBytes(rawBytes); + rawBytes+=2; + + totalCurves = int16WithBytes(rawBytes); + rawBytes+=2; + + NSMutableArray *curves = 
[NSMutableArray new]; + + float pointRate = (1.0 / 255); + // The following is the data for each curve specified by count above + for (NSInteger x = 0; x 0) + { + // Sort the array. + NSArray *sortedPoints = [points sortedArrayUsingComparator:^NSComparisonResult(id a, id b) { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + float x1 = [(NSValue *)a CGPointValue].x; + float x2 = [(NSValue *)b CGPointValue].x; +#else + float x1 = [(NSValue *)a pointValue].x; + float x2 = [(NSValue *)b pointValue].x; +#endif + return x1 > x2; + }]; + + // Convert from (0, 1) to (0, 255). + NSMutableArray *convertedPoints = [NSMutableArray arrayWithCapacity:[sortedPoints count]]; + for (int i=0; i<[points count]; i++){ +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + CGPoint point = [[sortedPoints objectAtIndex:i] CGPointValue]; +#else + NSPoint point = [[sortedPoints objectAtIndex:i] pointValue]; +#endif + point.x = point.x * 255; + point.y = point.y * 255; + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [convertedPoints addObject:[NSValue valueWithCGPoint:point]]; +#else + [convertedPoints addObject:[NSValue valueWithPoint:point]]; +#endif + } + + + NSMutableArray *splinePoints = [self splineCurve:convertedPoints]; + + // If we have a first point like (0.3, 0) we'll be missing some points at the beginning + // that should be 0. 
+#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + CGPoint firstSplinePoint = [[splinePoints objectAtIndex:0] CGPointValue]; +#else + NSPoint firstSplinePoint = [[splinePoints objectAtIndex:0] pointValue]; +#endif + + if (firstSplinePoint.x > 0) { + for (int i=firstSplinePoint.x; i >= 0; i--) { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + CGPoint newCGPoint = CGPointMake(i, 0); + [splinePoints insertObject:[NSValue valueWithCGPoint:newCGPoint] atIndex:0]; +#else + NSPoint newNSPoint = NSMakePoint(i, 0); + [splinePoints insertObject:[NSValue valueWithPoint:newNSPoint] atIndex:0]; +#endif + } + } + + // Insert points similarly at the end, if necessary. +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + CGPoint lastSplinePoint = [[splinePoints lastObject] CGPointValue]; + + if (lastSplinePoint.x < 255) { + for (int i = lastSplinePoint.x + 1; i <= 255; i++) { + CGPoint newCGPoint = CGPointMake(i, 255); + [splinePoints addObject:[NSValue valueWithCGPoint:newCGPoint]]; + } + } +#else + NSPoint lastSplinePoint = [[splinePoints lastObject] pointValue]; + + if (lastSplinePoint.x < 255) { + for (int i = lastSplinePoint.x + 1; i <= 255; i++) { + NSPoint newNSPoint = NSMakePoint(i, 255); + [splinePoints addObject:[NSValue valueWithPoint:newNSPoint]]; + } + } +#endif + + // Prepare the spline points. 
+ NSMutableArray *preparedSplinePoints = [NSMutableArray arrayWithCapacity:[splinePoints count]]; + for (int i=0; i<[splinePoints count]; i++) + { +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + CGPoint newPoint = [[splinePoints objectAtIndex:i] CGPointValue]; +#else + NSPoint newPoint = [[splinePoints objectAtIndex:i] pointValue]; +#endif + CGPoint origPoint = CGPointMake(newPoint.x, newPoint.x); + + float distance = sqrt(pow((origPoint.x - newPoint.x), 2.0) + pow((origPoint.y - newPoint.y), 2.0)); + + if (origPoint.y > newPoint.y) + { + distance = -distance; + } + + [preparedSplinePoints addObject:[NSNumber numberWithFloat:distance]]; + } + + return preparedSplinePoints; + } + + return nil; +} + + +- (NSMutableArray *)splineCurve:(NSArray *)points +{ + NSMutableArray *sdA = [self secondDerivative:points]; + + // [points count] is equal to [sdA count] + NSInteger n = [sdA count]; + if (n < 1) + { + return nil; + } + double sd[n]; + + // From NSMutableArray to sd[n]; + for (int i=0; i 255.0) + { + y = 255.0; + } + else if (y < 0.0) + { + y = 0.0; + } +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + [output addObject:[NSValue valueWithCGPoint:CGPointMake(x, y)]]; +#else + [output addObject:[NSValue valueWithPoint:NSMakePoint(x, y)]]; +#endif + } + } + + // The above always misses the last point because the last point is the last next, so we approach but don't equal it. + [output addObject:[points lastObject]]; + return output; +} + +- (NSMutableArray *)secondDerivative:(NSArray *)points +{ + const NSInteger n = [points count]; + if ((n <= 0) || (n == 1)) + { + return nil; + } + + double matrix[n][3]; + double result[n]; + matrix[0][1]=1; + // What about matrix[0][1] and matrix[0][0]? Assuming 0 for now (Brad L.) 
+ matrix[0][0]=0; + matrix[0][2]=0; + + for(int i=1;idown) + for(int i=1;iup) + for(NSInteger i=n-2;i>=0;i--) + { + double k = matrix[i][2]/matrix[i+1][1]; + matrix[i][1] -= k*matrix[i+1][0]; + matrix[i][2] = 0; + result[i] -= k*result[i+1]; + } + + double y2[n]; + for(int i=0;i= 256) && ([_greenCurve count] >= 256) && ([_blueCurve count] >= 256) && ([_rgbCompositeCurve count] >= 256)) + { + for (unsigned int currentCurveIndex = 0; currentCurveIndex < 256; currentCurveIndex++) + { + // BGRA for upload to texture + GLubyte b = fmin(fmax(currentCurveIndex + [[_blueCurve objectAtIndex:currentCurveIndex] floatValue], 0), 255); + toneCurveByteArray[currentCurveIndex * 4] = fmin(fmax(b + [[_rgbCompositeCurve objectAtIndex:b] floatValue], 0), 255); + GLubyte g = fmin(fmax(currentCurveIndex + [[_greenCurve objectAtIndex:currentCurveIndex] floatValue], 0), 255); + toneCurveByteArray[currentCurveIndex * 4 + 1] = fmin(fmax(g + [[_rgbCompositeCurve objectAtIndex:g] floatValue], 0), 255); + GLubyte r = fmin(fmax(currentCurveIndex + [[_redCurve objectAtIndex:currentCurveIndex] floatValue], 0), 255); + toneCurveByteArray[currentCurveIndex * 4 + 2] = fmin(fmax(r + [[_rgbCompositeCurve objectAtIndex:r] floatValue], 0), 255); + toneCurveByteArray[currentCurveIndex * 4 + 3] = 255; + } + + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 256 /*width*/, 1 /*height*/, 0, GL_BGRA, GL_UNSIGNED_BYTE, toneCurveByteArray); + } + }); +} + +#pragma mark - +#pragma mark Rendering + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + if (self.preventRendering) + { + [firstInputFramebuffer unlock]; + return; + } + + [GPUImageContext setActiveShaderProgram:filterProgram]; + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + if (usingNextFrameForImageCapture) + { + [outputFramebuffer 
lock]; + } + + glClearColor(backgroundColorRed, backgroundColorGreen, backgroundColorBlue, backgroundColorAlpha); + glClear(GL_COLOR_BUFFER_BIT); + + glActiveTexture(GL_TEXTURE2); + glBindTexture(GL_TEXTURE_2D, [firstInputFramebuffer texture]); + glUniform1i(filterInputTextureUniform, 2); + + glActiveTexture(GL_TEXTURE3); + glBindTexture(GL_TEXTURE_2D, toneCurveTexture); + glUniform1i(toneCurveTextureUniform, 3); + + glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + glVertexAttribPointer(filterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + [firstInputFramebuffer unlock]; + if (usingNextFrameForImageCapture) + { + dispatch_semaphore_signal(imageCaptureSemaphore); + } +} + +#pragma mark - +#pragma mark Accessors + +- (void)setRGBControlPoints:(NSArray *)points +{ + _redControlPoints = [points copy]; + _redCurve = [self getPreparedSplineCurve:_redControlPoints]; + + _greenControlPoints = [points copy]; + _greenCurve = [self getPreparedSplineCurve:_greenControlPoints]; + + _blueControlPoints = [points copy]; + _blueCurve = [self getPreparedSplineCurve:_blueControlPoints]; + + [self updateToneCurveTexture]; +} + + +- (void)setRgbCompositeControlPoints:(NSArray *)newValue +{ + _rgbCompositeControlPoints = [newValue copy]; + _rgbCompositeCurve = [self getPreparedSplineCurve:_rgbCompositeControlPoints]; + + [self updateToneCurveTexture]; +} + + +- (void)setRedControlPoints:(NSArray *)newValue; +{ + _redControlPoints = [newValue copy]; + _redCurve = [self getPreparedSplineCurve:_redControlPoints]; + + [self updateToneCurveTexture]; +} + + +- (void)setGreenControlPoints:(NSArray *)newValue +{ + _greenControlPoints = [newValue copy]; + _greenCurve = [self getPreparedSplineCurve:_greenControlPoints]; + + [self updateToneCurveTexture]; +} + + +- (void)setBlueControlPoints:(NSArray *)newValue +{ + _blueControlPoints = [newValue copy]; + _blueCurve = [self 
getPreparedSplineCurve:_blueControlPoints]; + + [self updateToneCurveTexture]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageToonFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageToonFilter.h new file mode 100755 index 00000000..ef8e17c3 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageToonFilter.h @@ -0,0 +1,19 @@ +#import "GPUImage3x3TextureSamplingFilter.h" + +/** This uses Sobel edge detection to place a black border around objects, + and then it quantizes the colors present in the image to give a cartoon-like quality to the image. + */ +@interface GPUImageToonFilter : GPUImage3x3TextureSamplingFilter +{ + GLint thresholdUniform, quantizationLevelsUniform; +} + +/** The threshold at which to apply the edges, default of 0.2 + */ +@property(readwrite, nonatomic) CGFloat threshold; + +/** The levels of quantization for the posterization of colors within the scene, with a default of 10.0 + */ +@property(readwrite, nonatomic) CGFloat quantizationLevels; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageToonFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageToonFilter.m new file mode 100755 index 00000000..e8ff104e --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageToonFilter.m @@ -0,0 +1,149 @@ +#import "GPUImageToonFilter.h" +#import "GPUImageSobelEdgeDetectionFilter.h" +#import "GPUImage3x3ConvolutionFilter.h" + +// Code from "Graphics Shaders: Theory and Practice" by M. Bailey and S. 
Cunningham +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageToonFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp float intensity; + uniform highp float threshold; + uniform highp float quantizationLevels; + + const highp vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity; + float v = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity; + + float mag = length(vec2(h, v)); + + vec3 posterizedImageColor = floor((textureColor.rgb * quantizationLevels) + 0.5) / quantizationLevels; + + float thresholdTest = 1.0 - step(threshold, mag); + + gl_FragColor = 
vec4(posterizedImageColor * thresholdTest, textureColor.a); + } +); +#else +NSString *const kGPUImageToonFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform float intensity; + uniform float threshold; + uniform float quantizationLevels; + + const vec3 W = vec3(0.2125, 0.7154, 0.0721); + + void main() + { + vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); + + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity; + float v = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity; + + float mag = length(vec2(h, v)); + + vec3 posterizedImageColor = floor((textureColor.rgb * quantizationLevels) + 0.5) / quantizationLevels; + + float thresholdTest = 1.0 - step(threshold, mag); + + gl_FragColor = vec4(posterizedImageColor * 
thresholdTest, textureColor.a); + } +); +#endif + +@implementation GPUImageToonFilter + +@synthesize threshold = _threshold; +@synthesize quantizationLevels = _quantizationLevels; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageToonFragmentShaderString])) + { + return nil; + } + + hasOverriddenImageSizeFactor = NO; + + thresholdUniform = [filterProgram uniformIndex:@"threshold"]; + quantizationLevelsUniform = [filterProgram uniformIndex:@"quantizationLevels"]; + + self.threshold = 0.2; + self.quantizationLevels = 10.0; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setThreshold:(CGFloat)newValue; +{ + _threshold = newValue; + + [self setFloat:_threshold forUniform:thresholdUniform program:filterProgram]; +} + +- (void)setQuantizationLevels:(CGFloat)newValue; +{ + _quantizationLevels = newValue; + + [self setFloat:_quantizationLevels forUniform:quantizationLevelsUniform program:filterProgram]; +} + + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTransformFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageTransformFilter.h new file mode 100755 index 00000000..9865b853 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTransformFilter.h @@ -0,0 +1,19 @@ +#import "GPUImageFilter.h" + +@interface GPUImageTransformFilter : GPUImageFilter +{ + GLint transformMatrixUniform, orthographicMatrixUniform; + GPUMatrix4x4 orthographicMatrix; +} + +// You can either set the transform to apply to be a 2-D affine transform or a 3-D transform. The default is the identity transform (the output image is identical to the input). 
+@property(readwrite, nonatomic) CGAffineTransform affineTransform; +@property(readwrite, nonatomic) CATransform3D transform3D; + +// This applies the transform to the raw frame data if set to YES, the default of NO takes the aspect ratio of the image input into account when rotating +@property(readwrite, nonatomic) BOOL ignoreAspectRatio; + +// sets the anchor point to top left corner +@property(readwrite, nonatomic) BOOL anchorTopLeft; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTransformFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageTransformFilter.m new file mode 100755 index 00000000..17933092 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTransformFilter.m @@ -0,0 +1,260 @@ +#import "GPUImageTransformFilter.h" + +NSString *const kGPUImageTransformVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + + uniform mat4 transformMatrix; + uniform mat4 orthographicMatrix; + + varying vec2 textureCoordinate; + + void main() + { + gl_Position = transformMatrix * vec4(position.xyz, 1.0) * orthographicMatrix; + textureCoordinate = inputTextureCoordinate.xy; + } +); + +@implementation GPUImageTransformFilter + +@synthesize affineTransform; +@synthesize transform3D = _transform3D; +@synthesize ignoreAspectRatio = _ignoreAspectRatio; +@synthesize anchorTopLeft = _anchorTopLeft; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithVertexShaderFromString:kGPUImageTransformVertexShaderString fragmentShaderFromString:kGPUImagePassthroughFragmentShaderString])) + { + return nil; + } + + transformMatrixUniform = [filterProgram uniformIndex:@"transformMatrix"]; + orthographicMatrixUniform = [filterProgram uniformIndex:@"orthographicMatrix"]; + + self.transform3D = CATransform3DIdentity; + + return self; +} + +#pragma mark - +#pragma mark Conversion from matrix formats + +- (void)loadOrthoMatrix:(GLfloat *)matrix left:(GLfloat)left right:(GLfloat)right 
bottom:(GLfloat)bottom top:(GLfloat)top near:(GLfloat)near far:(GLfloat)far; +{ + GLfloat r_l = right - left; + GLfloat t_b = top - bottom; + GLfloat f_n = far - near; + GLfloat tx = - (right + left) / (right - left); + GLfloat ty = - (top + bottom) / (top - bottom); + GLfloat tz = - (far + near) / (far - near); + + float scale = 2.0f; + if (_anchorTopLeft) + { + scale = 4.0f; + tx=-1.0f; + ty=-1.0f; + } + + matrix[0] = scale / r_l; + matrix[1] = 0.0f; + matrix[2] = 0.0f; + matrix[3] = tx; + + matrix[4] = 0.0f; + matrix[5] = scale / t_b; + matrix[6] = 0.0f; + matrix[7] = ty; + + matrix[8] = 0.0f; + matrix[9] = 0.0f; + matrix[10] = scale / f_n; + matrix[11] = tz; + + matrix[12] = 0.0f; + matrix[13] = 0.0f; + matrix[14] = 0.0f; + matrix[15] = 1.0f; +} + +//- (void)convert3DTransform:(CATransform3D *)transform3D toMatrix:(GLfloat *)matrix; +//{ +// // struct CATransform3D +// // { +// // CGFloat m11, m12, m13, m14; +// // CGFloat m21, m22, m23, m24; +// // CGFloat m31, m32, m33, m34; +// // CGFloat m41, m42, m43, m44; +// // }; +// +// matrix[0] = (GLfloat)transform3D->m11; +// matrix[1] = (GLfloat)transform3D->m12; +// matrix[2] = (GLfloat)transform3D->m13; +// matrix[3] = (GLfloat)transform3D->m14; +// matrix[4] = (GLfloat)transform3D->m21; +// matrix[5] = (GLfloat)transform3D->m22; +// matrix[6] = (GLfloat)transform3D->m23; +// matrix[7] = (GLfloat)transform3D->m24; +// matrix[8] = (GLfloat)transform3D->m31; +// matrix[9] = (GLfloat)transform3D->m32; +// matrix[10] = (GLfloat)transform3D->m33; +// matrix[11] = (GLfloat)transform3D->m34; +// matrix[12] = (GLfloat)transform3D->m41; +// matrix[13] = (GLfloat)transform3D->m42; +// matrix[14] = (GLfloat)transform3D->m43; +// matrix[15] = (GLfloat)transform3D->m44; +//} + +- (void)convert3DTransform:(CATransform3D *)transform3D toMatrix:(GPUMatrix4x4 *)matrix; +{ + // struct CATransform3D + // { + // CGFloat m11, m12, m13, m14; + // CGFloat m21, m22, m23, m24; + // CGFloat m31, m32, m33, m34; + // CGFloat m41, m42, m43, 
m44; + // }; + + GLfloat *mappedMatrix = (GLfloat *)matrix; + + mappedMatrix[0] = (GLfloat)transform3D->m11; + mappedMatrix[1] = (GLfloat)transform3D->m12; + mappedMatrix[2] = (GLfloat)transform3D->m13; + mappedMatrix[3] = (GLfloat)transform3D->m14; + mappedMatrix[4] = (GLfloat)transform3D->m21; + mappedMatrix[5] = (GLfloat)transform3D->m22; + mappedMatrix[6] = (GLfloat)transform3D->m23; + mappedMatrix[7] = (GLfloat)transform3D->m24; + mappedMatrix[8] = (GLfloat)transform3D->m31; + mappedMatrix[9] = (GLfloat)transform3D->m32; + mappedMatrix[10] = (GLfloat)transform3D->m33; + mappedMatrix[11] = (GLfloat)transform3D->m34; + mappedMatrix[12] = (GLfloat)transform3D->m41; + mappedMatrix[13] = (GLfloat)transform3D->m42; + mappedMatrix[14] = (GLfloat)transform3D->m43; + mappedMatrix[15] = (GLfloat)transform3D->m44; +} + +#pragma mark - +#pragma mark GPUImageInput + +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +{ + CGSize currentFBOSize = [self sizeOfFBO]; + CGFloat normalizedHeight = currentFBOSize.height / currentFBOSize.width; + + GLfloat adjustedVertices[] = { + -1.0f, -normalizedHeight, + 1.0f, -normalizedHeight, + -1.0f, normalizedHeight, + 1.0f, normalizedHeight, + }; + static const GLfloat squareVertices[] = { + -1.0f, -1.0f, + 1.0f, -1.0f, + -1.0f, 1.0f, + 1.0f, 1.0f, + }; + + GLfloat adjustedVerticesAnchorTL[] = { + 0.0f, 0.0f, + 1.0f, 0.0f, + 0.0f, normalizedHeight, + 1.0f, normalizedHeight, + }; + + static const GLfloat squareVerticesAnchorTL[] = { + 0.0f, 0.0f, + 1.0f, 0.0f, + 0.0f, 1.0f, + 1.0f, 1.0f, + }; + + if (_ignoreAspectRatio) + { + if (_anchorTopLeft) + { + [self renderToTextureWithVertices:squareVerticesAnchorTL textureCoordinates:[[self class] textureCoordinatesForRotation:inputRotation]]; + } + else + { + [self renderToTextureWithVertices:squareVertices textureCoordinates:[[self class] textureCoordinatesForRotation:inputRotation]]; + } + } + else + { + if (_anchorTopLeft) + { + [self 
renderToTextureWithVertices:adjustedVerticesAnchorTL textureCoordinates:[[self class] textureCoordinatesForRotation:inputRotation]]; + } + else + { + [self renderToTextureWithVertices:adjustedVertices textureCoordinates:[[self class] textureCoordinatesForRotation:inputRotation]]; + } + } + + [self informTargetsAboutNewFrameAtTime:frameTime]; +} + +- (void)setupFilterForSize:(CGSize)filterFrameSize; +{ + if (!_ignoreAspectRatio) + { + [self loadOrthoMatrix:(GLfloat *)&orthographicMatrix left:-1.0 right:1.0 bottom:(-1.0 * filterFrameSize.height / filterFrameSize.width) top:(1.0 * filterFrameSize.height / filterFrameSize.width) near:-1.0 far:1.0]; + // [self loadOrthoMatrix:orthographicMatrix left:-1.0 right:1.0 bottom:(-1.0 * (GLfloat)backingHeight / (GLfloat)backingWidth) top:(1.0 * (GLfloat)backingHeight / (GLfloat)backingWidth) near:-2.0 far:2.0]; + + [self setMatrix4f:orthographicMatrix forUniform:orthographicMatrixUniform program:filterProgram]; + } +} + +#pragma mark - +#pragma mark Accessors + +- (void)setAffineTransform:(CGAffineTransform)newValue; +{ + self.transform3D = CATransform3DMakeAffineTransform(newValue); +} + +- (CGAffineTransform)affineTransform; +{ + return CATransform3DGetAffineTransform(self.transform3D); +} + +- (void)setTransform3D:(CATransform3D)newValue; +{ + _transform3D = newValue; + + GPUMatrix4x4 temporaryMatrix; + + [self convert3DTransform:&_transform3D toMatrix:&temporaryMatrix]; + [self setMatrix4f:temporaryMatrix forUniform:transformMatrixUniform program:filterProgram]; +} + +- (void)setIgnoreAspectRatio:(BOOL)newValue; +{ + _ignoreAspectRatio = newValue; + + if (_ignoreAspectRatio) + { + [self loadOrthoMatrix:(GLfloat *)&orthographicMatrix left:-1.0 right:1.0 bottom:-1.0 top:1.0 near:-1.0 far:1.0]; + [self setMatrix4f:orthographicMatrix forUniform:orthographicMatrixUniform program:filterProgram]; + } + else + { + [self setupFilterForSize:[self sizeOfFBO]]; + } +} + +- (void)setAnchorTopLeft:(BOOL)newValue +{ + _anchorTopLeft = 
newValue; + [self setIgnoreAspectRatio:_ignoreAspectRatio]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputCrossTextureSamplingFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputCrossTextureSamplingFilter.h new file mode 100644 index 00000000..64eac9dc --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputCrossTextureSamplingFilter.h @@ -0,0 +1,15 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageTwoInputCrossTextureSamplingFilter : GPUImageTwoInputFilter +{ + GLint texelWidthUniform, texelHeightUniform; + + CGFloat texelWidth, texelHeight; + BOOL hasOverriddenImageSizeFactor; +} + +// The texel width and height determines how far out to sample from this texel. By default, this is the normalized width of a pixel, but this can be overridden for different effects. +@property(readwrite, nonatomic) CGFloat texelWidth; +@property(readwrite, nonatomic) CGFloat texelHeight; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputCrossTextureSamplingFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputCrossTextureSamplingFilter.m new file mode 100644 index 00000000..aa338f81 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputCrossTextureSamplingFilter.m @@ -0,0 +1,108 @@ +#import "GPUImageTwoInputCrossTextureSamplingFilter.h" + +NSString *const kGPUImageTwoInputNearbyTexelSamplingVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + attribute vec4 inputTextureCoordinate2; + + uniform float texelWidth; + uniform float texelHeight; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + varying vec2 topTextureCoordinate; + varying vec2 bottomTextureCoordinate; + + varying vec2 textureCoordinate2; + varying vec2 leftTextureCoordinate2; + varying vec2 rightTextureCoordinate2; + varying vec2 topTextureCoordinate2; + varying vec2 bottomTextureCoordinate2; + + void main() + { + gl_Position = position; + + vec2 
widthStep = vec2(texelWidth, 0.0); + vec2 heightStep = vec2(0.0, texelHeight); + + textureCoordinate = inputTextureCoordinate.xy; + leftTextureCoordinate = inputTextureCoordinate.xy - widthStep; + rightTextureCoordinate = inputTextureCoordinate.xy + widthStep; + topTextureCoordinate = inputTextureCoordinate.xy - heightStep; + bottomTextureCoordinate = inputTextureCoordinate.xy + heightStep; + + textureCoordinate2 = inputTextureCoordinate2.xy; + leftTextureCoordinate2 = inputTextureCoordinate2.xy - widthStep; + rightTextureCoordinate2 = inputTextureCoordinate2.xy + widthStep; + topTextureCoordinate2 = inputTextureCoordinate2.xy - heightStep; + bottomTextureCoordinate2 = inputTextureCoordinate2.xy + heightStep; + } +); + +@implementation GPUImageTwoInputCrossTextureSamplingFilter + +@synthesize texelWidth = _texelWidth; +@synthesize texelHeight = _texelHeight; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [super initWithVertexShaderFromString:kGPUImageTwoInputNearbyTexelSamplingVertexShaderString fragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + texelWidthUniform = [filterProgram uniformIndex:@"texelWidth"]; + texelHeightUniform = [filterProgram uniformIndex:@"texelHeight"]; + + return self; +} + +- (void)setupFilterForSize:(CGSize)filterFrameSize; +{ + if (!hasOverriddenImageSizeFactor) + { + _texelWidth = 1.0 / filterFrameSize.width; + _texelHeight = 1.0 / filterFrameSize.height; + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:filterProgram]; + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + glUniform1f(texelWidthUniform, _texelHeight); + glUniform1f(texelHeightUniform, _texelWidth); + } + else + { + glUniform1f(texelWidthUniform, _texelWidth); + glUniform1f(texelHeightUniform, _texelHeight); + } + }); + } +} + +#pragma mark - +#pragma mark Accessors + +- 
(void)setTexelWidth:(CGFloat)newValue; +{ + hasOverriddenImageSizeFactor = YES; + _texelWidth = newValue; + + [self setFloat:_texelWidth forUniform:texelWidthUniform program:filterProgram]; +} + +- (void)setTexelHeight:(CGFloat)newValue; +{ + hasOverriddenImageSizeFactor = YES; + _texelHeight = newValue; + + [self setFloat:_texelHeight forUniform:texelHeightUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputFilter.h new file mode 100644 index 00000000..da3a1345 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputFilter.h @@ -0,0 +1,21 @@ +#import "GPUImageFilter.h" + +extern NSString *const kGPUImageTwoInputTextureVertexShaderString; + +@interface GPUImageTwoInputFilter : GPUImageFilter +{ + GPUImageFramebuffer *secondInputFramebuffer; + + GLint filterSecondTextureCoordinateAttribute; + GLint filterInputTextureUniform2; + GPUImageRotationMode inputRotation2; + CMTime firstFrameTime, secondFrameTime; + + BOOL hasSetFirstTexture, hasReceivedFirstFrame, hasReceivedSecondFrame, firstFrameWasVideo, secondFrameWasVideo; + BOOL firstFrameCheckDisabled, secondFrameCheckDisabled; +} + +- (void)disableFirstFrameCheck; +- (void)disableSecondFrameCheck; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputFilter.m new file mode 100644 index 00000000..cf318737 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputFilter.m @@ -0,0 +1,264 @@ +#import "GPUImageTwoInputFilter.h" + +NSString *const kGPUImageTwoInputTextureVertexShaderString = SHADER_STRING +( + attribute vec4 position; + attribute vec4 inputTextureCoordinate; + attribute vec4 inputTextureCoordinate2; + + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + void main() + { + gl_Position = position; + textureCoordinate = inputTextureCoordinate.xy; + textureCoordinate2 = inputTextureCoordinate2.xy; + } +); + + 
+@implementation GPUImageTwoInputFilter + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithFragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [self initWithVertexShaderFromString:kGPUImageTwoInputTextureVertexShaderString fragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + return self; +} + +- (id)initWithVertexShaderFromString:(NSString *)vertexShaderString fragmentShaderFromString:(NSString *)fragmentShaderString; +{ + if (!(self = [super initWithVertexShaderFromString:vertexShaderString fragmentShaderFromString:fragmentShaderString])) + { + return nil; + } + + inputRotation2 = kGPUImageNoRotation; + + hasSetFirstTexture = NO; + + hasReceivedFirstFrame = NO; + hasReceivedSecondFrame = NO; + firstFrameWasVideo = NO; + secondFrameWasVideo = NO; + firstFrameCheckDisabled = NO; + secondFrameCheckDisabled = NO; + + firstFrameTime = kCMTimeInvalid; + secondFrameTime = kCMTimeInvalid; + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + filterSecondTextureCoordinateAttribute = [filterProgram attributeIndex:@"inputTextureCoordinate2"]; + + filterInputTextureUniform2 = [filterProgram uniformIndex:@"inputImageTexture2"]; // This does assume a name of "inputImageTexture2" for second input texture in the fragment shader + glEnableVertexAttribArray(filterSecondTextureCoordinateAttribute); + }); + + return self; +} + +- (void)initializeAttributes; +{ + [super initializeAttributes]; + [filterProgram addAttribute:@"inputTextureCoordinate2"]; +} + +- (void)disableFirstFrameCheck; +{ + firstFrameCheckDisabled = YES; +} + +- (void)disableSecondFrameCheck; +{ + secondFrameCheckDisabled = YES; +} + +#pragma mark - +#pragma mark Rendering + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + if (self.preventRendering) + { + [firstInputFramebuffer unlock]; + [secondInputFramebuffer unlock]; + return; 
+ } + + [GPUImageContext setActiveShaderProgram:filterProgram]; + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + if (usingNextFrameForImageCapture) + { + [outputFramebuffer lock]; + } + + [self setUniformsForProgramAtIndex:0]; + + glClearColor(backgroundColorRed, backgroundColorGreen, backgroundColorBlue, backgroundColorAlpha); + glClear(GL_COLOR_BUFFER_BIT); + + glActiveTexture(GL_TEXTURE2); + glBindTexture(GL_TEXTURE_2D, [firstInputFramebuffer texture]); + glUniform1i(filterInputTextureUniform, 2); + + glActiveTexture(GL_TEXTURE3); + glBindTexture(GL_TEXTURE_2D, [secondInputFramebuffer texture]); + glUniform1i(filterInputTextureUniform2, 3); + + glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + glVertexAttribPointer(filterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); + glVertexAttribPointer(filterSecondTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, [[self class] textureCoordinatesForRotation:inputRotation2]); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + + [firstInputFramebuffer unlock]; + [secondInputFramebuffer unlock]; + if (usingNextFrameForImageCapture) + { + dispatch_semaphore_signal(imageCaptureSemaphore); + } +} + +#pragma mark - +#pragma mark GPUImageInput + +- (NSInteger)nextAvailableTextureIndex; +{ + if (hasSetFirstTexture) + { + return 1; + } + else + { + return 0; + } +} + +- (void)setInputFramebuffer:(GPUImageFramebuffer *)newInputFramebuffer atIndex:(NSInteger)textureIndex; +{ + if (textureIndex == 0) + { + firstInputFramebuffer = newInputFramebuffer; + hasSetFirstTexture = YES; + [firstInputFramebuffer lock]; + } + else + { + secondInputFramebuffer = newInputFramebuffer; + [secondInputFramebuffer lock]; + } +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + if (textureIndex == 0) + { + [super 
setInputSize:newSize atIndex:textureIndex]; + + if (CGSizeEqualToSize(newSize, CGSizeZero)) + { + hasSetFirstTexture = NO; + } + } +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + if (textureIndex == 0) + { + inputRotation = newInputRotation; + } + else + { + inputRotation2 = newInputRotation; + } +} + +- (CGSize)rotatedSize:(CGSize)sizeToRotate forIndex:(NSInteger)textureIndex; +{ + CGSize rotatedSize = sizeToRotate; + + GPUImageRotationMode rotationToCheck; + if (textureIndex == 0) + { + rotationToCheck = inputRotation; + } + else + { + rotationToCheck = inputRotation2; + } + + if (GPUImageRotationSwapsWidthAndHeight(rotationToCheck)) + { + rotatedSize.width = sizeToRotate.height; + rotatedSize.height = sizeToRotate.width; + } + + return rotatedSize; +} + +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +{ + // You can set up infinite update loops, so this helps to short circuit them + if (hasReceivedFirstFrame && hasReceivedSecondFrame) + { + return; + } + + BOOL updatedMovieFrameOppositeStillImage = NO; + + if (textureIndex == 0) + { + hasReceivedFirstFrame = YES; + firstFrameTime = frameTime; + if (secondFrameCheckDisabled) + { + hasReceivedSecondFrame = YES; + } + + if (!CMTIME_IS_INDEFINITE(frameTime)) + { + if (CMTIME_IS_INDEFINITE(secondFrameTime)) + { + updatedMovieFrameOppositeStillImage = YES; + } + } + } + else + { + hasReceivedSecondFrame = YES; + secondFrameTime = frameTime; + if (firstFrameCheckDisabled) + { + hasReceivedFirstFrame = YES; + } + + if (!CMTIME_IS_INDEFINITE(frameTime)) + { + if (CMTIME_IS_INDEFINITE(firstFrameTime)) + { + updatedMovieFrameOppositeStillImage = YES; + } + } + } + + // || (hasReceivedFirstFrame && secondFrameCheckDisabled) || (hasReceivedSecondFrame && firstFrameCheckDisabled) + if ((hasReceivedFirstFrame && hasReceivedSecondFrame) || updatedMovieFrameOppositeStillImage) + { + CMTime passOnFrameTime =
(!CMTIME_IS_INDEFINITE(firstFrameTime)) ? firstFrameTime : secondFrameTime; + [super newFrameReadyAtTime:passOnFrameTime atIndex:0]; // Bugfix when trying to record: always use time from first input (unless indefinite, in which case use the second input) + hasReceivedFirstFrame = NO; + hasReceivedSecondFrame = NO; + } +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassFilter.h new file mode 100755 index 00000000..23087f35 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassFilter.h @@ -0,0 +1,19 @@ +#import "GPUImageFilter.h" + +@interface GPUImageTwoPassFilter : GPUImageFilter +{ + GPUImageFramebuffer *secondOutputFramebuffer; + + GLProgram *secondFilterProgram; + GLint secondFilterPositionAttribute, secondFilterTextureCoordinateAttribute; + GLint secondFilterInputTextureUniform, secondFilterInputTextureUniform2; + + NSMutableDictionary *secondProgramUniformStateRestorationBlocks; +} + +// Initialization and teardown +- (id)initWithFirstStageVertexShaderFromString:(NSString *)firstStageVertexShaderString firstStageFragmentShaderFromString:(NSString *)firstStageFragmentShaderString secondStageVertexShaderFromString:(NSString *)secondStageVertexShaderString secondStageFragmentShaderFromString:(NSString *)secondStageFragmentShaderString; +- (id)initWithFirstStageFragmentShaderFromString:(NSString *)firstStageFragmentShaderString secondStageFragmentShaderFromString:(NSString *)secondStageFragmentShaderString; +- (void)initializeSecondaryAttributes; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassFilter.m new file mode 100755 index 00000000..9eb292b4 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassFilter.m @@ -0,0 +1,201 @@ +#import "GPUImageTwoPassFilter.h" + +@implementation GPUImageTwoPassFilter + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithFirstStageVertexShaderFromString:(NSString 
*)firstStageVertexShaderString firstStageFragmentShaderFromString:(NSString *)firstStageFragmentShaderString secondStageVertexShaderFromString:(NSString *)secondStageVertexShaderString secondStageFragmentShaderFromString:(NSString *)secondStageFragmentShaderString; +{ + if (!(self = [super initWithVertexShaderFromString:firstStageVertexShaderString fragmentShaderFromString:firstStageFragmentShaderString])) + { + return nil; + } + + secondProgramUniformStateRestorationBlocks = [NSMutableDictionary dictionaryWithCapacity:10]; + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + + secondFilterProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:secondStageVertexShaderString fragmentShaderString:secondStageFragmentShaderString]; + + if (!secondFilterProgram.initialized) + { + [self initializeSecondaryAttributes]; + + if (![secondFilterProgram link]) + { + NSString *progLog = [secondFilterProgram programLog]; + NSLog(@"Program link log: %@", progLog); + NSString *fragLog = [secondFilterProgram fragmentShaderLog]; + NSLog(@"Fragment shader compile log: %@", fragLog); + NSString *vertLog = [secondFilterProgram vertexShaderLog]; + NSLog(@"Vertex shader compile log: %@", vertLog); + secondFilterProgram = nil; + NSAssert(NO, @"Filter shader link failed"); + } + } + + secondFilterPositionAttribute = [secondFilterProgram attributeIndex:@"position"]; + secondFilterTextureCoordinateAttribute = [secondFilterProgram attributeIndex:@"inputTextureCoordinate"]; + secondFilterInputTextureUniform = [secondFilterProgram uniformIndex:@"inputImageTexture"]; // This does assume a name of "inputImageTexture" for the fragment shader + secondFilterInputTextureUniform2 = [secondFilterProgram uniformIndex:@"inputImageTexture2"]; // This does assume a name of "inputImageTexture2" for second input texture in the fragment shader + + [GPUImageContext setActiveShaderProgram:secondFilterProgram]; + + 
glEnableVertexAttribArray(secondFilterPositionAttribute); + glEnableVertexAttribArray(secondFilterTextureCoordinateAttribute); + }); + + return self; +} + +- (id)initWithFirstStageFragmentShaderFromString:(NSString *)firstStageFragmentShaderString secondStageFragmentShaderFromString:(NSString *)secondStageFragmentShaderString; +{ + if (!(self = [self initWithFirstStageVertexShaderFromString:kGPUImageVertexShaderString firstStageFragmentShaderFromString:firstStageFragmentShaderString secondStageVertexShaderFromString:kGPUImageVertexShaderString secondStageFragmentShaderFromString:secondStageFragmentShaderString])) + { + return nil; + } + + return self; +} + +- (void)initializeSecondaryAttributes; +{ + [secondFilterProgram addAttribute:@"position"]; + [secondFilterProgram addAttribute:@"inputTextureCoordinate"]; +} + +#pragma mark - +#pragma mark Managing targets + +- (GPUImageFramebuffer *)framebufferForOutput; +{ + return secondOutputFramebuffer; +} + +- (void)removeOutputFramebuffer; +{ + secondOutputFramebuffer = nil; +} + +#pragma mark - +#pragma mark Rendering + +- (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates; +{ + if (self.preventRendering) + { + [firstInputFramebuffer unlock]; + return; + } + + [GPUImageContext setActiveShaderProgram:filterProgram]; + + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + + [self setUniformsForProgramAtIndex:0]; + + glClearColor(backgroundColorRed, backgroundColorGreen, backgroundColorBlue, backgroundColorAlpha); + glClear(GL_COLOR_BUFFER_BIT); + + glActiveTexture(GL_TEXTURE2); + glBindTexture(GL_TEXTURE_2D, [firstInputFramebuffer texture]); + + glUniform1i(filterInputTextureUniform, 2); + + glVertexAttribPointer(filterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + 
glVertexAttribPointer(filterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + + [firstInputFramebuffer unlock]; + firstInputFramebuffer = nil; + + // This assumes that any two-pass filter that says it desires monochrome input is using the first pass for a luminance conversion, which can be dropped +// if (!currentlyReceivingMonochromeInput) +// { + // Run the first stage of the two-pass filter +// [super renderToTextureWithVertices:vertices textureCoordinates:textureCoordinates]; +// } + + // Run the second stage of the two-pass filter + secondOutputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:[self sizeOfFBO] textureOptions:self.outputTextureOptions onlyTexture:NO]; + [secondOutputFramebuffer activateFramebuffer]; + [GPUImageContext setActiveShaderProgram:secondFilterProgram]; + if (usingNextFrameForImageCapture) + { + [secondOutputFramebuffer lock]; + } + + [self setUniformsForProgramAtIndex:1]; + + glActiveTexture(GL_TEXTURE3); + glBindTexture(GL_TEXTURE_2D, [outputFramebuffer texture]); + glVertexAttribPointer(secondFilterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, [[self class] textureCoordinatesForRotation:kGPUImageNoRotation]); + + // TODO: Re-enable this monochrome optimization +// if (!currentlyReceivingMonochromeInput) +// { +// glActiveTexture(GL_TEXTURE3); +// glBindTexture(GL_TEXTURE_2D, outputTexture); +// glVertexAttribPointer(secondFilterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, [[self class] textureCoordinatesForRotation:kGPUImageNoRotation]); +// } +// else +// { +// glActiveTexture(GL_TEXTURE3); +// glBindTexture(GL_TEXTURE_2D, sourceTexture); +// glVertexAttribPointer(secondFilterTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); +// } + + glUniform1i(secondFilterInputTextureUniform, 3); + + glVertexAttribPointer(secondFilterPositionAttribute, 2, GL_FLOAT, 0, 0, vertices); + + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); + 
glClear(GL_COLOR_BUFFER_BIT); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + [outputFramebuffer unlock]; + outputFramebuffer = nil; + + if (usingNextFrameForImageCapture) + { + dispatch_semaphore_signal(imageCaptureSemaphore); + } +} + +- (void)setAndExecuteUniformStateCallbackAtIndex:(GLint)uniform forProgram:(GLProgram *)shaderProgram toBlock:(dispatch_block_t)uniformStateBlock; +{ +// TODO: Deal with the fact that two-pass filters may have the same shader program identifier + if (shaderProgram == filterProgram) + { + [uniformStateRestorationBlocks setObject:[uniformStateBlock copy] forKey:[NSNumber numberWithInt:uniform]]; + } + else + { + [secondProgramUniformStateRestorationBlocks setObject:[uniformStateBlock copy] forKey:[NSNumber numberWithInt:uniform]]; + } + uniformStateBlock(); +} + +- (void)setUniformsForProgramAtIndex:(NSUInteger)programIndex; +{ + if (programIndex == 0) + { + [uniformStateRestorationBlocks enumerateKeysAndObjectsUsingBlock:^(id key, id obj, BOOL *stop){ + dispatch_block_t currentBlock = obj; + currentBlock(); + }]; + } + else + { + [secondProgramUniformStateRestorationBlocks enumerateKeysAndObjectsUsingBlock:^(id key, id obj, BOOL *stop){ + dispatch_block_t currentBlock = obj; + currentBlock(); + }]; + } +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassTextureSamplingFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassTextureSamplingFilter.h new file mode 100644 index 00000000..73ab79d3 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassTextureSamplingFilter.h @@ -0,0 +1,13 @@ +#import "GPUImageTwoPassFilter.h" + +@interface GPUImageTwoPassTextureSamplingFilter : GPUImageTwoPassFilter +{ + GLint verticalPassTexelWidthOffsetUniform, verticalPassTexelHeightOffsetUniform, horizontalPassTexelWidthOffsetUniform, horizontalPassTexelHeightOffsetUniform; + GLfloat verticalPassTexelWidthOffset, verticalPassTexelHeightOffset, horizontalPassTexelWidthOffset, horizontalPassTexelHeightOffset; + CGFloat _verticalTexelSpacing, 
_horizontalTexelSpacing; +} + +// This sets the spacing between texels (in pixels) when sampling for the first. By default, this is 1.0 +@property(readwrite, nonatomic) CGFloat verticalTexelSpacing, horizontalTexelSpacing; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassTextureSamplingFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassTextureSamplingFilter.m new file mode 100644 index 00000000..b6a2ec58 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassTextureSamplingFilter.m @@ -0,0 +1,85 @@ +#import "GPUImageTwoPassTextureSamplingFilter.h" + +@implementation GPUImageTwoPassTextureSamplingFilter + +@synthesize verticalTexelSpacing = _verticalTexelSpacing; +@synthesize horizontalTexelSpacing = _horizontalTexelSpacing; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithFirstStageVertexShaderFromString:(NSString *)firstStageVertexShaderString firstStageFragmentShaderFromString:(NSString *)firstStageFragmentShaderString secondStageVertexShaderFromString:(NSString *)secondStageVertexShaderString secondStageFragmentShaderFromString:(NSString *)secondStageFragmentShaderString +{ + if (!(self = [super initWithFirstStageVertexShaderFromString:firstStageVertexShaderString firstStageFragmentShaderFromString:firstStageFragmentShaderString secondStageVertexShaderFromString:secondStageVertexShaderString secondStageFragmentShaderFromString:secondStageFragmentShaderString])) + { + return nil; + } + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + + verticalPassTexelWidthOffsetUniform = [filterProgram uniformIndex:@"texelWidthOffset"]; + verticalPassTexelHeightOffsetUniform = [filterProgram uniformIndex:@"texelHeightOffset"]; + + horizontalPassTexelWidthOffsetUniform = [secondFilterProgram uniformIndex:@"texelWidthOffset"]; + horizontalPassTexelHeightOffsetUniform = [secondFilterProgram uniformIndex:@"texelHeightOffset"]; + }); + + self.verticalTexelSpacing = 1.0; + 
self.horizontalTexelSpacing = 1.0; + + return self; +} + +- (void)setUniformsForProgramAtIndex:(NSUInteger)programIndex; +{ + [super setUniformsForProgramAtIndex:programIndex]; + + if (programIndex == 0) + { + glUniform1f(verticalPassTexelWidthOffsetUniform, verticalPassTexelWidthOffset); + glUniform1f(verticalPassTexelHeightOffsetUniform, verticalPassTexelHeightOffset); + } + else + { + glUniform1f(horizontalPassTexelWidthOffsetUniform, horizontalPassTexelWidthOffset); + glUniform1f(horizontalPassTexelHeightOffsetUniform, horizontalPassTexelHeightOffset); + } +} + +- (void)setupFilterForSize:(CGSize)filterFrameSize; +{ + runSynchronouslyOnVideoProcessingQueue(^{ + // The first pass through the framebuffer may rotate the inbound image, so need to account for that by changing up the kernel ordering for that pass + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + verticalPassTexelWidthOffset = _verticalTexelSpacing / filterFrameSize.height; + verticalPassTexelHeightOffset = 0.0; + } + else + { + verticalPassTexelWidthOffset = 0.0; + verticalPassTexelHeightOffset = _verticalTexelSpacing / filterFrameSize.height; + } + + horizontalPassTexelWidthOffset = _horizontalTexelSpacing / filterFrameSize.width; + horizontalPassTexelHeightOffset = 0.0; + }); +} + +#pragma mark - +#pragma mark Accessors + +- (void)setVerticalTexelSpacing:(CGFloat)newValue; +{ + _verticalTexelSpacing = newValue; + [self setupFilterForSize:[self sizeOfFBO]]; +} + +- (void)setHorizontalTexelSpacing:(CGFloat)newValue; +{ + _horizontalTexelSpacing = newValue; + [self setupFilterForSize:[self sizeOfFBO]]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageUIElement.h b/LFLiveKit/Vendor/GPUImage/GPUImageUIElement.h new file mode 100644 index 00000000..984ff2ad --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageUIElement.h @@ -0,0 +1,15 @@ +#import "GPUImageOutput.h" + +@interface GPUImageUIElement : GPUImageOutput + +// Initialization and teardown +- (id)initWithView:(UIView 
*)inputView; +- (id)initWithLayer:(CALayer *)inputLayer; + +// Layer management +- (CGSize)layerSizeInPixels; +- (void)update; +- (void)updateUsingCurrentTime; +- (void)updateWithTimestamp:(CMTime)frameTime; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageUIElement.m b/LFLiveKit/Vendor/GPUImage/GPUImageUIElement.m new file mode 100644 index 00000000..33208924 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageUIElement.m @@ -0,0 +1,123 @@ +#import "GPUImageUIElement.h" + +@interface GPUImageUIElement () +{ + UIView *view; + CALayer *layer; + + CGSize previousLayerSizeInPixels; + CMTime time; + NSTimeInterval actualTimeOfLastUpdate; +} + +@end + +@implementation GPUImageUIElement + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithView:(UIView *)inputView; +{ + if (!(self = [super init])) + { + return nil; + } + + view = inputView; + layer = inputView.layer; + + previousLayerSizeInPixels = CGSizeZero; + [self update]; + + return self; +} + +- (id)initWithLayer:(CALayer *)inputLayer; +{ + if (!(self = [super init])) + { + return nil; + } + + view = nil; + layer = inputLayer; + + previousLayerSizeInPixels = CGSizeZero; + [self update]; + + return self; +} + +#pragma mark - +#pragma mark Layer management + +- (CGSize)layerSizeInPixels; +{ + CGSize pointSize = layer.bounds.size; + return CGSizeMake(layer.contentsScale * pointSize.width, layer.contentsScale * pointSize.height); +} + +- (void)update; +{ + [self updateWithTimestamp:kCMTimeIndefinite]; +} + +- (void)updateUsingCurrentTime; +{ + if(CMTIME_IS_INVALID(time)) { + time = CMTimeMakeWithSeconds(0, 600); + actualTimeOfLastUpdate = [NSDate timeIntervalSinceReferenceDate]; + } else { + NSTimeInterval now = [NSDate timeIntervalSinceReferenceDate]; + NSTimeInterval diff = now - actualTimeOfLastUpdate; + time = CMTimeAdd(time, CMTimeMakeWithSeconds(diff, 600)); + actualTimeOfLastUpdate = now; + } + + [self updateWithTimestamp:time]; +} + +- (void)updateWithTimestamp:(CMTime)frameTime; 
+{ + [GPUImageContext useImageProcessingContext]; + + CGSize layerPixelSize = [self layerSizeInPixels]; + + GLubyte *imageData = (GLubyte *) calloc(1, (int)layerPixelSize.width * (int)layerPixelSize.height * 4); + + CGColorSpaceRef genericRGBColorspace = CGColorSpaceCreateDeviceRGB(); + CGContextRef imageContext = CGBitmapContextCreate(imageData, (int)layerPixelSize.width, (int)layerPixelSize.height, 8, (int)layerPixelSize.width * 4, genericRGBColorspace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst); +// CGContextRotateCTM(imageContext, M_PI_2); + CGContextTranslateCTM(imageContext, 0.0f, layerPixelSize.height); + CGContextScaleCTM(imageContext, layer.contentsScale, -layer.contentsScale); + // CGContextSetBlendMode(imageContext, kCGBlendModeCopy); // From Technical Q&A QA1708: http://developer.apple.com/library/ios/#qa/qa1708/_index.html + + [layer renderInContext:imageContext]; + + CGContextRelease(imageContext); + CGColorSpaceRelease(genericRGBColorspace); + + // TODO: This may not work + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:layerPixelSize textureOptions:self.outputTextureOptions onlyTexture:YES]; + + glBindTexture(GL_TEXTURE_2D, [outputFramebuffer texture]); + // no need to use self.outputTextureOptions here, we always need these texture options + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, (int)layerPixelSize.width, (int)layerPixelSize.height, 0, GL_BGRA, GL_UNSIGNED_BYTE, imageData); + + free(imageData); + + for (id currentTarget in targets) + { + if (currentTarget != self.targetToIgnoreForUpdates) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + NSInteger textureIndexOfTarget = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + + [currentTarget setInputSize:layerPixelSize atIndex:textureIndexOfTarget]; + [currentTarget newFrameReadyAtTime:frameTime atIndex:textureIndexOfTarget]; + } + } +} + +@end diff --git 
a/LFLiveKit/Vendor/GPUImage/GPUImageUnsharpMaskFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageUnsharpMaskFilter.h new file mode 100755 index 00000000..9d8aff01 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageUnsharpMaskFilter.h @@ -0,0 +1,16 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImageGaussianBlurFilter; + +@interface GPUImageUnsharpMaskFilter : GPUImageFilterGroup +{ + GPUImageGaussianBlurFilter *blurFilter; + GPUImageFilter *unsharpMaskFilter; +} +// The blur radius of the underlying Gaussian blur. The default is 4.0. +@property (readwrite, nonatomic) CGFloat blurRadiusInPixels; + +// The strength of the sharpening, from 0.0 on up, with a default of 1.0 +@property(readwrite, nonatomic) CGFloat intensity; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageUnsharpMaskFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageUnsharpMaskFilter.m new file mode 100755 index 00000000..542c5ea3 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageUnsharpMaskFilter.m @@ -0,0 +1,101 @@ +#import "GPUImageUnsharpMaskFilter.h" +#import "GPUImageFilter.h" +#import "GPUImageTwoInputFilter.h" +#import "GPUImageGaussianBlurFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageUnsharpMaskFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + varying highp vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform highp float intensity; + + void main() + { + lowp vec4 sharpImageColor = texture2D(inputImageTexture, textureCoordinate); + lowp vec3 blurredImageColor = texture2D(inputImageTexture2, textureCoordinate2).rgb; + + gl_FragColor = vec4(sharpImageColor.rgb * intensity + blurredImageColor * (1.0 - intensity), sharpImageColor.a); +// gl_FragColor = mix(blurredImageColor, sharpImageColor, intensity); +// gl_FragColor = vec4(sharpImageColor.rgb - (blurredImageColor.rgb * intensity), 1.0); + } +); +#else +NSString *const 
kGPUImageUnsharpMaskFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 textureCoordinate2; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + + uniform float intensity; + + void main() + { + vec4 sharpImageColor = texture2D(inputImageTexture, textureCoordinate); + vec3 blurredImageColor = texture2D(inputImageTexture2, textureCoordinate2).rgb; + + gl_FragColor = vec4(sharpImageColor.rgb * intensity + blurredImageColor * (1.0 - intensity), sharpImageColor.a); + // gl_FragColor = mix(blurredImageColor, sharpImageColor, intensity); + // gl_FragColor = vec4(sharpImageColor.rgb - (blurredImageColor.rgb * intensity), 1.0); + } +); +#endif + +@implementation GPUImageUnsharpMaskFilter + +@synthesize blurRadiusInPixels; +@synthesize intensity = _intensity; + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + // First pass: apply a variable Gaussian blur + blurFilter = [[GPUImageGaussianBlurFilter alloc] init]; + [self addFilter:blurFilter]; + + // Second pass: combine the blurred image with the original sharp one + unsharpMaskFilter = [[GPUImageTwoInputFilter alloc] initWithFragmentShaderFromString:kGPUImageUnsharpMaskFragmentShaderString]; + [self addFilter:unsharpMaskFilter]; + + // Texture location 0 needs to be the sharp image for both the blur and the second stage processing + [blurFilter addTarget:unsharpMaskFilter atTextureLocation:1]; + + self.initialFilters = [NSArray arrayWithObjects:blurFilter, unsharpMaskFilter, nil]; + self.terminalFilter = unsharpMaskFilter; + + self.intensity = 1.0; + self.blurRadiusInPixels = 4.0; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setBlurRadiusInPixels:(CGFloat)newValue; +{ + blurFilter.blurRadiusInPixels = newValue; +} + +- (CGFloat)blurRadiusInPixels; +{ + return blurFilter.blurRadiusInPixels; +} + +- (void)setIntensity:(CGFloat)newValue; +{ + _intensity = newValue; + [unsharpMaskFilter setFloat:newValue 
forUniformName:@"intensity"]; +} + +@end \ No newline at end of file diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageVideoCamera.h b/LFLiveKit/Vendor/GPUImage/GPUImageVideoCamera.h new file mode 100755 index 00000000..458020cf --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageVideoCamera.h @@ -0,0 +1,156 @@ +#import +#import +#import +#import "GPUImageContext.h" +#import "GPUImageOutput.h" +#import "GPUImageColorConversion.h" + +//Optionally override the YUV to RGB matrices +void setColorConversion601( GLfloat conversionMatrix[9] ); +void setColorConversion601FullRange( GLfloat conversionMatrix[9] ); +void setColorConversion709( GLfloat conversionMatrix[9] ); + + +//Delegate Protocal for Face Detection. +@protocol GPUImageVideoCameraDelegate + +@optional +- (void)willOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer; +@end + + +/** + A GPUImageOutput that provides frames from either camera +*/ +@interface GPUImageVideoCamera : GPUImageOutput +{ + NSUInteger numberOfFramesCaptured; + CGFloat totalFrameTimeDuringCapture; + + AVCaptureSession *_captureSession; + AVCaptureDevice *_inputCamera; + AVCaptureDevice *_microphone; + AVCaptureDeviceInput *videoInput; + AVCaptureVideoDataOutput *videoOutput; + + BOOL capturePaused; + GPUImageRotationMode outputRotation, internalRotation; + dispatch_semaphore_t frameRenderingSemaphore; + + BOOL captureAsYUV; + GLuint luminanceTexture, chrominanceTexture; + + __unsafe_unretained id _delegate; +} + +/// Whether or not the underlying AVCaptureSession is running +@property(readonly, nonatomic) BOOL isRunning; + +/// The AVCaptureSession used to capture from the camera +@property(readonly, retain, nonatomic) AVCaptureSession *captureSession; + +/// This enables the capture session preset to be changed on the fly +@property (readwrite, nonatomic, copy) NSString *captureSessionPreset; + +/// This sets the frame rate of the camera (iOS 5 and above only) +/** + Setting this to 0 or below will set the frame rate back to the default 
setting for a particular preset. + */ +@property (readwrite) int32_t frameRate; + +/// Easy way to tell which cameras are present on device +@property (readonly, getter = isFrontFacingCameraPresent) BOOL frontFacingCameraPresent; +@property (readonly, getter = isBackFacingCameraPresent) BOOL backFacingCameraPresent; + +/// This enables the benchmarking mode, which logs out instantaneous and average frame times to the console +@property(readwrite, nonatomic) BOOL runBenchmark; + +/// Use this property to manage camera settings. Focus point, exposure point, etc. +@property(readonly) AVCaptureDevice *inputCamera; + +/// This determines the rotation applied to the output image, based on the source material +@property(readwrite, nonatomic) UIInterfaceOrientation outputImageOrientation; + +/// These properties determine whether or not the two camera orientations should be mirrored. By default, both are NO. +@property(readwrite, nonatomic) BOOL horizontallyMirrorFrontFacingCamera, horizontallyMirrorRearFacingCamera; + +@property(nonatomic, assign) id delegate; + +/// @name Initialization and teardown + +/** Begin a capture session + + See AVCaptureSession for acceptable values + + @param sessionPreset Session preset to use + @param cameraPosition Camera to capture from + */ +- (id)initWithSessionPreset:(NSString *)sessionPreset cameraPosition:(AVCaptureDevicePosition)cameraPosition; + +/** Add audio capture to the session. Adding inputs and outputs freezes the capture session momentarily, so you + can use this method to add the audio inputs and outputs early, if you're going to set the audioEncodingTarget + later. Returns YES is the audio inputs and outputs were added, or NO if they had already been added. + */ +- (BOOL)addAudioInputsAndOutputs; + +/** Remove the audio capture inputs and outputs from this session. Returns YES if the audio inputs and outputs + were removed, or NO is they hadn't already been added. 
+ */ +- (BOOL)removeAudioInputsAndOutputs; + +/** Tear down the capture session + */ +- (void)removeInputsAndOutputs; + +/// @name Manage the camera video stream + +/** Start camera capturing + */ +- (void)startCameraCapture; + +/** Stop camera capturing + */ +- (void)stopCameraCapture; + +/** Pause camera capturing + */ +- (void)pauseCameraCapture; + +/** Resume camera capturing + */ +- (void)resumeCameraCapture; + +/** Process a video sample + @param sampleBuffer Buffer to process + */ +- (void)processVideoSampleBuffer:(CMSampleBufferRef)sampleBuffer; + +/** Process an audio sample + @param sampleBuffer Buffer to process + */ +- (void)processAudioSampleBuffer:(CMSampleBufferRef)sampleBuffer; + +/** Get the position (front, rear) of the source camera + */ +- (AVCaptureDevicePosition)cameraPosition; + +/** Get the AVCaptureConnection of the source camera + */ +- (AVCaptureConnection *)videoCaptureConnection; + +/** This flips between the front and rear cameras + */ +- (void)rotateCamera; + +/// @name Benchmarking + +/** When benchmarking is enabled, this will keep a running average of the time from uploading, processing, and final recording or display + */ +- (CGFloat)averageFrameDurationDuringCapture; + +- (void)resetBenchmarkAverage; + ++ (BOOL)isBackFacingCameraPresent; ++ (BOOL)isFrontFacingCameraPresent; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageVideoCamera.m b/LFLiveKit/Vendor/GPUImage/GPUImageVideoCamera.m new file mode 100644 index 00000000..18aa60c3 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageVideoCamera.m @@ -0,0 +1,1062 @@ +#import "GPUImageVideoCamera.h" +#import "GPUImageMovieWriter.h" +#import "GPUImageFilter.h" + +void setColorConversion601( GLfloat conversionMatrix[9] ) +{ + kColorConversion601 = conversionMatrix; +} + +void setColorConversion601FullRange( GLfloat conversionMatrix[9] ) +{ + kColorConversion601FullRange = conversionMatrix; +} + +void setColorConversion709( GLfloat conversionMatrix[9] ) +{ + kColorConversion709 
= conversionMatrix; +} + +#pragma mark - +#pragma mark Private methods and instance variables + +@interface GPUImageVideoCamera () +{ + AVCaptureDeviceInput *audioInput; + AVCaptureAudioDataOutput *audioOutput; + NSDate *startingCaptureTime; + + dispatch_queue_t cameraProcessingQueue, audioProcessingQueue; + + GLProgram *yuvConversionProgram; + GLint yuvConversionPositionAttribute, yuvConversionTextureCoordinateAttribute; + GLint yuvConversionLuminanceTextureUniform, yuvConversionChrominanceTextureUniform; + GLint yuvConversionMatrixUniform; + const GLfloat *_preferredConversion; + + BOOL isFullYUVRange; + + int imageBufferWidth, imageBufferHeight; + + BOOL addedAudioInputsDueToEncodingTarget; +} + +- (void)updateOrientationSendToTargets; +- (void)convertYUVToRGBOutput; + +@end + +@implementation GPUImageVideoCamera + +@synthesize captureSessionPreset = _captureSessionPreset; +@synthesize captureSession = _captureSession; +@synthesize inputCamera = _inputCamera; +@synthesize runBenchmark = _runBenchmark; +@synthesize outputImageOrientation = _outputImageOrientation; +@synthesize delegate = _delegate; +@synthesize horizontallyMirrorFrontFacingCamera = _horizontallyMirrorFrontFacingCamera, horizontallyMirrorRearFacingCamera = _horizontallyMirrorRearFacingCamera; +@synthesize frameRate = _frameRate; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithSessionPreset:AVCaptureSessionPreset640x480 cameraPosition:AVCaptureDevicePositionBack])) + { + return nil; + } + + return self; +} + +- (id)initWithSessionPreset:(NSString *)sessionPreset cameraPosition:(AVCaptureDevicePosition)cameraPosition; +{ + if (!(self = [super init])) + { + return nil; + } + + cameraProcessingQueue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH,0); + audioProcessingQueue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_LOW,0); + + frameRenderingSemaphore = dispatch_semaphore_create(1); + + _frameRate = 0; // This will not set 
frame rate unless this value gets set to 1 or above + _runBenchmark = NO; + capturePaused = NO; + outputRotation = kGPUImageNoRotation; + internalRotation = kGPUImageNoRotation; + captureAsYUV = YES; + _preferredConversion = kColorConversion709; + + // Grab the back-facing or front-facing camera + _inputCamera = nil; + NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]; + for (AVCaptureDevice *device in devices) + { + if ([device position] == cameraPosition) + { + _inputCamera = device; + } + } + + if (!_inputCamera) { + return nil; + } + + // Create the capture session + _captureSession = [[AVCaptureSession alloc] init]; + + [_captureSession beginConfiguration]; + + // Add the video input + NSError *error = nil; + videoInput = [[AVCaptureDeviceInput alloc] initWithDevice:_inputCamera error:&error]; + if ([_captureSession canAddInput:videoInput]) + { + [_captureSession addInput:videoInput]; + } + + // Add the video frame output + videoOutput = [[AVCaptureVideoDataOutput alloc] init]; + [videoOutput setAlwaysDiscardsLateVideoFrames:NO]; + +// if (captureAsYUV && [GPUImageContext deviceSupportsRedTextures]) + if (captureAsYUV && [GPUImageContext supportsFastTextureUpload]) + { + BOOL supportsFullYUVRange = NO; + NSArray *supportedPixelFormats = videoOutput.availableVideoCVPixelFormatTypes; + for (NSNumber *currentPixelFormat in supportedPixelFormats) + { + if ([currentPixelFormat intValue] == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) + { + supportsFullYUVRange = YES; + } + } + + if (supportsFullYUVRange) + { + [videoOutput setVideoSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange] forKey:(id)kCVPixelBufferPixelFormatTypeKey]]; + isFullYUVRange = YES; + } + else + { + [videoOutput setVideoSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange] forKey:(id)kCVPixelBufferPixelFormatTypeKey]]; + isFullYUVRange = NO; + 
} + } + else + { + [videoOutput setVideoSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey]]; + } + + runSynchronouslyOnVideoProcessingQueue(^{ + + if (captureAsYUV) + { + [GPUImageContext useImageProcessingContext]; + // if ([GPUImageContext deviceSupportsRedTextures]) + // { + // yuvConversionProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:kGPUImageVertexShaderString fragmentShaderString:kGPUImageYUVVideoRangeConversionForRGFragmentShaderString]; + // } + // else + // { + if (isFullYUVRange) + { + yuvConversionProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:kGPUImageVertexShaderString fragmentShaderString:kGPUImageYUVFullRangeConversionForLAFragmentShaderString]; + } + else + { + yuvConversionProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:kGPUImageVertexShaderString fragmentShaderString:kGPUImageYUVVideoRangeConversionForLAFragmentShaderString]; + } + + // } + + if (!yuvConversionProgram.initialized) + { + [yuvConversionProgram addAttribute:@"position"]; + [yuvConversionProgram addAttribute:@"inputTextureCoordinate"]; + + if (![yuvConversionProgram link]) + { + NSString *progLog = [yuvConversionProgram programLog]; + NSLog(@"Program link log: %@", progLog); + NSString *fragLog = [yuvConversionProgram fragmentShaderLog]; + NSLog(@"Fragment shader compile log: %@", fragLog); + NSString *vertLog = [yuvConversionProgram vertexShaderLog]; + NSLog(@"Vertex shader compile log: %@", vertLog); + yuvConversionProgram = nil; + NSAssert(NO, @"Filter shader link failed"); + } + } + + yuvConversionPositionAttribute = [yuvConversionProgram attributeIndex:@"position"]; + yuvConversionTextureCoordinateAttribute = [yuvConversionProgram attributeIndex:@"inputTextureCoordinate"]; + yuvConversionLuminanceTextureUniform = [yuvConversionProgram uniformIndex:@"luminanceTexture"]; + 
yuvConversionChrominanceTextureUniform = [yuvConversionProgram uniformIndex:@"chrominanceTexture"]; + yuvConversionMatrixUniform = [yuvConversionProgram uniformIndex:@"colorConversionMatrix"]; + + [GPUImageContext setActiveShaderProgram:yuvConversionProgram]; + + glEnableVertexAttribArray(yuvConversionPositionAttribute); + glEnableVertexAttribArray(yuvConversionTextureCoordinateAttribute); + } + }); + + [videoOutput setSampleBufferDelegate:self queue:cameraProcessingQueue]; + if ([_captureSession canAddOutput:videoOutput]) + { + [_captureSession addOutput:videoOutput]; + } + else + { + NSLog(@"Couldn't add video output"); + return nil; + } + + _captureSessionPreset = sessionPreset; + [_captureSession setSessionPreset:_captureSessionPreset]; + +// This will let you get 60 FPS video from the 720p preset on an iPhone 4S, but only that device and that preset +// AVCaptureConnection *conn = [videoOutput connectionWithMediaType:AVMediaTypeVideo]; +// +// if (conn.supportsVideoMinFrameDuration) +// conn.videoMinFrameDuration = CMTimeMake(1,60); +// if (conn.supportsVideoMaxFrameDuration) +// conn.videoMaxFrameDuration = CMTimeMake(1,60); + + [_captureSession commitConfiguration]; + + return self; +} + +- (GPUImageFramebuffer *)framebufferForOutput; +{ + return outputFramebuffer; +} + +- (void)dealloc +{ + [self stopCameraCapture]; + [videoOutput setSampleBufferDelegate:nil queue:dispatch_get_main_queue()]; + [audioOutput setSampleBufferDelegate:nil queue:dispatch_get_main_queue()]; + + [self removeInputsAndOutputs]; + +// ARC forbids explicit message send of 'release'; since iOS 6 even for dispatch_release() calls: stripping it out in that case is required. 
+#if !OS_OBJECT_USE_OBJC + if (frameRenderingSemaphore != NULL) + { + dispatch_release(frameRenderingSemaphore); + } +#endif +} + +- (BOOL)addAudioInputsAndOutputs +{ + if (audioOutput) + return NO; + + [_captureSession beginConfiguration]; + + _microphone = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio]; + audioInput = [AVCaptureDeviceInput deviceInputWithDevice:_microphone error:nil]; + if ([_captureSession canAddInput:audioInput]) + { + [_captureSession addInput:audioInput]; + } + audioOutput = [[AVCaptureAudioDataOutput alloc] init]; + + if ([_captureSession canAddOutput:audioOutput]) + { + [_captureSession addOutput:audioOutput]; + } + else + { + NSLog(@"Couldn't add audio output"); + } + [audioOutput setSampleBufferDelegate:self queue:audioProcessingQueue]; + + [_captureSession commitConfiguration]; + return YES; +} + +- (BOOL)removeAudioInputsAndOutputs +{ + if (!audioOutput) + return NO; + + [_captureSession beginConfiguration]; + [_captureSession removeInput:audioInput]; + [_captureSession removeOutput:audioOutput]; + audioInput = nil; + audioOutput = nil; + _microphone = nil; + [_captureSession commitConfiguration]; + return YES; +} + +- (void)removeInputsAndOutputs; +{ + [_captureSession beginConfiguration]; + if (videoInput) { + [_captureSession removeInput:videoInput]; + [_captureSession removeOutput:videoOutput]; + videoInput = nil; + videoOutput = nil; + } + if (_microphone != nil) + { + [_captureSession removeInput:audioInput]; + [_captureSession removeOutput:audioOutput]; + audioInput = nil; + audioOutput = nil; + _microphone = nil; + } + [_captureSession commitConfiguration]; +} + +#pragma mark - +#pragma mark Managing targets + +- (void)addTarget:(id)newTarget atTextureLocation:(NSInteger)textureLocation; +{ + [super addTarget:newTarget atTextureLocation:textureLocation]; + + [newTarget setInputRotation:outputRotation atIndex:textureLocation]; +} + +#pragma mark - +#pragma mark Manage the camera video stream + +- (BOOL)isRunning; 
+{ + return [_captureSession isRunning]; +} + +- (void)startCameraCapture; +{ + if (![_captureSession isRunning]) + { + startingCaptureTime = [NSDate date]; + [_captureSession startRunning]; + }; +} + +- (void)stopCameraCapture; +{ + if ([_captureSession isRunning]) + { + [_captureSession stopRunning]; + } +} + +- (void)pauseCameraCapture; +{ + capturePaused = YES; +} + +- (void)resumeCameraCapture; +{ + capturePaused = NO; +} + +- (void)rotateCamera +{ + if (self.frontFacingCameraPresent == NO) + return; + + NSError *error; + AVCaptureDeviceInput *newVideoInput; + AVCaptureDevicePosition currentCameraPosition = [[videoInput device] position]; + + if (currentCameraPosition == AVCaptureDevicePositionBack) + { + currentCameraPosition = AVCaptureDevicePositionFront; + } + else + { + currentCameraPosition = AVCaptureDevicePositionBack; + } + + AVCaptureDevice *backFacingCamera = nil; + NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]; + for (AVCaptureDevice *device in devices) + { + if ([device position] == currentCameraPosition) + { + backFacingCamera = device; + } + } + newVideoInput = [[AVCaptureDeviceInput alloc] initWithDevice:backFacingCamera error:&error]; + + if (newVideoInput != nil) + { + [_captureSession beginConfiguration]; + + [_captureSession removeInput:videoInput]; + if ([_captureSession canAddInput:newVideoInput]) + { + [_captureSession addInput:newVideoInput]; + videoInput = newVideoInput; + } + else + { + [_captureSession addInput:videoInput]; + } + //captureSession.sessionPreset = oriPreset; + [_captureSession commitConfiguration]; + } + + _inputCamera = backFacingCamera; + [self setOutputImageOrientation:_outputImageOrientation]; +} + +- (AVCaptureDevicePosition)cameraPosition +{ + return [[videoInput device] position]; +} + ++ (BOOL)isBackFacingCameraPresent; +{ + NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]; + + for (AVCaptureDevice *device in devices) + { + if ([device position] == 
AVCaptureDevicePositionBack) + return YES; + } + + return NO; +} + +- (BOOL)isBackFacingCameraPresent +{ + return [GPUImageVideoCamera isBackFacingCameraPresent]; +} + ++ (BOOL)isFrontFacingCameraPresent; +{ + NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]; + + for (AVCaptureDevice *device in devices) + { + if ([device position] == AVCaptureDevicePositionFront) + return YES; + } + + return NO; +} + +- (BOOL)isFrontFacingCameraPresent +{ + return [GPUImageVideoCamera isFrontFacingCameraPresent]; +} + +- (void)setCaptureSessionPreset:(NSString *)captureSessionPreset; +{ + [_captureSession beginConfiguration]; + + _captureSessionPreset = captureSessionPreset; + [_captureSession setSessionPreset:_captureSessionPreset]; + + [_captureSession commitConfiguration]; +} + +- (void)setFrameRate:(int32_t)frameRate; +{ + _frameRate = frameRate; + + if (_frameRate > 0) + { + if ([_inputCamera respondsToSelector:@selector(setActiveVideoMinFrameDuration:)] && + [_inputCamera respondsToSelector:@selector(setActiveVideoMaxFrameDuration:)]) { + + NSError *error; + [_inputCamera lockForConfiguration:&error]; + if (error == nil) { +#if defined(__IPHONE_7_0) + [_inputCamera setActiveVideoMinFrameDuration:CMTimeMake(1, _frameRate)]; + [_inputCamera setActiveVideoMaxFrameDuration:CMTimeMake(1, _frameRate)]; +#endif + } + [_inputCamera unlockForConfiguration]; + + } else { + + for (AVCaptureConnection *connection in videoOutput.connections) + { +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + if ([connection respondsToSelector:@selector(setVideoMinFrameDuration:)]) + connection.videoMinFrameDuration = CMTimeMake(1, _frameRate); + + if ([connection respondsToSelector:@selector(setVideoMaxFrameDuration:)]) + connection.videoMaxFrameDuration = CMTimeMake(1, _frameRate); +#pragma clang diagnostic pop + } + } + + } + else + { + if ([_inputCamera respondsToSelector:@selector(setActiveVideoMinFrameDuration:)] && + 
[_inputCamera respondsToSelector:@selector(setActiveVideoMaxFrameDuration:)]) { + + NSError *error; + [_inputCamera lockForConfiguration:&error]; + if (error == nil) { +#if defined(__IPHONE_7_0) + [_inputCamera setActiveVideoMinFrameDuration:kCMTimeInvalid]; + [_inputCamera setActiveVideoMaxFrameDuration:kCMTimeInvalid]; +#endif + } + [_inputCamera unlockForConfiguration]; + + } else { + + for (AVCaptureConnection *connection in videoOutput.connections) + { +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + if ([connection respondsToSelector:@selector(setVideoMinFrameDuration:)]) + connection.videoMinFrameDuration = kCMTimeInvalid; // This sets videoMinFrameDuration back to default + + if ([connection respondsToSelector:@selector(setVideoMaxFrameDuration:)]) + connection.videoMaxFrameDuration = kCMTimeInvalid; // This sets videoMaxFrameDuration back to default +#pragma clang diagnostic pop + } + } + + } +} + +- (int32_t)frameRate; +{ + return _frameRate; +} + +- (AVCaptureConnection *)videoCaptureConnection { + for (AVCaptureConnection *connection in [videoOutput connections] ) { + for ( AVCaptureInputPort *port in [connection inputPorts] ) { + if ( [[port mediaType] isEqual:AVMediaTypeVideo] ) { + return connection; + } + } + } + + return nil; +} + +#define INITIALFRAMESTOIGNOREFORBENCHMARK 5 + +- (void)updateTargetsForVideoCameraUsingCacheTextureAtWidth:(int)bufferWidth height:(int)bufferHeight time:(CMTime)currentTime; +{ + // First, update all the framebuffers in the targets + for (id currentTarget in targets) + { + if ([currentTarget enabled]) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + NSInteger textureIndexOfTarget = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + + if (currentTarget != self.targetToIgnoreForUpdates) + { + [currentTarget setInputRotation:outputRotation atIndex:textureIndexOfTarget]; + [currentTarget setInputSize:CGSizeMake(bufferWidth, bufferHeight) 
atIndex:textureIndexOfTarget]; + + if ([currentTarget wantsMonochromeInput] && captureAsYUV) + { + [currentTarget setCurrentlyReceivingMonochromeInput:YES]; + // TODO: Replace optimization for monochrome output + [currentTarget setInputFramebuffer:outputFramebuffer atIndex:textureIndexOfTarget]; + } + else + { + [currentTarget setCurrentlyReceivingMonochromeInput:NO]; + [currentTarget setInputFramebuffer:outputFramebuffer atIndex:textureIndexOfTarget]; + } + } + else + { + [currentTarget setInputRotation:outputRotation atIndex:textureIndexOfTarget]; + [currentTarget setInputFramebuffer:outputFramebuffer atIndex:textureIndexOfTarget]; + } + } + } + + // Then release our hold on the local framebuffer to send it back to the cache as soon as it's no longer needed + [outputFramebuffer unlock]; + outputFramebuffer = nil; + + // Finally, trigger rendering as needed + for (id currentTarget in targets) + { + if ([currentTarget enabled]) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + NSInteger textureIndexOfTarget = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + + if (currentTarget != self.targetToIgnoreForUpdates) + { + [currentTarget newFrameReadyAtTime:currentTime atIndex:textureIndexOfTarget]; + } + } + } +} + +- (void)processVideoSampleBuffer:(CMSampleBufferRef)sampleBuffer; +{ + if (capturePaused) + { + return; + } + + CFAbsoluteTime startTime = CFAbsoluteTimeGetCurrent(); + CVImageBufferRef cameraFrame = CMSampleBufferGetImageBuffer(sampleBuffer); + int bufferWidth = (int) CVPixelBufferGetWidth(cameraFrame); + int bufferHeight = (int) CVPixelBufferGetHeight(cameraFrame); + CFTypeRef colorAttachments = CVBufferGetAttachment(cameraFrame, kCVImageBufferYCbCrMatrixKey, NULL); + if (colorAttachments != NULL) + { + if(CFStringCompare(colorAttachments, kCVImageBufferYCbCrMatrix_ITU_R_601_4, 0) == kCFCompareEqualTo) + { + if (isFullYUVRange) + { + _preferredConversion = kColorConversion601FullRange; + } + else + { + 
_preferredConversion = kColorConversion601; + } + } + else + { + _preferredConversion = kColorConversion709; + } + } + else + { + if (isFullYUVRange) + { + _preferredConversion = kColorConversion601FullRange; + } + else + { + _preferredConversion = kColorConversion601; + } + } + + CMTime currentTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer); + + [GPUImageContext useImageProcessingContext]; + + if ([GPUImageContext supportsFastTextureUpload] && captureAsYUV) + { + CVOpenGLESTextureRef luminanceTextureRef = NULL; + CVOpenGLESTextureRef chrominanceTextureRef = NULL; + +// if (captureAsYUV && [GPUImageContext deviceSupportsRedTextures]) + if (CVPixelBufferGetPlaneCount(cameraFrame) > 0) // Check for YUV planar inputs to do RGB conversion + { + CVPixelBufferLockBaseAddress(cameraFrame, 0); + + if ( (imageBufferWidth != bufferWidth) && (imageBufferHeight != bufferHeight) ) + { + imageBufferWidth = bufferWidth; + imageBufferHeight = bufferHeight; + } + + CVReturn err; + // Y-plane + glActiveTexture(GL_TEXTURE4); + if ([GPUImageContext deviceSupportsRedTextures]) + { +// err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, coreVideoTextureCache, cameraFrame, NULL, GL_TEXTURE_2D, GL_RED_EXT, bufferWidth, bufferHeight, GL_RED_EXT, GL_UNSIGNED_BYTE, 0, &luminanceTextureRef); + err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], cameraFrame, NULL, GL_TEXTURE_2D, GL_LUMINANCE, bufferWidth, bufferHeight, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0, &luminanceTextureRef); + } + else + { + err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], cameraFrame, NULL, GL_TEXTURE_2D, GL_LUMINANCE, bufferWidth, bufferHeight, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0, &luminanceTextureRef); + } + if (err) + { + NSLog(@"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err); + } + + 
luminanceTexture = CVOpenGLESTextureGetName(luminanceTextureRef); + glBindTexture(GL_TEXTURE_2D, luminanceTexture); + glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + + // UV-plane + glActiveTexture(GL_TEXTURE5); + if ([GPUImageContext deviceSupportsRedTextures]) + { +// err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, coreVideoTextureCache, cameraFrame, NULL, GL_TEXTURE_2D, GL_RG_EXT, bufferWidth/2, bufferHeight/2, GL_RG_EXT, GL_UNSIGNED_BYTE, 1, &chrominanceTextureRef); + err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], cameraFrame, NULL, GL_TEXTURE_2D, GL_LUMINANCE_ALPHA, bufferWidth/2, bufferHeight/2, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, 1, &chrominanceTextureRef); + } + else + { + err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], cameraFrame, NULL, GL_TEXTURE_2D, GL_LUMINANCE_ALPHA, bufferWidth/2, bufferHeight/2, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, 1, &chrominanceTextureRef); + } + if (err) + { + NSLog(@"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err); + } + + chrominanceTexture = CVOpenGLESTextureGetName(chrominanceTextureRef); + glBindTexture(GL_TEXTURE_2D, chrominanceTexture); + glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + +// if (!allTargetsWantMonochromeData) +// { + [self convertYUVToRGBOutput]; +// } + + int rotatedImageBufferWidth = bufferWidth, rotatedImageBufferHeight = bufferHeight; + + if (GPUImageRotationSwapsWidthAndHeight(internalRotation)) + { + rotatedImageBufferWidth = bufferHeight; + rotatedImageBufferHeight = bufferWidth; + } + + [self updateTargetsForVideoCameraUsingCacheTextureAtWidth:rotatedImageBufferWidth 
height:rotatedImageBufferHeight time:currentTime]; + + CVPixelBufferUnlockBaseAddress(cameraFrame, 0); + CFRelease(luminanceTextureRef); + CFRelease(chrominanceTextureRef); + } + else + { + // TODO: Mesh this with the output framebuffer structure + +// CVPixelBufferLockBaseAddress(cameraFrame, 0); +// +// CVReturn err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], cameraFrame, NULL, GL_TEXTURE_2D, GL_RGBA, bufferWidth, bufferHeight, GL_BGRA, GL_UNSIGNED_BYTE, 0, &texture); +// +// if (!texture || err) { +// NSLog(@"Camera CVOpenGLESTextureCacheCreateTextureFromImage failed (error: %d)", err); +// NSAssert(NO, @"Camera failure"); +// return; +// } +// +// outputTexture = CVOpenGLESTextureGetName(texture); +// // glBindTexture(CVOpenGLESTextureGetTarget(texture), outputTexture); +// glBindTexture(GL_TEXTURE_2D, outputTexture); +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); +// +// [self updateTargetsForVideoCameraUsingCacheTextureAtWidth:bufferWidth height:bufferHeight time:currentTime]; +// +// CVPixelBufferUnlockBaseAddress(cameraFrame, 0); +// CFRelease(texture); +// +// outputTexture = 0; + } + + + if (_runBenchmark) + { + numberOfFramesCaptured++; + if (numberOfFramesCaptured > INITIALFRAMESTOIGNOREFORBENCHMARK) + { + CFAbsoluteTime currentFrameTime = (CFAbsoluteTimeGetCurrent() - startTime); + totalFrameTimeDuringCapture += currentFrameTime; + NSLog(@"Average frame time : %f ms", [self averageFrameDurationDuringCapture]); + NSLog(@"Current frame time : %f ms", 1000.0 * currentFrameTime); + } + } + } + else + { + CVPixelBufferLockBaseAddress(cameraFrame, 0); + + int bytesPerRow = (int) CVPixelBufferGetBytesPerRow(cameraFrame); + 
outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:CGSizeMake(bytesPerRow / 4, bufferHeight) onlyTexture:YES]; + [outputFramebuffer activateFramebuffer]; + + glBindTexture(GL_TEXTURE_2D, [outputFramebuffer texture]); + + // glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, bufferWidth, bufferHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, CVPixelBufferGetBaseAddress(cameraFrame)); + + // Using BGRA extension to pull in video frame data directly + // The use of bytesPerRow / 4 accounts for a display glitch present in preview video frames when using the photo preset on the camera + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, bytesPerRow / 4, bufferHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, CVPixelBufferGetBaseAddress(cameraFrame)); + + [self updateTargetsForVideoCameraUsingCacheTextureAtWidth:bytesPerRow / 4 height:bufferHeight time:currentTime]; + + CVPixelBufferUnlockBaseAddress(cameraFrame, 0); + + if (_runBenchmark) + { + numberOfFramesCaptured++; + if (numberOfFramesCaptured > INITIALFRAMESTOIGNOREFORBENCHMARK) + { + CFAbsoluteTime currentFrameTime = (CFAbsoluteTimeGetCurrent() - startTime); + totalFrameTimeDuringCapture += currentFrameTime; + } + } + } +} + +- (void)processAudioSampleBuffer:(CMSampleBufferRef)sampleBuffer; +{ + [self.audioEncodingTarget processAudioBuffer:sampleBuffer]; +} + +- (void)convertYUVToRGBOutput; +{ + [GPUImageContext setActiveShaderProgram:yuvConversionProgram]; + + int rotatedImageBufferWidth = imageBufferWidth, rotatedImageBufferHeight = imageBufferHeight; + + if (GPUImageRotationSwapsWidthAndHeight(internalRotation)) + { + rotatedImageBufferWidth = imageBufferHeight; + rotatedImageBufferHeight = imageBufferWidth; + } + + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:CGSizeMake(rotatedImageBufferWidth, rotatedImageBufferHeight) textureOptions:self.outputTextureOptions onlyTexture:NO]; + [outputFramebuffer activateFramebuffer]; + + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); + 
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + static const GLfloat squareVertices[] = { + -1.0f, -1.0f, + 1.0f, -1.0f, + -1.0f, 1.0f, + 1.0f, 1.0f, + }; + + glActiveTexture(GL_TEXTURE4); + glBindTexture(GL_TEXTURE_2D, luminanceTexture); + glUniform1i(yuvConversionLuminanceTextureUniform, 4); + + glActiveTexture(GL_TEXTURE5); + glBindTexture(GL_TEXTURE_2D, chrominanceTexture); + glUniform1i(yuvConversionChrominanceTextureUniform, 5); + + glUniformMatrix3fv(yuvConversionMatrixUniform, 1, GL_FALSE, _preferredConversion); + + glVertexAttribPointer(yuvConversionPositionAttribute, 2, GL_FLOAT, 0, 0, squareVertices); + glVertexAttribPointer(yuvConversionTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, [GPUImageFilter textureCoordinatesForRotation:internalRotation]); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); +} + +#pragma mark - +#pragma mark Benchmarking + +- (CGFloat)averageFrameDurationDuringCapture; +{ + return (totalFrameTimeDuringCapture / (CGFloat)(numberOfFramesCaptured - INITIALFRAMESTOIGNOREFORBENCHMARK)) * 1000.0; +} + +- (void)resetBenchmarkAverage; +{ + numberOfFramesCaptured = 0; + totalFrameTimeDuringCapture = 0.0; +} + +#pragma mark - +#pragma mark AVCaptureVideoDataOutputSampleBufferDelegate + +- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection +{ + if (!self.captureSession.isRunning) + { + return; + } + else if (captureOutput == audioOutput) + { + [self processAudioSampleBuffer:sampleBuffer]; + } + else + { + if (dispatch_semaphore_wait(frameRenderingSemaphore, DISPATCH_TIME_NOW) != 0) + { + return; + } + + CFRetain(sampleBuffer); + runAsynchronouslyOnVideoProcessingQueue(^{ + //Feature Detection Hook. 
+ if (self.delegate) + { + [self.delegate willOutputSampleBuffer:sampleBuffer]; + } + + [self processVideoSampleBuffer:sampleBuffer]; + + CFRelease(sampleBuffer); + dispatch_semaphore_signal(frameRenderingSemaphore); + }); + } +} + +#pragma mark - +#pragma mark Accessors + +- (void)setAudioEncodingTarget:(GPUImageMovieWriter *)newValue; +{ + if (newValue) { + /* Add audio inputs and outputs, if necessary */ + addedAudioInputsDueToEncodingTarget |= [self addAudioInputsAndOutputs]; + } else if (addedAudioInputsDueToEncodingTarget) { + /* Remove audio inputs and outputs, if they were added by previously setting the audio encoding target */ + [self removeAudioInputsAndOutputs]; + addedAudioInputsDueToEncodingTarget = NO; + } + + [super setAudioEncodingTarget:newValue]; +} + +- (void)updateOrientationSendToTargets; +{ + runSynchronouslyOnVideoProcessingQueue(^{ + + // From the iOS 5.0 release notes: + // In previous iOS versions, the front-facing camera would always deliver buffers in AVCaptureVideoOrientationLandscapeLeft and the back-facing camera would always deliver buffers in AVCaptureVideoOrientationLandscapeRight. 
+ + if (captureAsYUV && [GPUImageContext supportsFastTextureUpload]) + { + outputRotation = kGPUImageNoRotation; + if ([self cameraPosition] == AVCaptureDevicePositionBack) + { + if (_horizontallyMirrorRearFacingCamera) + { + switch(_outputImageOrientation) + { + case UIInterfaceOrientationPortrait:internalRotation = kGPUImageRotateRightFlipVertical; break; + case UIInterfaceOrientationPortraitUpsideDown:internalRotation = kGPUImageRotate180; break; + case UIInterfaceOrientationLandscapeLeft:internalRotation = kGPUImageFlipHorizonal; break; + case UIInterfaceOrientationLandscapeRight:internalRotation = kGPUImageFlipVertical; break; + default:internalRotation = kGPUImageNoRotation; + } + } + else + { + switch(_outputImageOrientation) + { + case UIInterfaceOrientationPortrait:internalRotation = kGPUImageRotateRight; break; + case UIInterfaceOrientationPortraitUpsideDown:internalRotation = kGPUImageRotateLeft; break; + case UIInterfaceOrientationLandscapeLeft:internalRotation = kGPUImageRotate180; break; + case UIInterfaceOrientationLandscapeRight:internalRotation = kGPUImageNoRotation; break; + default:internalRotation = kGPUImageNoRotation; + } + } + } + else + { + if (_horizontallyMirrorFrontFacingCamera) + { + switch(_outputImageOrientation) + { + case UIInterfaceOrientationPortrait:internalRotation = kGPUImageRotateRightFlipVertical; break; + case UIInterfaceOrientationPortraitUpsideDown:internalRotation = kGPUImageRotateRightFlipHorizontal; break; + case UIInterfaceOrientationLandscapeLeft:internalRotation = kGPUImageFlipHorizonal; break; + case UIInterfaceOrientationLandscapeRight:internalRotation = kGPUImageFlipVertical; break; + default:internalRotation = kGPUImageNoRotation; + } + } + else + { + switch(_outputImageOrientation) + { + case UIInterfaceOrientationPortrait:internalRotation = kGPUImageRotateRight; break; + case UIInterfaceOrientationPortraitUpsideDown:internalRotation = kGPUImageRotateLeft; break; + case 
UIInterfaceOrientationLandscapeLeft:internalRotation = kGPUImageNoRotation; break; + case UIInterfaceOrientationLandscapeRight:internalRotation = kGPUImageRotate180; break; + default:internalRotation = kGPUImageNoRotation; + } + } + } + } + else + { + if ([self cameraPosition] == AVCaptureDevicePositionBack) + { + if (_horizontallyMirrorRearFacingCamera) + { + switch(_outputImageOrientation) + { + case UIInterfaceOrientationPortrait:outputRotation = kGPUImageRotateRightFlipVertical; break; + case UIInterfaceOrientationPortraitUpsideDown:outputRotation = kGPUImageRotate180; break; + case UIInterfaceOrientationLandscapeLeft:outputRotation = kGPUImageFlipHorizonal; break; + case UIInterfaceOrientationLandscapeRight:outputRotation = kGPUImageFlipVertical; break; + default:outputRotation = kGPUImageNoRotation; + } + } + else + { + switch(_outputImageOrientation) + { + case UIInterfaceOrientationPortrait:outputRotation = kGPUImageRotateRight; break; + case UIInterfaceOrientationPortraitUpsideDown:outputRotation = kGPUImageRotateLeft; break; + case UIInterfaceOrientationLandscapeLeft:outputRotation = kGPUImageRotate180; break; + case UIInterfaceOrientationLandscapeRight:outputRotation = kGPUImageNoRotation; break; + default:outputRotation = kGPUImageNoRotation; + } + } + } + else + { + if (_horizontallyMirrorFrontFacingCamera) + { + switch(_outputImageOrientation) + { + case UIInterfaceOrientationPortrait:outputRotation = kGPUImageRotateRightFlipVertical; break; + case UIInterfaceOrientationPortraitUpsideDown:outputRotation = kGPUImageRotateRightFlipHorizontal; break; + case UIInterfaceOrientationLandscapeLeft:outputRotation = kGPUImageFlipHorizonal; break; + case UIInterfaceOrientationLandscapeRight:outputRotation = kGPUImageFlipVertical; break; + default:outputRotation = kGPUImageNoRotation; + } + } + else + { + switch(_outputImageOrientation) + { + case UIInterfaceOrientationPortrait:outputRotation = kGPUImageRotateRight; break; + case 
UIInterfaceOrientationPortraitUpsideDown:outputRotation = kGPUImageRotateLeft; break; + case UIInterfaceOrientationLandscapeLeft:outputRotation = kGPUImageNoRotation; break; + case UIInterfaceOrientationLandscapeRight:outputRotation = kGPUImageRotate180; break; + default:outputRotation = kGPUImageNoRotation; + } + } + } + } + + for (id currentTarget in targets) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + [currentTarget setInputRotation:outputRotation atIndex:[[targetTextureIndices objectAtIndex:indexOfObject] integerValue]]; + } + }); +} + +- (void)setOutputImageOrientation:(UIInterfaceOrientation)newValue; +{ + _outputImageOrientation = newValue; + [self updateOrientationSendToTargets]; +} + +- (void)setHorizontallyMirrorFrontFacingCamera:(BOOL)newValue +{ + _horizontallyMirrorFrontFacingCamera = newValue; + [self updateOrientationSendToTargets]; +} + +- (void)setHorizontallyMirrorRearFacingCamera:(BOOL)newValue +{ + _horizontallyMirrorRearFacingCamera = newValue; + [self updateOrientationSendToTargets]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageVignetteFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageVignetteFilter.h new file mode 100755 index 00000000..37be9449 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageVignetteFilter.h @@ -0,0 +1,22 @@ +#import "GPUImageFilter.h" + +/** Performs a vignetting effect, fading out the image at the edges + */ +@interface GPUImageVignetteFilter : GPUImageFilter +{ + GLint vignetteCenterUniform, vignetteColorUniform, vignetteStartUniform, vignetteEndUniform; +} + +// the center for the vignette in tex coords (defaults to 0.5, 0.5) +@property (nonatomic, readwrite) CGPoint vignetteCenter; + +// The color to use for the Vignette (defaults to black) +@property (nonatomic, readwrite) GPUVector3 vignetteColor; + +// The normalized distance from the center where the vignette effect starts. Default of 0.5. 
+@property (nonatomic, readwrite) CGFloat vignetteStart; + +// The normalized distance from the center where the vignette effect ends. Default of 0.75. +@property (nonatomic, readwrite) CGFloat vignetteEnd; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageVignetteFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageVignetteFilter.m new file mode 100755 index 00000000..6e1eadb2 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageVignetteFilter.m @@ -0,0 +1,104 @@ +#import "GPUImageVignetteFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageVignetteFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + varying highp vec2 textureCoordinate; + + uniform lowp vec2 vignetteCenter; + uniform lowp vec3 vignetteColor; + uniform highp float vignetteStart; + uniform highp float vignetteEnd; + + void main() + { + lowp vec4 sourceImageColor = texture2D(inputImageTexture, textureCoordinate); + lowp float d = distance(textureCoordinate, vec2(vignetteCenter.x, vignetteCenter.y)); + lowp float percent = smoothstep(vignetteStart, vignetteEnd, d); + gl_FragColor = vec4(mix(sourceImageColor.rgb, vignetteColor, percent), sourceImageColor.a); + } +); +#else +NSString *const kGPUImageVignetteFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + varying vec2 textureCoordinate; + + uniform vec2 vignetteCenter; + uniform vec3 vignetteColor; + uniform float vignetteStart; + uniform float vignetteEnd; + + void main() + { + vec4 sourceImageColor = texture2D(inputImageTexture, textureCoordinate); + float d = distance(textureCoordinate, vec2(vignetteCenter.x, vignetteCenter.y)); + float percent = smoothstep(vignetteStart, vignetteEnd, d); + gl_FragColor = vec4(mix(sourceImageColor.rgb, vignetteColor, percent), sourceImageColor.a); + } +); +#endif + +@implementation GPUImageVignetteFilter + +@synthesize vignetteCenter = _vignetteCenter; +@synthesize vignetteColor = _vignetteColor; +@synthesize vignetteStart 
=_vignetteStart; +@synthesize vignetteEnd = _vignetteEnd; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageVignetteFragmentShaderString])) + { + return nil; + } + + vignetteCenterUniform = [filterProgram uniformIndex:@"vignetteCenter"]; + vignetteColorUniform = [filterProgram uniformIndex:@"vignetteColor"]; + vignetteStartUniform = [filterProgram uniformIndex:@"vignetteStart"]; + vignetteEndUniform = [filterProgram uniformIndex:@"vignetteEnd"]; + + self.vignetteCenter = (CGPoint){ 0.5f, 0.5f }; + self.vignetteColor = (GPUVector3){ 0.0f, 0.0f, 0.0f }; + self.vignetteStart = 0.3; + self.vignetteEnd = 0.75; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setVignetteCenter:(CGPoint)newValue +{ + _vignetteCenter = newValue; + + [self setPoint:newValue forUniform:vignetteCenterUniform program:filterProgram]; +} + +- (void)setVignetteColor:(GPUVector3)newValue +{ + _vignetteColor = newValue; + + [self setVec3:newValue forUniform:vignetteColorUniform program:filterProgram]; +} + +- (void)setVignetteStart:(CGFloat)newValue; +{ + _vignetteStart = newValue; + + [self setFloat:_vignetteStart forUniform:vignetteStartUniform program:filterProgram]; +} + +- (void)setVignetteEnd:(CGFloat)newValue; +{ + _vignetteEnd = newValue; + + [self setFloat:_vignetteEnd forUniform:vignetteEndUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageVoronoiConsumerFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageVoronoiConsumerFilter.h new file mode 100644 index 00000000..659e39d5 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageVoronoiConsumerFilter.h @@ -0,0 +1,10 @@ +#import "GPUImageTwoInputFilter.h" + +@interface GPUImageVoronoiConsumerFilter : GPUImageTwoInputFilter +{ + GLint sizeUniform; +} + +@property (nonatomic, readwrite) CGSize sizeInPixels; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageVoronoiConsumerFilter.m 
b/LFLiveKit/Vendor/GPUImage/GPUImageVoronoiConsumerFilter.m new file mode 100644 index 00000000..c12c34f4 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageVoronoiConsumerFilter.m @@ -0,0 +1,94 @@ +#import "GPUImageVoronoiConsumerFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageVoronoiConsumerFragmentShaderString = SHADER_STRING +( + + precision highp float; + + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + uniform vec2 size; + varying vec2 textureCoordinate; + + vec2 getCoordFromColor(vec4 color) +{ + float z = color.z * 256.0; + float yoff = floor(z / 8.0); + float xoff = mod(z, 8.0); + float x = color.x*256.0 + xoff*256.0; + float y = color.y*256.0 + yoff*256.0; + return vec2(x,y) / size; +} + + void main(void) { + vec4 colorLoc = texture2D(inputImageTexture2, textureCoordinate); + vec4 color = texture2D(inputImageTexture, getCoordFromColor(colorLoc)); + + gl_FragColor = color; + } +); +#else +NSString *const kGPUImageVoronoiConsumerFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + uniform sampler2D inputImageTexture2; + uniform vec2 size; + varying vec2 textureCoordinate; + + vec2 getCoordFromColor(vec4 color) + { + float z = color.z * 256.0; + float yoff = floor(z / 8.0); + float xoff = mod(z, 8.0); + float x = color.x*256.0 + xoff*256.0; + float y = color.y*256.0 + yoff*256.0; + return vec2(x,y) / size; + } + + void main(void) + { + vec4 colorLoc = texture2D(inputImageTexture2, textureCoordinate); + vec4 color = texture2D(inputImageTexture, getCoordFromColor(colorLoc)); + + gl_FragColor = color; + } +); +#endif + +@implementation GPUImageVoronoiConsumerFilter + +@synthesize sizeInPixels = _sizeInPixels; + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageVoronoiConsumerFragmentShaderString])) + { + return nil; + } + + sizeUniform = [filterProgram uniformIndex:@"size"]; + + return self; +} + 
+-(void)setSizeInPixels:(CGSize)sizeInPixels { + _sizeInPixels = sizeInPixels; + + //validate that it's a power of 2 and square + + float width = log2(sizeInPixels.width); + float height = log2(sizeInPixels.height); + + if (width != height) { + NSLog(@"Voronoi point texture must be square"); + return; + } + if (width != floor(width) || height != floor(height)) { + NSLog(@"Voronoi point texture must be a power of 2. Texture size %f, %f", sizeInPixels.width, sizeInPixels.height); + return; + } + glUniform2f(sizeUniform, _sizeInPixels.width, _sizeInPixels.height); +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageWeakPixelInclusionFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageWeakPixelInclusionFilter.h new file mode 100644 index 00000000..44b76c6a --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageWeakPixelInclusionFilter.h @@ -0,0 +1,5 @@ +#import "GPUImage3x3TextureSamplingFilter.h" + +@interface GPUImageWeakPixelInclusionFilter : GPUImage3x3TextureSamplingFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageWeakPixelInclusionFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageWeakPixelInclusionFilter.m new file mode 100644 index 00000000..4e95ad5d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageWeakPixelInclusionFilter.m @@ -0,0 +1,94 @@ +#import "GPUImageWeakPixelInclusionFilter.h" + +@implementation GPUImageWeakPixelInclusionFilter + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageWeakPixelInclusionFragmentShaderString = SHADER_STRING +( + precision lowp float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float bottomLeftIntensity = 
texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float centerIntensity = texture2D(inputImageTexture, textureCoordinate).r; + + float pixelIntensitySum = bottomLeftIntensity + topRightIntensity + topLeftIntensity + bottomRightIntensity + leftIntensity + rightIntensity + bottomIntensity + topIntensity + centerIntensity; + float sumTest = step(1.5, pixelIntensitySum); + float pixelTest = step(0.01, centerIntensity); + + gl_FragColor = vec4(vec3(sumTest * pixelTest), 1.0); + } +); +#else +NSString *const kGPUImageWeakPixelInclusionFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = 
texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float centerIntensity = texture2D(inputImageTexture, textureCoordinate).r; + + float pixelIntensitySum = bottomLeftIntensity + topRightIntensity + topLeftIntensity + bottomRightIntensity + leftIntensity + rightIntensity + bottomIntensity + topIntensity + centerIntensity; + float sumTest = step(1.5, pixelIntensitySum); + float pixelTest = step(0.01, centerIntensity); + + gl_FragColor = vec4(vec3(sumTest * pixelTest), 1.0); + } +); +#endif + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithFragmentShaderFromString:kGPUImageWeakPixelInclusionFragmentShaderString])) + { + return nil; + } + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageWhiteBalanceFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageWhiteBalanceFilter.h new file mode 100644 index 00000000..6b09c33d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageWhiteBalanceFilter.h @@ -0,0 +1,17 @@ +#import "GPUImageFilter.h" +/** + * Created by Alaric Cole + * Allows adjustment of color temperature in terms of what an image was effectively shot in. This means higher Kelvin values will warm the image, while lower values will cool it. 
+ + */ +@interface GPUImageWhiteBalanceFilter : GPUImageFilter +{ + GLint temperatureUniform, tintUniform; +} +//choose color temperature, in degrees Kelvin +@property(readwrite, nonatomic) CGFloat temperature; + +//adjust tint to compensate +@property(readwrite, nonatomic) CGFloat tint; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageWhiteBalanceFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageWhiteBalanceFilter.m new file mode 100644 index 00000000..17c9bce9 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageWhiteBalanceFilter.m @@ -0,0 +1,107 @@ +#import "GPUImageWhiteBalanceFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageWhiteBalanceFragmentShaderString = SHADER_STRING +( +uniform sampler2D inputImageTexture; +varying highp vec2 textureCoordinate; + +uniform lowp float temperature; +uniform lowp float tint; + +const lowp vec3 warmFilter = vec3(0.93, 0.54, 0.0); + +const mediump mat3 RGBtoYIQ = mat3(0.299, 0.587, 0.114, 0.596, -0.274, -0.322, 0.212, -0.523, 0.311); +const mediump mat3 YIQtoRGB = mat3(1.0, 0.956, 0.621, 1.0, -0.272, -0.647, 1.0, -1.105, 1.702); + +void main() +{ + lowp vec4 source = texture2D(inputImageTexture, textureCoordinate); + + mediump vec3 yiq = RGBtoYIQ * source.rgb; //adjusting tint + yiq.b = clamp(yiq.b + tint*0.5226*0.1, -0.5226, 0.5226); + lowp vec3 rgb = YIQtoRGB * yiq; + + lowp vec3 processed = vec3( + (rgb.r < 0.5 ? (2.0 * rgb.r * warmFilter.r) : (1.0 - 2.0 * (1.0 - rgb.r) * (1.0 - warmFilter.r))), //adjusting temperature + (rgb.g < 0.5 ? (2.0 * rgb.g * warmFilter.g) : (1.0 - 2.0 * (1.0 - rgb.g) * (1.0 - warmFilter.g))), + (rgb.b < 0.5 ? 
(2.0 * rgb.b * warmFilter.b) : (1.0 - 2.0 * (1.0 - rgb.b) * (1.0 - warmFilter.b)))); + + gl_FragColor = vec4(mix(rgb, processed, temperature), source.a); +} +); +#else +NSString *const kGPUImageWhiteBalanceFragmentShaderString = SHADER_STRING +( + uniform sampler2D inputImageTexture; + varying vec2 textureCoordinate; + + uniform float temperature; + uniform float tint; + + const vec3 warmFilter = vec3(0.93, 0.54, 0.0); + + const mat3 RGBtoYIQ = mat3(0.299, 0.587, 0.114, 0.596, -0.274, -0.322, 0.212, -0.523, 0.311); + const mat3 YIQtoRGB = mat3(1.0, 0.956, 0.621, 1.0, -0.272, -0.647, 1.0, -1.105, 1.702); + + void main() +{ + vec4 source = texture2D(inputImageTexture, textureCoordinate); + + vec3 yiq = RGBtoYIQ * source.rgb; //adjusting tint + yiq.b = clamp(yiq.b + tint*0.5226*0.1, -0.5226, 0.5226); + vec3 rgb = YIQtoRGB * yiq; + + vec3 processed = vec3( + (rgb.r < 0.5 ? (2.0 * rgb.r * warmFilter.r) : (1.0 - 2.0 * (1.0 - rgb.r) * (1.0 - warmFilter.r))), //adjusting temperature + (rgb.g < 0.5 ? (2.0 * rgb.g * warmFilter.g) : (1.0 - 2.0 * (1.0 - rgb.g) * (1.0 - warmFilter.g))), + (rgb.b < 0.5 ? (2.0 * rgb.b * warmFilter.b) : (1.0 - 2.0 * (1.0 - rgb.b) * (1.0 - warmFilter.b)))); + + gl_FragColor = vec4(mix(rgb, processed, temperature), source.a); +} +); +#endif + +@implementation GPUImageWhiteBalanceFilter + +@synthesize temperature = _temperature; +@synthesize tint = _tint; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageWhiteBalanceFragmentShaderString])) + { + return nil; + } + + temperatureUniform = [filterProgram uniformIndex:@"temperature"]; + tintUniform = [filterProgram uniformIndex:@"tint"]; + + self.temperature = 5000.0; + self.tint = 0.0; + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setTemperature:(CGFloat)newValue; +{ + _temperature = newValue; + + [self setFloat:_temperature < 5000 ? 
0.0004 * (_temperature-5000.0) : 0.00006 * (_temperature-5000.0) forUniform:temperatureUniform program:filterProgram]; +} + +- (void)setTint:(CGFloat)newValue; +{ + _tint = newValue; + + [self setFloat:_tint / 100.0 forUniform:tintUniform program:filterProgram]; +} + +@end + diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageXYDerivativeFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageXYDerivativeFilter.h new file mode 100755 index 00000000..8db57457 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageXYDerivativeFilter.h @@ -0,0 +1,5 @@ +#import "GPUImageSobelEdgeDetectionFilter.h" + +@interface GPUImageXYDerivativeFilter : GPUImageSobelEdgeDetectionFilter + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageXYDerivativeFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageXYDerivativeFilter.m new file mode 100755 index 00000000..7e19e9d4 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageXYDerivativeFilter.m @@ -0,0 +1,106 @@ +#import "GPUImageXYDerivativeFilter.h" + +// I'm using the Prewitt operator to obtain the derivative, then squaring the X and Y components and placing the product of the two in Z. +// In tests, Prewitt seemed to be tied with Sobel for the best, and it's just a little cheaper to compute. +// This is primarily intended to be used with corner detection filters. 
+ +@implementation GPUImageXYDerivativeFilter + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageGradientFragmentShaderString = SHADER_STRING +( + precision highp float; + + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform float edgeStrength; + + void main() + { + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + + float verticalDerivative = -topLeftIntensity - topIntensity - topRightIntensity + bottomLeftIntensity + bottomIntensity + bottomRightIntensity; + float horizontalDerivative = -bottomLeftIntensity - leftIntensity - topLeftIntensity + bottomRightIntensity + rightIntensity + topRightIntensity; + verticalDerivative = verticalDerivative * edgeStrength; + horizontalDerivative = horizontalDerivative * edgeStrength; + + // Scaling the X * Y operation so that negative numbers are not clipped in the 0..1 range. 
This will be expanded in the corner detection filter + gl_FragColor = vec4(horizontalDerivative * horizontalDerivative, verticalDerivative * verticalDerivative, ((verticalDerivative * horizontalDerivative) + 1.0) / 2.0, 1.0); + } +); +#else +NSString *const kGPUImageGradientFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + varying vec2 leftTextureCoordinate; + varying vec2 rightTextureCoordinate; + + varying vec2 topTextureCoordinate; + varying vec2 topLeftTextureCoordinate; + varying vec2 topRightTextureCoordinate; + + varying vec2 bottomTextureCoordinate; + varying vec2 bottomLeftTextureCoordinate; + varying vec2 bottomRightTextureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform float edgeStrength; + + void main() + { + float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r; + float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r; + float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r; + float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r; + float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r; + float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r; + float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r; + float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r; + + float verticalDerivative = -topLeftIntensity - topIntensity - topRightIntensity + bottomLeftIntensity + bottomIntensity + bottomRightIntensity; + float horizontalDerivative = -bottomLeftIntensity - leftIntensity - topLeftIntensity + bottomRightIntensity + rightIntensity + topRightIntensity; + verticalDerivative = verticalDerivative * edgeStrength; + horizontalDerivative = horizontalDerivative * edgeStrength; + + // Scaling the X * Y operation so that negative numbers are not clipped in the 0..1 range. 
This will be expanded in the corner detection filter + gl_FragColor = vec4(horizontalDerivative * horizontalDerivative, verticalDerivative * verticalDerivative, ((verticalDerivative * horizontalDerivative) + 1.0) / 2.0, 1.0); + } +); +#endif + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [self initWithFragmentShaderFromString:kGPUImageGradientFragmentShaderString])) + { + return nil; + } + + self.edgeStrength = 1.0; + + return self; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageZoomBlurFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageZoomBlurFilter.h new file mode 100644 index 00000000..744a72cb --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageZoomBlurFilter.h @@ -0,0 +1,13 @@ +#import "GPUImageFilter.h" + +@interface GPUImageZoomBlurFilter : GPUImageFilter + +/** A multiplier for the blur size, ranging from 0.0 on up, with a default of 1.0 + */ +@property (readwrite, nonatomic) CGFloat blurSize; + +/** The normalized center of the blur. 
(0.5, 0.5) by default + */ +@property (readwrite, nonatomic) CGPoint blurCenter; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageZoomBlurFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageZoomBlurFilter.m new file mode 100644 index 00000000..2ae84934 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageZoomBlurFilter.m @@ -0,0 +1,115 @@ +#import "GPUImageZoomBlurFilter.h" + +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +NSString *const kGPUImageZoomBlurFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform highp vec2 blurCenter; + uniform highp float blurSize; + + void main() + { + // TODO: Do a more intelligent scaling based on resolution here + highp vec2 samplingOffset = 1.0/100.0 * (blurCenter - textureCoordinate) * blurSize; + + lowp vec4 fragmentColor = texture2D(inputImageTexture, textureCoordinate) * 0.18; + fragmentColor += texture2D(inputImageTexture, textureCoordinate + samplingOffset) * 0.15; + fragmentColor += texture2D(inputImageTexture, textureCoordinate + (2.0 * samplingOffset)) * 0.12; + fragmentColor += texture2D(inputImageTexture, textureCoordinate + (3.0 * samplingOffset)) * 0.09; + fragmentColor += texture2D(inputImageTexture, textureCoordinate + (4.0 * samplingOffset)) * 0.05; + fragmentColor += texture2D(inputImageTexture, textureCoordinate - samplingOffset) * 0.15; + fragmentColor += texture2D(inputImageTexture, textureCoordinate - (2.0 * samplingOffset)) * 0.12; + fragmentColor += texture2D(inputImageTexture, textureCoordinate - (3.0 * samplingOffset)) * 0.09; + fragmentColor += texture2D(inputImageTexture, textureCoordinate - (4.0 * samplingOffset)) * 0.05; + + gl_FragColor = fragmentColor; + } +); +#else +NSString *const kGPUImageZoomBlurFragmentShaderString = SHADER_STRING +( + varying vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + uniform vec2 blurCenter; + uniform float blurSize; + + void main() + { + // TODO: Do a more intelligent 
scaling based on resolution here + vec2 samplingOffset = 1.0/100.0 * (blurCenter - textureCoordinate) * blurSize; + + vec4 fragmentColor = texture2D(inputImageTexture, textureCoordinate) * 0.18; + fragmentColor += texture2D(inputImageTexture, textureCoordinate + samplingOffset) * 0.15; + fragmentColor += texture2D(inputImageTexture, textureCoordinate + (2.0 * samplingOffset)) * 0.12; + fragmentColor += texture2D(inputImageTexture, textureCoordinate + (3.0 * samplingOffset)) * 0.09; + fragmentColor += texture2D(inputImageTexture, textureCoordinate + (4.0 * samplingOffset)) * 0.05; + fragmentColor += texture2D(inputImageTexture, textureCoordinate - samplingOffset) * 0.15; + fragmentColor += texture2D(inputImageTexture, textureCoordinate - (2.0 * samplingOffset)) * 0.12; + fragmentColor += texture2D(inputImageTexture, textureCoordinate - (3.0 * samplingOffset)) * 0.09; + fragmentColor += texture2D(inputImageTexture, textureCoordinate - (4.0 * samplingOffset)) * 0.05; + + gl_FragColor = fragmentColor; + } +); +#endif + +@interface GPUImageZoomBlurFilter() +{ + GLint blurSizeUniform, blurCenterUniform; +} +@end + +@implementation GPUImageZoomBlurFilter + +@synthesize blurSize = _blurSize; +@synthesize blurCenter = _blurCenter; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super initWithFragmentShaderFromString:kGPUImageZoomBlurFragmentShaderString])) + { + return nil; + } + + blurSizeUniform = [filterProgram uniformIndex:@"blurSize"]; + blurCenterUniform = [filterProgram uniformIndex:@"blurCenter"]; + + self.blurSize = 1.0; + self.blurCenter = CGPointMake(0.5, 0.5); + + return self; +} + +#pragma mark - +#pragma mark Accessors + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + [super setInputRotation:newInputRotation atIndex:textureIndex]; + [self setBlurCenter:self.blurCenter]; +} + +- (void)setBlurSize:(CGFloat)newValue; +{ + _blurSize = newValue; + + [self 
setFloat:_blurSize forUniform:blurSizeUniform program:filterProgram]; +} + +- (void)setBlurCenter:(CGPoint)newValue; +{ + _blurCenter = newValue; + + CGPoint rotatedPoint = [self rotatedPoint:_blurCenter forRotation:inputRotation]; + [self setPoint:rotatedPoint forUniform:blurCenterUniform program:filterProgram]; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageiOSBlurFilter.h b/LFLiveKit/Vendor/GPUImage/GPUImageiOSBlurFilter.h new file mode 100644 index 00000000..261d0d7c --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageiOSBlurFilter.h @@ -0,0 +1,31 @@ +#import "GPUImageFilterGroup.h" + +@class GPUImageSaturationFilter; +@class GPUImageGaussianBlurFilter; +@class GPUImageLuminanceRangeFilter; + +@interface GPUImageiOSBlurFilter : GPUImageFilterGroup +{ + GPUImageSaturationFilter *saturationFilter; + GPUImageGaussianBlurFilter *blurFilter; + GPUImageLuminanceRangeFilter *luminanceRangeFilter; +} + +/** A radius in pixels to use for the blur, with a default of 12.0. This adjusts the sigma variable in the Gaussian distribution function. + */ +@property (readwrite, nonatomic) CGFloat blurRadiusInPixels; + +/** Saturation ranges from 0.0 (fully desaturated) to 2.0 (max saturation), with 0.8 as the normal level + */ +@property (readwrite, nonatomic) CGFloat saturation; + +/** The degree to which to downsample, then upsample the incoming image to minimize computations within the Gaussian blur, default of 4.0 + */ +@property (readwrite, nonatomic) CGFloat downsampling; + + +/** The degree to reduce the luminance range, from 0.0 to 1.0. Default is 0.6. 
+ */ +@property (readwrite, nonatomic) CGFloat rangeReductionFactor; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/GPUImageiOSBlurFilter.m b/LFLiveKit/Vendor/GPUImage/GPUImageiOSBlurFilter.m new file mode 100644 index 00000000..18533213 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/GPUImageiOSBlurFilter.m @@ -0,0 +1,114 @@ +#import "GPUImageiOSBlurFilter.h" +#import "GPUImageSaturationFilter.h" +#import "GPUImageGaussianBlurFilter.h" +#import "GPUImageLuminanceRangeFilter.h" + +@implementation GPUImageiOSBlurFilter + +@synthesize blurRadiusInPixels; +@synthesize saturation; +@synthesize downsampling = _downsampling; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + // First pass: downsample and desaturate + saturationFilter = [[GPUImageSaturationFilter alloc] init]; + [self addFilter:saturationFilter]; + + // Second pass: apply a strong Gaussian blur + blurFilter = [[GPUImageGaussianBlurFilter alloc] init]; + [self addFilter:blurFilter]; + + // Third pass: upsample and adjust luminance range + luminanceRangeFilter = [[GPUImageLuminanceRangeFilter alloc] init]; + [self addFilter:luminanceRangeFilter]; + + [saturationFilter addTarget:blurFilter]; + [blurFilter addTarget:luminanceRangeFilter]; + + self.initialFilters = [NSArray arrayWithObject:saturationFilter]; + self.terminalFilter = luminanceRangeFilter; + + self.blurRadiusInPixels = 12.0; + self.saturation = 0.8; + self.downsampling = 4.0; + self.rangeReductionFactor = 0.6; + + return self; +} + +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + if (_downsampling > 1.0) + { + CGSize rotatedSize = [saturationFilter rotatedSize:newSize forIndex:textureIndex]; + + [saturationFilter forceProcessingAtSize:CGSizeMake(rotatedSize.width / _downsampling, rotatedSize.height / _downsampling)]; + [luminanceRangeFilter forceProcessingAtSize:rotatedSize]; + } + + [super setInputSize:newSize atIndex:textureIndex]; +} + 
+#pragma mark - +#pragma mark Accessors + +// From Apple's UIImage+ImageEffects category: + +// A description of how to compute the box kernel width from the Gaussian +// radius (aka standard deviation) appears in the SVG spec: +// http://www.w3.org/TR/SVG/filters.html#feGaussianBlurElement +// +// For larger values of 's' (s >= 2.0), an approximation can be used: Three +// successive box-blurs build a piece-wise quadratic convolution kernel, which +// approximates the Gaussian kernel to within roughly 3%. +// +// let d = floor(s * 3*sqrt(2*pi)/4 + 0.5) +// +// ... if d is odd, use three box-blurs of size 'd', centered on the output pixel. + + +- (void)setBlurRadiusInPixels:(CGFloat)newValue; +{ + blurFilter.blurRadiusInPixels = newValue; +} + +- (CGFloat)blurRadiusInPixels; +{ + return blurFilter.blurRadiusInPixels; +} + +- (void)setSaturation:(CGFloat)newValue; +{ + saturationFilter.saturation = newValue; +} + +- (CGFloat)saturation; +{ + return saturationFilter.saturation; +} + +- (void)setDownsampling:(CGFloat)newValue; +{ + _downsampling = newValue; +} + +- (void)setRangeReductionFactor:(CGFloat)rangeReductionFactor +{ + luminanceRangeFilter.rangeReductionFactor = rangeReductionFactor; +} + +- (CGFloat)rangeReductionFactor +{ + return luminanceRangeFilter.rangeReductionFactor; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/iOS/Framework/GPUImageFramework.h b/LFLiveKit/Vendor/GPUImage/iOS/Framework/GPUImageFramework.h new file mode 100644 index 00000000..3de78bf4 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/iOS/Framework/GPUImageFramework.h @@ -0,0 +1,177 @@ +#import + +//! Project version number for GPUImageFramework. +FOUNDATION_EXPORT double GPUImageFrameworkVersionNumber; + +//! Project version string for GPUImageFramework. 
+FOUNDATION_EXPORT const unsigned char GPUImageFrameworkVersionString[]; + +#import + +// Base classes +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import + +// Filters +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import diff --git a/LFLiveKit/Vendor/GPUImage/iOS/GPUImageContext.h b/LFLiveKit/Vendor/GPUImage/iOS/GPUImageContext.h new file mode 100755 index 00000000..71142b88 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/iOS/GPUImageContext.h @@ -0,0 +1,64 @@ +#import "GLProgram.h" +#import "GPUImageFramebuffer.h" +#import "GPUImageFramebufferCache.h" + +#define GPUImageRotationSwapsWidthAndHeight(rotation) ((rotation) 
== kGPUImageRotateLeft || (rotation) == kGPUImageRotateRight || (rotation) == kGPUImageRotateRightFlipVertical || (rotation) == kGPUImageRotateRightFlipHorizontal) + +typedef NS_ENUM(NSUInteger, GPUImageRotationMode) { + kGPUImageNoRotation, + kGPUImageRotateLeft, + kGPUImageRotateRight, + kGPUImageFlipVertical, + kGPUImageFlipHorizonal, + kGPUImageRotateRightFlipVertical, + kGPUImageRotateRightFlipHorizontal, + kGPUImageRotate180 +}; + +@interface GPUImageContext : NSObject + +@property(readonly, nonatomic) dispatch_queue_t contextQueue; +@property(readwrite, retain, nonatomic) GLProgram *currentShaderProgram; +@property(readonly, retain, nonatomic) EAGLContext *context; +@property(readonly) CVOpenGLESTextureCacheRef coreVideoTextureCache; +@property(readonly) GPUImageFramebufferCache *framebufferCache; + ++ (void *)contextKey; ++ (GPUImageContext *)sharedImageProcessingContext; ++ (dispatch_queue_t)sharedContextQueue; ++ (GPUImageFramebufferCache *)sharedFramebufferCache; ++ (void)useImageProcessingContext; +- (void)useAsCurrentContext; ++ (void)setActiveShaderProgram:(GLProgram *)shaderProgram; +- (void)setContextShaderProgram:(GLProgram *)shaderProgram; ++ (GLint)maximumTextureSizeForThisDevice; ++ (GLint)maximumTextureUnitsForThisDevice; ++ (GLint)maximumVaryingVectorsForThisDevice; ++ (BOOL)deviceSupportsOpenGLESExtension:(NSString *)extension; ++ (BOOL)deviceSupportsRedTextures; ++ (BOOL)deviceSupportsFramebufferReads; ++ (CGSize)sizeThatFitsWithinATextureForSize:(CGSize)inputSize; + +- (void)presentBufferForDisplay; +- (GLProgram *)programForVertexShaderString:(NSString *)vertexShaderString fragmentShaderString:(NSString *)fragmentShaderString; + +- (void)useSharegroup:(EAGLSharegroup *)sharegroup; + +// Manage fast texture upload ++ (BOOL)supportsFastTextureUpload; + +@end + +@protocol GPUImageInput +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +- (void)setInputFramebuffer:(GPUImageFramebuffer *)newInputFramebuffer 
atIndex:(NSInteger)textureIndex; +- (NSInteger)nextAvailableTextureIndex; +- (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +- (CGSize)maximumOutputSize; +- (void)endProcessing; +- (BOOL)shouldIgnoreUpdatesToThisTarget; +- (BOOL)enabled; +- (BOOL)wantsMonochromeInput; +- (void)setCurrentlyReceivingMonochromeInput:(BOOL)newValue; +@end diff --git a/LFLiveKit/Vendor/GPUImage/iOS/GPUImageContext.m b/LFLiveKit/Vendor/GPUImage/iOS/GPUImageContext.m new file mode 100755 index 00000000..c167d15d --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/iOS/GPUImageContext.m @@ -0,0 +1,320 @@ +#import "GPUImageContext.h" +#import +#import + +#define MAXSHADERPROGRAMSALLOWEDINCACHE 40 + +extern dispatch_queue_attr_t GPUImageDefaultQueueAttribute(void); + +@interface GPUImageContext() +{ + NSMutableDictionary *shaderProgramCache; + NSMutableArray *shaderProgramUsageHistory; + EAGLSharegroup *_sharegroup; +} + +@end + +@implementation GPUImageContext + +@synthesize context = _context; +@synthesize currentShaderProgram = _currentShaderProgram; +@synthesize contextQueue = _contextQueue; +@synthesize coreVideoTextureCache = _coreVideoTextureCache; +@synthesize framebufferCache = _framebufferCache; + +static void *openGLESContextQueueKey; + +- (id)init; +{ + if (!(self = [super init])) + { + return nil; + } + + openGLESContextQueueKey = &openGLESContextQueueKey; + _contextQueue = dispatch_queue_create("com.sunsetlakesoftware.GPUImage.openGLESContextQueue", GPUImageDefaultQueueAttribute()); + +#if OS_OBJECT_USE_OBJC + dispatch_queue_set_specific(_contextQueue, openGLESContextQueueKey, (__bridge void *)self, NULL); +#endif + shaderProgramCache = [[NSMutableDictionary alloc] init]; + shaderProgramUsageHistory = [[NSMutableArray alloc] init]; + + return self; +} + ++ (void *)contextKey { + return openGLESContextQueueKey; +} + +// Based on Colin Wheeler's example here: 
http://cocoasamurai.blogspot.com/2011/04/singletons-your-doing-them-wrong.html ++ (GPUImageContext *)sharedImageProcessingContext; +{ + static dispatch_once_t pred; + static GPUImageContext *sharedImageProcessingContext = nil; + + dispatch_once(&pred, ^{ + sharedImageProcessingContext = [[[self class] alloc] init]; + }); + return sharedImageProcessingContext; +} + ++ (dispatch_queue_t)sharedContextQueue; +{ + return [[self sharedImageProcessingContext] contextQueue]; +} + ++ (GPUImageFramebufferCache *)sharedFramebufferCache; +{ + return [[self sharedImageProcessingContext] framebufferCache]; +} + ++ (void)useImageProcessingContext; +{ + [[GPUImageContext sharedImageProcessingContext] useAsCurrentContext]; +} + +- (void)useAsCurrentContext; +{ + EAGLContext *imageProcessingContext = [self context]; + if ([EAGLContext currentContext] != imageProcessingContext) + { + [EAGLContext setCurrentContext:imageProcessingContext]; + } +} + ++ (void)setActiveShaderProgram:(GLProgram *)shaderProgram; +{ + GPUImageContext *sharedContext = [GPUImageContext sharedImageProcessingContext]; + [sharedContext setContextShaderProgram:shaderProgram]; +} + +- (void)setContextShaderProgram:(GLProgram *)shaderProgram; +{ + EAGLContext *imageProcessingContext = [self context]; + if ([EAGLContext currentContext] != imageProcessingContext) + { + [EAGLContext setCurrentContext:imageProcessingContext]; + } + + if (self.currentShaderProgram != shaderProgram) + { + self.currentShaderProgram = shaderProgram; + [shaderProgram use]; + } +} + ++ (GLint)maximumTextureSizeForThisDevice; +{ + static dispatch_once_t pred; + static GLint maxTextureSize = 0; + + dispatch_once(&pred, ^{ + [self useImageProcessingContext]; + glGetIntegerv(GL_MAX_TEXTURE_SIZE, &maxTextureSize); + }); + + return maxTextureSize; +} + ++ (GLint)maximumTextureUnitsForThisDevice; +{ + static dispatch_once_t pred; + static GLint maxTextureUnits = 0; + + dispatch_once(&pred, ^{ + [self useImageProcessingContext]; + 
glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, &maxTextureUnits); + }); + + return maxTextureUnits; +} + ++ (GLint)maximumVaryingVectorsForThisDevice; +{ + static dispatch_once_t pred; + static GLint maxVaryingVectors = 0; + + dispatch_once(&pred, ^{ + [self useImageProcessingContext]; + glGetIntegerv(GL_MAX_VARYING_VECTORS, &maxVaryingVectors); + }); + + return maxVaryingVectors; +} + ++ (BOOL)deviceSupportsOpenGLESExtension:(NSString *)extension; +{ + static dispatch_once_t pred; + static NSArray *extensionNames = nil; + + // Cache extensions for later quick reference, since this won't change for a given device + dispatch_once(&pred, ^{ + [GPUImageContext useImageProcessingContext]; + NSString *extensionsString = [NSString stringWithCString:(const char *)glGetString(GL_EXTENSIONS) encoding:NSASCIIStringEncoding]; + extensionNames = [extensionsString componentsSeparatedByString:@" "]; + }); + + return [extensionNames containsObject:extension]; +} + + +// http://www.khronos.org/registry/gles/extensions/EXT/EXT_texture_rg.txt + ++ (BOOL)deviceSupportsRedTextures; +{ + static dispatch_once_t pred; + static BOOL supportsRedTextures = NO; + + dispatch_once(&pred, ^{ + supportsRedTextures = [GPUImageContext deviceSupportsOpenGLESExtension:@"GL_EXT_texture_rg"]; + }); + + return supportsRedTextures; +} + ++ (BOOL)deviceSupportsFramebufferReads; +{ + static dispatch_once_t pred; + static BOOL supportsFramebufferReads = NO; + + dispatch_once(&pred, ^{ + supportsFramebufferReads = [GPUImageContext deviceSupportsOpenGLESExtension:@"GL_EXT_shader_framebuffer_fetch"]; + }); + + return supportsFramebufferReads; +} + ++ (CGSize)sizeThatFitsWithinATextureForSize:(CGSize)inputSize; +{ + GLint maxTextureSize = [self maximumTextureSizeForThisDevice]; + if ( (inputSize.width < maxTextureSize) && (inputSize.height < maxTextureSize) ) + { + return inputSize; + } + + CGSize adjustedSize; + if (inputSize.width > inputSize.height) + { + adjustedSize.width = (CGFloat)maxTextureSize; + 
adjustedSize.height = ((CGFloat)maxTextureSize / inputSize.width) * inputSize.height; + } + else + { + adjustedSize.height = (CGFloat)maxTextureSize; + adjustedSize.width = ((CGFloat)maxTextureSize / inputSize.height) * inputSize.width; + } + + return adjustedSize; +} + +- (void)presentBufferForDisplay; +{ + [self.context presentRenderbuffer:GL_RENDERBUFFER]; +} + +- (GLProgram *)programForVertexShaderString:(NSString *)vertexShaderString fragmentShaderString:(NSString *)fragmentShaderString; +{ + NSString *lookupKeyForShaderProgram = [NSString stringWithFormat:@"V: %@ - F: %@", vertexShaderString, fragmentShaderString]; + GLProgram *programFromCache = [shaderProgramCache objectForKey:lookupKeyForShaderProgram]; + + if (programFromCache == nil) + { + programFromCache = [[GLProgram alloc] initWithVertexShaderString:vertexShaderString fragmentShaderString:fragmentShaderString]; + [shaderProgramCache setObject:programFromCache forKey:lookupKeyForShaderProgram]; +// [shaderProgramUsageHistory addObject:lookupKeyForShaderProgram]; +// if ([shaderProgramUsageHistory count] >= MAXSHADERPROGRAMSALLOWEDINCACHE) +// { +// for (NSUInteger currentShaderProgramRemovedFromCache = 0; currentShaderProgramRemovedFromCache < 10; currentShaderProgramRemovedFromCache++) +// { +// NSString *shaderProgramToRemoveFromCache = [shaderProgramUsageHistory objectAtIndex:0]; +// [shaderProgramUsageHistory removeObjectAtIndex:0]; +// [shaderProgramCache removeObjectForKey:shaderProgramToRemoveFromCache]; +// } +// } + } + + return programFromCache; +} + +- (void)useSharegroup:(EAGLSharegroup *)sharegroup; +{ + NSAssert(_context == nil, @"Unable to use a share group when the context has already been created. 
Call this method before you use the context for the first time."); + + _sharegroup = sharegroup; +} + +- (EAGLContext *)createContext; +{ + EAGLContext *context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2 sharegroup:_sharegroup]; + NSAssert(context != nil, @"Unable to create an OpenGL ES 2.0 context. The GPUImage framework requires OpenGL ES 2.0 support to work."); + return context; +} + + +#pragma mark - +#pragma mark Manage fast texture upload + ++ (BOOL)supportsFastTextureUpload; +{ +#if TARGET_IPHONE_SIMULATOR + return NO; +#else + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wtautological-pointer-compare" + return (CVOpenGLESTextureCacheCreate != NULL); +#pragma clang diagnostic pop + +#endif +} + +#pragma mark - +#pragma mark Accessors + +- (EAGLContext *)context; +{ + if (_context == nil) + { + _context = [self createContext]; + [EAGLContext setCurrentContext:_context]; + + // Set up a few global settings for the image processing pipeline + glDisable(GL_DEPTH_TEST); + } + + return _context; +} + +- (CVOpenGLESTextureCacheRef)coreVideoTextureCache; +{ + if (_coreVideoTextureCache == NULL) + { +#if defined(__IPHONE_6_0) + CVReturn err = CVOpenGLESTextureCacheCreate(kCFAllocatorDefault, NULL, [self context], NULL, &_coreVideoTextureCache); +#else + CVReturn err = CVOpenGLESTextureCacheCreate(kCFAllocatorDefault, NULL, (__bridge void *)[self context], NULL, &_coreVideoTextureCache); +#endif + + if (err) + { + NSAssert(NO, @"Error at CVOpenGLESTextureCacheCreate %d", err); + } + + } + + return _coreVideoTextureCache; +} + +- (GPUImageFramebufferCache *)framebufferCache; +{ + if (_framebufferCache == nil) + { + _framebufferCache = [[GPUImageFramebufferCache alloc] init]; + } + + return _framebufferCache; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/iOS/GPUImageMovieWriter.h b/LFLiveKit/Vendor/GPUImage/iOS/GPUImageMovieWriter.h new file mode 100755 index 00000000..f0b2c6ad --- /dev/null +++ 
b/LFLiveKit/Vendor/GPUImage/iOS/GPUImageMovieWriter.h @@ -0,0 +1,67 @@ +#import +#import +#import "GPUImageContext.h" + +extern NSString *const kGPUImageColorSwizzlingFragmentShaderString; + +@protocol GPUImageMovieWriterDelegate + +@optional +- (void)movieRecordingCompleted; +- (void)movieRecordingFailedWithError:(NSError*)error; + +@end + +@interface GPUImageMovieWriter : NSObject +{ + BOOL alreadyFinishedRecording; + + NSURL *movieURL; + NSString *fileType; + AVAssetWriter *assetWriter; + AVAssetWriterInput *assetWriterAudioInput; + AVAssetWriterInput *assetWriterVideoInput; + AVAssetWriterInputPixelBufferAdaptor *assetWriterPixelBufferInput; + + GPUImageContext *_movieWriterContext; + CVPixelBufferRef renderTarget; + CVOpenGLESTextureRef renderTexture; + + CGSize videoSize; + GPUImageRotationMode inputRotation; +} + +@property(readwrite, nonatomic) BOOL hasAudioTrack; +@property(readwrite, nonatomic) BOOL shouldPassthroughAudio; +@property(readwrite, nonatomic) BOOL shouldInvalidateAudioSampleWhenDone; +@property(nonatomic, copy) void(^completionBlock)(void); +@property(nonatomic, copy) void(^failureBlock)(NSError*); +@property(nonatomic, assign) id delegate; +@property(readwrite, nonatomic) BOOL encodingLiveVideo; +@property(nonatomic, copy) BOOL(^videoInputReadyCallback)(void); +@property(nonatomic, copy) BOOL(^audioInputReadyCallback)(void); +@property(nonatomic, copy) void(^audioProcessingCallback)(SInt16 **samplesRef, CMItemCount numSamplesInBuffer); +@property(nonatomic) BOOL enabled; +@property(nonatomic, readonly) AVAssetWriter *assetWriter; +@property(nonatomic, readonly) CMTime duration; +@property(nonatomic, assign) CGAffineTransform transform; +@property(nonatomic, copy) NSArray *metaData; +@property(nonatomic, assign, getter = isPaused) BOOL paused; +@property(nonatomic, retain) GPUImageContext *movieWriterContext; + +// Initialization and teardown +- (id)initWithMovieURL:(NSURL *)newMovieURL size:(CGSize)newSize; +- (id)initWithMovieURL:(NSURL 
*)newMovieURL size:(CGSize)newSize fileType:(NSString *)newFileType outputSettings:(NSDictionary *)outputSettings; + +- (void)setHasAudioTrack:(BOOL)hasAudioTrack audioSettings:(NSDictionary *)audioOutputSettings; + +// Movie recording +- (void)startRecording; +- (void)startRecordingInOrientation:(CGAffineTransform)orientationTransform; +- (void)finishRecording; +- (void)finishRecordingWithCompletionHandler:(void (^)(void))handler; +- (void)cancelRecording; +- (void)processAudioBuffer:(CMSampleBufferRef)audioBuffer; +- (void)enableSynchronizationCallbacks; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/iOS/GPUImageMovieWriter.m b/LFLiveKit/Vendor/GPUImage/iOS/GPUImageMovieWriter.m new file mode 100755 index 00000000..addc0a79 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/iOS/GPUImageMovieWriter.m @@ -0,0 +1,1016 @@ +#import "GPUImageMovieWriter.h" + +#import "GPUImageContext.h" +#import "GLProgram.h" +#import "GPUImageFilter.h" + +NSString *const kGPUImageColorSwizzlingFragmentShaderString = SHADER_STRING +( + varying highp vec2 textureCoordinate; + + uniform sampler2D inputImageTexture; + + void main() + { + gl_FragColor = texture2D(inputImageTexture, textureCoordinate).bgra; + } +); + + +@interface GPUImageMovieWriter () +{ + GLuint movieFramebuffer, movieRenderbuffer; + + GLProgram *colorSwizzlingProgram; + GLint colorSwizzlingPositionAttribute, colorSwizzlingTextureCoordinateAttribute; + GLint colorSwizzlingInputTextureUniform; + + GPUImageFramebuffer *firstInputFramebuffer; + + BOOL discont; + CMTime startTime, previousFrameTime, previousAudioTime; + CMTime offsetTime; + + dispatch_queue_t audioQueue, videoQueue; + BOOL audioEncodingIsFinished, videoEncodingIsFinished; + + BOOL isRecording; +} + +// Movie recording +- (void)initializeMovieWithOutputSettings:(NSMutableDictionary *)outputSettings; + +// Frame rendering +- (void)createDataFBO; +- (void)destroyDataFBO; +- (void)setFilterFBO; + +- (void)renderAtInternalSizeUsingFramebuffer:(GPUImageFramebuffer 
*)inputFramebufferToUse; + +@end + +@implementation GPUImageMovieWriter + +@synthesize hasAudioTrack = _hasAudioTrack; +@synthesize encodingLiveVideo = _encodingLiveVideo; +@synthesize shouldPassthroughAudio = _shouldPassthroughAudio; +@synthesize completionBlock; +@synthesize failureBlock; +@synthesize videoInputReadyCallback; +@synthesize audioInputReadyCallback; +@synthesize enabled; +@synthesize shouldInvalidateAudioSampleWhenDone = _shouldInvalidateAudioSampleWhenDone; +@synthesize paused = _paused; +@synthesize movieWriterContext = _movieWriterContext; + +@synthesize delegate = _delegate; + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithMovieURL:(NSURL *)newMovieURL size:(CGSize)newSize; +{ + return [self initWithMovieURL:newMovieURL size:newSize fileType:AVFileTypeQuickTimeMovie outputSettings:nil]; +} + +- (id)initWithMovieURL:(NSURL *)newMovieURL size:(CGSize)newSize fileType:(NSString *)newFileType outputSettings:(NSMutableDictionary *)outputSettings; +{ + if (!(self = [super init])) + { + return nil; + } + + _shouldInvalidateAudioSampleWhenDone = NO; + + self.enabled = YES; + alreadyFinishedRecording = NO; + videoEncodingIsFinished = NO; + audioEncodingIsFinished = NO; + + discont = NO; + videoSize = newSize; + movieURL = newMovieURL; + fileType = newFileType; + startTime = kCMTimeInvalid; + _encodingLiveVideo = [[outputSettings objectForKey:@"EncodingLiveVideo"] isKindOfClass:[NSNumber class]] ? 
[[outputSettings objectForKey:@"EncodingLiveVideo"] boolValue] : YES; + previousFrameTime = kCMTimeNegativeInfinity; + previousAudioTime = kCMTimeNegativeInfinity; + inputRotation = kGPUImageNoRotation; + + _movieWriterContext = [[GPUImageContext alloc] init]; + [_movieWriterContext useSharegroup:[[[GPUImageContext sharedImageProcessingContext] context] sharegroup]]; + + runSynchronouslyOnContextQueue(_movieWriterContext, ^{ + [_movieWriterContext useAsCurrentContext]; + + if ([GPUImageContext supportsFastTextureUpload]) + { + colorSwizzlingProgram = [_movieWriterContext programForVertexShaderString:kGPUImageVertexShaderString fragmentShaderString:kGPUImagePassthroughFragmentShaderString]; + } + else + { + colorSwizzlingProgram = [_movieWriterContext programForVertexShaderString:kGPUImageVertexShaderString fragmentShaderString:kGPUImageColorSwizzlingFragmentShaderString]; + } + + if (!colorSwizzlingProgram.initialized) + { + [colorSwizzlingProgram addAttribute:@"position"]; + [colorSwizzlingProgram addAttribute:@"inputTextureCoordinate"]; + + if (![colorSwizzlingProgram link]) + { + NSString *progLog = [colorSwizzlingProgram programLog]; + NSLog(@"Program link log: %@", progLog); + NSString *fragLog = [colorSwizzlingProgram fragmentShaderLog]; + NSLog(@"Fragment shader compile log: %@", fragLog); + NSString *vertLog = [colorSwizzlingProgram vertexShaderLog]; + NSLog(@"Vertex shader compile log: %@", vertLog); + colorSwizzlingProgram = nil; + NSAssert(NO, @"Filter shader link failed"); + } + } + + colorSwizzlingPositionAttribute = [colorSwizzlingProgram attributeIndex:@"position"]; + colorSwizzlingTextureCoordinateAttribute = [colorSwizzlingProgram attributeIndex:@"inputTextureCoordinate"]; + colorSwizzlingInputTextureUniform = [colorSwizzlingProgram uniformIndex:@"inputImageTexture"]; + + [_movieWriterContext setContextShaderProgram:colorSwizzlingProgram]; + + glEnableVertexAttribArray(colorSwizzlingPositionAttribute); + 
glEnableVertexAttribArray(colorSwizzlingTextureCoordinateAttribute); + }); + + [self initializeMovieWithOutputSettings:outputSettings]; + + return self; +} + +- (void)dealloc; +{ + [self destroyDataFBO]; + +#if !OS_OBJECT_USE_OBJC + if( audioQueue != NULL ) + { + dispatch_release(audioQueue); + } + if( videoQueue != NULL ) + { + dispatch_release(videoQueue); + } +#endif +} + +#pragma mark - +#pragma mark Movie recording + +- (void)initializeMovieWithOutputSettings:(NSDictionary *)outputSettings; +{ + isRecording = NO; + + self.enabled = YES; + NSError *error = nil; + assetWriter = [[AVAssetWriter alloc] initWithURL:movieURL fileType:fileType error:&error]; + if (error != nil) + { + NSLog(@"Error: %@", error); + if (failureBlock) + { + failureBlock(error); + } + else + { + if(self.delegate && [self.delegate respondsToSelector:@selector(movieRecordingFailedWithError:)]) + { + [self.delegate movieRecordingFailedWithError:error]; + } + } + } + + // Set this to make sure that a functional movie is produced, even if the recording is cut off mid-stream. Only the last second should be lost in that case. 
+ assetWriter.movieFragmentInterval = CMTimeMakeWithSeconds(1.0, 1000); + + // use default output settings if none specified + if (outputSettings == nil) + { + NSMutableDictionary *settings = [[NSMutableDictionary alloc] init]; + [settings setObject:AVVideoCodecH264 forKey:AVVideoCodecKey]; + [settings setObject:[NSNumber numberWithInt:videoSize.width] forKey:AVVideoWidthKey]; + [settings setObject:[NSNumber numberWithInt:videoSize.height] forKey:AVVideoHeightKey]; + outputSettings = settings; + } + // custom output settings specified + else + { + __unused NSString *videoCodec = [outputSettings objectForKey:AVVideoCodecKey]; + __unused NSNumber *width = [outputSettings objectForKey:AVVideoWidthKey]; + __unused NSNumber *height = [outputSettings objectForKey:AVVideoHeightKey]; + + NSAssert(videoCodec && width && height, @"OutputSettings is missing required parameters."); + + if( [outputSettings objectForKey:@"EncodingLiveVideo"] ) { + NSMutableDictionary *tmp = [outputSettings mutableCopy]; + [tmp removeObjectForKey:@"EncodingLiveVideo"]; + outputSettings = tmp; + } + } + + /* + NSDictionary *videoCleanApertureSettings = [NSDictionary dictionaryWithObjectsAndKeys: + [NSNumber numberWithInt:videoSize.width], AVVideoCleanApertureWidthKey, + [NSNumber numberWithInt:videoSize.height], AVVideoCleanApertureHeightKey, + [NSNumber numberWithInt:0], AVVideoCleanApertureHorizontalOffsetKey, + [NSNumber numberWithInt:0], AVVideoCleanApertureVerticalOffsetKey, + nil]; + + NSDictionary *videoAspectRatioSettings = [NSDictionary dictionaryWithObjectsAndKeys: + [NSNumber numberWithInt:3], AVVideoPixelAspectRatioHorizontalSpacingKey, + [NSNumber numberWithInt:3], AVVideoPixelAspectRatioVerticalSpacingKey, + nil]; + + NSMutableDictionary * compressionProperties = [[NSMutableDictionary alloc] init]; + [compressionProperties setObject:videoCleanApertureSettings forKey:AVVideoCleanApertureKey]; + [compressionProperties setObject:videoAspectRatioSettings 
forKey:AVVideoPixelAspectRatioKey]; + [compressionProperties setObject:[NSNumber numberWithInt: 2000000] forKey:AVVideoAverageBitRateKey]; + [compressionProperties setObject:[NSNumber numberWithInt: 16] forKey:AVVideoMaxKeyFrameIntervalKey]; + [compressionProperties setObject:AVVideoProfileLevelH264Main31 forKey:AVVideoProfileLevelKey]; + + [outputSettings setObject:compressionProperties forKey:AVVideoCompressionPropertiesKey]; + */ + + assetWriterVideoInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:outputSettings]; + assetWriterVideoInput.expectsMediaDataInRealTime = _encodingLiveVideo; + + // You need to use BGRA for the video in order to get realtime encoding. I use a color-swizzling shader to line up glReadPixels' normal RGBA output with the movie input's BGRA. + NSDictionary *sourcePixelBufferAttributesDictionary = [NSDictionary dictionaryWithObjectsAndKeys: [NSNumber numberWithInt:kCVPixelFormatType_32BGRA], kCVPixelBufferPixelFormatTypeKey, + [NSNumber numberWithInt:videoSize.width], kCVPixelBufferWidthKey, + [NSNumber numberWithInt:videoSize.height], kCVPixelBufferHeightKey, + nil]; +// NSDictionary *sourcePixelBufferAttributesDictionary = [NSDictionary dictionaryWithObjectsAndKeys: [NSNumber numberWithInt:kCVPixelFormatType_32ARGB], kCVPixelBufferPixelFormatTypeKey, +// nil]; + + assetWriterPixelBufferInput = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:assetWriterVideoInput sourcePixelBufferAttributes:sourcePixelBufferAttributesDictionary]; + + [assetWriter addInput:assetWriterVideoInput]; +} + +- (void)setEncodingLiveVideo:(BOOL) value +{ + _encodingLiveVideo = value; + if (isRecording) { + NSAssert(NO, @"Can not change Encoding Live Video while recording"); + } + else + { + assetWriterVideoInput.expectsMediaDataInRealTime = _encodingLiveVideo; + assetWriterAudioInput.expectsMediaDataInRealTime = _encodingLiveVideo; + } +} + +- (void)startRecording; +{ + 
alreadyFinishedRecording = NO; + startTime = kCMTimeInvalid; + runSynchronouslyOnContextQueue(_movieWriterContext, ^{ + if (audioInputReadyCallback == NULL) + { + [assetWriter startWriting]; + } + }); + isRecording = YES; + // [assetWriter startSessionAtSourceTime:kCMTimeZero]; +} + +- (void)startRecordingInOrientation:(CGAffineTransform)orientationTransform; +{ + assetWriterVideoInput.transform = orientationTransform; + + [self startRecording]; +} + +- (void)cancelRecording; +{ + if (assetWriter.status == AVAssetWriterStatusCompleted) + { + return; + } + + isRecording = NO; + runSynchronouslyOnContextQueue(_movieWriterContext, ^{ + alreadyFinishedRecording = YES; + + if( assetWriter.status == AVAssetWriterStatusWriting && ! videoEncodingIsFinished ) + { + videoEncodingIsFinished = YES; + [assetWriterVideoInput markAsFinished]; + } + if( assetWriter.status == AVAssetWriterStatusWriting && ! audioEncodingIsFinished ) + { + audioEncodingIsFinished = YES; + [assetWriterAudioInput markAsFinished]; + } + [assetWriter cancelWriting]; + }); +} + +- (void)finishRecording; +{ + [self finishRecordingWithCompletionHandler:NULL]; +} + +- (void)finishRecordingWithCompletionHandler:(void (^)(void))handler; +{ + runSynchronouslyOnContextQueue(_movieWriterContext, ^{ + isRecording = NO; + + if (assetWriter.status == AVAssetWriterStatusCompleted || assetWriter.status == AVAssetWriterStatusCancelled || assetWriter.status == AVAssetWriterStatusUnknown) + { + if (handler) + runAsynchronouslyOnContextQueue(_movieWriterContext, handler); + return; + } + if( assetWriter.status == AVAssetWriterStatusWriting && ! videoEncodingIsFinished ) + { + videoEncodingIsFinished = YES; + [assetWriterVideoInput markAsFinished]; + } + if( assetWriter.status == AVAssetWriterStatusWriting && ! 
audioEncodingIsFinished ) + { + audioEncodingIsFinished = YES; + [assetWriterAudioInput markAsFinished]; + } +#if (!defined(__IPHONE_6_0) || (__IPHONE_OS_VERSION_MAX_ALLOWED < __IPHONE_6_0)) + // Not iOS 6 SDK + [assetWriter finishWriting]; + if (handler) + runAsynchronouslyOnContextQueue(_movieWriterContext,handler); +#else + // iOS 6 SDK + if ([assetWriter respondsToSelector:@selector(finishWritingWithCompletionHandler:)]) { + // Running iOS 6 + [assetWriter finishWritingWithCompletionHandler:(handler ?: ^{ })]; + } + else { + // Not running iOS 6 +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + [assetWriter finishWriting]; +#pragma clang diagnostic pop + if (handler) + runAsynchronouslyOnContextQueue(_movieWriterContext, handler); + } +#endif + }); +} + +- (void)processAudioBuffer:(CMSampleBufferRef)audioBuffer; +{ + if (!isRecording || _paused) + { + return; + } + +// if (_hasAudioTrack && CMTIME_IS_VALID(startTime)) + if (_hasAudioTrack) + { + CFRetain(audioBuffer); + + CMTime currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(audioBuffer); + + if (CMTIME_IS_INVALID(startTime)) + { + runSynchronouslyOnContextQueue(_movieWriterContext, ^{ + if ((audioInputReadyCallback == NULL) && (assetWriter.status != AVAssetWriterStatusWriting)) + { + [assetWriter startWriting]; + } + [assetWriter startSessionAtSourceTime:currentSampleTime]; + startTime = currentSampleTime; + }); + } + + if (!assetWriterAudioInput.readyForMoreMediaData && _encodingLiveVideo) + { + NSLog(@"1: Had to drop an audio frame: %@", CFBridgingRelease(CMTimeCopyDescription(kCFAllocatorDefault, currentSampleTime))); + if (_shouldInvalidateAudioSampleWhenDone) + { + CMSampleBufferInvalidate(audioBuffer); + } + CFRelease(audioBuffer); + return; + } + + if (discont) { + discont = NO; + + CMTime current; + if (offsetTime.value > 0) { + current = CMTimeSubtract(currentSampleTime, offsetTime); + } else { + current = currentSampleTime; + } + + CMTime 
offset = CMTimeSubtract(current, previousAudioTime); + + if (offsetTime.value == 0) { + offsetTime = offset; + } else { + offsetTime = CMTimeAdd(offsetTime, offset); + } + } + + if (offsetTime.value > 0) { + CFRelease(audioBuffer); + audioBuffer = [self adjustTime:audioBuffer by:offsetTime]; + CFRetain(audioBuffer); + } + + // record most recent time so we know the length of the pause + currentSampleTime = CMSampleBufferGetPresentationTimeStamp(audioBuffer); + + previousAudioTime = currentSampleTime; + + //if the consumer wants to do something with the audio samples before writing, let him. + if (self.audioProcessingCallback) { + //need to introspect into the opaque CMBlockBuffer structure to find its raw sample buffers. + CMBlockBufferRef buffer = CMSampleBufferGetDataBuffer(audioBuffer); + CMItemCount numSamplesInBuffer = CMSampleBufferGetNumSamples(audioBuffer); + AudioBufferList audioBufferList; + + CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(audioBuffer, + NULL, + &audioBufferList, + sizeof(audioBufferList), + NULL, + NULL, + kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment, + &buffer + ); + //passing a live pointer to the audio buffers, try to process them in-place or we might have syncing issues. + for (int bufferCount=0; bufferCount < audioBufferList.mNumberBuffers; bufferCount++) { + SInt16 *samples = (SInt16 *)audioBufferList.mBuffers[bufferCount].mData; + self.audioProcessingCallback(&samples, numSamplesInBuffer); + } + } + +// NSLog(@"Recorded audio sample time: %lld, %d, %lld", currentSampleTime.value, currentSampleTime.timescale, currentSampleTime.epoch); + void(^write)() = ^() { + while( ! assetWriterAudioInput.readyForMoreMediaData && ! _encodingLiveVideo && ! 
audioEncodingIsFinished ) { + NSDate *maxDate = [NSDate dateWithTimeIntervalSinceNow:0.5]; + //NSLog(@"audio waiting..."); + [[NSRunLoop currentRunLoop] runUntilDate:maxDate]; + } + if (!assetWriterAudioInput.readyForMoreMediaData) + { + NSLog(@"2: Had to drop an audio frame %@", CFBridgingRelease(CMTimeCopyDescription(kCFAllocatorDefault, currentSampleTime))); + } + else if(assetWriter.status == AVAssetWriterStatusWriting) + { + if (![assetWriterAudioInput appendSampleBuffer:audioBuffer]) + NSLog(@"Problem appending audio buffer at time: %@", CFBridgingRelease(CMTimeCopyDescription(kCFAllocatorDefault, currentSampleTime))); + } + else + { + //NSLog(@"Wrote an audio frame %@", CFBridgingRelease(CMTimeCopyDescription(kCFAllocatorDefault, currentSampleTime))); + } + + if (_shouldInvalidateAudioSampleWhenDone) + { + CMSampleBufferInvalidate(audioBuffer); + } + CFRelease(audioBuffer); + }; +// runAsynchronouslyOnContextQueue(_movieWriterContext, write); + if( _encodingLiveVideo ) + + { + runAsynchronouslyOnContextQueue(_movieWriterContext, write); + } + else + { + write(); + } + } +} + +- (void)enableSynchronizationCallbacks; +{ + if (videoInputReadyCallback != NULL) + { + if( assetWriter.status != AVAssetWriterStatusWriting ) + { + [assetWriter startWriting]; + } + videoQueue = dispatch_queue_create("com.sunsetlakesoftware.GPUImage.videoReadingQueue", GPUImageDefaultQueueAttribute()); + [assetWriterVideoInput requestMediaDataWhenReadyOnQueue:videoQueue usingBlock:^{ + if( _paused ) + { + //NSLog(@"video requestMediaDataWhenReadyOnQueue paused"); + // if we don't sleep, we'll get called back almost immediately, chewing up CPU + usleep(10000); + return; + } + //NSLog(@"video requestMediaDataWhenReadyOnQueue begin"); + while( assetWriterVideoInput.readyForMoreMediaData && ! _paused ) + { + if( videoInputReadyCallback && ! videoInputReadyCallback() && ! 
videoEncodingIsFinished ) + { + runAsynchronouslyOnContextQueue(_movieWriterContext, ^{ + if( assetWriter.status == AVAssetWriterStatusWriting && ! videoEncodingIsFinished ) + { + videoEncodingIsFinished = YES; + [assetWriterVideoInput markAsFinished]; + } + }); + } + } + //NSLog(@"video requestMediaDataWhenReadyOnQueue end"); + }]; + } + + if (audioInputReadyCallback != NULL) + { + audioQueue = dispatch_queue_create("com.sunsetlakesoftware.GPUImage.audioReadingQueue", GPUImageDefaultQueueAttribute()); + [assetWriterAudioInput requestMediaDataWhenReadyOnQueue:audioQueue usingBlock:^{ + if( _paused ) + { + //NSLog(@"audio requestMediaDataWhenReadyOnQueue paused"); + // if we don't sleep, we'll get called back almost immediately, chewing up CPU + usleep(10000); + return; + } + //NSLog(@"audio requestMediaDataWhenReadyOnQueue begin"); + while( assetWriterAudioInput.readyForMoreMediaData && ! _paused ) + { + if( audioInputReadyCallback && ! audioInputReadyCallback() && ! audioEncodingIsFinished ) + { + runAsynchronouslyOnContextQueue(_movieWriterContext, ^{ + if( assetWriter.status == AVAssetWriterStatusWriting && ! audioEncodingIsFinished ) + { + audioEncodingIsFinished = YES; + [assetWriterAudioInput markAsFinished]; + } + }); + } + } + //NSLog(@"audio requestMediaDataWhenReadyOnQueue end"); + }]; + } + +} + +#pragma mark - +#pragma mark Frame rendering + +- (void)createDataFBO; +{ + glActiveTexture(GL_TEXTURE1); + glGenFramebuffers(1, &movieFramebuffer); + glBindFramebuffer(GL_FRAMEBUFFER, movieFramebuffer); + + if ([GPUImageContext supportsFastTextureUpload]) + { + // Code originally sourced from http://allmybrain.com/2011/12/08/rendering-to-a-texture-with-ios-5-texture-cache-api/ + + + CVPixelBufferPoolCreatePixelBuffer (NULL, [assetWriterPixelBufferInput pixelBufferPool], &renderTarget); + + /* AVAssetWriter will use BT.601 conversion matrix for RGB to YCbCr conversion + * regardless of the kCVImageBufferYCbCrMatrixKey value. 
+ * Tagging the resulting video file as BT.601, is the best option right now. + * Creating a proper BT.709 video is not possible at the moment. + */ + CVBufferSetAttachment(renderTarget, kCVImageBufferColorPrimariesKey, kCVImageBufferColorPrimaries_ITU_R_709_2, kCVAttachmentMode_ShouldPropagate); + CVBufferSetAttachment(renderTarget, kCVImageBufferYCbCrMatrixKey, kCVImageBufferYCbCrMatrix_ITU_R_601_4, kCVAttachmentMode_ShouldPropagate); + CVBufferSetAttachment(renderTarget, kCVImageBufferTransferFunctionKey, kCVImageBufferTransferFunction_ITU_R_709_2, kCVAttachmentMode_ShouldPropagate); + + CVOpenGLESTextureCacheCreateTextureFromImage (kCFAllocatorDefault, [_movieWriterContext coreVideoTextureCache], renderTarget, + NULL, // texture attributes + GL_TEXTURE_2D, + GL_RGBA, // opengl format + (int)videoSize.width, + (int)videoSize.height, + GL_BGRA, // native iOS format + GL_UNSIGNED_BYTE, + 0, + &renderTexture); + + glBindTexture(CVOpenGLESTextureGetTarget(renderTexture), CVOpenGLESTextureGetName(renderTexture)); + glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + + glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, CVOpenGLESTextureGetName(renderTexture), 0); + } + else + { + glGenRenderbuffers(1, &movieRenderbuffer); + glBindRenderbuffer(GL_RENDERBUFFER, movieRenderbuffer); + glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA8_OES, (int)videoSize.width, (int)videoSize.height); + glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, movieRenderbuffer); + } + + + __unused GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER); + + NSAssert(status == GL_FRAMEBUFFER_COMPLETE, @"Incomplete filter FBO: %d", status); +} + +- (void)destroyDataFBO; +{ + runSynchronouslyOnContextQueue(_movieWriterContext, ^{ + [_movieWriterContext useAsCurrentContext]; + + if (movieFramebuffer) + { + glDeleteFramebuffers(1, &movieFramebuffer); + 
movieFramebuffer = 0; + } + + if (movieRenderbuffer) + { + glDeleteRenderbuffers(1, &movieRenderbuffer); + movieRenderbuffer = 0; + } + + if ([GPUImageContext supportsFastTextureUpload]) + { + if (renderTexture) + { + CFRelease(renderTexture); + } + if (renderTarget) + { + CVPixelBufferRelease(renderTarget); + } + + } + }); +} + +- (void)setFilterFBO; +{ + if (!movieFramebuffer) + { + [self createDataFBO]; + } + + glBindFramebuffer(GL_FRAMEBUFFER, movieFramebuffer); + + glViewport(0, 0, (int)videoSize.width, (int)videoSize.height); +} + +- (void)renderAtInternalSizeUsingFramebuffer:(GPUImageFramebuffer *)inputFramebufferToUse; +{ + [_movieWriterContext useAsCurrentContext]; + [self setFilterFBO]; + + [_movieWriterContext setContextShaderProgram:colorSwizzlingProgram]; + + glClearColor(1.0f, 0.0f, 0.0f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + // This needs to be flipped to write out to video correctly + static const GLfloat squareVertices[] = { + -1.0f, -1.0f, + 1.0f, -1.0f, + -1.0f, 1.0f, + 1.0f, 1.0f, + }; + + const GLfloat *textureCoordinates = [GPUImageFilter textureCoordinatesForRotation:inputRotation]; + + glActiveTexture(GL_TEXTURE4); + glBindTexture(GL_TEXTURE_2D, [inputFramebufferToUse texture]); + glUniform1i(colorSwizzlingInputTextureUniform, 4); + +// NSLog(@"Movie writer framebuffer: %@", inputFramebufferToUse); + + glVertexAttribPointer(colorSwizzlingPositionAttribute, 2, GL_FLOAT, 0, 0, squareVertices); + glVertexAttribPointer(colorSwizzlingTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, textureCoordinates); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + glFinish(); +} + +#pragma mark - +#pragma mark GPUImageInput protocol + +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +{ + if (!isRecording || _paused) + { + [firstInputFramebuffer unlock]; + return; + } + + if (discont) { + discont = NO; + CMTime current; + + if (offsetTime.value > 0) { + current = CMTimeSubtract(frameTime, offsetTime); + } else 
{ + current = frameTime; + } + + CMTime offset = CMTimeSubtract(current, previousFrameTime); + + if (offsetTime.value == 0) { + offsetTime = offset; + } else { + offsetTime = CMTimeAdd(offsetTime, offset); + } + } + + if (offsetTime.value > 0) { + frameTime = CMTimeSubtract(frameTime, offsetTime); + } + + // Drop frames forced by images and other things with no time constants + // Also, if two consecutive times with the same value are added to the movie, it aborts recording, so I bail on that case + if ( (CMTIME_IS_INVALID(frameTime)) || (CMTIME_COMPARE_INLINE(frameTime, ==, previousFrameTime)) || (CMTIME_IS_INDEFINITE(frameTime)) ) + { + [firstInputFramebuffer unlock]; + return; + } + + if (CMTIME_IS_INVALID(startTime)) + { + runSynchronouslyOnContextQueue(_movieWriterContext, ^{ + if ((videoInputReadyCallback == NULL) && (assetWriter.status != AVAssetWriterStatusWriting)) + { + [assetWriter startWriting]; + } + + [assetWriter startSessionAtSourceTime:frameTime]; + startTime = frameTime; + }); + } + + GPUImageFramebuffer *inputFramebufferForBlock = firstInputFramebuffer; + glFinish(); + + runAsynchronouslyOnContextQueue(_movieWriterContext, ^{ + if (!assetWriterVideoInput.readyForMoreMediaData && _encodingLiveVideo) + { + [inputFramebufferForBlock unlock]; + NSLog(@"1: Had to drop a video frame: %@", CFBridgingRelease(CMTimeCopyDescription(kCFAllocatorDefault, frameTime))); + return; + } + + // Render the frame with swizzled colors, so that they can be uploaded quickly as BGRA frames + [_movieWriterContext useAsCurrentContext]; + [self renderAtInternalSizeUsingFramebuffer:inputFramebufferForBlock]; + + CVPixelBufferRef pixel_buffer = NULL; + + if ([GPUImageContext supportsFastTextureUpload]) + { + pixel_buffer = renderTarget; + CVPixelBufferLockBaseAddress(pixel_buffer, 0); + } + else + { + CVReturn status = CVPixelBufferPoolCreatePixelBuffer (NULL, [assetWriterPixelBufferInput pixelBufferPool], &pixel_buffer); + if ((pixel_buffer == NULL) || (status != 
kCVReturnSuccess)) + { + CVPixelBufferRelease(pixel_buffer); + return; + } + else + { + CVPixelBufferLockBaseAddress(pixel_buffer, 0); + + GLubyte *pixelBufferData = (GLubyte *)CVPixelBufferGetBaseAddress(pixel_buffer); + glReadPixels(0, 0, videoSize.width, videoSize.height, GL_RGBA, GL_UNSIGNED_BYTE, pixelBufferData); + } + } + + void(^write)() = ^() { + while( ! assetWriterVideoInput.readyForMoreMediaData && ! _encodingLiveVideo && ! videoEncodingIsFinished ) { + NSDate *maxDate = [NSDate dateWithTimeIntervalSinceNow:0.1]; + // NSLog(@"video waiting..."); + [[NSRunLoop currentRunLoop] runUntilDate:maxDate]; + } + if (!assetWriterVideoInput.readyForMoreMediaData) + { + NSLog(@"2: Had to drop a video frame: %@", CFBridgingRelease(CMTimeCopyDescription(kCFAllocatorDefault, frameTime))); + } + else if(self.assetWriter.status == AVAssetWriterStatusWriting) + { + if (![assetWriterPixelBufferInput appendPixelBuffer:pixel_buffer withPresentationTime:frameTime]) + NSLog(@"Problem appending pixel buffer at time: %@", CFBridgingRelease(CMTimeCopyDescription(kCFAllocatorDefault, frameTime))); + } + else + { + NSLog(@"Couldn't write a frame"); + //NSLog(@"Wrote a video frame: %@", CFBridgingRelease(CMTimeCopyDescription(kCFAllocatorDefault, frameTime))); + } + CVPixelBufferUnlockBaseAddress(pixel_buffer, 0); + + previousFrameTime = frameTime; + + if (![GPUImageContext supportsFastTextureUpload]) + { + CVPixelBufferRelease(pixel_buffer); + } + }; + + write(); + + [inputFramebufferForBlock unlock]; + }); +} + +- (NSInteger)nextAvailableTextureIndex; +{ + return 0; +} + +- (void)setInputFramebuffer:(GPUImageFramebuffer *)newInputFramebuffer atIndex:(NSInteger)textureIndex; +{ + [newInputFramebuffer lock]; +// runSynchronouslyOnContextQueue(_movieWriterContext, ^{ + firstInputFramebuffer = newInputFramebuffer; +// }); +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + inputRotation = newInputRotation; +} + +- 
(void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ +} + +- (CGSize)maximumOutputSize; +{ + return videoSize; +} + +- (void)endProcessing +{ + if (completionBlock) + { + if (!alreadyFinishedRecording) + { + alreadyFinishedRecording = YES; + completionBlock(); + } + } + else + { + if (_delegate && [_delegate respondsToSelector:@selector(movieRecordingCompleted)]) + { + [_delegate movieRecordingCompleted]; + } + } +} + +- (BOOL)shouldIgnoreUpdatesToThisTarget; +{ + return NO; +} + +- (BOOL)wantsMonochromeInput; +{ + return NO; +} + +- (void)setCurrentlyReceivingMonochromeInput:(BOOL)newValue; +{ + +} + +#pragma mark - +#pragma mark Accessors + +- (void)setHasAudioTrack:(BOOL)newValue +{ + [self setHasAudioTrack:newValue audioSettings:nil]; +} + +- (void)setHasAudioTrack:(BOOL)newValue audioSettings:(NSDictionary *)audioOutputSettings; +{ + _hasAudioTrack = newValue; + + if (_hasAudioTrack) + { + if (_shouldPassthroughAudio) + { + // Do not set any settings so audio will be the same as passthrough + audioOutputSettings = nil; + } + else if (audioOutputSettings == nil) + { + AVAudioSession *sharedAudioSession = [AVAudioSession sharedInstance]; + double preferredHardwareSampleRate; + + if ([sharedAudioSession respondsToSelector:@selector(sampleRate)]) + { + preferredHardwareSampleRate = [sharedAudioSession sampleRate]; + } + else + { +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + preferredHardwareSampleRate = [[AVAudioSession sharedInstance] currentHardwareSampleRate]; +#pragma clang diagnostic pop + } + + AudioChannelLayout acl; + bzero( &acl, sizeof(acl)); + acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono; + + audioOutputSettings = [NSDictionary dictionaryWithObjectsAndKeys: + [ NSNumber numberWithInt: kAudioFormatMPEG4AAC], AVFormatIDKey, + [ NSNumber numberWithInt: 1 ], AVNumberOfChannelsKey, + [ NSNumber numberWithFloat: preferredHardwareSampleRate ], AVSampleRateKey, + [ NSData dataWithBytes: 
&acl length: sizeof( acl ) ], AVChannelLayoutKey, + //[ NSNumber numberWithInt:AVAudioQualityLow], AVEncoderAudioQualityKey, + [ NSNumber numberWithInt: 64000 ], AVEncoderBitRateKey, + nil]; +/* + AudioChannelLayout acl; + bzero( &acl, sizeof(acl)); + acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono; + + audioOutputSettings = [NSDictionary dictionaryWithObjectsAndKeys: + [ NSNumber numberWithInt: kAudioFormatMPEG4AAC ], AVFormatIDKey, + [ NSNumber numberWithInt: 1 ], AVNumberOfChannelsKey, + [ NSNumber numberWithFloat: 44100.0 ], AVSampleRateKey, + [ NSNumber numberWithInt: 64000 ], AVEncoderBitRateKey, + [ NSData dataWithBytes: &acl length: sizeof( acl ) ], AVChannelLayoutKey, + nil];*/ + } + + assetWriterAudioInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeAudio outputSettings:audioOutputSettings]; + [assetWriter addInput:assetWriterAudioInput]; + assetWriterAudioInput.expectsMediaDataInRealTime = _encodingLiveVideo; + } + else + { + // Remove audio track if it exists + } +} + +- (NSArray*)metaData { + return assetWriter.metadata; +} + +- (void)setMetaData:(NSArray*)metaData { + assetWriter.metadata = metaData; +} + +- (CMTime)duration { + if( ! CMTIME_IS_VALID(startTime) ) + return kCMTimeZero; + if( ! CMTIME_IS_NEGATIVE_INFINITY(previousFrameTime) ) + return CMTimeSubtract(previousFrameTime, startTime); + if( ! 
CMTIME_IS_NEGATIVE_INFINITY(previousAudioTime) ) + return CMTimeSubtract(previousAudioTime, startTime); + return kCMTimeZero; +} + +- (CGAffineTransform)transform { + return assetWriterVideoInput.transform; +} + +- (void)setTransform:(CGAffineTransform)transform { + assetWriterVideoInput.transform = transform; +} + +- (AVAssetWriter*)assetWriter { + return assetWriter; +} + +- (void)setPaused:(BOOL)newValue { + if (_paused != newValue) { + _paused = newValue; + + if (_paused) { + discont = YES; + } + } +} + +- (CMSampleBufferRef)adjustTime:(CMSampleBufferRef) sample by:(CMTime) offset { + CMItemCount count; + CMSampleBufferGetSampleTimingInfoArray(sample, 0, nil, &count); + CMSampleTimingInfo* pInfo = malloc(sizeof(CMSampleTimingInfo) * count); + CMSampleBufferGetSampleTimingInfoArray(sample, count, pInfo, &count); + + for (CMItemCount i = 0; i < count; i++) { + pInfo[i].decodeTimeStamp = CMTimeSubtract(pInfo[i].decodeTimeStamp, offset); + pInfo[i].presentationTimeStamp = CMTimeSubtract(pInfo[i].presentationTimeStamp, offset); + } + + CMSampleBufferRef sout; + CMSampleBufferCreateCopyWithNewTiming(nil, sample, count, pInfo, &sout); + free(pInfo); + + return sout; +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture+TextureSubimage.h b/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture+TextureSubimage.h new file mode 100644 index 00000000..03c631bc --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture+TextureSubimage.h @@ -0,0 +1,19 @@ +// +// GPUImagePicture+TextureSubimage.h +// GPUImage +// +// Created by Jack Wu on 2014-05-28. +// Copyright (c) 2014 Brad Larson. All rights reserved. 
+// + +#import "GPUImagePicture.h" + +@interface GPUImagePicture (TextureSubimage) + +- (void)replaceTextureWithSubimage:(UIImage*)subimage; +- (void)replaceTextureWithSubCGImage:(CGImageRef)subimageSource; + +- (void)replaceTextureWithSubimage:(UIImage*)subimage inRect:(CGRect)subRect; +- (void)replaceTextureWithSubCGImage:(CGImageRef)subimageSource inRect:(CGRect)subRect; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture+TextureSubimage.m b/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture+TextureSubimage.m new file mode 100644 index 00000000..71ef8f97 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture+TextureSubimage.m @@ -0,0 +1,103 @@ +// +// GPUImagePicture+TextureSubimage.m +// GPUImage +// +// Created by Jack Wu on 2014-05-28. +// Copyright (c) 2014 Brad Larson. All rights reserved. +// + +#import "GPUImagePicture+TextureSubimage.h" + +@implementation GPUImagePicture (TextureSubimage) + +- (void)replaceTextureWithSubimage:(UIImage*)subimage { + return [self replaceTextureWithSubCGImage:[subimage CGImage]]; +} + +- (void)replaceTextureWithSubCGImage:(CGImageRef)subimageSource { + CGRect rect = (CGRect) {.origin = CGPointZero, .size = (CGSize){.width = CGImageGetWidth(subimageSource), .height = CGImageGetHeight(subimageSource)}}; + return [self replaceTextureWithSubCGImage:subimageSource inRect:rect]; +} + +- (void)replaceTextureWithSubimage:(UIImage*)subimage inRect:(CGRect)subRect { + return [self replaceTextureWithSubCGImage:[subimage CGImage] inRect:subRect]; +} + +- (void)replaceTextureWithSubCGImage:(CGImageRef)subimageSource inRect:(CGRect)subRect { + NSAssert(outputFramebuffer, @"Picture must be initialized first before replacing subtexture"); + NSAssert(self.framebufferForOutput.textureOptions.internalFormat == GL_RGBA, @"For replacing subtexture the internal texture format must be GL_RGBA."); + + CGRect subimageRect = (CGRect){.origin = CGPointZero, .size = (CGSize){.width = CGImageGetWidth(subimageSource), .height = 
CGImageGetHeight(subimageSource)}}; + NSAssert(!CGRectIsEmpty(subimageRect), @"Passed sub image must not be empty - it should be at least 1px tall and wide"); + NSAssert(!CGRectIsEmpty(subRect), @"Passed sub rect must not be empty"); + + NSAssert(CGSizeEqualToSize(subimageRect.size, subRect.size), @"Subimage size must match the size of sub rect"); + + // We don't have to worry about scaling the subimage or finding a power of two size. + // The initialization has taken care of that for us. + + dispatch_semaphore_signal(imageUpdateSemaphore); + + BOOL shouldRedrawUsingCoreGraphics = NO; + + // Since internal format is always RGBA, we need the input data in RGBA as well. + CGBitmapInfo bitmapInfo = CGImageGetBitmapInfo(subimageSource); + CGBitmapInfo byteOrderInfo = bitmapInfo & kCGBitmapByteOrderMask; + if (byteOrderInfo != kCGBitmapByteOrderDefault && byteOrderInfo != kCGBitmapByteOrder32Big) { + shouldRedrawUsingCoreGraphics = YES; + } + else { + CGImageAlphaInfo alphaInfo = bitmapInfo & kCGBitmapAlphaInfoMask; + if (alphaInfo != kCGImageAlphaPremultipliedLast && alphaInfo != kCGImageAlphaLast && alphaInfo != kCGImageAlphaNoneSkipLast) { + shouldRedrawUsingCoreGraphics = YES; + } + } + + GLubyte *imageData = NULL; + CFDataRef dataFromImageDataProvider; + if (shouldRedrawUsingCoreGraphics) + { + // For resized or incompatible image: redraw + imageData = (GLubyte *) calloc(1, (int)subimageRect.size.width * (int)subimageRect.size.height * 4); + + CGColorSpaceRef genericRGBColorspace = CGColorSpaceCreateDeviceRGB(); + + CGContextRef imageContext = CGBitmapContextCreate(imageData, (size_t)subimageRect.size.width, (size_t)subimageRect.size.height, 8, (size_t)subimageRect.size.width * 4, genericRGBColorspace, kCGBitmapByteOrderDefault | kCGImageAlphaPremultipliedLast); + + CGContextDrawImage(imageContext, CGRectMake(0.0, 0.0, subimageRect.size.width, subimageRect.size.height), subimageSource); + CGContextRelease(imageContext); + CGColorSpaceRelease(genericRGBColorspace); 
+ } + else + { + // Access the raw image bytes directly + dataFromImageDataProvider = CGDataProviderCopyData(CGImageGetDataProvider(subimageSource)); + imageData = (GLubyte *)CFDataGetBytePtr(dataFromImageDataProvider); + } + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + [outputFramebuffer disableReferenceCounting]; + + glBindTexture(GL_TEXTURE_2D, [outputFramebuffer texture]); + + // no need to use self.outputTextureOptions here since pictures need this texture formats and type + glTexSubImage2D(GL_TEXTURE_2D, 0, subRect.origin.x, subRect.origin.y, (GLint)subRect.size.width, subRect.size.height, GL_RGBA, GL_UNSIGNED_BYTE, imageData); + + if (self.shouldSmoothlyScaleOutput) + { + glGenerateMipmap(GL_TEXTURE_2D); + } + glBindTexture(GL_TEXTURE_2D, 0); + }); + + if (shouldRedrawUsingCoreGraphics) + { + free(imageData); + } + else + { + CFRelease(dataFromImageDataProvider); + } +} +@end diff --git a/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture.h b/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture.h new file mode 100755 index 00000000..4c4b9942 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture.h @@ -0,0 +1,38 @@ +#import +#import "GPUImageOutput.h" + + +@interface GPUImagePicture : GPUImageOutput +{ + CGSize pixelSizeOfImage; + BOOL hasProcessedImage; + + dispatch_semaphore_t imageUpdateSemaphore; +} + +// Initialization and teardown +- (id)initWithURL:(NSURL *)url; +- (id)initWithImage:(UIImage *)newImageSource; +- (id)initWithCGImage:(CGImageRef)newImageSource; +- (id)initWithImage:(UIImage *)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput; +- (id)initWithCGImage:(CGImageRef)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput; +- (id)initWithImage:(UIImage *)newImageSource removePremultiplication:(BOOL)removePremultiplication; +- (id)initWithCGImage:(CGImageRef)newImageSource removePremultiplication:(BOOL)removePremultiplication; +- (id)initWithImage:(UIImage *)newImageSource 
smoothlyScaleOutput:(BOOL)smoothlyScaleOutput removePremultiplication:(BOOL)removePremultiplication; +- (id)initWithCGImage:(CGImageRef)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput removePremultiplication:(BOOL)removePremultiplication; + +// Image rendering +- (void)processImage; +- (CGSize)outputImageSize; + +/** + * Process image with all targets and filters asynchronously + * The completion handler is called after processing finished in the + * GPU's dispatch queue - and only if this method did not return NO. + * + * @returns NO if resource is blocked and processing is discarded, YES otherwise + */ +- (BOOL)processImageWithCompletionHandler:(void (^)(void))completion; +- (void)processImageUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(UIImage *processedImage))block; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture.m b/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture.m new file mode 100755 index 00000000..c525f4dc --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture.m @@ -0,0 +1,371 @@ +#import "GPUImagePicture.h" + +@implementation GPUImagePicture + +#pragma mark - +#pragma mark Initialization and teardown + +- (id)initWithURL:(NSURL *)url; +{ + NSData *imageData = [[NSData alloc] initWithContentsOfURL:url]; + + if (!(self = [self initWithData:imageData])) + { + return nil; + } + + return self; +} + +- (id)initWithData:(NSData *)imageData; +{ + UIImage *inputImage = [[UIImage alloc] initWithData:imageData]; + + if (!(self = [self initWithImage:inputImage])) + { + return nil; + } + + return self; +} + +- (id)initWithImage:(UIImage *)newImageSource; +{ + if (!(self = [self initWithImage:newImageSource smoothlyScaleOutput:NO])) + { + return nil; + } + + return self; +} + +- (id)initWithCGImage:(CGImageRef)newImageSource; +{ + if (!(self = [self initWithCGImage:newImageSource smoothlyScaleOutput:NO])) + { + return nil; + } + return self; +} + +- (id)initWithImage:(UIImage *)newImageSource 
smoothlyScaleOutput:(BOOL)smoothlyScaleOutput; +{ + return [self initWithCGImage:[newImageSource CGImage] smoothlyScaleOutput:smoothlyScaleOutput]; +} + +- (id)initWithCGImage:(CGImageRef)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput; +{ + return [self initWithCGImage:newImageSource smoothlyScaleOutput:smoothlyScaleOutput removePremultiplication:NO]; +} + +- (id)initWithImage:(UIImage *)newImageSource removePremultiplication:(BOOL)removePremultiplication; +{ + return [self initWithCGImage:[newImageSource CGImage] smoothlyScaleOutput:NO removePremultiplication:removePremultiplication]; +} + +- (id)initWithCGImage:(CGImageRef)newImageSource removePremultiplication:(BOOL)removePremultiplication; +{ + return [self initWithCGImage:newImageSource smoothlyScaleOutput:NO removePremultiplication:removePremultiplication]; +} + +- (id)initWithImage:(UIImage *)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput removePremultiplication:(BOOL)removePremultiplication; +{ + return [self initWithCGImage:[newImageSource CGImage] smoothlyScaleOutput:smoothlyScaleOutput removePremultiplication:removePremultiplication]; +} + +- (id)initWithCGImage:(CGImageRef)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput removePremultiplication:(BOOL)removePremultiplication; +{ + if (!(self = [super init])) + { + return nil; + } + + hasProcessedImage = NO; + self.shouldSmoothlyScaleOutput = smoothlyScaleOutput; + imageUpdateSemaphore = dispatch_semaphore_create(0); + dispatch_semaphore_signal(imageUpdateSemaphore); + + + // TODO: Dispatch this whole thing asynchronously to move image loading off main thread + CGFloat widthOfImage = CGImageGetWidth(newImageSource); + CGFloat heightOfImage = CGImageGetHeight(newImageSource); + + // If passed an empty image reference, CGContextDrawImage will fail in future versions of the SDK. 
+ NSAssert( widthOfImage > 0 && heightOfImage > 0, @"Passed image must not be empty - it should be at least 1px tall and wide"); + + pixelSizeOfImage = CGSizeMake(widthOfImage, heightOfImage); + CGSize pixelSizeToUseForTexture = pixelSizeOfImage; + + BOOL shouldRedrawUsingCoreGraphics = NO; + + // For now, deal with images larger than the maximum texture size by resizing to be within that limit + CGSize scaledImageSizeToFitOnGPU = [GPUImageContext sizeThatFitsWithinATextureForSize:pixelSizeOfImage]; + if (!CGSizeEqualToSize(scaledImageSizeToFitOnGPU, pixelSizeOfImage)) + { + pixelSizeOfImage = scaledImageSizeToFitOnGPU; + pixelSizeToUseForTexture = pixelSizeOfImage; + shouldRedrawUsingCoreGraphics = YES; + } + + if (self.shouldSmoothlyScaleOutput) + { + // In order to use mipmaps, you need to provide power-of-two textures, so convert to the next largest power of two and stretch to fill + CGFloat powerClosestToWidth = ceil(log2(pixelSizeOfImage.width)); + CGFloat powerClosestToHeight = ceil(log2(pixelSizeOfImage.height)); + + pixelSizeToUseForTexture = CGSizeMake(pow(2.0, powerClosestToWidth), pow(2.0, powerClosestToHeight)); + + shouldRedrawUsingCoreGraphics = YES; + } + + GLubyte *imageData = NULL; + CFDataRef dataFromImageDataProvider = NULL; + GLenum format = GL_BGRA; + BOOL isLitteEndian = YES; + BOOL alphaFirst = NO; + BOOL premultiplied = NO; + + if (!shouldRedrawUsingCoreGraphics) { + /* Check that the memory layout is compatible with GL, as we cannot use glPixelStore to + * tell GL about the memory layout with GLES. 
+ */ + if (CGImageGetBytesPerRow(newImageSource) != CGImageGetWidth(newImageSource) * 4 || + CGImageGetBitsPerPixel(newImageSource) != 32 || + CGImageGetBitsPerComponent(newImageSource) != 8) + { + shouldRedrawUsingCoreGraphics = YES; + } else { + /* Check that the bitmap pixel format is compatible with GL */ + CGBitmapInfo bitmapInfo = CGImageGetBitmapInfo(newImageSource); + if ((bitmapInfo & kCGBitmapFloatComponents) != 0) { + /* We don't support float components for use directly in GL */ + shouldRedrawUsingCoreGraphics = YES; + } else { + CGBitmapInfo byteOrderInfo = bitmapInfo & kCGBitmapByteOrderMask; + if (byteOrderInfo == kCGBitmapByteOrder32Little) { + /* Little endian, for alpha-first we can use this bitmap directly in GL */ + CGImageAlphaInfo alphaInfo = bitmapInfo & kCGBitmapAlphaInfoMask; + if (alphaInfo != kCGImageAlphaPremultipliedFirst && alphaInfo != kCGImageAlphaFirst && + alphaInfo != kCGImageAlphaNoneSkipFirst) { + shouldRedrawUsingCoreGraphics = YES; + } + } else if (byteOrderInfo == kCGBitmapByteOrderDefault || byteOrderInfo == kCGBitmapByteOrder32Big) { + isLitteEndian = NO; + /* Big endian, for alpha-last we can use this bitmap directly in GL */ + CGImageAlphaInfo alphaInfo = bitmapInfo & kCGBitmapAlphaInfoMask; + if (alphaInfo != kCGImageAlphaPremultipliedLast && alphaInfo != kCGImageAlphaLast && + alphaInfo != kCGImageAlphaNoneSkipLast) { + shouldRedrawUsingCoreGraphics = YES; + } else { + /* Can access directly using GL_RGBA pixel format */ + premultiplied = alphaInfo == kCGImageAlphaPremultipliedLast || alphaInfo == kCGImageAlphaPremultipliedLast; + alphaFirst = alphaInfo == kCGImageAlphaFirst || alphaInfo == kCGImageAlphaPremultipliedFirst; + format = GL_RGBA; + } + } + } + } + } + + // CFAbsoluteTime elapsedTime, startTime = CFAbsoluteTimeGetCurrent(); + + if (shouldRedrawUsingCoreGraphics) + { + // For resized or incompatible image: redraw + imageData = (GLubyte *) calloc(1, (int)pixelSizeToUseForTexture.width * 
(int)pixelSizeToUseForTexture.height * 4); + + CGColorSpaceRef genericRGBColorspace = CGColorSpaceCreateDeviceRGB(); + + CGContextRef imageContext = CGBitmapContextCreate(imageData, (size_t)pixelSizeToUseForTexture.width, (size_t)pixelSizeToUseForTexture.height, 8, (size_t)pixelSizeToUseForTexture.width * 4, genericRGBColorspace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst); + // CGContextSetBlendMode(imageContext, kCGBlendModeCopy); // From Technical Q&A QA1708: http://developer.apple.com/library/ios/#qa/qa1708/_index.html + CGContextDrawImage(imageContext, CGRectMake(0.0, 0.0, pixelSizeToUseForTexture.width, pixelSizeToUseForTexture.height), newImageSource); + CGContextRelease(imageContext); + CGColorSpaceRelease(genericRGBColorspace); + isLitteEndian = YES; + alphaFirst = YES; + premultiplied = YES; + } + else + { + // Access the raw image bytes directly + dataFromImageDataProvider = CGDataProviderCopyData(CGImageGetDataProvider(newImageSource)); + imageData = (GLubyte *)CFDataGetBytePtr(dataFromImageDataProvider); + } + + if (removePremultiplication && premultiplied) { + NSUInteger totalNumberOfPixels = round(pixelSizeToUseForTexture.width * pixelSizeToUseForTexture.height); + uint32_t *pixelP = (uint32_t *)imageData; + uint32_t pixel; + CGFloat srcR, srcG, srcB, srcA; + + for (NSUInteger idx=0; idx> 24) / 255.0f; + } + else { + srcA = (CGFloat)(pixel & 0x000000ff) / 255.0f; + pixel >>= 8; + } + + srcR = (CGFloat)((pixel & 0x00ff0000) >> 16) / 255.0f; + srcG = (CGFloat)((pixel & 0x0000ff00) >> 8) / 255.0f; + srcB = (CGFloat)(pixel & 0x000000ff) / 255.0f; + + srcR /= srcA; srcG /= srcA; srcB /= srcA; + + pixel = (uint32_t)(srcR * 255.0) << 16; + pixel |= (uint32_t)(srcG * 255.0) << 8; + pixel |= (uint32_t)(srcB * 255.0); + + if (alphaFirst) { + pixel |= (uint32_t)(srcA * 255.0) << 24; + } + else { + pixel <<= 8; + pixel |= (uint32_t)(srcA * 255.0); + } + *pixelP = isLitteEndian ? 
CFSwapInt32HostToLittle(pixel) : CFSwapInt32HostToBig(pixel); + } + } + + // elapsedTime = (CFAbsoluteTimeGetCurrent() - startTime) * 1000.0; + // NSLog(@"Core Graphics drawing time: %f", elapsedTime); + + // CGFloat currentRedTotal = 0.0f, currentGreenTotal = 0.0f, currentBlueTotal = 0.0f, currentAlphaTotal = 0.0f; + // NSUInteger totalNumberOfPixels = round(pixelSizeToUseForTexture.width * pixelSizeToUseForTexture.height); + // + // for (NSUInteger currentPixel = 0; currentPixel < totalNumberOfPixels; currentPixel++) + // { + // currentBlueTotal += (CGFloat)imageData[(currentPixel * 4)] / 255.0f; + // currentGreenTotal += (CGFloat)imageData[(currentPixel * 4) + 1] / 255.0f; + // currentRedTotal += (CGFloat)imageData[(currentPixel * 4 + 2)] / 255.0f; + // currentAlphaTotal += (CGFloat)imageData[(currentPixel * 4) + 3] / 255.0f; + // } + // + // NSLog(@"Debug, average input image red: %f, green: %f, blue: %f, alpha: %f", currentRedTotal / (CGFloat)totalNumberOfPixels, currentGreenTotal / (CGFloat)totalNumberOfPixels, currentBlueTotal / (CGFloat)totalNumberOfPixels, currentAlphaTotal / (CGFloat)totalNumberOfPixels); + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + + outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:pixelSizeToUseForTexture onlyTexture:YES]; + [outputFramebuffer disableReferenceCounting]; + + glBindTexture(GL_TEXTURE_2D, [outputFramebuffer texture]); + if (self.shouldSmoothlyScaleOutput) + { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); + } + // no need to use self.outputTextureOptions here since pictures need this texture formats and type + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, (int)pixelSizeToUseForTexture.width, (int)pixelSizeToUseForTexture.height, 0, format, GL_UNSIGNED_BYTE, imageData); + + if (self.shouldSmoothlyScaleOutput) + { + glGenerateMipmap(GL_TEXTURE_2D); + } + glBindTexture(GL_TEXTURE_2D, 0); + }); + + if 
(shouldRedrawUsingCoreGraphics) + { + free(imageData); + } + else + { + if (dataFromImageDataProvider) + { + CFRelease(dataFromImageDataProvider); + } + } + + return self; +} + +// ARC forbids explicit message send of 'release'; since iOS 6 even for dispatch_release() calls: stripping it out in that case is required. +- (void)dealloc; +{ + [outputFramebuffer enableReferenceCounting]; + [outputFramebuffer unlock]; + +#if !OS_OBJECT_USE_OBJC + if (imageUpdateSemaphore != NULL) + { + dispatch_release(imageUpdateSemaphore); + } +#endif +} + +#pragma mark - +#pragma mark Image rendering + +- (void)removeAllTargets; +{ + [super removeAllTargets]; + hasProcessedImage = NO; +} + +- (void)processImage; +{ + [self processImageWithCompletionHandler:nil]; +} + +- (BOOL)processImageWithCompletionHandler:(void (^)(void))completion; +{ + hasProcessedImage = YES; + + // dispatch_semaphore_wait(imageUpdateSemaphore, DISPATCH_TIME_FOREVER); + + if (dispatch_semaphore_wait(imageUpdateSemaphore, DISPATCH_TIME_NOW) != 0) + { + return NO; + } + + runAsynchronouslyOnVideoProcessingQueue(^{ + for (id currentTarget in targets) + { + NSInteger indexOfObject = [targets indexOfObject:currentTarget]; + NSInteger textureIndexOfTarget = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; + + [currentTarget setCurrentlyReceivingMonochromeInput:NO]; + [currentTarget setInputSize:pixelSizeOfImage atIndex:textureIndexOfTarget]; + [currentTarget setInputFramebuffer:outputFramebuffer atIndex:textureIndexOfTarget]; + [currentTarget newFrameReadyAtTime:kCMTimeIndefinite atIndex:textureIndexOfTarget]; + } + + dispatch_semaphore_signal(imageUpdateSemaphore); + + if (completion != nil) { + completion(); + } + }); + + return YES; +} + +- (void)processImageUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(UIImage *processedImage))block; +{ + [finalFilterInChain useNextFrameForImageCapture]; + [self processImageWithCompletionHandler:^{ + UIImage *imageFromFilter = 
[finalFilterInChain imageFromCurrentFramebuffer]; + block(imageFromFilter); + }]; +} + +- (CGSize)outputImageSize; +{ + return pixelSizeOfImage; +} + +- (void)addTarget:(id)newTarget atTextureLocation:(NSInteger)textureLocation; +{ + [super addTarget:newTarget atTextureLocation:textureLocation]; + + if (hasProcessedImage) + { + [newTarget setInputSize:pixelSizeOfImage atIndex:textureLocation]; + [newTarget newFrameReadyAtTime:kCMTimeIndefinite atIndex:textureLocation]; + } +} + +@end diff --git a/LFLiveKit/Vendor/GPUImage/iOS/GPUImageView.h b/LFLiveKit/Vendor/GPUImage/iOS/GPUImageView.h new file mode 100755 index 00000000..b42651d3 --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/iOS/GPUImageView.h @@ -0,0 +1,41 @@ +#import +#import "GPUImageContext.h" + +typedef NS_ENUM(NSUInteger, GPUImageFillModeType) { + kGPUImageFillModeStretch, // Stretch to fill the full view, which may distort the image outside of its normal aspect ratio + kGPUImageFillModePreserveAspectRatio, // Maintains the aspect ratio of the source image, adding bars of the specified background color + kGPUImageFillModePreserveAspectRatioAndFill // Maintains the aspect ratio of the source image, zooming in on its center to fill the view +}; + + + +/** + UIView subclass to use as an endpoint for displaying GPUImage outputs + */ +@interface GPUImageView : UIView +{ + GPUImageRotationMode inputRotation; +} + +/** The fill mode dictates how images are fit in the view, with the default being kGPUImageFillModePreserveAspectRatio + */ +@property(readwrite, nonatomic) GPUImageFillModeType fillMode; + +/** This calculates the current display size, in pixels, taking into account Retina scaling factors + */ +@property(readonly, nonatomic) CGSize sizeInPixels; + +@property(nonatomic) BOOL enabled; + +/** Handling fill mode + + @param redComponent Red component for background color + @param greenComponent Green component for background color + @param blueComponent Blue component for background color + @param 
alphaComponent Alpha component for background color + */ +- (void)setBackgroundColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent; + +- (void)setCurrentlyReceivingMonochromeInput:(BOOL)newValue; + +@end diff --git a/LFLiveKit/Vendor/GPUImage/iOS/GPUImageView.m b/LFLiveKit/Vendor/GPUImage/iOS/GPUImageView.m new file mode 100755 index 00000000..e092b80e --- /dev/null +++ b/LFLiveKit/Vendor/GPUImage/iOS/GPUImageView.m @@ -0,0 +1,484 @@ +#import "GPUImageView.h" +#import +#import +#import "GPUImageContext.h" +#import "GPUImageFilter.h" +#import + +#pragma mark - +#pragma mark Private methods and instance variables + +@interface GPUImageView () +{ + GPUImageFramebuffer *inputFramebufferForDisplay; + GLuint displayRenderbuffer, displayFramebuffer; + + GLProgram *displayProgram; + GLint displayPositionAttribute, displayTextureCoordinateAttribute; + GLint displayInputTextureUniform; + + CGSize inputImageSize; + GLfloat imageVertices[8]; + GLfloat backgroundColorRed, backgroundColorGreen, backgroundColorBlue, backgroundColorAlpha; + + CGSize boundsSizeAtFrameBufferEpoch; +} + +@property (assign, nonatomic) NSUInteger aspectRatio; + +// Initialization and teardown +- (void)commonInit; + +// Managing the display FBOs +- (void)createDisplayFramebuffer; +- (void)destroyDisplayFramebuffer; + +// Handling fill mode +- (void)recalculateViewGeometry; + +@end + +@implementation GPUImageView + +@synthesize aspectRatio; +@synthesize sizeInPixels = _sizeInPixels; +@synthesize fillMode = _fillMode; +@synthesize enabled; + +#pragma mark - +#pragma mark Initialization and teardown + ++ (Class)layerClass +{ + return [CAEAGLLayer class]; +} + +- (id)initWithFrame:(CGRect)frame +{ + if (!(self = [super initWithFrame:frame])) + { + return nil; + } + + [self commonInit]; + + return self; +} + +-(id)initWithCoder:(NSCoder *)coder +{ + if (!(self = [super initWithCoder:coder])) + { + return nil; + } + + [self commonInit]; + + 
return self; +} + +- (void)commonInit; +{ + // Set scaling to account for Retina display + if ([self respondsToSelector:@selector(setContentScaleFactor:)]) + { + self.contentScaleFactor = [[UIScreen mainScreen] scale]; + } + + inputRotation = kGPUImageNoRotation; + self.opaque = YES; + self.hidden = NO; + CAEAGLLayer *eaglLayer = (CAEAGLLayer *)self.layer; + eaglLayer.opaque = YES; + eaglLayer.drawableProperties = [NSDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithBool:NO], kEAGLDrawablePropertyRetainedBacking, kEAGLColorFormatRGBA8, kEAGLDrawablePropertyColorFormat, nil]; + + self.enabled = YES; + + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext useImageProcessingContext]; + + displayProgram = [[GPUImageContext sharedImageProcessingContext] programForVertexShaderString:kGPUImageVertexShaderString fragmentShaderString:kGPUImagePassthroughFragmentShaderString]; + if (!displayProgram.initialized) + { + [displayProgram addAttribute:@"position"]; + [displayProgram addAttribute:@"inputTextureCoordinate"]; + + if (![displayProgram link]) + { + NSString *progLog = [displayProgram programLog]; + NSLog(@"Program link log: %@", progLog); + NSString *fragLog = [displayProgram fragmentShaderLog]; + NSLog(@"Fragment shader compile log: %@", fragLog); + NSString *vertLog = [displayProgram vertexShaderLog]; + NSLog(@"Vertex shader compile log: %@", vertLog); + displayProgram = nil; + NSAssert(NO, @"Filter shader link failed"); + } + } + + displayPositionAttribute = [displayProgram attributeIndex:@"position"]; + displayTextureCoordinateAttribute = [displayProgram attributeIndex:@"inputTextureCoordinate"]; + displayInputTextureUniform = [displayProgram uniformIndex:@"inputImageTexture"]; // This does assume a name of "inputTexture" for the fragment shader + + [GPUImageContext setActiveShaderProgram:displayProgram]; + glEnableVertexAttribArray(displayPositionAttribute); + glEnableVertexAttribArray(displayTextureCoordinateAttribute); + + [self 
setBackgroundColorRed:0.0 green:0.0 blue:0.0 alpha:1.0]; + _fillMode = kGPUImageFillModePreserveAspectRatio; + [self createDisplayFramebuffer]; + }); +} + +- (void)layoutSubviews { + [super layoutSubviews]; + + // The frame buffer needs to be trashed and re-created when the view size changes. + if (!CGSizeEqualToSize(self.bounds.size, boundsSizeAtFrameBufferEpoch) && + !CGSizeEqualToSize(self.bounds.size, CGSizeZero)) { + runSynchronouslyOnVideoProcessingQueue(^{ + [self destroyDisplayFramebuffer]; + [self createDisplayFramebuffer]; + }); + } else if (!CGSizeEqualToSize(self.bounds.size, CGSizeZero)) { + [self recalculateViewGeometry]; + } +} + +- (void)dealloc +{ + runSynchronouslyOnVideoProcessingQueue(^{ + [self destroyDisplayFramebuffer]; + }); +} + +#pragma mark - +#pragma mark Managing the display FBOs + +- (void)createDisplayFramebuffer; +{ + [GPUImageContext useImageProcessingContext]; + + glGenFramebuffers(1, &displayFramebuffer); + glBindFramebuffer(GL_FRAMEBUFFER, displayFramebuffer); + + glGenRenderbuffers(1, &displayRenderbuffer); + glBindRenderbuffer(GL_RENDERBUFFER, displayRenderbuffer); + + [[[GPUImageContext sharedImageProcessingContext] context] renderbufferStorage:GL_RENDERBUFFER fromDrawable:(CAEAGLLayer*)self.layer]; + + GLint backingWidth, backingHeight; + + glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &backingWidth); + glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &backingHeight); + + if ( (backingWidth == 0) || (backingHeight == 0) ) + { + [self destroyDisplayFramebuffer]; + return; + } + + _sizeInPixels.width = (CGFloat)backingWidth; + _sizeInPixels.height = (CGFloat)backingHeight; + +// NSLog(@"Backing width: %d, height: %d", backingWidth, backingHeight); + + glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, displayRenderbuffer); + + __unused GLuint framebufferCreationStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER); + NSAssert(framebufferCreationStatus == 
GL_FRAMEBUFFER_COMPLETE, @"Failure with display framebuffer generation for display of size: %f, %f", self.bounds.size.width, self.bounds.size.height); + boundsSizeAtFrameBufferEpoch = self.bounds.size; + + [self recalculateViewGeometry]; +} + +- (void)destroyDisplayFramebuffer; +{ + [GPUImageContext useImageProcessingContext]; + + if (displayFramebuffer) + { + glDeleteFramebuffers(1, &displayFramebuffer); + displayFramebuffer = 0; + } + + if (displayRenderbuffer) + { + glDeleteRenderbuffers(1, &displayRenderbuffer); + displayRenderbuffer = 0; + } +} + +- (void)setDisplayFramebuffer; +{ + if (!displayFramebuffer) + { + [self createDisplayFramebuffer]; + } + + glBindFramebuffer(GL_FRAMEBUFFER, displayFramebuffer); + + glViewport(0, 0, (GLint)_sizeInPixels.width, (GLint)_sizeInPixels.height); +} + +- (void)presentFramebuffer; +{ + glBindRenderbuffer(GL_RENDERBUFFER, displayRenderbuffer); + [[GPUImageContext sharedImageProcessingContext] presentBufferForDisplay]; +} + +#pragma mark - +#pragma mark Handling fill mode + +- (void)recalculateViewGeometry; +{ + runSynchronouslyOnVideoProcessingQueue(^{ + CGFloat heightScaling, widthScaling; + + CGSize currentViewSize = self.bounds.size; + + // CGFloat imageAspectRatio = inputImageSize.width / inputImageSize.height; + // CGFloat viewAspectRatio = currentViewSize.width / currentViewSize.height; + + CGRect insetRect = AVMakeRectWithAspectRatioInsideRect(inputImageSize, self.bounds); + + switch(_fillMode) + { + case kGPUImageFillModeStretch: + { + widthScaling = 1.0; + heightScaling = 1.0; + }; break; + case kGPUImageFillModePreserveAspectRatio: + { + widthScaling = insetRect.size.width / currentViewSize.width; + heightScaling = insetRect.size.height / currentViewSize.height; + }; break; + case kGPUImageFillModePreserveAspectRatioAndFill: + { + // CGFloat widthHolder = insetRect.size.width / currentViewSize.width; + widthScaling = currentViewSize.height / insetRect.size.height; + heightScaling = currentViewSize.width / 
insetRect.size.width; + }; break; + } + + imageVertices[0] = -widthScaling; + imageVertices[1] = -heightScaling; + imageVertices[2] = widthScaling; + imageVertices[3] = -heightScaling; + imageVertices[4] = -widthScaling; + imageVertices[5] = heightScaling; + imageVertices[6] = widthScaling; + imageVertices[7] = heightScaling; + }); + +// static const GLfloat imageVertices[] = { +// -1.0f, -1.0f, +// 1.0f, -1.0f, +// -1.0f, 1.0f, +// 1.0f, 1.0f, +// }; +} + +- (void)setBackgroundColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent; +{ + backgroundColorRed = redComponent; + backgroundColorGreen = greenComponent; + backgroundColorBlue = blueComponent; + backgroundColorAlpha = alphaComponent; +} + ++ (const GLfloat *)textureCoordinatesForRotation:(GPUImageRotationMode)rotationMode; +{ +// static const GLfloat noRotationTextureCoordinates[] = { +// 0.0f, 0.0f, +// 1.0f, 0.0f, +// 0.0f, 1.0f, +// 1.0f, 1.0f, +// }; + + static const GLfloat noRotationTextureCoordinates[] = { + 0.0f, 1.0f, + 1.0f, 1.0f, + 0.0f, 0.0f, + 1.0f, 0.0f, + }; + + static const GLfloat rotateRightTextureCoordinates[] = { + 1.0f, 1.0f, + 1.0f, 0.0f, + 0.0f, 1.0f, + 0.0f, 0.0f, + }; + + static const GLfloat rotateLeftTextureCoordinates[] = { + 0.0f, 0.0f, + 0.0f, 1.0f, + 1.0f, 0.0f, + 1.0f, 1.0f, + }; + + static const GLfloat verticalFlipTextureCoordinates[] = { + 0.0f, 0.0f, + 1.0f, 0.0f, + 0.0f, 1.0f, + 1.0f, 1.0f, + }; + + static const GLfloat horizontalFlipTextureCoordinates[] = { + 1.0f, 1.0f, + 0.0f, 1.0f, + 1.0f, 0.0f, + 0.0f, 0.0f, + }; + + static const GLfloat rotateRightVerticalFlipTextureCoordinates[] = { + 1.0f, 0.0f, + 1.0f, 1.0f, + 0.0f, 0.0f, + 0.0f, 1.0f, + }; + + static const GLfloat rotateRightHorizontalFlipTextureCoordinates[] = { + 0.0f, 1.0f, + 0.0f, 0.0f, + 1.0f, 1.0f, + 1.0f, 0.0f, + }; + + static const GLfloat rotate180TextureCoordinates[] = { + 1.0f, 0.0f, + 0.0f, 0.0f, + 1.0f, 1.0f, + 0.0f, 1.0f, + }; + + 
switch(rotationMode) + { + case kGPUImageNoRotation: return noRotationTextureCoordinates; + case kGPUImageRotateLeft: return rotateLeftTextureCoordinates; + case kGPUImageRotateRight: return rotateRightTextureCoordinates; + case kGPUImageFlipVertical: return verticalFlipTextureCoordinates; + case kGPUImageFlipHorizonal: return horizontalFlipTextureCoordinates; + case kGPUImageRotateRightFlipVertical: return rotateRightVerticalFlipTextureCoordinates; + case kGPUImageRotateRightFlipHorizontal: return rotateRightHorizontalFlipTextureCoordinates; + case kGPUImageRotate180: return rotate180TextureCoordinates; + } +} + +#pragma mark - +#pragma mark GPUInput protocol + +- (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; +{ + runSynchronouslyOnVideoProcessingQueue(^{ + [GPUImageContext setActiveShaderProgram:displayProgram]; + [self setDisplayFramebuffer]; + + glClearColor(backgroundColorRed, backgroundColorGreen, backgroundColorBlue, backgroundColorAlpha); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + glActiveTexture(GL_TEXTURE4); + glBindTexture(GL_TEXTURE_2D, [inputFramebufferForDisplay texture]); + glUniform1i(displayInputTextureUniform, 4); + + glVertexAttribPointer(displayPositionAttribute, 2, GL_FLOAT, 0, 0, imageVertices); + glVertexAttribPointer(displayTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, [GPUImageView textureCoordinatesForRotation:inputRotation]); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + + [self presentFramebuffer]; + [inputFramebufferForDisplay unlock]; + inputFramebufferForDisplay = nil; + }); +} + +- (NSInteger)nextAvailableTextureIndex; +{ + return 0; +} + +- (void)setInputFramebuffer:(GPUImageFramebuffer *)newInputFramebuffer atIndex:(NSInteger)textureIndex; +{ + inputFramebufferForDisplay = newInputFramebuffer; + [inputFramebufferForDisplay lock]; +} + +- (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; +{ + inputRotation = newInputRotation; +} + +- 
(void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; +{ + runSynchronouslyOnVideoProcessingQueue(^{ + CGSize rotatedSize = newSize; + + if (GPUImageRotationSwapsWidthAndHeight(inputRotation)) + { + rotatedSize.width = newSize.height; + rotatedSize.height = newSize.width; + } + + if (!CGSizeEqualToSize(inputImageSize, rotatedSize)) + { + inputImageSize = rotatedSize; + [self recalculateViewGeometry]; + } + }); +} + +- (CGSize)maximumOutputSize; +{ + if ([self respondsToSelector:@selector(setContentScaleFactor:)]) + { + CGSize pointSize = self.bounds.size; + return CGSizeMake(self.contentScaleFactor * pointSize.width, self.contentScaleFactor * pointSize.height); + } + else + { + return self.bounds.size; + } +} + +- (void)endProcessing +{ +} + +- (BOOL)shouldIgnoreUpdatesToThisTarget; +{ + return NO; +} + +- (BOOL)wantsMonochromeInput; +{ + return NO; +} + +- (void)setCurrentlyReceivingMonochromeInput:(BOOL)newValue; +{ + +} + +#pragma mark - +#pragma mark Accessors + +- (CGSize)sizeInPixels; +{ + if (CGSizeEqualToSize(_sizeInPixels, CGSizeZero)) + { + return [self maximumOutputSize]; + } + else + { + return _sizeInPixels; + } +} + +- (void)setFillMode:(GPUImageFillModeType)newValue; +{ + _fillMode = newValue; + [self recalculateViewGeometry]; +} + +@end diff --git a/LFLiveKit/Vendor/pili-librtmp/amf.c b/LFLiveKit/Vendor/pili-librtmp/amf.c new file mode 100644 index 00000000..18630ce2 --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/amf.c @@ -0,0 +1,1037 @@ +/* + * Copyright (C) 2005-2008 Team XBMC + * http://www.xbmc.org + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. 
+ * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +#include +#include +#include + +#include "amf.h" +#include "bytes.h" +#include "log.h" +#include "rtmp_sys.h" + +static const AMFObjectProperty AMFProp_Invalid = {{0, 0}, AMF_INVALID}; +static const AVal AV_empty = {0, 0}; + +/* Data is Big-Endian */ +unsigned short + AMF_DecodeInt16(const char *data) { + unsigned char *c = (unsigned char *)data; + unsigned short val; + val = (c[0] << 8) | c[1]; + return val; +} + +unsigned int + AMF_DecodeInt24(const char *data) { + unsigned char *c = (unsigned char *)data; + unsigned int val; + val = (c[0] << 16) | (c[1] << 8) | c[2]; + return val; +} + +unsigned int + AMF_DecodeInt32(const char *data) { + unsigned char *c = (unsigned char *)data; + unsigned int val; + val = (c[0] << 24) | (c[1] << 16) | (c[2] << 8) | c[3]; + return val; +} + +void AMF_DecodeString(const char *data, AVal *bv) { + bv->av_len = AMF_DecodeInt16(data); + bv->av_val = (bv->av_len > 0) ? (char *)data + 2 : NULL; +} + +void AMF_DecodeLongString(const char *data, AVal *bv) { + bv->av_len = AMF_DecodeInt32(data); + bv->av_val = (bv->av_len > 0) ? 
(char *)data + 4 : NULL; +} + +double + AMF_DecodeNumber(const char *data) { + double dVal; +#if __FLOAT_WORD_ORDER == __BYTE_ORDER +#if __BYTE_ORDER == __BIG_ENDIAN + memcpy(&dVal, data, 8); +#elif __BYTE_ORDER == __LITTLE_ENDIAN + unsigned char *ci, *co; + ci = (unsigned char *)data; + co = (unsigned char *)&dVal; + co[0] = ci[7]; + co[1] = ci[6]; + co[2] = ci[5]; + co[3] = ci[4]; + co[4] = ci[3]; + co[5] = ci[2]; + co[6] = ci[1]; + co[7] = ci[0]; +#endif +#else +#if __BYTE_ORDER == __LITTLE_ENDIAN /* __FLOAT_WORD_ORER == __BIG_ENDIAN */ + unsigned char *ci, *co; + ci = (unsigned char *)data; + co = (unsigned char *)&dVal; + co[0] = ci[3]; + co[1] = ci[2]; + co[2] = ci[1]; + co[3] = ci[0]; + co[4] = ci[7]; + co[5] = ci[6]; + co[6] = ci[5]; + co[7] = ci[4]; +#else /* __BYTE_ORDER == __BIG_ENDIAN && __FLOAT_WORD_ORER == __LITTLE_ENDIAN */ + unsigned char *ci, *co; + ci = (unsigned char *)data; + co = (unsigned char *)&dVal; + co[0] = ci[4]; + co[1] = ci[5]; + co[2] = ci[6]; + co[3] = ci[7]; + co[4] = ci[0]; + co[5] = ci[1]; + co[6] = ci[2]; + co[7] = ci[3]; +#endif +#endif + return dVal; +} + +int AMF_DecodeBoolean(const char *data) { + return *data != 0; +} + +char * + AMF_EncodeInt16(char *output, char *outend, short nVal) { + if (output + 2 > outend) + return NULL; + + output[1] = nVal & 0xff; + output[0] = nVal >> 8; + return output + 2; +} + +char * + AMF_EncodeInt24(char *output, char *outend, int nVal) { + if (output + 3 > outend) + return NULL; + + output[2] = nVal & 0xff; + output[1] = nVal >> 8; + output[0] = nVal >> 16; + return output + 3; +} + +char * + AMF_EncodeInt32(char *output, char *outend, int nVal) { + if (output + 4 > outend) + return NULL; + + output[3] = nVal & 0xff; + output[2] = nVal >> 8; + output[1] = nVal >> 16; + output[0] = nVal >> 24; + return output + 4; +} + +char * + AMF_EncodeString(char *output, char *outend, const AVal *bv) { + if ((bv->av_len < 65536 && output + 1 + 2 + bv->av_len > outend) || + output + 1 + 4 + bv->av_len > 
outend) + return NULL; + + if (bv->av_len < 65536) { + *output++ = AMF_STRING; + + output = AMF_EncodeInt16(output, outend, bv->av_len); + } else { + *output++ = AMF_LONG_STRING; + + output = AMF_EncodeInt32(output, outend, bv->av_len); + } + memcpy(output, bv->av_val, bv->av_len); + output += bv->av_len; + + return output; +} + +char * + AMF_EncodeNumber(char *output, char *outend, double dVal) { + if (output + 1 + 8 > outend) + return NULL; + + *output++ = AMF_NUMBER; /* type: Number */ + +#if __FLOAT_WORD_ORDER == __BYTE_ORDER +#if __BYTE_ORDER == __BIG_ENDIAN + memcpy(output, &dVal, 8); +#elif __BYTE_ORDER == __LITTLE_ENDIAN + { + unsigned char *ci, *co; + ci = (unsigned char *)&dVal; + co = (unsigned char *)output; + co[0] = ci[7]; + co[1] = ci[6]; + co[2] = ci[5]; + co[3] = ci[4]; + co[4] = ci[3]; + co[5] = ci[2]; + co[6] = ci[1]; + co[7] = ci[0]; + } +#endif +#else +#if __BYTE_ORDER == __LITTLE_ENDIAN /* __FLOAT_WORD_ORER == __BIG_ENDIAN */ + { + unsigned char *ci, *co; + ci = (unsigned char *)&dVal; + co = (unsigned char *)output; + co[0] = ci[3]; + co[1] = ci[2]; + co[2] = ci[1]; + co[3] = ci[0]; + co[4] = ci[7]; + co[5] = ci[6]; + co[6] = ci[5]; + co[7] = ci[4]; + } +#else /* __BYTE_ORDER == __BIG_ENDIAN && __FLOAT_WORD_ORER == __LITTLE_ENDIAN */ + { + unsigned char *ci, *co; + ci = (unsigned char *)&dVal; + co = (unsigned char *)output; + co[0] = ci[4]; + co[1] = ci[5]; + co[2] = ci[6]; + co[3] = ci[7]; + co[4] = ci[0]; + co[5] = ci[1]; + co[6] = ci[2]; + co[7] = ci[3]; + } +#endif +#endif + + return output + 8; +} + +char * + AMF_EncodeBoolean(char *output, char *outend, int bVal) { + if (output + 2 > outend) + return NULL; + + *output++ = AMF_BOOLEAN; + + *output++ = bVal ? 
0x01 : 0x00; + + return output; +} + +char * + AMF_EncodeNamedString(char *output, char *outend, const AVal *strName, const AVal *strValue) { + if (output + 2 + strName->av_len > outend) + return NULL; + output = AMF_EncodeInt16(output, outend, strName->av_len); + + memcpy(output, strName->av_val, strName->av_len); + output += strName->av_len; + + return AMF_EncodeString(output, outend, strValue); +} + +char * + AMF_EncodeNamedNumber(char *output, char *outend, const AVal *strName, double dVal) { + if (output + 2 + strName->av_len > outend) + return NULL; + output = AMF_EncodeInt16(output, outend, strName->av_len); + + memcpy(output, strName->av_val, strName->av_len); + output += strName->av_len; + + return AMF_EncodeNumber(output, outend, dVal); +} + +char * + AMF_EncodeNamedBoolean(char *output, char *outend, const AVal *strName, int bVal) { + if (output + 2 + strName->av_len > outend) + return NULL; + output = AMF_EncodeInt16(output, outend, strName->av_len); + + memcpy(output, strName->av_val, strName->av_len); + output += strName->av_len; + + return AMF_EncodeBoolean(output, outend, bVal); +} + +void AMFProp_GetName(AMFObjectProperty *prop, AVal *name) { + *name = prop->p_name; +} + +void AMFProp_SetName(AMFObjectProperty *prop, AVal *name) { + prop->p_name = *name; +} + +AMFDataType + AMFProp_GetType(AMFObjectProperty *prop) { + return prop->p_type; +} + +double + AMFProp_GetNumber(AMFObjectProperty *prop) { + return prop->p_vu.p_number; +} + +int AMFProp_GetBoolean(AMFObjectProperty *prop) { + return prop->p_vu.p_number != 0; +} + +void AMFProp_GetString(AMFObjectProperty *prop, AVal *str) { + *str = prop->p_vu.p_aval; +} + +void AMFProp_GetObject(AMFObjectProperty *prop, AMFObject *obj) { + *obj = prop->p_vu.p_object; +} + +int AMFProp_IsValid(AMFObjectProperty *prop) { + return prop->p_type != AMF_INVALID; +} + +char * + AMFProp_Encode(AMFObjectProperty *prop, char *pBuffer, char *pBufEnd) { + if (prop->p_type == AMF_INVALID) + return NULL; + + if 
(prop->p_type != AMF_NULL && pBuffer + prop->p_name.av_len + 2 + 1 >= pBufEnd) + return NULL; + + if (prop->p_type != AMF_NULL && prop->p_name.av_len) { + *pBuffer++ = prop->p_name.av_len >> 8; + *pBuffer++ = prop->p_name.av_len & 0xff; + memcpy(pBuffer, prop->p_name.av_val, prop->p_name.av_len); + pBuffer += prop->p_name.av_len; + } + + switch (prop->p_type) { + case AMF_NUMBER: + pBuffer = AMF_EncodeNumber(pBuffer, pBufEnd, prop->p_vu.p_number); + break; + + case AMF_BOOLEAN: + pBuffer = AMF_EncodeBoolean(pBuffer, pBufEnd, prop->p_vu.p_number != 0); + break; + + case AMF_STRING: + pBuffer = AMF_EncodeString(pBuffer, pBufEnd, &prop->p_vu.p_aval); + break; + + case AMF_NULL: + if (pBuffer + 1 >= pBufEnd) + return NULL; + *pBuffer++ = AMF_NULL; + break; + + case AMF_OBJECT: + pBuffer = AMF_Encode(&prop->p_vu.p_object, pBuffer, pBufEnd); + break; + + default: + RTMP_Log(RTMP_LOGERROR, "%s, invalid type. %d", __FUNCTION__, prop->p_type); + pBuffer = NULL; + }; + + return pBuffer; +} + +#define AMF3_INTEGER_MAX 268435455 +#define AMF3_INTEGER_MIN -268435456 + +int AMF3ReadInteger(const char *data, int32_t *valp) { + int i = 0; + int32_t val = 0; + + while (i <= 2) { /* handle first 3 bytes */ + if (data[i] & 0x80) { /* byte used */ + val <<= 7; /* shift up */ + val |= (data[i] & 0x7f); /* add bits */ + i++; + } else { + break; + } + } + + if (i > 2) { /* use 4th byte, all 8bits */ + val <<= 8; + val |= data[3]; + + /* range check */ + if (val > AMF3_INTEGER_MAX) + val -= (1 << 29); + } else { /* use 7bits of last unparsed byte (0xxxxxxx) */ + val <<= 7; + val |= data[i]; + } + + *valp = val; + + return i > 2 ? 
4 : i + 1; +} + +int AMF3ReadString(const char *data, AVal *str) { + int32_t ref = 0; + int len; + assert(str != 0); + + len = AMF3ReadInteger(data, &ref); + data += len; + + if ((ref & 0x1) == 0) { /* reference: 0xxx */ + uint32_t refIndex = (ref >> 1); + RTMP_Log(RTMP_LOGDEBUG, + "%s, string reference, index: %d, not supported, ignoring!", + __FUNCTION__, refIndex); + return len; + } else { + uint32_t nSize = (ref >> 1); + + str->av_val = (char *)data; + str->av_len = nSize; + + return len + nSize; + } + return len; +} + +int AMF3Prop_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize, + int bDecodeName) { + int nOriginalSize = nSize; + AMF3DataType type; + + prop->p_name.av_len = 0; + prop->p_name.av_val = NULL; + + if (nSize == 0 || !pBuffer) { + RTMP_Log(RTMP_LOGDEBUG, "empty buffer/no buffer pointer!"); + return -1; + } + + /* decode name */ + if (bDecodeName) { + AVal name; + int nRes = AMF3ReadString(pBuffer, &name); + + if (name.av_len <= 0) + return nRes; + + prop->p_name = name; + pBuffer += nRes; + nSize -= nRes; + } + + /* decode */ + type = *pBuffer++; + nSize--; + + switch (type) { + case AMF3_UNDEFINED: + case AMF3_NULL: + prop->p_type = AMF_NULL; + break; + case AMF3_FALSE: + prop->p_type = AMF_BOOLEAN; + prop->p_vu.p_number = 0.0; + break; + case AMF3_TRUE: + prop->p_type = AMF_BOOLEAN; + prop->p_vu.p_number = 1.0; + break; + case AMF3_INTEGER: { + int32_t res = 0; + int len = AMF3ReadInteger(pBuffer, &res); + prop->p_vu.p_number = (double)res; + prop->p_type = AMF_NUMBER; + nSize -= len; + break; + } + case AMF3_DOUBLE: + if (nSize < 8) + return -1; + prop->p_vu.p_number = AMF_DecodeNumber(pBuffer); + prop->p_type = AMF_NUMBER; + nSize -= 8; + break; + case AMF3_STRING: + case AMF3_XML_DOC: + case AMF3_XML: { + int len = AMF3ReadString(pBuffer, &prop->p_vu.p_aval); + prop->p_type = AMF_STRING; + nSize -= len; + break; + } + case AMF3_DATE: { + int32_t res = 0; + int len = AMF3ReadInteger(pBuffer, &res); + + nSize -= len; + pBuffer += 
len; + + if ((res & 0x1) == 0) { /* reference */ + uint32_t nIndex = (res >> 1); + RTMP_Log(RTMP_LOGDEBUG, "AMF3_DATE reference: %d, not supported!", nIndex); + } else { + if (nSize < 8) + return -1; + + prop->p_vu.p_number = AMF_DecodeNumber(pBuffer); + nSize -= 8; + prop->p_type = AMF_NUMBER; + } + break; + } + case AMF3_OBJECT: { + int nRes = AMF3_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE); + if (nRes == -1) + return -1; + nSize -= nRes; + prop->p_type = AMF_OBJECT; + break; + } + case AMF3_ARRAY: + case AMF3_BYTE_ARRAY: + default: + RTMP_Log(RTMP_LOGDEBUG, "%s - AMF3 unknown/unsupported datatype 0x%02x, @0x%08X", + __FUNCTION__, (unsigned char)(*pBuffer), pBuffer); + return -1; + } + + return nOriginalSize - nSize; +} + +int AMFProp_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize, + int bDecodeName) { + int nOriginalSize = nSize; + int nRes; + + prop->p_name.av_len = 0; + prop->p_name.av_val = NULL; + + if (nSize == 0 || !pBuffer) { + RTMP_Log(RTMP_LOGDEBUG, "%s: Empty buffer/no buffer pointer!", __FUNCTION__); + return -1; + } + + if (bDecodeName && nSize < 4) { /* at least name (length + at least 1 byte) and 1 byte of data */ + RTMP_Log(RTMP_LOGDEBUG, + "%s: Not enough data for decoding with name, less than 4 bytes!", + __FUNCTION__); + return -1; + } + + if (bDecodeName) { + unsigned short nNameSize = AMF_DecodeInt16(pBuffer); + if (nNameSize > nSize - 2) { + RTMP_Log(RTMP_LOGDEBUG, + "%s: Name size out of range: namesize (%d) > len (%d) - 2", + __FUNCTION__, nNameSize, nSize); + return -1; + } + + AMF_DecodeString(pBuffer, &prop->p_name); + nSize -= 2 + nNameSize; + pBuffer += 2 + nNameSize; + } + + if (nSize == 0) { + return -1; + } + + nSize--; + + prop->p_type = *pBuffer++; + switch (prop->p_type) { + case AMF_NUMBER: + if (nSize < 8) + return -1; + prop->p_vu.p_number = AMF_DecodeNumber(pBuffer); + nSize -= 8; + break; + case AMF_BOOLEAN: + if (nSize < 1) + return -1; + prop->p_vu.p_number = (double)AMF_DecodeBoolean(pBuffer); + 
nSize--; + break; + case AMF_STRING: { + unsigned short nStringSize = AMF_DecodeInt16(pBuffer); + + if (nSize < (long)nStringSize + 2) + return -1; + AMF_DecodeString(pBuffer, &prop->p_vu.p_aval); + nSize -= (2 + nStringSize); + break; + } + case AMF_OBJECT: { + int nRes = AMF_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE); + if (nRes == -1) + return -1; + nSize -= nRes; + break; + } + case AMF_MOVIECLIP: { + RTMP_Log(RTMP_LOGERROR, "AMF_MOVIECLIP reserved!"); + return -1; + break; + } + case AMF_NULL: + case AMF_UNDEFINED: + case AMF_UNSUPPORTED: + prop->p_type = AMF_NULL; + break; + case AMF_REFERENCE: { + RTMP_Log(RTMP_LOGERROR, "AMF_REFERENCE not supported!"); + return -1; + break; + } + case AMF_ECMA_ARRAY: { + nSize -= 4; + + /* next comes the rest, mixed array has a final 0x000009 mark and names, so its an object */ + nRes = AMF_Decode(&prop->p_vu.p_object, pBuffer + 4, nSize, TRUE); + if (nRes == -1) + return -1; + nSize -= nRes; + prop->p_type = AMF_OBJECT; + break; + } + case AMF_OBJECT_END: { + return -1; + break; + } + case AMF_STRICT_ARRAY: { + unsigned int nArrayLen = AMF_DecodeInt32(pBuffer); + nSize -= 4; + + nRes = AMF_DecodeArray(&prop->p_vu.p_object, pBuffer + 4, nSize, + nArrayLen, FALSE); + if (nRes == -1) + return -1; + nSize -= nRes; + prop->p_type = AMF_OBJECT; + break; + } + case AMF_DATE: { + RTMP_Log(RTMP_LOGDEBUG, "AMF_DATE"); + + if (nSize < 10) + return -1; + + prop->p_vu.p_number = AMF_DecodeNumber(pBuffer); + prop->p_UTCoffset = AMF_DecodeInt16(pBuffer + 8); + + nSize -= 10; + break; + } + case AMF_LONG_STRING: { + unsigned int nStringSize = AMF_DecodeInt32(pBuffer); + if (nSize < (long)nStringSize + 4) + return -1; + AMF_DecodeLongString(pBuffer, &prop->p_vu.p_aval); + nSize -= (4 + nStringSize); + prop->p_type = AMF_STRING; + break; + } + case AMF_RECORDSET: { + RTMP_Log(RTMP_LOGERROR, "AMF_RECORDSET reserved!"); + return -1; + break; + } + case AMF_XML_DOC: { + RTMP_Log(RTMP_LOGERROR, "AMF_XML_DOC not supported!"); + return 
-1; + break; + } + case AMF_TYPED_OBJECT: { + RTMP_Log(RTMP_LOGERROR, "AMF_TYPED_OBJECT not supported!"); + return -1; + break; + } + case AMF_AVMPLUS: { + int nRes = AMF3_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE); + if (nRes == -1) + return -1; + nSize -= nRes; + prop->p_type = AMF_OBJECT; + break; + } + default: + RTMP_Log(RTMP_LOGDEBUG, "%s - unknown datatype 0x%02x, @0x%08X", __FUNCTION__, + prop->p_type, pBuffer - 1); + return -1; + } + + return nOriginalSize - nSize; +} + +void AMFProp_Dump(AMFObjectProperty *prop) { + char strRes[256]; + char str[256]; + AVal name; + + if (prop->p_type == AMF_INVALID) { + RTMP_Log(RTMP_LOGDEBUG, "Property: INVALID"); + return; + } + + if (prop->p_type == AMF_NULL) { + RTMP_Log(RTMP_LOGDEBUG, "Property: NULL"); + return; + } + + if (prop->p_name.av_len) { + name = prop->p_name; + } else { + name.av_val = "no-name."; + name.av_len = sizeof("no-name.") - 1; + } + if (name.av_len > 18) + name.av_len = 18; + + snprintf(strRes, 255, "Name: %18.*s, ", name.av_len, name.av_val); + + if (prop->p_type == AMF_OBJECT) { + RTMP_Log(RTMP_LOGDEBUG, "Property: <%sOBJECT>", strRes); + AMF_Dump(&prop->p_vu.p_object); + return; + } + + switch (prop->p_type) { + case AMF_NUMBER: + snprintf(str, 255, "NUMBER:\t%.2f", prop->p_vu.p_number); + break; + case AMF_BOOLEAN: + snprintf(str, 255, "BOOLEAN:\t%s", + prop->p_vu.p_number != 0.0 ? 
"TRUE" : "FALSE"); + break; + case AMF_STRING: + snprintf(str, 255, "STRING:\t%.*s", prop->p_vu.p_aval.av_len, + prop->p_vu.p_aval.av_val); + break; + case AMF_DATE: + snprintf(str, 255, "DATE:\ttimestamp: %.2f, UTC offset: %d", + prop->p_vu.p_number, prop->p_UTCoffset); + break; + default: + snprintf(str, 255, "INVALID TYPE 0x%02x", (unsigned char)prop->p_type); + } + + RTMP_Log(RTMP_LOGDEBUG, "Property: <%s%s>", strRes, str); +} + +void AMFProp_Reset(AMFObjectProperty *prop) { + if (prop->p_type == AMF_OBJECT) + AMF_Reset(&prop->p_vu.p_object); + else { + prop->p_vu.p_aval.av_len = 0; + prop->p_vu.p_aval.av_val = NULL; + } + prop->p_type = AMF_INVALID; +} + +/* AMFObject */ + +char * + AMF_Encode(AMFObject *obj, char *pBuffer, char *pBufEnd) { + int i; + + if (pBuffer + 4 >= pBufEnd) + return NULL; + + *pBuffer++ = AMF_OBJECT; + + for (i = 0; i < obj->o_num; i++) { + char *res = AMFProp_Encode(&obj->o_props[i], pBuffer, pBufEnd); + if (res == NULL) { + RTMP_Log(RTMP_LOGERROR, "AMF_Encode - failed to encode property in index %d", + i); + break; + } else { + pBuffer = res; + } + } + + if (pBuffer + 3 >= pBufEnd) + return NULL; /* no room for the end marker */ + + pBuffer = AMF_EncodeInt24(pBuffer, pBufEnd, AMF_OBJECT_END); + + return pBuffer; +} + +int AMF_DecodeArray(AMFObject *obj, const char *pBuffer, int nSize, + int nArrayLen, int bDecodeName) { + int nOriginalSize = nSize; + int bError = FALSE; + + obj->o_num = 0; + obj->o_props = NULL; + while (nArrayLen > 0) { + AMFObjectProperty prop; + int nRes; + nArrayLen--; + + nRes = AMFProp_Decode(&prop, pBuffer, nSize, bDecodeName); + if (nRes == -1) + bError = TRUE; + else { + nSize -= nRes; + pBuffer += nRes; + AMF_AddProp(obj, &prop); + } + } + if (bError) + return -1; + + return nOriginalSize - nSize; +} + +int AMF3_Decode(AMFObject *obj, const char *pBuffer, int nSize, int bAMFData) { + int nOriginalSize = nSize; + int32_t ref; + int len; + + obj->o_num = 0; + obj->o_props = NULL; + if (bAMFData) { + if 
(*pBuffer != AMF3_OBJECT) + RTMP_Log(RTMP_LOGERROR, + "AMF3 Object encapsulated in AMF stream does not start with AMF3_OBJECT!"); + pBuffer++; + nSize--; + } + + ref = 0; + len = AMF3ReadInteger(pBuffer, &ref); + pBuffer += len; + nSize -= len; + + if ((ref & 1) == 0) { /* object reference, 0xxx */ + uint32_t objectIndex = (ref >> 1); + + RTMP_Log(RTMP_LOGDEBUG, "Object reference, index: %d", objectIndex); + } else /* object instance */ + { + int32_t classRef = (ref >> 1); + + AMF3ClassDef cd = {{0, 0}}; + AMFObjectProperty prop; + + if ((classRef & 0x1) == 0) { /* class reference */ + uint32_t classIndex = (classRef >> 1); + RTMP_Log(RTMP_LOGDEBUG, "Class reference: %d", classIndex); + } else { + int32_t classExtRef = (classRef >> 1); + int i; + + cd.cd_externalizable = (classExtRef & 0x1) == 1; + cd.cd_dynamic = ((classExtRef >> 1) & 0x1) == 1; + + cd.cd_num = classExtRef >> 2; + + /* class name */ + + len = AMF3ReadString(pBuffer, &cd.cd_name); + nSize -= len; + pBuffer += len; + + /*std::string str = className; */ + + RTMP_Log(RTMP_LOGDEBUG, + "Class name: %s, externalizable: %d, dynamic: %d, classMembers: %d", + cd.cd_name.av_val, cd.cd_externalizable, cd.cd_dynamic, + cd.cd_num); + + for (i = 0; i < cd.cd_num; i++) { + AVal memberName; + len = AMF3ReadString(pBuffer, &memberName); + RTMP_Log(RTMP_LOGDEBUG, "Member: %s", memberName.av_val); + AMF3CD_AddProp(&cd, &memberName); + nSize -= len; + pBuffer += len; + } + } + + /* add as referencable object */ + + if (cd.cd_externalizable) { + int nRes; + AVal name = AVC("DEFAULT_ATTRIBUTE"); + + RTMP_Log(RTMP_LOGDEBUG, "Externalizable, TODO check"); + + nRes = AMF3Prop_Decode(&prop, pBuffer, nSize, FALSE); + if (nRes == -1) + RTMP_Log(RTMP_LOGDEBUG, "%s, failed to decode AMF3 property!", + __FUNCTION__); + else { + nSize -= nRes; + pBuffer += nRes; + } + + AMFProp_SetName(&prop, &name); + AMF_AddProp(obj, &prop); + } else { + int nRes, i; + for (i = 0; i < cd.cd_num; i++) /* non-dynamic */ + { + nRes = 
AMF3Prop_Decode(&prop, pBuffer, nSize, FALSE); + if (nRes == -1) + RTMP_Log(RTMP_LOGDEBUG, "%s, failed to decode AMF3 property!", + __FUNCTION__); + + AMFProp_SetName(&prop, AMF3CD_GetProp(&cd, i)); + AMF_AddProp(obj, &prop); + + pBuffer += nRes; + nSize -= nRes; + } + if (cd.cd_dynamic) { + int len = 0; + + do { + nRes = AMF3Prop_Decode(&prop, pBuffer, nSize, TRUE); + AMF_AddProp(obj, &prop); + + pBuffer += nRes; + nSize -= nRes; + + len = prop.p_name.av_len; + } while (len > 0); + } + } + RTMP_Log(RTMP_LOGDEBUG, "class object!"); + } + return nOriginalSize - nSize; +} + +int AMF_Decode(AMFObject *obj, const char *pBuffer, int nSize, int bDecodeName) { + int nOriginalSize = nSize; + int bError = FALSE; /* if there is an error while decoding - try to at least find the end mark AMF_OBJECT_END */ + + obj->o_num = 0; + obj->o_props = NULL; + while (nSize > 0) { + AMFObjectProperty prop; + int nRes; + + if (nSize >= 3 && AMF_DecodeInt24(pBuffer) == AMF_OBJECT_END) { + nSize -= 3; + bError = FALSE; + break; + } + + if (bError) { + RTMP_Log(RTMP_LOGERROR, + "DECODING ERROR, IGNORING BYTES UNTIL NEXT KNOWN PATTERN!"); + nSize--; + pBuffer++; + continue; + } + + nRes = AMFProp_Decode(&prop, pBuffer, nSize, bDecodeName); + if (nRes == -1) + bError = TRUE; + else { + nSize -= nRes; + pBuffer += nRes; + AMF_AddProp(obj, &prop); + } + } + + if (bError) + return -1; + + return nOriginalSize - nSize; +} + +void AMF_AddProp(AMFObject *obj, const AMFObjectProperty *prop) { + if (!(obj->o_num & 0x0f)) + obj->o_props = + realloc(obj->o_props, (obj->o_num + 16) * sizeof(AMFObjectProperty)); + obj->o_props[obj->o_num++] = *prop; +} + +int AMF_CountProp(AMFObject *obj) { + return obj->o_num; +} + +AMFObjectProperty * + AMF_GetProp(AMFObject *obj, const AVal *name, int nIndex) { + if (nIndex >= 0) { + if (nIndex <= obj->o_num) + return &obj->o_props[nIndex]; + } else { + int n; + for (n = 0; n < obj->o_num; n++) { + if (AVMATCH(&obj->o_props[n].p_name, name)) + return &obj->o_props[n]; 
+ } + } + + return (AMFObjectProperty *)&AMFProp_Invalid; +} + +void AMF_Dump(AMFObject *obj) { + int n; + RTMP_Log(RTMP_LOGDEBUG, "(object begin)"); + for (n = 0; n < obj->o_num; n++) { + AMFProp_Dump(&obj->o_props[n]); + } + RTMP_Log(RTMP_LOGDEBUG, "(object end)"); +} + +void AMF_Reset(AMFObject *obj) { + int n; + for (n = 0; n < obj->o_num; n++) { + AMFProp_Reset(&obj->o_props[n]); + } + free(obj->o_props); + obj->o_props = NULL; + obj->o_num = 0; +} + +/* AMF3ClassDefinition */ + +void AMF3CD_AddProp(AMF3ClassDef *cd, AVal *prop) { + if (!(cd->cd_num & 0x0f)) + cd->cd_props = realloc(cd->cd_props, (cd->cd_num + 16) * sizeof(AVal)); + cd->cd_props[cd->cd_num++] = *prop; +} + +AVal * + AMF3CD_GetProp(AMF3ClassDef *cd, int nIndex) { + if (nIndex >= cd->cd_num) + return (AVal *)&AV_empty; + return &cd->cd_props[nIndex]; +} diff --git a/LFLiveKit/Vendor/pili-librtmp/amf.h b/LFLiveKit/Vendor/pili-librtmp/amf.h new file mode 100644 index 00000000..77f93e84 --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/amf.h @@ -0,0 +1,180 @@ +#ifndef __AMF_H__ +#define __AMF_H__ +/* + * Copyright (C) 2005-2008 Team XBMC + * http://www.xbmc.org + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. 
If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +#include + +#ifndef TRUE +#define TRUE 1 +#define FALSE 0 +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + AMF_NUMBER = 0, + AMF_BOOLEAN, + AMF_STRING, + AMF_OBJECT, + AMF_MOVIECLIP, /* reserved, not used */ + AMF_NULL, + AMF_UNDEFINED, + AMF_REFERENCE, + AMF_ECMA_ARRAY, + AMF_OBJECT_END, + AMF_STRICT_ARRAY, + AMF_DATE, + AMF_LONG_STRING, + AMF_UNSUPPORTED, + AMF_RECORDSET, /* reserved, not used */ + AMF_XML_DOC, + AMF_TYPED_OBJECT, + AMF_AVMPLUS, /* switch to AMF3 */ + AMF_INVALID = 0xff +} AMFDataType; + +typedef enum { + AMF3_UNDEFINED = 0, + AMF3_NULL, + AMF3_FALSE, + AMF3_TRUE, + AMF3_INTEGER, + AMF3_DOUBLE, + AMF3_STRING, + AMF3_XML_DOC, + AMF3_DATE, + AMF3_ARRAY, + AMF3_OBJECT, + AMF3_XML, + AMF3_BYTE_ARRAY +} AMF3DataType; + +typedef struct AVal { + char *av_val; + int av_len; +} AVal; +#define AVC(str) \ + { str, sizeof(str) - 1 } +#define AVMATCH(a1, a2) \ + ((a1)->av_len == (a2)->av_len && \ + !memcmp((a1)->av_val, (a2)->av_val, (a1)->av_len)) + +struct AMFObjectProperty; + +typedef struct AMFObject { + int o_num; + struct AMFObjectProperty *o_props; +} AMFObject; + +typedef struct AMFObjectProperty { + AVal p_name; + AMFDataType p_type; + union { + double p_number; + AVal p_aval; + AMFObject p_object; + } p_vu; + int16_t p_UTCoffset; +} AMFObjectProperty; + +char *AMF_EncodeString(char *output, char *outend, const AVal *str); +char *AMF_EncodeNumber(char *output, char *outend, double dVal); +char *AMF_EncodeInt16(char *output, char *outend, short nVal); +char *AMF_EncodeInt24(char *output, char *outend, int nVal); +char *AMF_EncodeInt32(char *output, char *outend, int nVal); +char *AMF_EncodeBoolean(char *output, char *outend, int bVal); + +/* Shortcuts for AMFProp_Encode */ +char *AMF_EncodeNamedString(char *output, char *outend, const AVal *name, + const AVal 
*value); +char *AMF_EncodeNamedNumber(char *output, char *outend, const AVal *name, + double dVal); +char *AMF_EncodeNamedBoolean(char *output, char *outend, const AVal *name, + int bVal); + +unsigned short AMF_DecodeInt16(const char *data); +unsigned int AMF_DecodeInt24(const char *data); +unsigned int AMF_DecodeInt32(const char *data); +void AMF_DecodeString(const char *data, AVal *str); +void AMF_DecodeLongString(const char *data, AVal *str); +int AMF_DecodeBoolean(const char *data); +double AMF_DecodeNumber(const char *data); + +char *AMF_Encode(AMFObject *obj, char *pBuffer, char *pBufEnd); +int AMF_Decode(AMFObject *obj, const char *pBuffer, int nSize, int bDecodeName); +int AMF_DecodeArray(AMFObject *obj, const char *pBuffer, int nSize, + int nArrayLen, int bDecodeName); +int AMF3_Decode(AMFObject *obj, const char *pBuffer, int nSize, + int bDecodeName); +void AMF_Dump(AMFObject *obj); +void AMF_Reset(AMFObject *obj); + +void AMF_AddProp(AMFObject *obj, const AMFObjectProperty *prop); +int AMF_CountProp(AMFObject *obj); +AMFObjectProperty *AMF_GetProp(AMFObject *obj, const AVal *name, int nIndex); + +AMFDataType AMFProp_GetType(AMFObjectProperty *prop); +void AMFProp_SetNumber(AMFObjectProperty *prop, double dval); +void AMFProp_SetBoolean(AMFObjectProperty *prop, int bflag); +void AMFProp_SetString(AMFObjectProperty *prop, AVal *str); +void AMFProp_SetObject(AMFObjectProperty *prop, AMFObject *obj); + +void AMFProp_GetName(AMFObjectProperty *prop, AVal *name); +void AMFProp_SetName(AMFObjectProperty *prop, AVal *name); +double AMFProp_GetNumber(AMFObjectProperty *prop); +int AMFProp_GetBoolean(AMFObjectProperty *prop); +void AMFProp_GetString(AMFObjectProperty *prop, AVal *str); +void AMFProp_GetObject(AMFObjectProperty *prop, AMFObject *obj); + +int AMFProp_IsValid(AMFObjectProperty *prop); + +char *AMFProp_Encode(AMFObjectProperty *prop, char *pBuffer, char *pBufEnd); +int AMF3Prop_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize, + int 
bDecodeName); +int AMFProp_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize, + int bDecodeName); + +void AMFProp_Dump(AMFObjectProperty *prop); +void AMFProp_Reset(AMFObjectProperty *prop); + +typedef struct AMF3ClassDef { + AVal cd_name; + char cd_externalizable; + char cd_dynamic; + int cd_num; + AVal *cd_props; +} AMF3ClassDef; + +void AMF3CD_AddProp(AMF3ClassDef *cd, AVal *prop); +AVal *AMF3CD_GetProp(AMF3ClassDef *cd, int idx); + +#ifdef __cplusplus +} +#endif + +#endif /* __AMF_H__ */ diff --git a/LFLiveKit/Vendor/pili-librtmp/bytes.h b/LFLiveKit/Vendor/pili-librtmp/bytes.h new file mode 100644 index 00000000..87221cf1 --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/bytes.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2005-2008 Team XBMC + * http://www.xbmc.org + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * http://www.gnu.org/copyleft/lgpl.html + */ + +#ifndef __BYTES_H__ +#define __BYTES_H__ + +#include + +#ifdef _WIN32 +/* Windows is little endian only */ +#define __LITTLE_ENDIAN 1234 +#define __BIG_ENDIAN 4321 +#define __BYTE_ORDER __LITTLE_ENDIAN +#define __FLOAT_WORD_ORDER __BYTE_ORDER + +typedef unsigned char uint8_t; + +#else /* !_WIN32 */ + +#include + +#if defined(BYTE_ORDER) && !defined(__BYTE_ORDER) +#define __BYTE_ORDER BYTE_ORDER +#endif + +#if defined(BIG_ENDIAN) && !defined(__BIG_ENDIAN) +#define __BIG_ENDIAN BIG_ENDIAN +#endif + +#if defined(LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN) +#define __LITTLE_ENDIAN LITTLE_ENDIAN +#endif + +#endif /* !_WIN32 */ + +/* define default endianness */ +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN 1234 +#endif + +#ifndef __BIG_ENDIAN +#define __BIG_ENDIAN 4321 +#endif + +#ifndef __BYTE_ORDER +#warning "Byte order not defined on your system, assuming little endian!" +#define __BYTE_ORDER __LITTLE_ENDIAN +#endif + +/* ok, we assume to have the same float word order and byte order if float word + * order is not defined */ +#ifndef __FLOAT_WORD_ORDER +#warning "Float word order not defined, assuming the same as byte order!" +#define __FLOAT_WORD_ORDER __BYTE_ORDER +#endif + +#if !defined(__BYTE_ORDER) || !defined(__FLOAT_WORD_ORDER) +#error "Undefined byte or float word order!" +#endif + +#if __FLOAT_WORD_ORDER != __BIG_ENDIAN && __FLOAT_WORD_ORDER != __LITTLE_ENDIAN +#error "Unknown/unsupported float word order!" +#endif + +#if __BYTE_ORDER != __BIG_ENDIAN && __BYTE_ORDER != __LITTLE_ENDIAN +#error "Unknown/unsupported byte order!" +#endif + +#endif diff --git a/LFLiveKit/Vendor/pili-librtmp/dh.h b/LFLiveKit/Vendor/pili-librtmp/dh.h new file mode 100644 index 00000000..d7aeb5a5 --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/dh.h @@ -0,0 +1,345 @@ +/* RTMPDump - Diffie-Hellmann Key Exchange + * Copyright (C) 2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. 
+ * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +#include +#include +#include +#include +#include + +#ifdef USE_POLARSSL +#include +typedef mpi *MP_t; +#define MP_new(m) \ + m = malloc(sizeof(mpi)); \ + mpi_init(m, NULL) +#define MP_set_w(mpi, w) mpi_lset(mpi, w) +#define MP_cmp(u, v) mpi_cmp_mpi(u, v) +#define MP_set(u, v) mpi_copy(u, v) +#define MP_sub_w(mpi, w) mpi_sub_int(mpi, mpi, w) +#define MP_cmp_1(mpi) mpi_cmp_int(mpi, 1) +#define MP_modexp(r, y, q, p) mpi_exp_mod(r, y, q, p, NULL) +#define MP_free(mpi) \ + mpi_free(mpi, NULL); \ + free(mpi) +#define MP_gethex(u, hex, res) \ + MP_new(u); \ + res = mpi_read_string(u, 16, hex) == 0 +#define MP_bytes(u) mpi_size(u) +#define MP_setbin(u, buf, len) mpi_write_binary(u, buf, len) +#define MP_getbin(u, buf, len) \ + MP_new(u); \ + mpi_read_binary(u, buf, len) + +typedef struct MDH { + MP_t p; + MP_t g; + MP_t pub_key; + MP_t priv_key; + long length; + dhm_context ctx; +} MDH; + +#define MDH_new() calloc(1, sizeof(MDH)) +#define MDH_free(vp) \ + { \ + MDH *dh = vp; \ + dhm_free(&dh->ctx); \ + MP_free(dh->p); \ + MP_free(dh->g); \ + MP_free(dh->pub_key); \ + MP_free(dh->priv_key); \ + free(dh); \ + } + +static int MDH_generate_key(MDH *dh) { + unsigned char out[2]; + 
MP_set(&dh->ctx.P, dh->p); + MP_set(&dh->ctx.G, dh->g); + dh->ctx.len = 128; + dhm_make_public(&dh->ctx, 1024, out, 1, havege_rand, &RTMP_TLS_ctx->hs); + MP_new(dh->pub_key); + MP_new(dh->priv_key); + MP_set(dh->pub_key, &dh->ctx.GX); + MP_set(dh->priv_key, &dh->ctx.X); + return 1; +} + +static int MDH_compute_key(uint8_t *secret, size_t len, MP_t pub, MDH *dh) { + int n = len; + MP_set(&dh->ctx.GY, pub); + dhm_calc_secret(&dh->ctx, secret, &n); + return 0; +} + +#elif defined(USE_GNUTLS) +#include +typedef gcry_mpi_t MP_t; +#define MP_new(m) m = gcry_mpi_new(1) +#define MP_set_w(mpi, w) gcry_mpi_set_ui(mpi, w) +#define MP_cmp(u, v) gcry_mpi_cmp(u, v) +#define MP_set(u, v) gcry_mpi_set(u, v) +#define MP_sub_w(mpi, w) gcry_mpi_sub_ui(mpi, mpi, w) +#define MP_cmp_1(mpi) gcry_mpi_cmp_ui(mpi, 1) +#define MP_modexp(r, y, q, p) gcry_mpi_powm(r, y, q, p) +#define MP_free(mpi) gcry_mpi_release(mpi) +#define MP_gethex(u, hex, res) \ + res = (gcry_mpi_scan(&u, GCRYMPI_FMT_HEX, hex, 0, 0) == 0) +#define MP_bytes(u) (gcry_mpi_get_nbits(u) + 7) / 8 +#define MP_setbin(u, buf, len) \ + gcry_mpi_print(GCRYMPI_FMT_USG, buf, len, NULL, u) +#define MP_getbin(u, buf, len) \ + gcry_mpi_scan(&u, GCRYMPI_FMT_USG, buf, len, NULL) + +typedef struct MDH { + MP_t p; + MP_t g; + MP_t pub_key; + MP_t priv_key; + long length; +} MDH; + +#define MDH_new() calloc(1, sizeof(MDH)) +#define MDH_free(dh) \ + do { \ + MP_free(((MDH *)(dh))->p); \ + MP_free(((MDH *)(dh))->g); \ + MP_free(((MDH *)(dh))->pub_key); \ + MP_free(((MDH *)(dh))->priv_key); \ + free(dh); \ + } while (0) + +extern MP_t gnutls_calc_dh_secret(MP_t *priv, MP_t g, MP_t p); +extern MP_t gnutls_calc_dh_key(MP_t y, MP_t x, MP_t p); + +#define MDH_generate_key(dh) \ + (dh->pub_key = gnutls_calc_dh_secret(&dh->priv_key, dh->g, dh->p)) +static int MDH_compute_key(uint8_t *secret, size_t len, MP_t pub, MDH *dh) { + MP_t sec = gnutls_calc_dh_key(pub, dh->priv_key, dh->p); + if (sec) { + MP_setbin(sec, secret, len); + MP_free(sec); + return 
0; + } else + return -1; +} + +#else /* USE_OPENSSL */ +#include +#include + +typedef BIGNUM *MP_t; +#define MP_new(m) m = BN_new() +#define MP_set_w(mpi, w) BN_set_word(mpi, w) +#define MP_cmp(u, v) BN_cmp(u, v) +#define MP_set(u, v) BN_copy(u, v) +#define MP_sub_w(mpi, w) BN_sub_word(mpi, w) +#define MP_cmp_1(mpi) BN_cmp(mpi, BN_value_one()) +#define MP_modexp(r, y, q, p) \ + do { \ + BN_CTX *ctx = BN_CTX_new(); \ + BN_mod_exp(r, y, q, p, ctx); \ + BN_CTX_free(ctx); \ + } while (0) +#define MP_free(mpi) BN_free(mpi) +#define MP_gethex(u, hex, res) res = BN_hex2bn(&u, hex) +#define MP_bytes(u) BN_num_bytes(u) +#define MP_setbin(u, buf, len) BN_bn2bin(u, buf) +#define MP_getbin(u, buf, len) u = BN_bin2bn(buf, len, 0) + +#define MDH DH +#define MDH_new() DH_new() +#define MDH_free(dh) DH_free(dh) +#define MDH_generate_key(dh) DH_generate_key(dh) +#define MDH_compute_key(secret, seclen, pub, dh) DH_compute_key(secret, pub, dh) + +#endif + +#include "dhgroups.h" +#include "log.h" + +/* RFC 2631, Section 2.1.5, http://www.ietf.org/rfc/rfc2631.txt */ +static int isValidPublicKey(MP_t y, MP_t p, MP_t q) { + int ret = TRUE; + MP_t bn; + assert(y); + + MP_new(bn); + assert(bn); + + /* y must lie in [2,p-1] */ + MP_set_w(bn, 1); + if (MP_cmp(y, bn) < 0) { + RTMP_Log(RTMP_LOGERROR, "DH public key must be at least 2"); + ret = FALSE; + goto failed; + } + + /* bn = p-2 */ + MP_set(bn, p); + MP_sub_w(bn, 1); + if (MP_cmp(y, bn) > 0) { + RTMP_Log(RTMP_LOGERROR, "DH public key must be at most p-2"); + ret = FALSE; + goto failed; + } + + /* Verify with Sophie-Germain prime + * + * This is a nice test to make sure the public key position is calculated + * correctly. This test will fail in about 50% of the cases if applied to + * random data. 
+ */ + if (q) { + /* y must fulfill y^q mod p = 1 */ + MP_modexp(bn, y, q, p); + + if (MP_cmp_1(bn) != 0) { + RTMP_Log(RTMP_LOGWARNING, "DH public key does not fulfill y^q mod p = 1"); + } + } + +failed: + MP_free(bn); + return ret; +} + +static MDH *DHInit(int nKeyBits) { + size_t res; + MDH *dh = MDH_new(); + + if (!dh) + goto failed; + + MP_new(dh->g); + + if (!dh->g) + goto failed; + + MP_gethex(dh->p, P1024, res); /* prime P1024, see dhgroups.h */ + if (!res) { + goto failed; + } + + MP_set_w(dh->g, 2); /* base 2 */ + + dh->length = nKeyBits; + return dh; + +failed: + if (dh) + MDH_free(dh); + + return 0; +} + +static int DHGenerateKey(MDH *dh) { + size_t res = 0; + if (!dh) + return 0; + + while (!res) { + MP_t q1 = NULL; + + if (!MDH_generate_key(dh)) + return 0; + + MP_gethex(q1, Q1024, res); + assert(res); + + res = isValidPublicKey(dh->pub_key, dh->p, q1); + if (!res) { + MP_free(dh->pub_key); + MP_free(dh->priv_key); + dh->pub_key = dh->priv_key = 0; + } + + MP_free(q1); + } + return 1; +} + +/* fill pubkey with the public key in BIG ENDIAN order + * 00 00 00 00 00 x1 x2 x3 ..... 
+ */ + +static int DHGetPublicKey(MDH *dh, uint8_t *pubkey, size_t nPubkeyLen) { + int len; + if (!dh || !dh->pub_key) + return 0; + + len = MP_bytes(dh->pub_key); + if (len <= 0 || len > (int)nPubkeyLen) + return 0; + + memset(pubkey, 0, nPubkeyLen); + MP_setbin(dh->pub_key, pubkey + (nPubkeyLen - len), len); + return 1; +} + +#if 0 /* unused */ +static int +DHGetPrivateKey(MDH *dh, uint8_t *privkey, size_t nPrivkeyLen) +{ + if (!dh || !dh->priv_key) + return 0; + + int len = MP_bytes(dh->priv_key); + if (len <= 0 || len > (int) nPrivkeyLen) + return 0; + + memset(privkey, 0, nPrivkeyLen); + MP_setbin(dh->priv_key, privkey + (nPrivkeyLen - len), len); + return 1; +} +#endif + +/* computes the shared secret key from the private MDH value and the + * other party's public key (pubkey) + */ +static int DHComputeSharedSecretKey(MDH *dh, uint8_t *pubkey, size_t nPubkeyLen, + uint8_t *secret) { + MP_t q1 = NULL, pubkeyBn = NULL; + size_t len; + int res; + + if (!dh || !secret || nPubkeyLen >= INT_MAX) + return -1; + + MP_getbin(pubkeyBn, pubkey, nPubkeyLen); + if (!pubkeyBn) + return -1; + + MP_gethex(q1, Q1024, len); + assert(len); + + if (isValidPublicKey(pubkeyBn, dh->p, q1)) + res = MDH_compute_key(secret, nPubkeyLen, pubkeyBn, dh); + else + res = -1; + + MP_free(q1); + MP_free(pubkeyBn); + + return res; +} diff --git a/LFLiveKit/Vendor/pili-librtmp/dhgroups.h b/LFLiveKit/Vendor/pili-librtmp/dhgroups.h new file mode 100644 index 00000000..f3d0293f --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/dhgroups.h @@ -0,0 +1,198 @@ +/* librtmp - Diffie-Hellmann Key Exchange + * Copyright (C) 2009 Andrej Stepanchuk + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. 
+ * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +/* from RFC 3526, see http://www.ietf.org/rfc/rfc3526.txt */ + +/* 2^768 - 2 ^704 - 1 + 2^64 * { [2^638 pi] + 149686 } */ +#define P768 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A63A3620FFFFFFFFFFFFFFFF" + +/* 2^1024 - 2^960 - 1 + 2^64 * { [2^894 pi] + 129093 } */ +#define P1024 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381" \ + "FFFFFFFFFFFFFFFF" + +/* Group morder largest prime factor: */ +#define Q1024 \ + "7FFFFFFFFFFFFFFFE487ED5110B4611A62633145C06E0E68" \ + "948127044533E63A0105DF531D89CD9128A5043CC71A026E" \ + "F7CA8CD9E69D218D98158536F92F8A1BA7F09AB6B6A8E122" \ + "F242DABB312F3F637A262174D31BF6B585FFAE5B7A035BF6" \ + "F71C35FDAD44CFD2D74F9208BE258FF324943328F67329C0" \ + "FFFFFFFFFFFFFFFF" + +/* 2^1536 - 2^1472 - 1 + 2^64 * { [2^1406 pi] + 741804 } */ +#define P1536 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ + 
"C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF" + +/* 2^2048 - 2^1984 - 1 + 2^64 * { [2^1918 pi] + 124476 } */ +#define P2048 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ + "15728E5A8AACAA68FFFFFFFFFFFFFFFF" + +/* 2^3072 - 2^3008 - 1 + 2^64 * { [2^2942 pi] + 1690314 } */ +#define P3072 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" \ + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" \ + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" \ + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" \ + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" \ + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF" + +/* 2^4096 - 2^4032 - 1 + 2^64 * { [2^3966 pi] + 240904 } */ +#define P4096 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + 
"EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" \ + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" \ + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" \ + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" \ + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" \ + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" \ + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" \ + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" \ + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" \ + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" \ + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" \ + "FFFFFFFFFFFFFFFF" + +/* 2^6144 - 2^6080 - 1 + 2^64 * { [2^6014 pi] + 929484 } */ +#define P6144 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" \ + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" \ + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" \ + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" \ + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" \ + 
"43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" \ + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" \ + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" \ + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" \ + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" \ + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" \ + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" \ + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" \ + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" \ + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" \ + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" \ + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" \ + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" \ + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" \ + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" \ + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" \ + "12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF" + +/* 2^8192 - 2^8128 - 1 + 2^64 * { [2^8062 pi] + 4743158 } */ +#define P8192 \ + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \ + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \ + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" \ + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" \ + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" \ + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" \ + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" \ + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" \ + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" \ + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" \ + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" \ + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" \ + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" \ + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" \ + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" \ + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" 
\ + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" \ + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" \ + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" \ + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" \ + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" \ + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" \ + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" \ + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" \ + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" \ + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" \ + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" \ + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" \ + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" \ + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" \ + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" \ + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" \ + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" \ + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" \ + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" \ + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" \ + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" \ + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" \ + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" \ + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" \ + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF" diff --git a/LFLiveKit/Vendor/pili-librtmp/error.c b/LFLiveKit/Vendor/pili-librtmp/error.c new file mode 100644 index 00000000..0b4cafdf --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/error.c @@ -0,0 +1,20 @@ +#include "error.h" +#include +#include + +void RTMPError_Alloc(RTMPError *error, size_t msg_size) { + RTMPError_Free(error); + + error->code = 0; + error->message = (char *)malloc(msg_size + 1); + memset(error->message, 0, msg_size); +} + +void RTMPError_Free(RTMPError *error) { + if (error) { + if (error->message) { + free(error->message); + error->message = NULL; + } + } +} diff --git 
a/LFLiveKit/Vendor/pili-librtmp/error.h b/LFLiveKit/Vendor/pili-librtmp/error.h new file mode 100644 index 00000000..4ec31fda --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/error.h @@ -0,0 +1,45 @@ +#ifndef __ERROR_H__ +#define __ERROR_H__ + +#include + +typedef struct RTMPError { + int code; + char *message; +} RTMPError; + +void RTMPError_Alloc(RTMPError *error, size_t msg_size); +void RTMPError_Free(RTMPError *error); + +// error defines +enum { + RTMPErrorUnknow = -1, // "Unknow error" + RTMPErrorUnknowOption = -999, // "Unknown option %s" + RTMPErrorAccessDNSFailed = -1000, // "Failed to access the DNS. (addr: %s)" + RTMPErrorFailedToConnectSocket = + -1001, // "Failed to connect socket. %d (%s)" + RTMPErrorSocksNegotiationFailed = -1002, // "Socks negotiation failed" + RTMPErrorFailedToCreateSocket = + -1003, // "Failed to create socket. %d (%s)" + RTMPErrorHandshakeFailed = -1004, // "Handshake failed" + RTMPErrorRTMPConnectFailed = -1005, // "RTMP connect failed" + RTMPErrorSendFailed = -1006, // "Send error %d (%s), (%d bytes)" + RTMPErrorServerRequestedClose = -1007, // "RTMP server requested close" + RTMPErrorNetStreamFailed = -1008, // "NetStream failed" + RTMPErrorNetStreamPlayFailed = -1009, // "NetStream play failed" + RTMPErrorNetStreamPlayStreamNotFound = + -1010, // "NetStream play stream not found" + RTMPErrorNetConnectionConnectInvalidApp = + -1011, // "NetConnection connect invalip app" + RTMPErrorSanityFailed = + -1012, // "Sanity failed. 
Trying to send header of type: 0x%02X" + RTMPErrorSocketClosedByPeer = -1013, // "RTMP socket closed by peer" + RTMPErrorRTMPConnectStreamFailed = -1014, // "RTMP connect stream failed" + RTMPErrorSocketTimeout = -1015, // "RTMP socket timeout" + + // SSL errors + RTMPErrorTLSConnectFailed = -1200, // "TLS_Connect failed" + RTMPErrorNoSSLOrTLSSupport = -1201, // "No SSL/TLS support" +}; + +#endif diff --git a/LFLiveKit/Vendor/pili-librtmp/handshake.h b/LFLiveKit/Vendor/pili-librtmp/handshake.h new file mode 100644 index 00000000..f791cf74 --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/handshake.h @@ -0,0 +1,1034 @@ +/* + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * Copyright (C) 2010 + * 2a665470ced7adb7156fcef47f8199a6371c117b8a79e399a2771e0b36384090 + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * http://www.gnu.org/copyleft/lgpl.html + */ + +/* This file is #included in rtmp.c, it is not meant to be compiled alone */ + +#ifdef USE_POLARSSL +#include +#include +#ifndef SHA256_DIGEST_LENGTH +#define SHA256_DIGEST_LENGTH 32 +#endif +#define HMAC_CTX sha2_context +#define HMAC_setup(ctx, key, len) \ + sha2_hmac_starts(&ctx, (unsigned char *)key, len, 0) +#define HMAC_crunch(ctx, buf, len) sha2_hmac_update(&ctx, buf, len) +#define HMAC_finish(ctx, dig, dlen) \ + dlen = SHA256_DIGEST_LENGTH; \ + sha2_hmac_finish(&ctx, dig) + +typedef arc4_context *RC4_handle; +#define RC4_alloc(h) *h = malloc(sizeof(arc4_context)) +#define RC4_setkey(h, l, k) arc4_setup(h, k, l) +#define RC4_encrypt(h, l, d) \ + arc4_crypt(h, l, (unsigned char *)d, (unsigned char *)d) +#define RC4_encrypt2(h, l, s, d) \ + arc4_crypt(h, l, (unsigned char *)s, (unsigned char *)d) +#define RC4_free(h) free(h) + +#elif defined(USE_GNUTLS) +#include +#ifndef SHA256_DIGEST_LENGTH +#define SHA256_DIGEST_LENGTH 32 +#endif +#define HMAC_CTX gcry_md_hd_t +#define HMAC_setup(ctx, key, len) \ + gcry_md_open(&ctx, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC); \ + gcry_md_setkey(ctx, key, len) +#define HMAC_crunch(ctx, buf, len) gcry_md_write(ctx, buf, len) +#define HMAC_finish(ctx, dig, dlen) \ + dlen = SHA256_DIGEST_LENGTH; \ + memcpy(dig, gcry_md_read(ctx, 0), dlen); \ + gcry_md_close(ctx) + +typedef gcry_cipher_hd_t RC4_handle; +#define RC4_alloc(h) \ + gcry_cipher_open(h, GCRY_CIPHER_ARCFOUR, GCRY_CIPHER_MODE_STREAM, 0) +#define RC4_setkey(h, l, k) gcry_cipher_setkey(h, k, l) +#define RC4_encrypt(h, l, d) gcry_cipher_encrypt(h, (void *)d, l, NULL, 0) +#define RC4_encrypt2(h, l, s, d) \ + gcry_cipher_encrypt(h, (void *)d, l, (void *)s, l) +#define RC4_free(h) gcry_cipher_close(h) + +#else /* USE_OPENSSL */ +#include +#include +#include +#if OPENSSL_VERSION_NUMBER < 0x0090800 || !defined(SHA256_DIGEST_LENGTH) +#error Your OpenSSL is too old, need 0.9.8 or newer with SHA256 +#endif +#define HMAC_setup(ctx, key, 
len) \ + HMAC_CTX_init(&ctx); \ + HMAC_Init_ex(&ctx, key, len, EVP_sha256(), 0) +#define HMAC_crunch(ctx, buf, len) HMAC_Update(&ctx, buf, len) +#define HMAC_finish(ctx, dig, dlen) \ + HMAC_Final(&ctx, dig, &dlen); \ + HMAC_CTX_cleanup(&ctx) + +typedef RC4_KEY *RC4_handle; +#define RC4_alloc(h) *h = malloc(sizeof(RC4_KEY)) +#define RC4_setkey(h, l, k) RC4_set_key(h, l, k) +#define RC4_encrypt(h, l, d) RC4(h, l, (uint8_t *)d, (uint8_t *)d) +#define RC4_encrypt2(h, l, s, d) RC4(h, l, (uint8_t *)s, (uint8_t *)d) +#define RC4_free(h) free(h) +#endif + +#define FP10 + +#include "dh.h" + +static const uint8_t GenuineFMSKey[] = { + 0x47, 0x65, 0x6e, 0x75, 0x69, 0x6e, 0x65, 0x20, 0x41, 0x64, 0x6f, + 0x62, 0x65, 0x20, 0x46, 0x6c, 0x61, 0x73, 0x68, 0x20, 0x4d, 0x65, + 0x64, 0x69, 0x61, 0x20, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, + 0x30, 0x30, 0x31, /* Genuine Adobe Flash Media Server 001 */ + + 0xf0, 0xee, 0xc2, 0x4a, 0x80, 0x68, 0xbe, 0xe8, 0x2e, 0x00, 0xd0, + 0xd1, 0x02, 0x9e, 0x7e, 0x57, 0x6e, 0xec, 0x5d, 0x2d, 0x29, 0x80, + 0x6f, 0xab, 0x93, 0xb8, 0xe6, 0x36, 0xcf, 0xeb, 0x31, 0xae}; /* 68 */ + +static const uint8_t GenuineFPKey[] = { + 0x47, 0x65, 0x6E, 0x75, 0x69, 0x6E, 0x65, 0x20, 0x41, 0x64, 0x6F, + 0x62, 0x65, 0x20, 0x46, 0x6C, 0x61, 0x73, 0x68, 0x20, 0x50, 0x6C, + 0x61, 0x79, 0x65, 0x72, 0x20, 0x30, 0x30, 0x31, /* Genuine Adobe Flash + Player 001 */ + 0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, + 0xD1, 0x02, 0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, + 0x6F, 0xAB, 0x93, 0xB8, 0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE}; /* 62 */ + +static void InitRC4Encryption(uint8_t *secretKey, uint8_t *pubKeyIn, + uint8_t *pubKeyOut, RC4_handle *rc4keyIn, + RC4_handle *rc4keyOut) { + uint8_t digest[SHA256_DIGEST_LENGTH]; + unsigned int digestLen = 0; + HMAC_CTX ctx; + + RC4_alloc(rc4keyIn); + RC4_alloc(rc4keyOut); + + HMAC_setup(ctx, secretKey, 128); + HMAC_crunch(ctx, pubKeyIn, 128); + HMAC_finish(ctx, digest, digestLen); + + 
RTMP_Log(RTMP_LOGDEBUG, "RC4 Out Key: "); + RTMP_LogHex(RTMP_LOGDEBUG, digest, 16); + + RC4_setkey(*rc4keyOut, 16, digest); + + HMAC_setup(ctx, secretKey, 128); + HMAC_crunch(ctx, pubKeyOut, 128); + HMAC_finish(ctx, digest, digestLen); + + RTMP_Log(RTMP_LOGDEBUG, "RC4 In Key: "); + RTMP_LogHex(RTMP_LOGDEBUG, digest, 16); + + RC4_setkey(*rc4keyIn, 16, digest); +} + +typedef unsigned int(getoff)(uint8_t *buf, unsigned int len); + +static unsigned int GetDHOffset2(uint8_t *handshake, unsigned int len) { + unsigned int offset = 0; + uint8_t *ptr = handshake + 768; + unsigned int res; + + assert(RTMP_SIG_SIZE <= len); + + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + + res = (offset % 632) + 8; + + if (res + 128 > 767) { + RTMP_Log(RTMP_LOGERROR, + "%s: Couldn't calculate correct DH offset (got %d), exiting!", + __FUNCTION__, res); + exit(1); + } + return res; +} + +static unsigned int GetDigestOffset2(uint8_t *handshake, unsigned int len) { + unsigned int offset = 0; + uint8_t *ptr = handshake + 772; + unsigned int res; + + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + + res = (offset % 728) + 776; + + if (res + 32 > 1535) { + RTMP_Log(RTMP_LOGERROR, + "%s: Couldn't calculate correct digest offset (got %d), exiting", + __FUNCTION__, res); + exit(1); + } + return res; +} + +static unsigned int GetDHOffset1(uint8_t *handshake, unsigned int len) { + unsigned int offset = 0; + uint8_t *ptr = handshake + 1532; + unsigned int res; + + assert(RTMP_SIG_SIZE <= len); + + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + + res = (offset % 632) + 772; + + if (res + 128 > 1531) { + RTMP_Log(RTMP_LOGERROR, + "%s: Couldn't calculate DH offset (got %d), exiting!", + __FUNCTION__, res); + exit(1); + } + + return res; +} + +static unsigned int GetDigestOffset1(uint8_t *handshake, unsigned int len) { + unsigned 
int offset = 0; + uint8_t *ptr = handshake + 8; + unsigned int res; + + assert(12 <= len); + + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + ptr++; + offset += (*ptr); + + res = (offset % 728) + 12; + + if (res + 32 > 771) { + RTMP_Log(RTMP_LOGERROR, + "%s: Couldn't calculate digest offset (got %d), exiting!", + __FUNCTION__, res); + exit(1); + } + + return res; +} + +static getoff *digoff[] = {GetDigestOffset1, GetDigestOffset2}; +static getoff *dhoff[] = {GetDHOffset1, GetDHOffset2}; + +static void HMACsha256(const uint8_t *message, size_t messageLen, + const uint8_t *key, size_t keylen, uint8_t *digest) { + unsigned int digestLen; + HMAC_CTX ctx; + + HMAC_setup(ctx, key, keylen); + HMAC_crunch(ctx, message, messageLen); + HMAC_finish(ctx, digest, digestLen); + + assert(digestLen == 32); +} + +static void CalculateDigest(unsigned int digestPos, uint8_t *handshakeMessage, + const uint8_t *key, size_t keyLen, + uint8_t *digest) { + const int messageLen = RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH; + uint8_t message[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH]; + + memcpy(message, handshakeMessage, digestPos); + memcpy(message + digestPos, + &handshakeMessage[digestPos + SHA256_DIGEST_LENGTH], + messageLen - digestPos); + + HMACsha256(message, messageLen, key, keyLen, digest); +} + +static int VerifyDigest(unsigned int digestPos, uint8_t *handshakeMessage, + const uint8_t *key, size_t keyLen) { + uint8_t calcDigest[SHA256_DIGEST_LENGTH]; + + CalculateDigest(digestPos, handshakeMessage, key, keyLen, calcDigest); + + return memcmp(&handshakeMessage[digestPos], calcDigest, + SHA256_DIGEST_LENGTH) == 0; +} + +/* handshake + * + * Type = [1 bytes] plain: 0x03, encrypted: 0x06, 0x08, 0x09 + * -------------------------------------------------------------------- [1536 + * bytes] + * Uptime = [4 bytes] big endian unsigned number, uptime + * Version = [4 bytes] each byte represents a version number, e.g. + * 9.0.124.0 + * ... 
+ * + */ + +static const uint32_t rtmpe8_keys[16][4] = { + {0xbff034b2, 0x11d9081f, 0xccdfb795, 0x748de732}, + {0x086a5eb6, 0x1743090e, 0x6ef05ab8, 0xfe5a39e2}, + {0x7b10956f, 0x76ce0521, 0x2388a73a, 0x440149a1}, + {0xa943f317, 0xebf11bb2, 0xa691a5ee, 0x17f36339}, + {0x7a30e00a, 0xb529e22c, 0xa087aea5, 0xc0cb79ac}, + {0xbdce0c23, 0x2febdeff, 0x1cfaae16, 0x1123239d}, + {0x55dd3f7b, 0x77e7e62e, 0x9bb8c499, 0xc9481ee4}, + {0x407bb6b4, 0x71e89136, 0xa7aebf55, 0xca33b839}, + {0xfcf6bdc3, 0xb63c3697, 0x7ce4f825, 0x04d959b2}, + {0x28e091fd, 0x41954c4c, 0x7fb7db00, 0xe3a066f8}, + {0x57845b76, 0x4f251b03, 0x46d45bcd, 0xa2c30d29}, + {0x0acceef8, 0xda55b546, 0x03473452, 0x5863713b}, + {0xb82075dc, 0xa75f1fee, 0xd84268e8, 0xa72a44cc}, + {0x07cf6e9e, 0xa16d7b25, 0x9fa7ae6c, 0xd92f5629}, + {0xfeb1eae4, 0x8c8c3ce1, 0x4e0064a7, 0x6a387c2a}, + {0x893a9427, 0xcc3013a2, 0xf106385b, 0xa829f927}}; + +/* RTMPE type 8 uses XTEA on the regular signature + * http://en.wikipedia.org/wiki/XTEA + */ +static void rtmpe8_sig(uint8_t *in, uint8_t *out, int keyid) { + unsigned int i, num_rounds = 32; + uint32_t v0, v1, sum = 0, delta = 0x9E3779B9; + uint32_t const *k; + + v0 = in[0] | (in[1] << 8) | (in[2] << 16) | (in[3] << 24); + v1 = in[4] | (in[5] << 8) | (in[6] << 16) | (in[7] << 24); + k = rtmpe8_keys[keyid]; + + for (i = 0; i < num_rounds; i++) { + v0 += (((v1 << 4) ^ (v1 >> 5)) + v1) ^ (sum + k[sum & 3]); + sum += delta; + v1 += (((v0 << 4) ^ (v0 >> 5)) + v0) ^ (sum + k[(sum >> 11) & 3]); + } + + out[0] = v0; + v0 >>= 8; + out[1] = v0; + v0 >>= 8; + out[2] = v0; + v0 >>= 8; + out[3] = v0; + + out[4] = v1; + v1 >>= 8; + out[5] = v1; + v1 >>= 8; + out[6] = v1; + v1 >>= 8; + out[7] = v1; +} + +static int HandShake(RTMP *r, int FP9HandShake) { + int i, offalg = 0; + int dhposClient = 0; + int digestPosClient = 0; + int encrypted = r->Link.protocol & RTMP_FEATURE_ENC; + + RC4_handle keyIn = 0; + RC4_handle keyOut = 0; + + int32_t *ip; + uint32_t uptime; + + uint8_t clientbuf[RTMP_SIG_SIZE + 
4], *clientsig = clientbuf + 4; + uint8_t serversig[RTMP_SIG_SIZE], client2[RTMP_SIG_SIZE], *reply; + uint8_t type; + getoff *getdh = NULL, *getdig = NULL; + + if (encrypted || r->Link.SWFSize) + FP9HandShake = TRUE; + else + FP9HandShake = FALSE; + + r->Link.rc4keyIn = r->Link.rc4keyOut = 0; + + if (encrypted) { + clientsig[-1] = 0x06; /* 0x08 is RTMPE as well */ + offalg = 1; + } else + clientsig[-1] = 0x03; + + uptime = htonl(RTMP_GetTime()); + memcpy(clientsig, &uptime, 4); + + if (FP9HandShake) { + /* set version to at least 9.0.115.0 */ + if (encrypted) { + clientsig[4] = 128; + clientsig[6] = 3; + } else { + clientsig[4] = 10; + clientsig[6] = 45; + } + clientsig[5] = 0; + clientsig[7] = 2; + + RTMP_Log(RTMP_LOGDEBUG, "%s: Client type: %02X", __FUNCTION__, + clientsig[-1]); + getdig = digoff[offalg]; + getdh = dhoff[offalg]; + } else { + memset(&clientsig[4], 0, 4); + } + +/* generate random data */ +#ifdef _DEBUG + memset(clientsig + 8, 0, RTMP_SIG_SIZE - 8); +#else + ip = (int32_t *)(clientsig + 8); + for (i = 2; i < RTMP_SIG_SIZE / 4; i++) + *ip++ = rand(); +#endif + + /* set handshake digest */ + if (FP9HandShake) { + if (encrypted) { + /* generate Diffie-Hellmann parameters */ + r->Link.dh = DHInit(1024); + if (!r->Link.dh) { + RTMP_Log(RTMP_LOGERROR, "%s: Couldn't initialize Diffie-Hellmann!", + __FUNCTION__); + return FALSE; + } + + dhposClient = getdh(clientsig, RTMP_SIG_SIZE); + RTMP_Log(RTMP_LOGDEBUG, "%s: DH pubkey position: %d", __FUNCTION__, + dhposClient); + + if (!DHGenerateKey(r->Link.dh)) { + RTMP_Log(RTMP_LOGERROR, + "%s: Couldn't generate Diffie-Hellmann public key!", + __FUNCTION__); + return FALSE; + } + + if (!DHGetPublicKey(r->Link.dh, &clientsig[dhposClient], 128)) { + RTMP_Log(RTMP_LOGERROR, "%s: Couldn't write public key!", __FUNCTION__); + return FALSE; + } + } + + digestPosClient = + getdig(clientsig, RTMP_SIG_SIZE); /* reuse this value in verification */ + RTMP_Log(RTMP_LOGDEBUG, "%s: Client digest offset: %d", __FUNCTION__, + 
digestPosClient); + + CalculateDigest(digestPosClient, clientsig, GenuineFPKey, 30, + &clientsig[digestPosClient]); + + RTMP_Log(RTMP_LOGDEBUG, "%s: Initial client digest: ", __FUNCTION__); + RTMP_LogHex(RTMP_LOGDEBUG, clientsig + digestPosClient, + SHA256_DIGEST_LENGTH); + } + +#ifdef _DEBUG + RTMP_Log(RTMP_LOGDEBUG, "Clientsig: "); + RTMP_LogHex(RTMP_LOGDEBUG, clientsig, RTMP_SIG_SIZE); +#endif + + if (!WriteN(r, (char *)clientsig - 1, RTMP_SIG_SIZE + 1)) + return FALSE; + + if (ReadN(r, (char *)&type, 1) != 1) /* 0x03 or 0x06 */ + return FALSE; + + RTMP_Log(RTMP_LOGDEBUG, "%s: Type Answer : %02X", __FUNCTION__, type); + + if (type != clientsig[-1]) + RTMP_Log(RTMP_LOGWARNING, + "%s: Type mismatch: client sent %d, server answered %d", + __FUNCTION__, clientsig[-1], type); + + if (ReadN(r, (char *)serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) + return FALSE; + + /* decode server response */ + memcpy(&uptime, serversig, 4); + uptime = ntohl(uptime); + + RTMP_Log(RTMP_LOGDEBUG, "%s: Server Uptime : %d", __FUNCTION__, uptime); + RTMP_Log(RTMP_LOGDEBUG, "%s: FMS Version : %d.%d.%d.%d", __FUNCTION__, + serversig[4], serversig[5], serversig[6], serversig[7]); + + if (FP9HandShake && type == 3 && !serversig[4]) + FP9HandShake = FALSE; + +#ifdef _DEBUG + RTMP_Log(RTMP_LOGDEBUG, "Server signature:"); + RTMP_LogHex(RTMP_LOGDEBUG, serversig, RTMP_SIG_SIZE); +#endif + + if (FP9HandShake) { + uint8_t digestResp[SHA256_DIGEST_LENGTH]; + uint8_t *signatureResp = NULL; + + /* we have to use this signature now to find the correct algorithms for + * getting the digest and DH positions */ + int digestPosServer = getdig(serversig, RTMP_SIG_SIZE); + + if (!VerifyDigest(digestPosServer, serversig, GenuineFMSKey, 36)) { + RTMP_Log(RTMP_LOGWARNING, "Trying different position for server digest!"); + offalg ^= 1; + getdig = digoff[offalg]; + getdh = dhoff[offalg]; + digestPosServer = getdig(serversig, RTMP_SIG_SIZE); + + if (!VerifyDigest(digestPosServer, serversig, GenuineFMSKey, 36)) { + 
RTMP_Log( + RTMP_LOGERROR, + "Couldn't verify the server digest"); /* continuing anyway will + probably fail */ + return FALSE; + } + } + + /* generate SWFVerification token (SHA256 HMAC hash of decompressed SWF, key + * are the last 32 bytes of the server handshake) */ + if (r->Link.SWFSize) { + const char swfVerify[] = {0x01, 0x01}; + char *vend = r->Link.SWFVerificationResponse + + sizeof(r->Link.SWFVerificationResponse); + + memcpy(r->Link.SWFVerificationResponse, swfVerify, 2); + AMF_EncodeInt32(&r->Link.SWFVerificationResponse[2], vend, + r->Link.SWFSize); + AMF_EncodeInt32(&r->Link.SWFVerificationResponse[6], vend, + r->Link.SWFSize); + HMACsha256(r->Link.SWFHash, SHA256_DIGEST_LENGTH, + &serversig[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH], + SHA256_DIGEST_LENGTH, + (uint8_t *)&r->Link.SWFVerificationResponse[10]); + } + + /* do Diffie-Hellmann Key exchange for encrypted RTMP */ + if (encrypted) { + /* compute secret key */ + uint8_t secretKey[128] = {0}; + int len, dhposServer; + + dhposServer = getdh(serversig, RTMP_SIG_SIZE); + RTMP_Log(RTMP_LOGDEBUG, "%s: Server DH public key offset: %d", + __FUNCTION__, dhposServer); + len = DHComputeSharedSecretKey(r->Link.dh, &serversig[dhposServer], 128, + secretKey); + if (len < 0) { + RTMP_Log(RTMP_LOGDEBUG, "%s: Wrong secret key position!", __FUNCTION__); + return FALSE; + } + + RTMP_Log(RTMP_LOGDEBUG, "%s: Secret key: ", __FUNCTION__); + RTMP_LogHex(RTMP_LOGDEBUG, secretKey, 128); + + InitRC4Encryption(secretKey, (uint8_t *)&serversig[dhposServer], + (uint8_t *)&clientsig[dhposClient], &keyIn, &keyOut); + } + + reply = client2; +#ifdef _DEBUG + memset(reply, 0xff, RTMP_SIG_SIZE); +#else + ip = (int32_t *)reply; + for (i = 0; i < RTMP_SIG_SIZE / 4; i++) + *ip++ = rand(); +#endif + /* calculate response now */ + signatureResp = reply + RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH; + + HMACsha256(&serversig[digestPosServer], SHA256_DIGEST_LENGTH, GenuineFPKey, + sizeof(GenuineFPKey), digestResp); + HMACsha256(reply, 
RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH, digestResp, + SHA256_DIGEST_LENGTH, signatureResp); + + /* some info output */ + RTMP_Log(RTMP_LOGDEBUG, + "%s: Calculated digest key from secure key and server digest: ", + __FUNCTION__); + RTMP_LogHex(RTMP_LOGDEBUG, digestResp, SHA256_DIGEST_LENGTH); + +#ifdef FP10 + if (type == 8) { + uint8_t *dptr = digestResp; + uint8_t *sig = signatureResp; + /* encrypt signatureResp */ + for (i = 0; i < SHA256_DIGEST_LENGTH; i += 8) + rtmpe8_sig(sig + i, sig + i, dptr[i] % 15); + } +#if 0 + else if (type == 9)) + { + uint8_t *dptr = digestResp; + uint8_t *sig = signatureResp; + /* encrypt signatureResp */ + for (i=0; iLink.rc4keyIn = keyIn; + r->Link.rc4keyOut = keyOut; + + /* update the keystreams */ + if (r->Link.rc4keyIn) { + RC4_encrypt(r->Link.rc4keyIn, RTMP_SIG_SIZE, (uint8_t *)buff); + } + + if (r->Link.rc4keyOut) { + RC4_encrypt(r->Link.rc4keyOut, RTMP_SIG_SIZE, (uint8_t *)buff); + } + } + } else { + if (memcmp(serversig, clientsig, RTMP_SIG_SIZE) != 0) { + RTMP_Log(RTMP_LOGWARNING, "%s: client signature does not match!", + __FUNCTION__); + } + } + + RTMP_Log(RTMP_LOGDEBUG, "%s: Handshaking finished....", __FUNCTION__); + return TRUE; +} + +static int SHandShake(RTMP *r) { + int i, offalg = 0; + int dhposServer = 0; + int digestPosServer = 0; + RC4_handle keyIn = 0; + RC4_handle keyOut = 0; + int FP9HandShake = FALSE; + int encrypted; + int32_t *ip; + + uint8_t clientsig[RTMP_SIG_SIZE]; + uint8_t serverbuf[RTMP_SIG_SIZE + 4], *serversig = serverbuf + 4; + uint8_t type; + uint32_t uptime; + getoff *getdh = NULL, *getdig = NULL; + + if (ReadN(r, (char *)&type, 1) != 1) /* 0x03 or 0x06 */ + return FALSE; + + if (ReadN(r, (char *)clientsig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) + return FALSE; + + RTMP_Log(RTMP_LOGDEBUG, "%s: Type Requested : %02X", __FUNCTION__, type); + RTMP_LogHex(RTMP_LOGDEBUG2, clientsig, RTMP_SIG_SIZE); + + if (type == 3) { + encrypted = FALSE; + } else if (type == 6 || type == 8) { + offalg = 1; + encrypted = TRUE; 
+ FP9HandShake = TRUE; + r->Link.protocol |= RTMP_FEATURE_ENC; + /* use FP10 if client is capable */ + if (clientsig[4] == 128) + type = 8; + } else { + RTMP_Log(RTMP_LOGERROR, "%s: Unknown version %02x", __FUNCTION__, type); + return FALSE; + } + + if (!FP9HandShake && clientsig[4]) + FP9HandShake = TRUE; + + serversig[-1] = type; + + r->Link.rc4keyIn = r->Link.rc4keyOut = 0; + + uptime = htonl(RTMP_GetTime()); + memcpy(serversig, &uptime, 4); + + if (FP9HandShake) { + /* Server version */ + serversig[4] = 3; + serversig[5] = 5; + serversig[6] = 1; + serversig[7] = 1; + + getdig = digoff[offalg]; + getdh = dhoff[offalg]; + } else { + memset(&serversig[4], 0, 4); + } + +/* generate random data */ +#ifdef _DEBUG + memset(serversig + 8, 0, RTMP_SIG_SIZE - 8); +#else + ip = (int32_t *)(serversig + 8); + for (i = 2; i < RTMP_SIG_SIZE / 4; i++) + *ip++ = rand(); +#endif + + /* set handshake digest */ + if (FP9HandShake) { + if (encrypted) { + /* generate Diffie-Hellmann parameters */ + r->Link.dh = DHInit(1024); + if (!r->Link.dh) { + RTMP_Log(RTMP_LOGERROR, "%s: Couldn't initialize Diffie-Hellmann!", + __FUNCTION__); + return FALSE; + } + + dhposServer = getdh(serversig, RTMP_SIG_SIZE); + RTMP_Log(RTMP_LOGDEBUG, "%s: DH pubkey position: %d", __FUNCTION__, + dhposServer); + + if (!DHGenerateKey(r->Link.dh)) { + RTMP_Log(RTMP_LOGERROR, + "%s: Couldn't generate Diffie-Hellmann public key!", + __FUNCTION__); + return FALSE; + } + + if (!DHGetPublicKey(r->Link.dh, (uint8_t *)&serversig[dhposServer], + 128)) { + RTMP_Log(RTMP_LOGERROR, "%s: Couldn't write public key!", __FUNCTION__); + return FALSE; + } + } + + digestPosServer = + getdig(serversig, RTMP_SIG_SIZE); /* reuse this value in verification */ + RTMP_Log(RTMP_LOGDEBUG, "%s: Server digest offset: %d", __FUNCTION__, + digestPosServer); + + CalculateDigest(digestPosServer, serversig, GenuineFMSKey, 36, + &serversig[digestPosServer]); + + RTMP_Log(RTMP_LOGDEBUG, "%s: Initial server digest: ", __FUNCTION__); + 
RTMP_LogHex(RTMP_LOGDEBUG, serversig + digestPosServer, + SHA256_DIGEST_LENGTH); + } + + RTMP_Log(RTMP_LOGDEBUG2, "Serversig: "); + RTMP_LogHex(RTMP_LOGDEBUG2, serversig, RTMP_SIG_SIZE); + + if (!WriteN(r, (char *)serversig - 1, RTMP_SIG_SIZE + 1)) + return FALSE; + + /* decode client response */ + memcpy(&uptime, clientsig, 4); + uptime = ntohl(uptime); + + RTMP_Log(RTMP_LOGDEBUG, "%s: Client Uptime : %d", __FUNCTION__, uptime); + RTMP_Log(RTMP_LOGDEBUG, "%s: Player Version: %d.%d.%d.%d", __FUNCTION__, + clientsig[4], clientsig[5], clientsig[6], clientsig[7]); + + if (FP9HandShake) { + uint8_t digestResp[SHA256_DIGEST_LENGTH]; + uint8_t *signatureResp = NULL; + + /* we have to use this signature now to find the correct algorithms for + * getting the digest and DH positions */ + int digestPosClient = getdig(clientsig, RTMP_SIG_SIZE); + + if (!VerifyDigest(digestPosClient, clientsig, GenuineFPKey, 30)) { + RTMP_Log(RTMP_LOGWARNING, "Trying different position for client digest!"); + offalg ^= 1; + getdig = digoff[offalg]; + getdh = dhoff[offalg]; + + digestPosClient = getdig(clientsig, RTMP_SIG_SIZE); + + if (!VerifyDigest(digestPosClient, clientsig, GenuineFPKey, 30)) { + RTMP_Log( + RTMP_LOGERROR, + "Couldn't verify the client digest"); /* continuing anyway will + probably fail */ + return FALSE; + } + } + + /* generate SWFVerification token (SHA256 HMAC hash of decompressed SWF, key + * are the last 32 bytes of the server handshake) */ + if (r->Link.SWFSize) { + const char swfVerify[] = {0x01, 0x01}; + char *vend = r->Link.SWFVerificationResponse + + sizeof(r->Link.SWFVerificationResponse); + + memcpy(r->Link.SWFVerificationResponse, swfVerify, 2); + AMF_EncodeInt32(&r->Link.SWFVerificationResponse[2], vend, + r->Link.SWFSize); + AMF_EncodeInt32(&r->Link.SWFVerificationResponse[6], vend, + r->Link.SWFSize); + HMACsha256(r->Link.SWFHash, SHA256_DIGEST_LENGTH, + &serversig[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH], + SHA256_DIGEST_LENGTH, + (uint8_t 
*)&r->Link.SWFVerificationResponse[10]); + } + + /* do Diffie-Hellmann Key exchange for encrypted RTMP */ + if (encrypted) { + int dhposClient, len; + /* compute secret key */ + uint8_t secretKey[128] = {0}; + + dhposClient = getdh(clientsig, RTMP_SIG_SIZE); + RTMP_Log(RTMP_LOGDEBUG, "%s: Client DH public key offset: %d", + __FUNCTION__, dhposClient); + len = DHComputeSharedSecretKey( + r->Link.dh, (uint8_t *)&clientsig[dhposClient], 128, secretKey); + if (len < 0) { + RTMP_Log(RTMP_LOGDEBUG, "%s: Wrong secret key position!", __FUNCTION__); + return FALSE; + } + + RTMP_Log(RTMP_LOGDEBUG, "%s: Secret key: ", __FUNCTION__); + RTMP_LogHex(RTMP_LOGDEBUG, secretKey, 128); + + InitRC4Encryption(secretKey, (uint8_t *)&clientsig[dhposClient], + (uint8_t *)&serversig[dhposServer], &keyIn, &keyOut); + } + + /* calculate response now */ + signatureResp = clientsig + RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH; + + HMACsha256(&clientsig[digestPosClient], SHA256_DIGEST_LENGTH, GenuineFMSKey, + sizeof(GenuineFMSKey), digestResp); + HMACsha256(clientsig, RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH, digestResp, + SHA256_DIGEST_LENGTH, signatureResp); +#ifdef FP10 + if (type == 8) { + uint8_t *dptr = digestResp; + uint8_t *sig = signatureResp; + /* encrypt signatureResp */ + for (i = 0; i < SHA256_DIGEST_LENGTH; i += 8) + rtmpe8_sig(sig + i, sig + i, dptr[i] % 15); + } +#if 0 + else if (type == 9)) + { + uint8_t *dptr = digestResp; + uint8_t *sig = signatureResp; + /* encrypt signatureResp */ + for (i=0; iLink.rc4keyIn = keyIn; + r->Link.rc4keyOut = keyOut; + + /* update the keystreams */ + if (r->Link.rc4keyIn) { + RC4_encrypt(r->Link.rc4keyIn, RTMP_SIG_SIZE, (uint8_t *)buff); + } + + if (r->Link.rc4keyOut) { + RC4_encrypt(r->Link.rc4keyOut, RTMP_SIG_SIZE, (uint8_t *)buff); + } + } + } else { + if (memcmp(serversig, clientsig, RTMP_SIG_SIZE) != 0) { + RTMP_Log(RTMP_LOGWARNING, "%s: client signature does not match!", + __FUNCTION__); + } + } + + RTMP_Log(RTMP_LOGDEBUG, "%s: Handshaking 
finished....", __FUNCTION__); + return TRUE; +} diff --git a/LFLiveKit/Vendor/pili-librtmp/hashswf.c b/LFLiveKit/Vendor/pili-librtmp/hashswf.c new file mode 100644 index 00000000..88d20c7f --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/hashswf.c @@ -0,0 +1,626 @@ +/* + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * http://www.gnu.org/copyleft/lgpl.html + */ + +#include +#include +#include +#include +#include + +#include "http.h" +#include "log.h" +#include "rtmp_sys.h" + +#ifdef CRYPTO +#ifdef USE_POLARSSL +#include +#ifndef SHA256_DIGEST_LENGTH +#define SHA256_DIGEST_LENGTH 32 +#endif +#define HMAC_CTX sha2_context +#define HMAC_setup(ctx, key, len) sha2_hmac_starts(&ctx, (unsigned char *)key, len, 0) +#define HMAC_crunch(ctx, buf, len) sha2_hmac_update(&ctx, buf, len) +#define HMAC_finish(ctx, dig, dlen) \ + dlen = SHA256_DIGEST_LENGTH; \ + sha2_hmac_finish(&ctx, dig) +#define HMAC_close(ctx) +#elif defined(USE_GNUTLS) +#include +#include +#ifndef SHA256_DIGEST_LENGTH +#define SHA256_DIGEST_LENGTH 32 +#endif +#define HMAC_CTX gcry_md_hd_t +#define HMAC_setup(ctx, key, len) \ + gcry_md_open(&ctx, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC); \ + gcry_md_setkey(ctx, key, len) +#define HMAC_crunch(ctx, buf, len) gcry_md_write(ctx, buf, len) +#define HMAC_finish(ctx, dig, dlen) \ + dlen = SHA256_DIGEST_LENGTH; \ + memcpy(dig, gcry_md_read(ctx, 0), dlen) +#define HMAC_close(ctx) gcry_md_close(ctx) +#else /* USE_OPENSSL */ +#include +#include +#include +#include +#define HMAC_setup(ctx, key, len) \ + HMAC_CTX_init(&ctx); \ + HMAC_Init_ex(&ctx, (unsigned char *)key, len, EVP_sha256(), 0) +#define HMAC_crunch(ctx, buf, len) HMAC_Update(&ctx, (unsigned char *)buf, len) +#define HMAC_finish(ctx, dig, dlen) HMAC_Final(&ctx, (unsigned char *)dig, &dlen); +#define HMAC_close(ctx) HMAC_CTX_cleanup(&ctx) +#endif + +extern void RTMP_TLS_Init(); +extern TLS_CTX RTMP_TLS_ctx; + +#endif /* CRYPTO */ + +#include + +#define AGENT "Mozilla/5.0" + +HTTPResult + HTTP_get(struct HTTP_ctx *http, const char *url, HTTP_read_callback *cb) { + char *host, *path; + char *p1, *p2; + char hbuf[256]; + int port = 80; +#ifdef CRYPTO + int ssl = 0; +#endif + int hlen, flen = 0; + int rc, i; + int len_known; + HTTPResult ret = HTTPRES_OK; + // struct sockaddr_in sa; + PILI_RTMPSockBuf sb = {0}; + + http->status = 
-1; + + // memset(&sa, 0, sizeof(struct sockaddr_in)); + // sa.sin_family = AF_INET; + + /* we only handle http here */ + if (strncasecmp(url, "http", 4)) + return HTTPRES_BAD_REQUEST; + + if (url[4] == 's') { +#ifdef CRYPTO + ssl = 1; + port = 443; + if (!RTMP_TLS_ctx) + RTMP_TLS_Init(); +#else + return HTTPRES_BAD_REQUEST; +#endif + } + + p1 = strchr(url + 4, ':'); + if (!p1 || strncmp(p1, "://", 3)) + return HTTPRES_BAD_REQUEST; + + host = p1 + 3; + path = strchr(host, '/'); + hlen = path - host; + strncpy(hbuf, host, hlen); + hbuf[hlen] = '\0'; + host = hbuf; + p1 = strrchr(host, ':'); + if (p1) { + *p1++ = '\0'; + port = atoi(p1); + } + + // sa.sin_addr.s_addr = inet_addr(host); + // if (sa.sin_addr.s_addr == INADDR_NONE) + // { + // struct hostent *hp = gethostbyname(host); + // if (!hp || !hp->h_addr) + // return HTTPRES_LOST_CONNECTION; + // sa.sin_addr = *(struct in_addr *)hp->h_addr; + // } + // sa.sin_port = htons(port); + struct addrinfo hints = {0}, *ai, *cur_ai; + char portstr[10]; + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + snprintf(portstr, sizeof(portstr), "%d", port); + ret = getaddrinfo(host, portstr, &hints, &ai); + if (ret != 0) { + return HTTPRES_LOST_CONNECTION; + } + + cur_ai = ai; + + sb.sb_socket = socket(cur_ai->ai_family, + cur_ai->ai_socktype, + cur_ai->ai_protocol); + // sb.sb_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); + if (sb.sb_socket == -1) { + freeaddrinfo(ai); + return HTTPRES_LOST_CONNECTION; + } + i = sprintf(sb.sb_buf, + "GET %s HTTP/1.0\r\nUser-Agent: %s\r\nHost: %s\r\nReferrer: %.*s\r\n", + path, AGENT, host, (int)(path - url + 1), url); + if (http->date[0]) + i += sprintf(sb.sb_buf + i, "If-Modified-Since: %s\r\n", http->date); + i += sprintf(sb.sb_buf + i, "\r\n"); + + if (cur_ai->ai_family == AF_INET6) { + struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)cur_ai->ai_addr; + in6->sin6_port = htons(port); + } + + if (connect(sb.sb_socket, cur_ai->ai_addr, cur_ai->ai_addrlen) < 0) { + ret = 
HTTPRES_LOST_CONNECTION; + goto leave; + } +#ifdef CRYPTO + if (ssl) { +#ifdef NO_SSL + RTMP_Log(RTMP_LOGERROR, "%s, No SSL/TLS support", __FUNCTION__); + ret = HTTPRES_BAD_REQUEST; + goto leave; +#else + TLS_client(RTMP_TLS_ctx, sb.sb_ssl); + TLS_setfd(sb.sb_ssl, sb.sb_socket); + if ((i = TLS_connect(sb.sb_ssl)) < 0) { + RTMP_Log(RTMP_LOGERROR, "%s, TLS_Connect failed", __FUNCTION__); + ret = HTTPRES_LOST_CONNECTION; + goto leave; + } +#endif + } +#endif + PILI_RTMPSockBuf_Send(&sb, sb.sb_buf, i); + +/* set timeout */ +#define HTTP_TIMEOUT 5 + { + SET_RCVTIMEO(tv, HTTP_TIMEOUT); + if (setsockopt(sb.sb_socket, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(tv))) { + RTMP_Log(RTMP_LOGERROR, "%s, Setting socket timeout to %ds failed!", + __FUNCTION__, HTTP_TIMEOUT); + } + } + + sb.sb_size = 0; + sb.sb_timedout = FALSE; + if (PILI_RTMPSockBuf_Fill(&sb) < 1) { + ret = HTTPRES_LOST_CONNECTION; + goto leave; + } + if (strncmp(sb.sb_buf, "HTTP/1", 6)) { + ret = HTTPRES_BAD_REQUEST; + goto leave; + } + + p1 = strchr(sb.sb_buf, ' '); + rc = atoi(p1 + 1); + http->status = rc; + + if (rc >= 300) { + if (rc == 304) { + ret = HTTPRES_OK_NOT_MODIFIED; + goto leave; + } else if (rc == 404) + ret = HTTPRES_NOT_FOUND; + else if (rc >= 500) + ret = HTTPRES_SERVER_ERROR; + else if (rc >= 400) + ret = HTTPRES_BAD_REQUEST; + else + ret = HTTPRES_REDIRECTED; + } + + p1 = memchr(sb.sb_buf, '\n', sb.sb_size); + if (!p1) { + ret = HTTPRES_BAD_REQUEST; + goto leave; + } + sb.sb_start = p1 + 1; + sb.sb_size -= sb.sb_start - sb.sb_buf; + + while ((p2 = memchr(sb.sb_start, '\r', sb.sb_size))) { + if (*sb.sb_start == '\r') { + sb.sb_start += 2; + sb.sb_size -= 2; + break; + } else if (!strncasecmp(sb.sb_start, "Content-Length: ", sizeof("Content-Length: ") - 1)) { + flen = atoi(sb.sb_start + sizeof("Content-Length: ") - 1); + } else if (!strncasecmp(sb.sb_start, "Last-Modified: ", sizeof("Last-Modified: ") - 1)) { + *p2 = '\0'; + strcpy(http->date, sb.sb_start + sizeof("Last-Modified: ") - 1); + 
} + p2 += 2; + sb.sb_size -= p2 - sb.sb_start; + sb.sb_start = p2; + if (sb.sb_size < 1) { + if (PILI_RTMPSockBuf_Fill(&sb) < 1) { + ret = HTTPRES_LOST_CONNECTION; + goto leave; + } + } + } + + len_known = flen > 0; + while ((!len_known || flen > 0) && + (sb.sb_size > 0 || PILI_RTMPSockBuf_Fill(&sb) > 0)) { + cb(sb.sb_start, 1, sb.sb_size, http->data); + if (len_known) + flen -= sb.sb_size; + http->size += sb.sb_size; + sb.sb_size = 0; + } + + if (flen > 0) + ret = HTTPRES_LOST_CONNECTION; + +leave: + PILI_RTMPSockBuf_Close(&sb); + freeaddrinfo(ai); + return ret; +} + +#ifdef CRYPTO + +#define CHUNK 16384 + +struct info { + z_stream *zs; + HMAC_CTX ctx; + int first; + int zlib; + int size; +}; + +static size_t + swfcrunch(void *ptr, size_t size, size_t nmemb, void *stream) { + struct info *i = stream; + char *p = ptr; + size_t len = size * nmemb; + + if (i->first) { + i->first = 0; + /* compressed? */ + if (!strncmp(p, "CWS", 3)) { + *p = 'F'; + i->zlib = 1; + } + HMAC_crunch(i->ctx, (unsigned char *)p, 8); + p += 8; + len -= 8; + i->size = 8; + } + + if (i->zlib) { + unsigned char out[CHUNK]; + i->zs->next_in = (unsigned char *)p; + i->zs->avail_in = len; + do { + i->zs->avail_out = CHUNK; + i->zs->next_out = out; + inflate(i->zs, Z_NO_FLUSH); + len = CHUNK - i->zs->avail_out; + i->size += len; + HMAC_crunch(i->ctx, out, len); + } while (i->zs->avail_out == 0); + } else { + i->size += len; + HMAC_crunch(i->ctx, (unsigned char *)p, len); + } + return size * nmemb; +} + +static int tzoff; +static int tzchecked; + +#define JAN02_1980 318340800 + +static const char *monthtab[12] = {"Jan", "Feb", "Mar", + "Apr", "May", "Jun", + "Jul", "Aug", "Sep", + "Oct", "Nov", "Dec"}; +static const char *days[] = + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}; + +/* Parse an HTTP datestamp into Unix time */ +static time_t + make_unix_time(char *s) { + struct tm time; + int i, ysub = 1900, fmt = 0; + char *month; + char *n; + time_t res; + + if (s[3] != ' ') { + fmt = 1; + if 
(s[3] != ',') + ysub = 0; + } + for (n = s; *n; ++n) + if (*n == '-' || *n == ':') + *n = ' '; + + time.tm_mon = 0; + n = strchr(s, ' '); + if (fmt) { + /* Day, DD-MMM-YYYY HH:MM:SS GMT */ + time.tm_mday = strtol(n + 1, &n, 0); + month = n + 1; + n = strchr(month, ' '); + time.tm_year = strtol(n + 1, &n, 0); + time.tm_hour = strtol(n + 1, &n, 0); + time.tm_min = strtol(n + 1, &n, 0); + time.tm_sec = strtol(n + 1, NULL, 0); + } else { + /* Unix ctime() format. Does not conform to HTTP spec. */ + /* Day MMM DD HH:MM:SS YYYY */ + month = n + 1; + n = strchr(month, ' '); + while (isspace(*n)) + n++; + time.tm_mday = strtol(n, &n, 0); + time.tm_hour = strtol(n + 1, &n, 0); + time.tm_min = strtol(n + 1, &n, 0); + time.tm_sec = strtol(n + 1, &n, 0); + time.tm_year = strtol(n + 1, NULL, 0); + } + if (time.tm_year > 100) + time.tm_year -= ysub; + + for (i = 0; i < 12; i++) + if (!strncasecmp(month, monthtab[i], 3)) { + time.tm_mon = i; + break; + } + time.tm_isdst = 0; /* daylight saving is never in effect in GMT */ + + /* this is normally the value of extern int timezone, but some + * braindead C libraries don't provide it. + */ + if (!tzchecked) { + struct tm *tc; + time_t then = JAN02_1980; + tc = localtime(&then); + tzoff = (12 - tc->tm_hour) * 3600 + tc->tm_min * 60 + tc->tm_sec; + tzchecked = 1; + } + res = mktime(&time); + /* Unfortunately, mktime() assumes the input is in local time, + * not GMT, so we have to correct it here. + */ + if (res != -1) + res += tzoff; + return res; +} + +/* Convert a Unix time to a network time string + * Weekday, DD-MMM-YYYY HH:MM:SS GMT + */ +void strtime(time_t *t, char *s) { + struct tm *tm; + + tm = gmtime((time_t *)t); + sprintf(s, "%s, %02d %s %d %02d:%02d:%02d GMT", + days[tm->tm_wday], tm->tm_mday, monthtab[tm->tm_mon], + tm->tm_year + 1900, tm->tm_hour, tm->tm_min, tm->tm_sec); +} + +#define HEX2BIN(a) (((a)&0x40) ? 
((a)&0xf) + 9 : ((a)&0xf)) + +int RTMP_HashSWF(const char *url, unsigned int *size, unsigned char *hash, + int age) { + FILE *f = NULL; + char *path, date[64], cctim[64]; + long pos = 0; + time_t ctim = -1, cnow; + int i, got = 0, ret = 0; + unsigned int hlen; + struct info in = {0}; + struct HTTP_ctx http = {0}; + HTTPResult httpres; + z_stream zs = {0}; + AVal home, hpre; + + date[0] = '\0'; +#ifdef _WIN32 +#ifdef _XBOX + hpre.av_val = "Q:"; + hpre.av_len = 2; + home.av_val = "\\UserData"; +#else + hpre.av_val = getenv("HOMEDRIVE"); + hpre.av_len = strlen(hpre.av_val); + home.av_val = getenv("HOMEPATH"); +#endif +#define DIRSEP "\\" + +#else /* !_WIN32 */ + hpre.av_val = ""; + hpre.av_len = 0; + home.av_val = getenv("HOME"); +#define DIRSEP "/" +#endif + if (!home.av_val) + home.av_val = "."; + home.av_len = strlen(home.av_val); + + /* SWF hash info is cached in a fixed-format file. + * url: + * ctim: HTTP datestamp of when we last checked it. + * date: HTTP datestamp of the SWF's last modification. + * size: SWF size in hex + * hash: SWF hash in hex + * + * These fields must be present in this order. All fields + * besides URL are fixed size. 
+ */ + path = malloc(hpre.av_len + home.av_len + sizeof(DIRSEP ".swfinfo")); + sprintf(path, "%s%s" DIRSEP ".swfinfo", hpre.av_val, home.av_val); + + f = fopen(path, "r+"); + while (f) { + char buf[4096], *file, *p; + + file = strchr(url, '/'); + if (!file) + break; + file += 2; + file = strchr(file, '/'); + if (!file) + break; + file++; + hlen = file - url; + p = strrchr(file, '/'); + if (p) + file = p; + else + file--; + + while (fgets(buf, sizeof(buf), f)) { + char *r1; + + got = 0; + + if (strncmp(buf, "url: ", 5)) + continue; + if (strncmp(buf + 5, url, hlen)) + continue; + r1 = strrchr(buf, '/'); + i = strlen(r1); + r1[--i] = '\0'; + if (strncmp(r1, file, i)) + continue; + pos = ftell(f); + while (got < 4 && fgets(buf, sizeof(buf), f)) { + if (!strncmp(buf, "size: ", 6)) { + *size = strtol(buf + 6, NULL, 16); + got++; + } else if (!strncmp(buf, "hash: ", 6)) { + unsigned char *ptr = hash, *in = (unsigned char *)buf + 6; + int l = strlen((char *)in) - 1; + for (i = 0; i < l; i += 2) + *ptr++ = (HEX2BIN(in[i]) << 4) | HEX2BIN(in[i + 1]); + got++; + } else if (!strncmp(buf, "date: ", 6)) { + buf[strlen(buf) - 1] = '\0'; + strncpy(date, buf + 6, sizeof(date)); + got++; + } else if (!strncmp(buf, "ctim: ", 6)) { + buf[strlen(buf) - 1] = '\0'; + ctim = make_unix_time(buf + 6); + got++; + } else if (!strncmp(buf, "url: ", 5)) + break; + } + break; + } + break; + } + + cnow = time(NULL); + /* If we got a cache time, see if it's young enough to use directly */ + if (age && ctim > 0) { + ctim = cnow - ctim; + ctim /= 3600 * 24; /* seconds to days */ + if (ctim < age) /* ok, it's new enough */ + goto out; + } + + in.first = 1; + HMAC_setup(in.ctx, "Genuine Adobe Flash Player 001", 30); + inflateInit(&zs); + in.zs = &zs; + + http.date = date; + http.data = ∈ + + httpres = HTTP_get(&http, url, swfcrunch); + + inflateEnd(&zs); + + if (httpres != HTTPRES_OK && httpres != HTTPRES_OK_NOT_MODIFIED) { + ret = -1; + if (httpres == HTTPRES_LOST_CONNECTION) + 
RTMP_Log(RTMP_LOGERROR, "%s: connection lost while downloading swfurl %s", + __FUNCTION__, url); + else if (httpres == HTTPRES_NOT_FOUND) + RTMP_Log(RTMP_LOGERROR, "%s: swfurl %s not found", __FUNCTION__, url); + else + RTMP_Log(RTMP_LOGERROR, "%s: couldn't contact swfurl %s (HTTP error %d)", + __FUNCTION__, url, http.status); + } else { + if (got && pos) + fseek(f, pos, SEEK_SET); + else { + char *q; + if (!f) + f = fopen(path, "w"); + if (!f) { + int err = errno; + RTMP_Log(RTMP_LOGERROR, + "%s: couldn't open %s for writing, errno %d (%s)", + __FUNCTION__, path, err, strerror(err)); + ret = -1; + goto out; + } + fseek(f, 0, SEEK_END); + q = strchr(url, '?'); + if (q) + i = q - url; + else + i = strlen(url); + + fprintf(f, "url: %.*s\n", i, url); + } + strtime(&cnow, cctim); + fprintf(f, "ctim: %s\n", cctim); + + if (!in.first) { + HMAC_finish(in.ctx, hash, hlen); + *size = in.size; + + fprintf(f, "date: %s\n", date); + fprintf(f, "size: %08x\n", in.size); + fprintf(f, "hash: "); + for (i = 0; i < SHA256_DIGEST_LENGTH; i++) + fprintf(f, "%02x", hash[i]); + fprintf(f, "\n"); + } + } + HMAC_close(in.ctx); +out: + free(path); + if (f) + fclose(f); + return ret; +} +#else +int RTMP_HashSWF(const char *url, unsigned int *size, unsigned char *hash, + int age) { + return -1; +} +#endif diff --git a/LFLiveKit/Vendor/pili-librtmp/http.h b/LFLiveKit/Vendor/pili-librtmp/http.h new file mode 100644 index 00000000..1eb7a462 --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/http.h @@ -0,0 +1,49 @@ +#ifndef __RTMP_HTTP_H__ +#define __RTMP_HTTP_H__ +/* + * Copyright (C) 2010 Howard Chu + * Copyright (C) 2010 Antti Ajanki + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. 
+ * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +typedef enum { + HTTPRES_OK, /* result OK */ + HTTPRES_OK_NOT_MODIFIED, /* not modified since last request */ + HTTPRES_NOT_FOUND, /* not found */ + HTTPRES_BAD_REQUEST, /* client error */ + HTTPRES_SERVER_ERROR, /* server reported an error */ + HTTPRES_REDIRECTED, /* resource has been moved */ + HTTPRES_LOST_CONNECTION /* connection lost while waiting for data */ +} HTTPResult; + +struct HTTP_ctx { + char *date; + int size; + int status; + void *data; +}; + +typedef size_t(HTTP_read_callback)(void *ptr, size_t size, size_t nmemb, + void *stream); + +HTTPResult HTTP_get(struct HTTP_ctx *http, const char *url, + HTTP_read_callback *cb); + +#endif diff --git a/LFLiveKit/Vendor/pili-librtmp/log.c b/LFLiveKit/Vendor/pili-librtmp/log.c new file mode 100644 index 00000000..d3934366 --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/log.c @@ -0,0 +1,209 @@ +/* + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +#include +#include +#include +#include +#include + +#include "log.h" +#include "rtmp_sys.h" + +#define MAX_PRINT_LEN 2048 + +RTMP_LogLevel RTMP_debuglevel = RTMP_LOGERROR; + +static int neednl; + +static FILE *fmsg; + +static RTMP_LogCallback rtmp_log_default, *cb = rtmp_log_default; + +static const char *levels[] = { + "CRIT", "ERROR", "WARNING", "INFO", + "DEBUG", "DEBUG2"}; + +static void rtmp_log_default(int level, const char *format, va_list vl) { + char str[MAX_PRINT_LEN] = ""; + + vsnprintf(str, MAX_PRINT_LEN - 1, format, vl); + + /* Filter out 'no-name' */ + if (RTMP_debuglevel < RTMP_LOGALL && strstr(str, "no-name") != NULL) + return; + + if (!fmsg) fmsg = stderr; + + if (level <= RTMP_debuglevel) { + if (neednl) { + putc('\n', fmsg); + neednl = 0; + } + fprintf(fmsg, "%s: %s\n", levels[level], str); +#ifdef _DEBUG + fflush(fmsg); +#endif + } +} + +void RTMP_LogSetOutput(FILE *file) { + fmsg = file; +} + +void RTMP_LogSetLevel(RTMP_LogLevel level) { + RTMP_debuglevel = level; +} + +void RTMP_LogSetCallback(RTMP_LogCallback *cbp) { + cb = cbp; +} + +RTMP_LogLevel RTMP_LogGetLevel() { + return RTMP_debuglevel; +} + +void RTMP_Log(int level, const char *format, ...) 
{ + va_list args; + va_start(args, format); + cb(level, format, args); + va_end(args); +} + +static const char hexdig[] = "0123456789abcdef"; + +void RTMP_LogHex(int level, const uint8_t *data, unsigned long len) { + unsigned long i; + char line[50], *ptr; + + if (level > RTMP_debuglevel) + return; + + ptr = line; + + for (i = 0; i < len; i++) { + *ptr++ = hexdig[0x0f & (data[i] >> 4)]; + *ptr++ = hexdig[0x0f & data[i]]; + if ((i & 0x0f) == 0x0f) { + *ptr = '\0'; + ptr = line; + RTMP_Log(level, "%s", line); + } else { + *ptr++ = ' '; + } + } + if (i & 0x0f) { + *ptr = '\0'; + RTMP_Log(level, "%s", line); + } +} + +void RTMP_LogHexString(int level, const uint8_t *data, unsigned long len) { +#define BP_OFFSET 9 +#define BP_GRAPH 60 +#define BP_LEN 80 + char line[BP_LEN]; + unsigned long i; + + if (!data || level > RTMP_debuglevel) + return; + + /* in case len is zero */ + line[0] = '\0'; + + for (i = 0; i < len; i++) { + int n = i % 16; + unsigned off; + + if (!n) { + if (i) RTMP_Log(level, "%s", line); + memset(line, ' ', sizeof(line) - 2); + line[sizeof(line) - 2] = '\0'; + + off = i % 0x0ffffU; + + line[2] = hexdig[0x0f & (off >> 12)]; + line[3] = hexdig[0x0f & (off >> 8)]; + line[4] = hexdig[0x0f & (off >> 4)]; + line[5] = hexdig[0x0f & off]; + line[6] = ':'; + } + + off = BP_OFFSET + n * 3 + ((n >= 8) ? 1 : 0); + line[off] = hexdig[0x0f & (data[i] >> 4)]; + line[off + 1] = hexdig[0x0f & data[i]]; + + off = BP_GRAPH + n + ((n >= 8) ? 1 : 0); + + if (isprint(data[i])) { + line[BP_GRAPH + n] = data[i]; + } else { + line[BP_GRAPH + n] = '.'; + } + } + + RTMP_Log(level, "%s", line); +} + +/* These should only be used by apps, never by the library itself */ +void RTMP_LogPrintf(const char *format, ...) 
{ + char str[MAX_PRINT_LEN] = ""; + int len; + va_list args; + va_start(args, format); + len = vsnprintf(str, MAX_PRINT_LEN - 1, format, args); + va_end(args); + + if (RTMP_debuglevel == RTMP_LOGCRIT) + return; + + if (!fmsg) fmsg = stderr; + + if (neednl) { + putc('\n', fmsg); + neednl = 0; + } + + if (len > MAX_PRINT_LEN - 1) + len = MAX_PRINT_LEN - 1; + fprintf(fmsg, "%s", str); + if (str[len - 1] == '\n') + fflush(fmsg); +} + +void RTMP_LogStatus(const char *format, ...) { + char str[MAX_PRINT_LEN] = ""; + va_list args; + va_start(args, format); + vsnprintf(str, MAX_PRINT_LEN - 1, format, args); + va_end(args); + + if (RTMP_debuglevel == RTMP_LOGCRIT) + return; + + if (!fmsg) fmsg = stderr; + + fprintf(fmsg, "%s", str); + fflush(fmsg); + neednl = 1; +} diff --git a/LFLiveKit/Vendor/pili-librtmp/log.h b/LFLiveKit/Vendor/pili-librtmp/log.h new file mode 100644 index 00000000..f7daf375 --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/log.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * http://www.gnu.org/copyleft/lgpl.html + */ + +#ifndef __RTMP_LOG_H__ +#define __RTMP_LOG_H__ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif +/* Enable this to get full debugging output */ +/* #define _DEBUG */ + +#ifdef _DEBUG +#undef NODEBUG +#endif + +typedef enum { + RTMP_LOGCRIT = 0, + RTMP_LOGERROR, + RTMP_LOGWARNING, + RTMP_LOGINFO, + RTMP_LOGDEBUG, + RTMP_LOGDEBUG2, + RTMP_LOGALL +} RTMP_LogLevel; + +extern RTMP_LogLevel RTMP_debuglevel; + +typedef void(RTMP_LogCallback)(int level, const char *fmt, va_list); +void RTMP_LogSetCallback(RTMP_LogCallback *cb); +void RTMP_LogSetOutput(FILE *file); +void RTMP_LogPrintf(const char *format, ...); +void RTMP_LogStatus(const char *format, ...); +void RTMP_Log(int level, const char *format, ...); +void RTMP_LogHex(int level, const uint8_t *data, unsigned long len); +void RTMP_LogHexString(int level, const uint8_t *data, unsigned long len); +void RTMP_LogSetLevel(RTMP_LogLevel lvl); +RTMP_LogLevel RTMP_LogGetLevel(void); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/LFLiveKit/Vendor/pili-librtmp/parseurl.c b/LFLiveKit/Vendor/pili-librtmp/parseurl.c new file mode 100644 index 00000000..0e50352b --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/parseurl.c @@ -0,0 +1,312 @@ +/* + * Copyright (C) 2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +#include +#include + +#include +#include + +#include "log.h" +#include "rtmp_sys.h" + +int PILI_RTMP_ParseURL2(const char *url, int *protocol, AVal *host, unsigned int *port, + AVal *playpath, AVal *app, AVal *domainName) { + char *p, *end, *col, *ques, *slash; + + RTMP_Log(RTMP_LOGDEBUG, "Parsing..."); + + *protocol = RTMP_PROTOCOL_RTMP; + *port = 0; + playpath->av_len = 0; + playpath->av_val = NULL; + app->av_len = 0; + app->av_val = NULL; + + /* Old School Parsing */ + + /* look for usual :// pattern */ + p = strstr(url, "://"); + if (!p) { + RTMP_Log(RTMP_LOGERROR, "RTMP URL: No :// in url!"); + return FALSE; + } + { + int len = (int)(p - url); + + if (len == 4 && strncasecmp(url, "rtmp", 4) == 0) + *protocol = RTMP_PROTOCOL_RTMP; + else if (len == 5 && strncasecmp(url, "rtmpt", 5) == 0) + *protocol = RTMP_PROTOCOL_RTMPT; + else if (len == 5 && strncasecmp(url, "rtmps", 5) == 0) + *protocol = RTMP_PROTOCOL_RTMPS; + else if (len == 5 && strncasecmp(url, "rtmpe", 5) == 0) + *protocol = RTMP_PROTOCOL_RTMPE; + else if (len == 5 && strncasecmp(url, "rtmfp", 5) == 0) + *protocol = RTMP_PROTOCOL_RTMFP; + else if (len == 6 && strncasecmp(url, "rtmpte", 6) == 0) + *protocol = RTMP_PROTOCOL_RTMPTE; + else if (len == 6 && strncasecmp(url, "rtmpts", 6) == 0) + *protocol = RTMP_PROTOCOL_RTMPTS; + else { + RTMP_Log(RTMP_LOGWARNING, "Unknown protocol!\n"); + goto parsehost; + } + } + + RTMP_Log(RTMP_LOGDEBUG, "Parsed protocol: %d", *protocol); + +parsehost: + /* let's get the hostname */ + p += 3; + + /* check for sudden death */ + if (*p == 0) { + RTMP_Log(RTMP_LOGWARNING, "No hostname in URL!"); + return FALSE; + } + + end = p + strlen(p); + col = strchr(p, ':'); + ques = strchr(p, 
'?'); + slash = strchr(p, '/'); + + { + int hostlen; + if (slash) + hostlen = slash - p; + else + hostlen = end - p; + if (col && col - p < hostlen) + hostlen = col - p; + + if (hostlen < 256) { + host->av_val = p; + host->av_len = hostlen; + RTMP_Log(RTMP_LOGDEBUG, "Parsed host : %.*s", hostlen, host->av_val); + } else { + RTMP_Log(RTMP_LOGWARNING, "Hostname exceeds 255 characters!"); + } + + p += hostlen; + } + + /* get the port number if available */ + if (*p == ':') { + unsigned int p2; + p++; + p2 = atoi(p); + if (p2 > 65535) { + RTMP_Log(RTMP_LOGWARNING, "Invalid port number!"); + } else { + *port = p2; + } + } + + if (!slash) { + RTMP_Log(RTMP_LOGWARNING, "No application or playpath in URL!"); + return TRUE; + } + p = slash + 1; + + /** parse domain + + * rtmp://host:[port]/app/...?domain=a.com + + * use domain to replace host + + */ + + if (domainName != NULL && ques != NULL) { + char *domain = strstr(ques, "domain="); + if (domain) { + end = domain - 1; + domain += 7; //skip "domain=" + char *domain_end = strchr(domain, '&'); + int host_len = 0; + if (domain_end) { + host_len = domain_end - domain; + } else { + host_len = strlen(domain); + } + if (host_len < 256) { + domainName->av_val = domain; + domainName->av_len = host_len; + RTMP_Log(RTMP_LOGDEBUG, "Parsed host and domain : %.*s", host_len, host->av_val); + } + } + } + + { + /* parse application + * + * rtmp://host[:port]/app[/appinstance][/...] + * application = app[/appinstance] + */ + + char *slash2, *slash3 = NULL; + int applen, appnamelen; + + slash2 = strchr(p, '/'); + if (slash2) + slash3 = strchr(slash2 + 1, '/'); + + applen = end - p; /* ondemand, pass all parameters as app */ + appnamelen = applen; /* ondemand length */ + + if (ques && strstr(p, "slist=")) { /* whatever it is, the '?' 
and slist= means we need to use everything as app and parse plapath from slist= */ + appnamelen = ques - p; + } else if (strncmp(p, "ondemand/", 9) == 0) { + /* app = ondemand/foobar, only pass app=ondemand */ + applen = 8; + appnamelen = 8; + } else { /* app!=ondemand, so app is app[/appinstance] */ + if (slash3) + appnamelen = slash3 - p; + else if (slash2) + appnamelen = slash2 - p; + + applen = appnamelen; + } + + app->av_val = p; + app->av_len = applen; + RTMP_Log(RTMP_LOGDEBUG, "Parsed app : %.*s", applen, p); + + p += appnamelen; + } + + if (*p == '/') + p++; + + if (end - p) { + AVal av = {p, end - p}; + PILI_RTMP_ParsePlaypath(&av, playpath); + } + + return TRUE; +} + +/* + * Extracts playpath from RTMP URL. playpath is the file part of the + * URL, i.e. the part that comes after rtmp://host:port/app/ + * + * Returns the stream name in a format understood by FMS. The name is + * the playpath part of the URL with formatting depending on the stream + * type: + * + * mp4 streams: prepend "mp4:", remove extension + * mp3 streams: prepend "mp3:", remove extension + * flv streams: remove extension + */ +void PILI_RTMP_ParsePlaypath(AVal *in, AVal *out) { + int addMP4 = 0; + int addMP3 = 0; + int subExt = 0; + const char *playpath = in->av_val; + const char *temp, *q, *ext = NULL; + const char *ppstart = playpath; + char *streamname, *destptr, *p; + + int pplen = in->av_len; + + out->av_val = NULL; + out->av_len = 0; + + if ((*ppstart == '?') && + (temp = strstr(ppstart, "slist=")) != 0) { + ppstart = temp + 6; + pplen = strlen(ppstart); + + temp = strchr(ppstart, '&'); + if (temp) { + pplen = temp - ppstart; + } + } + + q = strchr(ppstart, '?'); + if (pplen >= 4) { + if (q) + ext = q - 4; + else + ext = &ppstart[pplen - 4]; + if ((strncmp(ext, ".f4v", 4) == 0) || + (strncmp(ext, ".mp4", 4) == 0)) { + addMP4 = 1; + subExt = 1; + /* Only remove .flv from rtmp URL, not slist params */ + } else if ((ppstart == playpath) && + (strncmp(ext, ".flv", 4) == 0)) { + 
subExt = 1; + } else if (strncmp(ext, ".mp3", 4) == 0) { + addMP3 = 1; + subExt = 1; + } + } + + streamname = (char *)malloc((pplen + 4 + 1) * sizeof(char)); + if (!streamname) + return; + + destptr = streamname; + if (addMP4) { + if (strncmp(ppstart, "mp4:", 4)) { + strcpy(destptr, "mp4:"); + destptr += 4; + } else { + subExt = 0; + } + } else if (addMP3) { + if (strncmp(ppstart, "mp3:", 4)) { + strcpy(destptr, "mp3:"); + destptr += 4; + } else { + subExt = 0; + } + } + + for (p = (char *)ppstart; pplen > 0;) { + /* skip extension */ + if (subExt && p == ext) { + p += 4; + pplen -= 4; + continue; + } + if (*p == '%') { + unsigned int c; + sscanf(p + 1, "%02x", &c); + *destptr++ = c; + pplen -= 3; + p += 3; + } else { + *destptr++ = *p++; + pplen--; + } + } + *destptr = '\0'; + + out->av_val = streamname; + out->av_len = destptr - streamname; +} + +int PILI_RTMP_ParseURL(const char *url, int *protocol, AVal *host, + unsigned int *port, AVal *playpath, AVal *app) { + return PILI_RTMP_ParseURL2(url, protocol, host, port, playpath, app, NULL); +} diff --git a/LFLiveKit/Vendor/pili-librtmp/rtmp.c b/LFLiveKit/Vendor/pili-librtmp/rtmp.c new file mode 100644 index 00000000..5043885b --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/rtmp.c @@ -0,0 +1,4136 @@ +/* + * Copyright (C) 2005-2008 Team XBMC + * http://www.xbmc.org + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * libPILI_RTMP is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * libPILI_RTMP is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with libPILI_RTMP see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * http://www.gnu.org/copyleft/lgpl.html + */ + +#include +#include +#include +#include + +#include "log.h" +#include "rtmp_sys.h" + +#ifdef CRYPTO +#ifdef USE_POLARSSL +#include +#elif defined(USE_GNUTLS) +#include +#else /* USE_OPENSSL */ +#include +#include +#endif +TLS_CTX RTMP_TLS_ctx; +#endif + +#define RTMP_SIG_SIZE 1536 +#define RTMP_LARGE_HEADER_SIZE 12 + +static const int packetSize[] = {12, 8, 4, 1}; + +int PILI_RTMP_ctrlC; + +const char PILI_RTMPProtocolStrings[][7] = { + "RTMP", + "RTMPT", + "RTMPE", + "RTMPTE", + "RTMPS", + "RTMPTS", + "", + "", + "RTMFP"}; + +const char PILI_RTMPProtocolStringsLower[][7] = { + "rtmp", + "rtmpt", + "rtmpe", + "rtmpte", + "rtmps", + "rtmpts", + "", + "", + "rtmfp"}; + +static const char *RTMPT_cmds[] = { + "open", + "send", + "idle", + "close"}; + +typedef enum { + RTMPT_OPEN = 0, + RTMPT_SEND, + RTMPT_IDLE, + RTMPT_CLOSE +} RTMPTCmd; + +static int DumpMetaData(AMFObject *obj); +static int HandShake(PILI_RTMP *r, int FP9HandShake, RTMPError *error); +static int SocksNegotiate(PILI_RTMP *r, RTMPError *error); + +static int SendConnectPacket(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error); +static int SendCheckBW(PILI_RTMP *r, RTMPError *error); +static int SendCheckBWResult(PILI_RTMP *r, double txn, RTMPError *error); +static int SendDeleteStream(PILI_RTMP *r, double dStreamId, RTMPError *error); +static int SendFCSubscribe(PILI_RTMP *r, AVal *subscribepath, RTMPError *error); +static int SendPlay(PILI_RTMP *r, RTMPError *error); +static int SendBytesReceived(PILI_RTMP *r, RTMPError *error); + +#if 0 /* unused */ +static int SendBGHasStream(PILI_RTMP *r, double dId, AVal *playpath); +#endif + +static int HandleInvoke(PILI_RTMP *r, const char *body, unsigned int 
nBodySize); +static int HandleMetadata(PILI_RTMP *r, char *body, unsigned int len); +static void HandleChangeChunkSize(PILI_RTMP *r, const PILI_RTMPPacket *packet); +static void HandleAudio(PILI_RTMP *r, const PILI_RTMPPacket *packet); +static void HandleVideo(PILI_RTMP *r, const PILI_RTMPPacket *packet); +static void HandleCtrl(PILI_RTMP *r, const PILI_RTMPPacket *packet); +static void HandleServerBW(PILI_RTMP *r, const PILI_RTMPPacket *packet); +static void HandleClientBW(PILI_RTMP *r, const PILI_RTMPPacket *packet); + +static int ReadN(PILI_RTMP *r, char *buffer, int n); +static int WriteN(PILI_RTMP *r, const char *buffer, int n, RTMPError *error); + +static void DecodeTEA(AVal *key, AVal *text); + +static int HTTP_Post(PILI_RTMP *r, RTMPTCmd cmd, const char *buf, int len); +static int HTTP_read(PILI_RTMP *r, int fill); + +#ifndef _WIN32 +static int clk_tck; +#endif + +#ifdef CRYPTO +#include "handshake.h" +#endif + +uint32_t + PILI_RTMP_GetTime() { +#ifdef _DEBUG + return 0; +#elif defined(_WIN32) + return timeGetTime(); +#else + struct tms t; + if (!clk_tck) clk_tck = sysconf(_SC_CLK_TCK); + return times(&t) * 1000 / clk_tck; +#endif +} + +void PILI_RTMP_UserInterrupt() { + PILI_RTMP_ctrlC = TRUE; +} + +void PILI_RTMPPacket_Reset(PILI_RTMPPacket *p) { + p->m_headerType = 0; + p->m_packetType = 0; + p->m_nChannel = 0; + p->m_nTimeStamp = 0; + p->m_nInfoField2 = 0; + p->m_hasAbsTimestamp = FALSE; + p->m_nBodySize = 0; + p->m_nBytesRead = 0; +} + +int PILI_RTMPPacket_Alloc(PILI_RTMPPacket *p, int nSize) { + char *ptr = calloc(1, nSize + RTMP_MAX_HEADER_SIZE); + if (!ptr) + return FALSE; + p->m_body = ptr + RTMP_MAX_HEADER_SIZE; + p->m_nBytesRead = 0; + return TRUE; +} + +void PILI_RTMPPacket_Free(PILI_RTMPPacket *p) { + if (p->m_body) { + free(p->m_body - RTMP_MAX_HEADER_SIZE); + p->m_body = NULL; + } +} + +void PILI_RTMPPacket_Dump(PILI_RTMPPacket *p) { + RTMP_Log(RTMP_LOGDEBUG, + "PILI_RTMP PACKET: packet type: 0x%02x. channel: 0x%02x. info 1: %d info 2: %d. 
Body size: %lu. body: 0x%02x", + p->m_packetType, p->m_nChannel, p->m_nTimeStamp, p->m_nInfoField2, + p->m_nBodySize, p->m_body ? (unsigned char)p->m_body[0] : 0); +} + +int PILI_RTMP_LibVersion() { + return RTMP_LIB_VERSION; +} + +void PILI_RTMP_TLS_Init() { +#ifdef CRYPTO +#ifdef USE_POLARSSL + /* Do this regardless of NO_SSL, we use havege for rtmpe too */ + RTMP_TLS_ctx = calloc(1, sizeof(struct tls_ctx)); + havege_init(&RTMP_TLS_ctx->hs); +#elif defined(USE_GNUTLS) && !defined(NO_SSL) + /* Technically we need to initialize libgcrypt ourselves if + * we're not going to call gnutls_global_init(). Ignoring this + * for now. + */ + gnutls_global_init(); + RTMP_TLS_ctx = malloc(sizeof(struct tls_ctx)); + gnutls_certificate_allocate_credentials(&RTMP_TLS_ctx->cred); + gnutls_priority_init(&RTMP_TLS_ctx->prios, "NORMAL", NULL); + gnutls_certificate_set_x509_trust_file(RTMP_TLS_ctx->cred, + "ca.pem", GNUTLS_X509_FMT_PEM); +#elif !defined(NO_SSL) /* USE_OPENSSL */ + /* libcrypto doesn't need anything special */ + SSL_load_error_strings(); + SSL_library_init(); + OpenSSL_add_all_digests(); + RTMP_TLS_ctx = SSL_CTX_new(SSLv23_method()); + SSL_CTX_set_options(RTMP_TLS_ctx, SSL_OP_ALL); + SSL_CTX_set_default_verify_paths(RTMP_TLS_ctx); +#endif +#endif +} + +PILI_RTMP * + PILI_RTMP_Alloc() { + return calloc(1, sizeof(PILI_RTMP)); +} + +void PILI_RTMP_Free(PILI_RTMP *r) { + r->m_errorCallback = NULL; + r->m_userData = NULL; + RTMPError_Free(r->m_error); + r->m_error = NULL; + + free(r); +} + +void PILI_RTMP_Init(PILI_RTMP *r) { +#ifdef CRYPTO + if (!RTMP_TLS_ctx) + RTMP_TLS_Init(); +#endif + + memset(r, 0, sizeof(PILI_RTMP)); + r->m_sb.sb_socket = -1; + r->m_inChunkSize = RTMP_DEFAULT_CHUNKSIZE; + r->m_outChunkSize = RTMP_DEFAULT_CHUNKSIZE; + r->m_nBufferMS = 30000; + r->m_nClientBW = 2500000; + r->m_nClientBW2 = 2; + r->m_nServerBW = 2500000; + r->m_fAudioCodecs = 3191.0; + r->m_fVideoCodecs = 252.0; + r->Link.timeout = 10; + r->Link.send_timeout = 10; + r->Link.swfAge = 
30; + + r->m_errorCallback = NULL; + r->m_error = NULL; + r->m_userData = NULL; + r->m_is_closing = 0; + r->m_tcp_nodelay = 1; + + r->m_connCallback = NULL; + r->ip = 0; +} + +void PILI_RTMP_EnableWrite(PILI_RTMP *r) { + r->Link.protocol |= RTMP_FEATURE_WRITE; +} + +double + PILI_RTMP_GetDuration(PILI_RTMP *r) { + return r->m_fDuration; +} + +int PILI_RTMP_IsConnected(PILI_RTMP *r) { + return r->m_sb.sb_socket != -1; +} + +int PILI_RTMP_Socket(PILI_RTMP *r) { + return r->m_sb.sb_socket; +} + +int PILI_RTMP_IsTimedout(PILI_RTMP *r) { + return r->m_sb.sb_timedout; +} + +void PILI_RTMP_SetBufferMS(PILI_RTMP *r, int size) { + r->m_nBufferMS = size; +} + +void PILI_RTMP_UpdateBufferMS(PILI_RTMP *r, RTMPError *error) { + PILI_RTMP_SendCtrl(r, 3, r->m_stream_id, r->m_nBufferMS, error); +} + +#undef OSS +#ifdef _WIN32 +#define OSS "WIN" +#elif defined(__sun__) +#define OSS "SOL" +#elif defined(__APPLE__) +#define OSS "MAC" +#elif defined(__linux__) +#define OSS "LNX" +#else +#define OSS "GNU" +#endif +#define DEF_VERSTR OSS " 10,0,32,18" +static const char DEFAULT_FLASH_VER[] = DEF_VERSTR; +const AVal RTMP_DefaultFlashVer = + {(char *)DEFAULT_FLASH_VER, sizeof(DEFAULT_FLASH_VER) - 1}; + +void PILI_RTMP_SetupStream(PILI_RTMP *r, + int protocol, + AVal *host, + unsigned int port, + AVal *sockshost, + AVal *playpath, + AVal *tcUrl, + AVal *swfUrl, + AVal *pageUrl, + AVal *app, + AVal *auth, + AVal *swfSHA256Hash, + uint32_t swfSize, + AVal *flashVer, + AVal *subscribepath, + int dStart, + int dStop, int bLiveStream, long int timeout) { + RTMP_Log(RTMP_LOGDEBUG, "Protocol : %s", PILI_RTMPProtocolStrings[protocol & 7]); + RTMP_Log(RTMP_LOGDEBUG, "Hostname : %.*s", host->av_len, host->av_val); + RTMP_Log(RTMP_LOGDEBUG, "Port : %d", port); + RTMP_Log(RTMP_LOGDEBUG, "Playpath : %s", playpath->av_val); + + if (tcUrl && tcUrl->av_val) + RTMP_Log(RTMP_LOGDEBUG, "tcUrl : %s", tcUrl->av_val); + if (swfUrl && swfUrl->av_val) + RTMP_Log(RTMP_LOGDEBUG, "swfUrl : %s", swfUrl->av_val); + if 
(pageUrl && pageUrl->av_val) + RTMP_Log(RTMP_LOGDEBUG, "pageUrl : %s", pageUrl->av_val); + if (app && app->av_val) + RTMP_Log(RTMP_LOGDEBUG, "app : %.*s", app->av_len, app->av_val); + if (auth && auth->av_val) + RTMP_Log(RTMP_LOGDEBUG, "auth : %s", auth->av_val); + if (subscribepath && subscribepath->av_val) + RTMP_Log(RTMP_LOGDEBUG, "subscribepath : %s", subscribepath->av_val); + if (flashVer && flashVer->av_val) + RTMP_Log(RTMP_LOGDEBUG, "flashVer : %s", flashVer->av_val); + if (dStart > 0) + RTMP_Log(RTMP_LOGDEBUG, "StartTime : %d msec", dStart); + if (dStop > 0) + RTMP_Log(RTMP_LOGDEBUG, "StopTime : %d msec", dStop); + + RTMP_Log(RTMP_LOGDEBUG, "live : %s", bLiveStream ? "yes" : "no"); + RTMP_Log(RTMP_LOGDEBUG, "timeout : %d sec", timeout); + +#ifdef CRYPTO + if (swfSHA256Hash != NULL && swfSize > 0) { + memcpy(r->Link.SWFHash, swfSHA256Hash->av_val, sizeof(r->Link.SWFHash)); + r->Link.SWFSize = swfSize; + RTMP_Log(RTMP_LOGDEBUG, "SWFSHA256:"); + RTMP_LogHex(RTMP_LOGDEBUG, r->Link.SWFHash, sizeof(r->Link.SWFHash)); + RTMP_Log(RTMP_LOGDEBUG, "SWFSize : %lu", r->Link.SWFSize); + } else { + r->Link.SWFSize = 0; + } +#endif + + if (sockshost->av_len) { + const char *socksport = strchr(sockshost->av_val, ':'); + char *hostname = strdup(sockshost->av_val); + + if (socksport) + hostname[socksport - sockshost->av_val] = '\0'; + r->Link.sockshost.av_val = hostname; + r->Link.sockshost.av_len = strlen(hostname); + + r->Link.socksport = socksport ? 
atoi(socksport + 1) : 1080; + RTMP_Log(RTMP_LOGDEBUG, "Connecting via SOCKS proxy: %s:%d", r->Link.sockshost.av_val, + r->Link.socksport); + } else { + r->Link.sockshost.av_val = NULL; + r->Link.sockshost.av_len = 0; + r->Link.socksport = 0; + } + + if (tcUrl && tcUrl->av_len) + r->Link.tcUrl = *tcUrl; + if (swfUrl && swfUrl->av_len) + r->Link.swfUrl = *swfUrl; + if (pageUrl && pageUrl->av_len) + r->Link.pageUrl = *pageUrl; + if (app && app->av_len) + r->Link.app = *app; + if (auth && auth->av_len) { + r->Link.auth = *auth; + r->Link.lFlags |= RTMP_LF_AUTH; + } + if (flashVer && flashVer->av_len) + r->Link.flashVer = *flashVer; + else + r->Link.flashVer = RTMP_DefaultFlashVer; + if (subscribepath && subscribepath->av_len) + r->Link.subscribepath = *subscribepath; + r->Link.seekTime = dStart; + r->Link.stopTime = dStop; + if (bLiveStream) + r->Link.lFlags |= RTMP_LF_LIVE; + r->Link.timeout = timeout; + + r->Link.protocol = protocol; + r->Link.hostname = *host; + r->Link.port = port; + r->Link.playpath = *playpath; + + if (r->Link.port == 0) { + if (protocol & RTMP_FEATURE_SSL) + r->Link.port = 443; + else if (protocol & RTMP_FEATURE_HTTP) + r->Link.port = 80; + else + r->Link.port = 1935; + } +} + +enum { OPT_STR = 0, + OPT_INT, + OPT_BOOL, + OPT_CONN }; +static const char *optinfo[] = { + "string", "integer", "boolean", "AMF"}; + +#define OFF(x) offsetof(struct PILI_RTMP, x) + +static struct urlopt { + AVal name; + off_t off; + int otype; + int omisc; + char *use; +} options[] = { + {AVC("socks"), OFF(Link.sockshost), OPT_STR, 0, + "Use the specified SOCKS proxy"}, + {AVC("app"), OFF(Link.app), OPT_STR, 0, + "Name of target app on server"}, + {AVC("tcUrl"), OFF(Link.tcUrl), OPT_STR, 0, + "URL to played stream"}, + {AVC("pageUrl"), OFF(Link.pageUrl), OPT_STR, 0, + "URL of played media's web page"}, + {AVC("swfUrl"), OFF(Link.swfUrl), OPT_STR, 0, + "URL to player SWF file"}, + {AVC("flashver"), OFF(Link.flashVer), OPT_STR, 0, + "Flash version string (default " 
DEF_VERSTR ")"}, + {AVC("conn"), OFF(Link.extras), OPT_CONN, 0, + "Append arbitrary AMF data to Connect message"}, + {AVC("playpath"), OFF(Link.playpath), OPT_STR, 0, + "Path to target media on server"}, + {AVC("playlist"), OFF(Link.lFlags), OPT_BOOL, RTMP_LF_PLST, + "Set playlist before play command"}, + {AVC("live"), OFF(Link.lFlags), OPT_BOOL, RTMP_LF_LIVE, + "Stream is live, no seeking possible"}, + {AVC("subscribe"), OFF(Link.subscribepath), OPT_STR, 0, + "Stream to subscribe to"}, + {AVC("token"), OFF(Link.token), OPT_STR, 0, + "Key for SecureToken response"}, + {AVC("swfVfy"), OFF(Link.lFlags), OPT_BOOL, RTMP_LF_SWFV, + "Perform SWF Verification"}, + {AVC("swfAge"), OFF(Link.swfAge), OPT_INT, 0, + "Number of days to use cached SWF hash"}, + {AVC("start"), OFF(Link.seekTime), OPT_INT, 0, + "Stream start position in milliseconds"}, + {AVC("stop"), OFF(Link.stopTime), OPT_INT, 0, + "Stream stop position in milliseconds"}, + {AVC("buffer"), OFF(m_nBufferMS), OPT_INT, 0, + "Buffer time in milliseconds"}, + {AVC("timeout"), OFF(Link.timeout), OPT_INT, 0, + "Session timeout in seconds"}, + {{NULL, 0}, 0, 0}}; + +static const AVal truth[] = { + AVC("1"), + AVC("on"), + AVC("yes"), + AVC("true"), + {0, 0}}; + +static void RTMP_OptUsage() { + int i; + + RTMP_Log(RTMP_LOGERROR, "Valid PILI_RTMP options are:\n"); + for (i = 0; options[i].name.av_len; i++) { + RTMP_Log(RTMP_LOGERROR, "%10s %-7s %s\n", options[i].name.av_val, + optinfo[options[i].otype], options[i].use); + } +} + +static int + parseAMF(AMFObject *obj, AVal *av, int *depth) { + AMFObjectProperty prop = {{0, 0}}; + int i; + char *p, *arg = av->av_val; + + if (arg[1] == ':') { + p = (char *)arg + 2; + switch (arg[0]) { + case 'B': + prop.p_type = AMF_BOOLEAN; + prop.p_vu.p_number = atoi(p); + break; + case 'S': + prop.p_type = AMF_STRING; + prop.p_vu.p_aval.av_val = p; + prop.p_vu.p_aval.av_len = av->av_len - (p - arg); + break; + case 'N': + prop.p_type = AMF_NUMBER; + prop.p_vu.p_number = strtod(p, NULL); 
+ break; + case 'Z': + prop.p_type = AMF_NULL; + break; + case 'O': + i = atoi(p); + if (i) { + prop.p_type = AMF_OBJECT; + } else { + (*depth)--; + return 0; + } + break; + default: + return -1; + } + } else if (arg[2] == ':' && arg[0] == 'N') { + p = strchr(arg + 3, ':'); + if (!p || !*depth) + return -1; + prop.p_name.av_val = (char *)arg + 3; + prop.p_name.av_len = p - (arg + 3); + + p++; + switch (arg[1]) { + case 'B': + prop.p_type = AMF_BOOLEAN; + prop.p_vu.p_number = atoi(p); + break; + case 'S': + prop.p_type = AMF_STRING; + prop.p_vu.p_aval.av_val = p; + prop.p_vu.p_aval.av_len = av->av_len - (p - arg); + break; + case 'N': + prop.p_type = AMF_NUMBER; + prop.p_vu.p_number = strtod(p, NULL); + break; + case 'O': + prop.p_type = AMF_OBJECT; + break; + default: + return -1; + } + } else + return -1; + + if (*depth) { + AMFObject *o2; + for (i = 0; i < *depth; i++) { + o2 = &obj->o_props[obj->o_num - 1].p_vu.p_object; + obj = o2; + } + } + AMF_AddProp(obj, &prop); + if (prop.p_type == AMF_OBJECT) + (*depth)++; + return 0; +} + +int RTMP_SetOpt(PILI_RTMP *r, const AVal *opt, AVal *arg, RTMPError *error) { + int i; + void *v; + + for (i = 0; options[i].name.av_len; i++) { + if (opt->av_len != options[i].name.av_len) continue; + if (strcasecmp(opt->av_val, options[i].name.av_val)) continue; + v = (char *)r + options[i].off; + switch (options[i].otype) { + case OPT_STR: { + AVal *aptr = v; + *aptr = *arg; + } break; + case OPT_INT: { + long l = strtol(arg->av_val, NULL, 0); + *(int *)v = l; + } break; + case OPT_BOOL: { + int j, fl; + fl = *(int *)v; + for (j = 0; truth[j].av_len; j++) { + if (arg->av_len != truth[j].av_len) continue; + if (strcasecmp(arg->av_val, truth[j].av_val)) continue; + fl |= options[i].omisc; + break; + } + *(int *)v = fl; + } break; + case OPT_CONN: + if (parseAMF(&r->Link.extras, arg, &r->Link.edepth)) + return FALSE; + break; + } + break; + } + if (!options[i].name.av_len) { + if (error) { + char msg[100]; + memset(msg, 0, 100); + 
strcat(msg, "Unknown option "); + strcat(msg, opt->av_val); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorUnknowOption; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "Unknown option %s", opt->av_val); + RTMP_OptUsage(); + return FALSE; + } + + return TRUE; +} + +int PILI_RTMP_SetupURL(PILI_RTMP *r, const char *url, RTMPError *error) { + AVal opt, arg; + char *p1, *p2, *ptr = strchr(url, ' '); + int ret, len; + unsigned int port = 0; + + if (ptr) + *ptr = '\0'; + + len = (int)strlen(url); + ret = PILI_RTMP_ParseURL2(url, &r->Link.protocol, &r->Link.hostname, + &port, &r->Link.playpath0, &r->Link.app, &r->Link.domain); + if (!ret) + return ret; + r->Link.port = port; + r->Link.playpath = r->Link.playpath0; + + while (ptr) { + *ptr++ = '\0'; + p1 = ptr; + p2 = strchr(p1, '='); + if (!p2) + break; + opt.av_val = p1; + opt.av_len = p2 - p1; + *p2++ = '\0'; + arg.av_val = p2; + ptr = strchr(p2, ' '); + if (ptr) { + *ptr = '\0'; + arg.av_len = ptr - p2; + /* skip repeated spaces */ + while (ptr[1] == ' ') + *ptr++ = '\0'; + } else { + arg.av_len = strlen(p2); + } + + /* unescape */ + port = arg.av_len; + for (p1 = p2; port > 0;) { + if (*p1 == '\\') { + unsigned int c; + if (port < 3) + return FALSE; + sscanf(p1 + 1, "%02x", &c); + *p2++ = c; + port -= 3; + p1 += 3; + } else { + *p2++ = *p1++; + port--; + } + } + arg.av_len = p2 - arg.av_val; + + ret = RTMP_SetOpt(r, &opt, &arg, error); + if (!ret) + return ret; + } + + if (!r->Link.tcUrl.av_len) { + r->Link.tcUrl.av_val = url; + if (r->Link.app.av_len) { + AVal *domain = &r->Link.domain; + if (domain->av_len == 0 && r->Link.app.av_val < url + len) { + /* if app is part of original url, just use it */ + r->Link.tcUrl.av_len = r->Link.app.av_len + (r->Link.app.av_val - url); + } else { + if (domain->av_len == 0) { + domain = &r->Link.hostname; + } + if (r->Link.port == 0) { + r->Link.port = 1935; + } + len = domain->av_len + r->Link.app.av_len + sizeof("rtmpte://:65535/"); + 
r->Link.tcUrl.av_val = malloc(len); + r->Link.tcUrl.av_len = snprintf(r->Link.tcUrl.av_val, len, + "%s://%.*s:%d/%.*s", + PILI_RTMPProtocolStringsLower[r->Link.protocol], + domain->av_len, domain->av_val, + r->Link.port, + r->Link.app.av_len, r->Link.app.av_val); + r->Link.lFlags |= RTMP_LF_FTCU; + } + } else { + r->Link.tcUrl.av_len = strlen(url); + } + } + +#ifdef CRYPTO + if ((r->Link.lFlags & RTMP_LF_SWFV) && r->Link.swfUrl.av_len) + RTMP_HashSWF(r->Link.swfUrl.av_val, &r->Link.SWFSize, + (unsigned char *)r->Link.SWFHash, r->Link.swfAge); +#endif + + if (r->Link.port == 0) { + if (r->Link.protocol & RTMP_FEATURE_SSL) + r->Link.port = 443; + else if (r->Link.protocol & RTMP_FEATURE_HTTP) + r->Link.port = 80; + else + r->Link.port = 1935; + } + return TRUE; +} + +static int add_addr_info(PILI_RTMP *r, struct addrinfo *hints, struct addrinfo **ai, AVal *host, int port, RTMPError *error) { + char *hostname; + int ret = TRUE; + if (host->av_val[host->av_len]) { + hostname = malloc(host->av_len + 1); + memcpy(hostname, host->av_val, host->av_len); + hostname[host->av_len] = '\0'; + } else { + hostname = host->av_val; + } + + struct addrinfo *cur_ai; + char portstr[10]; + snprintf(portstr, sizeof(portstr), "%d", port); + int addrret = getaddrinfo(hostname, portstr, hints, ai); + if (addrret != 0) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "Problem accessing the DNS. addr: "); + strcat(msg, hostname); + + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorAccessDNSFailed; + strcpy(error->message, msg); + RTMP_Log(RTMP_LOGERROR, "Problem accessing the DNS. 
(addr: %s)", hostname); + ret = FALSE; + } + + if (hostname != host->av_val) { + free(hostname); + } + return ret; +} + +int PILI_RTMP_Connect0(PILI_RTMP *r, struct addrinfo *ai, unsigned short port, RTMPError *error) { + r->m_sb.sb_timedout = FALSE; + r->m_pausing = 0; + r->m_fDuration = 0.0; + + r->m_sb.sb_socket = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol); + if (ai->ai_family == AF_INET6) { + struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)ai->ai_addr; + in6->sin6_port = htons(port); + } + if (r->m_sb.sb_socket != -1) { + if (connect(r->m_sb.sb_socket, ai->ai_addr, ai->ai_addrlen) < 0) { + int err = GetSockError(); + + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "Failed to connect socket. "); + strcat(msg, strerror(err)); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorFailedToConnectSocket; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "%s, failed to connect socket. %d (%s)", + __FUNCTION__, err, strerror(err)); + + PILI_RTMP_Close(r, NULL); + return FALSE; + } + + if (r->Link.socksport) { + RTMP_Log(RTMP_LOGDEBUG, "%s ... SOCKS negotiation", __FUNCTION__); + if (!SocksNegotiate(r, error)) { + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "Socks negotiation failed."); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorSocksNegotiationFailed; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "%s, SOCKS negotiation failed.", __FUNCTION__); + PILI_RTMP_Close(r, NULL); + return FALSE; + } + } + } else { + int err = GetSockError(); + + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "Failed to create socket. "); + strcat(msg, strerror(err)); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorFailedToCreateSocket; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "%s, failed to create socket. 
Error: %d (%s)", __FUNCTION__, err, strerror(err)); + + return FALSE; + } + + /* set receive timeout */ + { + SET_RCVTIMEO(tv, r->Link.timeout); + if (setsockopt(r->m_sb.sb_socket, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(tv))) { + RTMP_Log(RTMP_LOGERROR, "%s, Setting socket receive timeout to %ds failed!", + __FUNCTION__, r->Link.timeout); + } + } + + /* set send timeout*/ + { + struct timeval timeout; + timeout.tv_sec = r->Link.send_timeout; + timeout.tv_usec = 0; + + if (setsockopt(r->m_sb.sb_socket, SOL_SOCKET, SO_SNDTIMEO, (char *)&timeout, sizeof(timeout))) { + RTMP_Log(RTMP_LOGERROR, "%s, Setting socket send timeout to %ds failed!", + __FUNCTION__, r->Link.timeout); + } + } + + /* ignore sigpipe */ + int kOne = 1; +#ifdef __linux + setsockopt(r->m_sb.sb_socket, SOL_SOCKET, MSG_NOSIGNAL, &kOne, sizeof(kOne)); +#else + setsockopt(r->m_sb.sb_socket, SOL_SOCKET, SO_NOSIGPIPE, &kOne, sizeof(kOne)); +#endif + if (r->m_tcp_nodelay) { + int on = 1; + setsockopt(r->m_sb.sb_socket, IPPROTO_TCP, TCP_NODELAY, (char *)&on, sizeof(on)); + } + + return TRUE; +} + +int PILI_RTMP_Connect1(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error) { + if (r->Link.protocol & RTMP_FEATURE_SSL) { +#if defined(CRYPTO) && !defined(NO_SSL) + TLS_client(RTMP_TLS_ctx, r->m_sb.sb_ssl); + TLS_setfd(r->m_sb.sb_ssl, r->m_sb.sb_socket); + if (TLS_connect(r->m_sb.sb_ssl) < 0) { + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "TLS_Connect failed."); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorTLSConnectFailed; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "%s, TLS_Connect failed", __FUNCTION__); + PILI_RTMP_Close(r, NULL); + return FALSE; + } +#else + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "No SSL/TLS support."); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorNoSSLOrTLSSupport; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "%s, no SSL/TLS support", __FUNCTION__); + 
PILI_RTMP_Close(r, NULL); + return FALSE; + +#endif + } + if (r->Link.protocol & RTMP_FEATURE_HTTP) { + r->m_msgCounter = 1; + r->m_clientID.av_val = NULL; + r->m_clientID.av_len = 0; + HTTP_Post(r, RTMPT_OPEN, "", 1); + HTTP_read(r, 1); + r->m_msgCounter = 0; + } + RTMP_Log(RTMP_LOGDEBUG, "%s, ... connected, handshaking", __FUNCTION__); + if (!HandShake(r, TRUE, error)) { + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "Handshake failed."); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorHandshakeFailed; + strcpy(error->message, msg); + } + + RTMP_Log(RTMP_LOGERROR, "%s, handshake failed.", __FUNCTION__); + PILI_RTMP_Close(r, NULL); + return FALSE; + } + RTMP_Log(RTMP_LOGDEBUG, "%s, handshaked", __FUNCTION__); + + if (!SendConnectPacket(r, cp, error)) { + if (error) { + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "PILI_RTMP connect failed."); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorRTMPConnectFailed; + strcpy(error->message, msg); + } + RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP connect failed.", __FUNCTION__); + PILI_RTMP_Close(r, NULL); + return FALSE; + } + return TRUE; +} + +int PILI_RTMP_Connect(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error) { + struct PILI_CONNECTION_TIME conn_time; + if (!r->Link.hostname.av_len) + return FALSE; + + struct addrinfo hints = {0}, *ai, *cur_ai; + hints.ai_family = PF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + hints.ai_flags = AI_DEFAULT; + unsigned short port; + if (r->Link.socksport) { + port = r->Link.socksport; + /* Connect via SOCKS */ + if (!add_addr_info(r, &hints, &ai, &r->Link.sockshost, r->Link.socksport, error)) { + return FALSE; + } + } else { + port = r->Link.port; + /* Connect directly */ + if (!add_addr_info(r, &hints, &ai, &r->Link.hostname, r->Link.port, error)) { + return FALSE; + } + } + r->ip = 0; //useless for ipv6 + cur_ai = ai; + + int t1 = PILI_RTMP_GetTime(); + if (!PILI_RTMP_Connect0(r, cur_ai, port, error)) { + freeaddrinfo(ai); 
+ return FALSE; + } + conn_time.connect_time = PILI_RTMP_GetTime() - t1; + r->m_bSendCounter = TRUE; + + int t2 = PILI_RTMP_GetTime(); + int ret = PILI_RTMP_Connect1(r, cp, error); + conn_time.handshake_time = PILI_RTMP_GetTime() - t2; + + if (r->m_connCallback != NULL) { + r->m_connCallback(&conn_time, r->m_userData); + } + freeaddrinfo(ai); + return ret; +} + +//useless +static int + SocksNegotiate(PILI_RTMP *r, RTMPError *error) { + // unsigned long addr; + // struct sockaddr_in service; + // memset(&service, 0, sizeof(struct sockaddr_in)); + // + // add_addr_info(r, &service, &r->Link.hostname, r->Link.port, error); + // addr = htonl(service.sin_addr.s_addr); + // + // { + // char packet[] = { + // 4, 1, /* SOCKS 4, connect */ + // (r->Link.port >> 8) & 0xFF, + // (r->Link.port) & 0xFF, + // (char)(addr >> 24) & 0xFF, (char)(addr >> 16) & 0xFF, + // (char)(addr >> 8) & 0xFF, (char)addr & 0xFF, + // 0 + // }; /* NULL terminate */ + // + // WriteN(r, packet, sizeof packet, error); + // + // if (ReadN(r, packet, 8) != 8) + // return FALSE; + // + // if (packet[0] == 0 && packet[1] == 90) + // { + // return TRUE; + // } + // else + // { + // RTMP_Log(RTMP_LOGERROR, "%s, SOCKS returned error code %d", packet[1]); + // return FALSE; + // } + // } + return 0; +} + +int PILI_RTMP_ConnectStream(PILI_RTMP *r, int seekTime, RTMPError *error) { + PILI_RTMPPacket packet = {0}; + + /* seekTime was already set by SetupStream / SetupURL. + * This is only needed by ReconnectStream. + */ + if (seekTime > 0) + r->Link.seekTime = seekTime; + + r->m_mediaChannel = 0; + + while (!r->m_bPlaying && PILI_RTMP_IsConnected(r) && PILI_RTMP_ReadPacket(r, &packet)) { + if (RTMPPacket_IsReady(&packet)) { + if (!packet.m_nBodySize) + continue; + if ((packet.m_packetType == RTMP_PACKET_TYPE_AUDIO) || + (packet.m_packetType == RTMP_PACKET_TYPE_VIDEO) || + (packet.m_packetType == RTMP_PACKET_TYPE_INFO)) { + RTMP_Log(RTMP_LOGWARNING, "Received FLV packet before play()! 
Ignoring."); + PILI_RTMPPacket_Free(&packet); + continue; + } + + PILI_RTMP_ClientPacket(r, &packet); + PILI_RTMPPacket_Free(&packet); + } + } + + if (!r->m_bPlaying && error) { + char *msg = "PILI_RTMP connect stream failed."; + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorRTMPConnectStreamFailed; + strcpy(error->message, msg); + } + + return r->m_bPlaying; +} + +int PILI_RTMP_ReconnectStream(PILI_RTMP *r, int seekTime, RTMPError *error) { + PILI_RTMP_DeleteStream(r, error); + + PILI_RTMP_SendCreateStream(r, error); + + return PILI_RTMP_ConnectStream(r, seekTime, error); +} + +int PILI_RTMP_ToggleStream(PILI_RTMP *r, RTMPError *error) { + int res; + + if (!r->m_pausing) { + res = PILI_RTMP_SendPause(r, TRUE, r->m_pauseStamp, error); + if (!res) + return res; + + r->m_pausing = 1; + sleep(1); + } + res = PILI_RTMP_SendPause(r, FALSE, r->m_pauseStamp, error); + r->m_pausing = 3; + return res; +} + +void PILI_RTMP_DeleteStream(PILI_RTMP *r, RTMPError *error) { + if (r->m_stream_id < 0) + return; + + r->m_bPlaying = FALSE; + + SendDeleteStream(r, r->m_stream_id, error); + r->m_stream_id = -1; +} + +int PILI_RTMP_GetNextMediaPacket(PILI_RTMP *r, PILI_RTMPPacket *packet) { + int bHasMediaPacket = 0; + + while (!bHasMediaPacket && PILI_RTMP_IsConnected(r) && PILI_RTMP_ReadPacket(r, packet)) { + if (!RTMPPacket_IsReady(packet)) { + continue; + } + + bHasMediaPacket = PILI_RTMP_ClientPacket(r, packet); + + if (!bHasMediaPacket) { + PILI_RTMPPacket_Free(packet); + } else if (r->m_pausing == 3) { + if (packet->m_nTimeStamp <= r->m_mediaStamp) { + bHasMediaPacket = 0; +#ifdef _DEBUG + RTMP_Log(RTMP_LOGDEBUG, + "Skipped type: %02X, size: %d, TS: %d ms, abs TS: %d, pause: %d ms", + packet->m_packetType, packet->m_nBodySize, + packet->m_nTimeStamp, packet->m_hasAbsTimestamp, + r->m_mediaStamp); +#endif + continue; + } + r->m_pausing = 0; + } + } + + if (bHasMediaPacket) + r->m_bPlaying = TRUE; + else if (r->m_sb.sb_timedout && !r->m_pausing) + r->m_pauseStamp = 
r->m_channelTimestamp[r->m_mediaChannel]; + + return bHasMediaPacket; +} + +int PILI_RTMP_ClientPacket(PILI_RTMP *r, PILI_RTMPPacket *packet) { + int bHasMediaPacket = 0; + switch (packet->m_packetType) { + case 0x01: + /* chunk size */ + HandleChangeChunkSize(r, packet); + break; + + case 0x03: + /* bytes read report */ + RTMP_Log(RTMP_LOGDEBUG, "%s, received: bytes read report", __FUNCTION__); + break; + + case 0x04: + /* ctrl */ + HandleCtrl(r, packet); + break; + + case 0x05: + /* server bw */ + HandleServerBW(r, packet); + break; + + case 0x06: + /* client bw */ + HandleClientBW(r, packet); + break; + + case 0x08: + /* audio data */ + /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: audio %lu bytes", __FUNCTION__, packet.m_nBodySize); */ + HandleAudio(r, packet); + bHasMediaPacket = 1; + if (!r->m_mediaChannel) + r->m_mediaChannel = packet->m_nChannel; + if (!r->m_pausing) + r->m_mediaStamp = packet->m_nTimeStamp; + break; + + case 0x09: + /* video data */ + /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: video %lu bytes", __FUNCTION__, packet.m_nBodySize); */ + HandleVideo(r, packet); + bHasMediaPacket = 1; + if (!r->m_mediaChannel) + r->m_mediaChannel = packet->m_nChannel; + if (!r->m_pausing) + r->m_mediaStamp = packet->m_nTimeStamp; + break; + + case 0x0F: /* flex stream send */ + RTMP_Log(RTMP_LOGDEBUG, + "%s, flex stream send, size %lu bytes, not supported, ignoring", + __FUNCTION__, packet->m_nBodySize); + break; + + case 0x10: /* flex shared object */ + RTMP_Log(RTMP_LOGDEBUG, + "%s, flex shared object, size %lu bytes, not supported, ignoring", + __FUNCTION__, packet->m_nBodySize); + break; + + case 0x11: /* flex message */ + { + RTMP_Log(RTMP_LOGDEBUG, + "%s, flex message, size %lu bytes, not fully supported", + __FUNCTION__, packet->m_nBodySize); +/*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ + +/* some DEBUG code */ +#if 0 + RTMP_LIB_AMFObject obj; + int nRes = obj.Decode(packet.m_body+1, packet.m_nBodySize-1); + if(nRes < 0) { + RTMP_Log(RTMP_LOGERROR, 
"%s, error decoding AMF3 packet", __FUNCTION__); + /*return; */ + } + + obj.Dump(); +#endif + + if (HandleInvoke(r, packet->m_body + 1, packet->m_nBodySize - 1) == 1) + bHasMediaPacket = 2; + break; + } + case 0x12: + /* metadata (notify) */ + RTMP_Log(RTMP_LOGDEBUG, "%s, received: notify %lu bytes", __FUNCTION__, + packet->m_nBodySize); + if (HandleMetadata(r, packet->m_body, packet->m_nBodySize)) + bHasMediaPacket = 1; + break; + + case 0x13: + RTMP_Log(RTMP_LOGDEBUG, "%s, shared object, not supported, ignoring", + __FUNCTION__); + break; + + case 0x14: + /* invoke */ + RTMP_Log(RTMP_LOGDEBUG, "%s, received: invoke %lu bytes", __FUNCTION__, + packet->m_nBodySize); + /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ + + if (HandleInvoke(r, packet->m_body, packet->m_nBodySize) == 1) + bHasMediaPacket = 2; + break; + + case 0x16: { + /* go through FLV packets and handle metadata packets */ + unsigned int pos = 0; + uint32_t nTimeStamp = packet->m_nTimeStamp; + + while (pos + 11 < packet->m_nBodySize) { + uint32_t dataSize = AMF_DecodeInt24(packet->m_body + pos + 1); /* size without header (11) and prevTagSize (4) */ + + if (pos + 11 + dataSize + 4 > packet->m_nBodySize) { + RTMP_Log(RTMP_LOGWARNING, "Stream corrupt?!"); + break; + } + if (packet->m_body[pos] == 0x12) { + HandleMetadata(r, packet->m_body + pos + 11, dataSize); + } else if (packet->m_body[pos] == 8 || packet->m_body[pos] == 9) { + nTimeStamp = AMF_DecodeInt24(packet->m_body + pos + 4); + nTimeStamp |= (packet->m_body[pos + 7] << 24); + } + pos += (11 + dataSize + 4); + } + if (!r->m_pausing) + r->m_mediaStamp = nTimeStamp; + + /* FLV tag(s) */ + /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: FLV tag(s) %lu bytes", __FUNCTION__, packet.m_nBodySize); */ + bHasMediaPacket = 1; + break; + } + default: + RTMP_Log(RTMP_LOGDEBUG, "%s, unknown packet type received: 0x%02x", __FUNCTION__, + packet->m_packetType); +#ifdef _DEBUG + RTMP_LogHex(RTMP_LOGDEBUG, packet->m_body, packet->m_nBodySize); +#endif + } + + 
return bHasMediaPacket; +} + +#ifdef _DEBUG +extern FILE *netstackdump; +extern FILE *netstackdump_read; +#endif + +static int + ReadN(PILI_RTMP *r, char *buffer, int n) { + int nOriginalSize = n; + int avail; + char *ptr; + + r->m_sb.sb_timedout = FALSE; + +#ifdef _DEBUG + memset(buffer, 0, n); +#endif + + ptr = buffer; + while (n > 0) { + int nBytes = 0, nRead; + if (r->Link.protocol & RTMP_FEATURE_HTTP) { + while (!r->m_resplen) { + if (r->m_sb.sb_size < 144) { + if (!r->m_unackd) + HTTP_Post(r, RTMPT_IDLE, "", 1); + if (PILI_RTMPSockBuf_Fill(&r->m_sb) < 1) { + if (!r->m_sb.sb_timedout) { + PILI_RTMP_Close(r, NULL); + } else { + RTMPError error = {0}; + + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "PILI_RTMP socket timeout"); + RTMPError_Alloc(&error, strlen(msg)); + error.code = RTMPErrorSocketTimeout; + strcpy(error.message, msg); + + PILI_RTMP_Close(r, &error); + + RTMPError_Free(&error); + } + + return 0; + } + } + HTTP_read(r, 0); + } + if (r->m_resplen && !r->m_sb.sb_size) + PILI_RTMPSockBuf_Fill(&r->m_sb); + avail = r->m_sb.sb_size; + if (avail > r->m_resplen) + avail = r->m_resplen; + } else { + avail = r->m_sb.sb_size; + if (avail == 0) { + if (PILI_RTMPSockBuf_Fill(&r->m_sb) < 1) { + if (!r->m_sb.sb_timedout) { + PILI_RTMP_Close(r, NULL); + } else { + RTMPError error = {0}; + + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "PILI_RTMP socket timeout"); + RTMPError_Alloc(&error, strlen(msg)); + error.code = RTMPErrorSocketTimeout; + strcpy(error.message, msg); + + PILI_RTMP_Close(r, &error); + + RTMPError_Free(&error); + } + + return 0; + } + avail = r->m_sb.sb_size; + } + } + nRead = ((n < avail) ? 
n : avail); + if (nRead > 0) { + memcpy(ptr, r->m_sb.sb_start, nRead); + r->m_sb.sb_start += nRead; + r->m_sb.sb_size -= nRead; + nBytes = nRead; + r->m_nBytesIn += nRead; + if (r->m_bSendCounter && r->m_nBytesIn > r->m_nBytesInSent + r->m_nClientBW / 2) + SendBytesReceived(r, NULL); + } +/*RTMP_Log(RTMP_LOGDEBUG, "%s: %d bytes\n", __FUNCTION__, nBytes); */ +#ifdef _DEBUG + fwrite(ptr, 1, nBytes, netstackdump_read); +#endif + + if (nBytes == 0) { + RTMP_Log(RTMP_LOGDEBUG, "%s, PILI_RTMP socket closed by peer", __FUNCTION__); + /*goto again; */ + RTMPError error = {0}; + + char msg[100]; + memset(msg, 0, 100); + strcat(msg, "PILI_RTMP socket closed by peer. "); + RTMPError_Alloc(&error, strlen(msg)); + error.code = RTMPErrorSocketClosedByPeer; + strcpy(error.message, msg); + + PILI_RTMP_Close(r, &error); + + RTMPError_Free(&error); + break; + } + + if (r->Link.protocol & RTMP_FEATURE_HTTP) + r->m_resplen -= nBytes; + +#ifdef CRYPTO + if (r->Link.rc4keyIn) { + RC4_encrypt(r->Link.rc4keyIn, nBytes, ptr); + } +#endif + + n -= nBytes; + ptr += nBytes; + } + + return nOriginalSize - n; +} + +static int + WriteN(PILI_RTMP *r, const char *buffer, int n, RTMPError *error) { + const char *ptr = buffer; +#ifdef CRYPTO + char *encrypted = 0; + char buf[RTMP_BUFFER_CACHE_SIZE]; + + if (r->Link.rc4keyOut) { + if (n > sizeof(buf)) + encrypted = (char *)malloc(n); + else + encrypted = (char *)buf; + ptr = encrypted; + RC4_encrypt2(r->Link.rc4keyOut, n, buffer, ptr); + } +#endif + + while (n > 0) { + int nBytes; + + if (r->Link.protocol & RTMP_FEATURE_HTTP) + nBytes = HTTP_Post(r, RTMPT_SEND, ptr, n); + else + nBytes = PILI_RTMPSockBuf_Send(&r->m_sb, ptr, n); + /*RTMP_Log(RTMP_LOGDEBUG, "%s: %d\n", __FUNCTION__, nBytes); */ + + if (nBytes < 0) { + int sockerr = GetSockError(); + RTMP_Log(RTMP_LOGERROR, "%s, PILI_RTMP send error %d, %s, (%d bytes)", __FUNCTION__, + sockerr, strerror(sockerr), n); + + if (sockerr == EINTR && !PILI_RTMP_ctrlC) + continue; + + if (error) { + char 
msg[100]; + memset(msg, 0, 100); + strcat(msg, "PILI_RTMP send error. socket error: "); + strcat(msg, strerror(sockerr)); + RTMPError_Alloc(error, strlen(msg)); + error->code = RTMPErrorSendFailed; + strcpy(error->message, msg); + } + + PILI_RTMP_Close(r, error); + + RTMPError_Free(error); + + n = 1; + break; + } + + if (nBytes == 0) + break; + + n -= nBytes; + ptr += nBytes; + } + +#ifdef CRYPTO + if (encrypted && encrypted != buf) + free(encrypted); +#endif + + return n == 0; +} + +#define SAVC(x) static const AVal av_##x = AVC(#x) + +SAVC(app); +SAVC(connect); +SAVC(flashVer); +SAVC(swfUrl); +SAVC(pageUrl); +SAVC(tcUrl); +SAVC(fpad); +SAVC(capabilities); +SAVC(audioCodecs); +SAVC(videoCodecs); +SAVC(videoFunction); +SAVC(objectEncoding); +SAVC(secureToken); +SAVC(secureTokenResponse); +SAVC(type); +SAVC(nonprivate); + +static int + SendConnectPacket(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[4096], *pend = pbuf + sizeof(pbuf); + char *enc; + + if (cp) + return PILI_RTMP_SendPacket(r, cp, TRUE, error); + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_connect); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_OBJECT; + + enc = AMF_EncodeNamedString(enc, pend, &av_app, &r->Link.app); + if (!enc) + return FALSE; + if (r->Link.protocol & RTMP_FEATURE_WRITE) { + enc = AMF_EncodeNamedString(enc, pend, &av_type, &av_nonprivate); + if (!enc) + return FALSE; + } + if (r->Link.flashVer.av_len) { + enc = AMF_EncodeNamedString(enc, pend, &av_flashVer, &r->Link.flashVer); + if (!enc) + return FALSE; + } + if (r->Link.swfUrl.av_len) { + enc = AMF_EncodeNamedString(enc, pend, &av_swfUrl, &r->Link.swfUrl); + if 
(!enc) + return FALSE; + } + if (r->Link.tcUrl.av_len) { + enc = AMF_EncodeNamedString(enc, pend, &av_tcUrl, &r->Link.tcUrl); + if (!enc) + return FALSE; + } + if (!(r->Link.protocol & RTMP_FEATURE_WRITE)) { + enc = AMF_EncodeNamedBoolean(enc, pend, &av_fpad, FALSE); + if (!enc) + return FALSE; + enc = AMF_EncodeNamedNumber(enc, pend, &av_capabilities, 15.0); + if (!enc) + return FALSE; + enc = AMF_EncodeNamedNumber(enc, pend, &av_audioCodecs, r->m_fAudioCodecs); + if (!enc) + return FALSE; + enc = AMF_EncodeNamedNumber(enc, pend, &av_videoCodecs, r->m_fVideoCodecs); + if (!enc) + return FALSE; + enc = AMF_EncodeNamedNumber(enc, pend, &av_videoFunction, 1.0); + if (!enc) + return FALSE; + if (r->Link.pageUrl.av_len) { + enc = AMF_EncodeNamedString(enc, pend, &av_pageUrl, &r->Link.pageUrl); + if (!enc) + return FALSE; + } + } + if (r->m_fEncoding != 0.0 || r->m_bSendEncoding) { /* AMF0, AMF3 not fully supported yet */ + enc = AMF_EncodeNamedNumber(enc, pend, &av_objectEncoding, r->m_fEncoding); + if (!enc) + return FALSE; + } + if (enc + 3 >= pend) + return FALSE; + *enc++ = 0; + *enc++ = 0; /* end of object - 0x00 0x00 0x09 */ + *enc++ = AMF_OBJECT_END; + + /* add auth string */ + if (r->Link.auth.av_len) { + enc = AMF_EncodeBoolean(enc, pend, r->Link.lFlags & RTMP_LF_AUTH); + if (!enc) + return FALSE; + enc = AMF_EncodeString(enc, pend, &r->Link.auth); + if (!enc) + return FALSE; + } + if (r->Link.extras.o_num) { + int i; + for (i = 0; i < r->Link.extras.o_num; i++) { + enc = AMFProp_Encode(&r->Link.extras.o_props[i], enc, pend); + if (!enc) + return FALSE; + } + } + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +#if 0 /* unused */ +SAVC(bgHasStream); + +static int +SendBGHasStream(PILI_RTMP *r, double dId, AVal *playpath) +{ + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = 
RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_bgHasStream); + enc = AMF_EncodeNumber(enc, pend, dId); + *enc++ = AMF_NULL; + + enc = AMF_EncodeString(enc, pend, playpath); + if (enc == NULL) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE); +} +#endif + +SAVC(createStream); + +int PILI_RTMP_SendCreateStream(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_createStream); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; /* NULL */ + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +SAVC(FCSubscribe); + +static int + SendFCSubscribe(PILI_RTMP *r, AVal *subscribepath, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[512], *pend = pbuf + sizeof(pbuf); + char *enc; + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + RTMP_Log(RTMP_LOGDEBUG, "FCSubscribe: %s", subscribepath->av_val); + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_FCSubscribe); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = 
AMF_EncodeString(enc, pend, subscribepath); + + if (!enc) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +SAVC(releaseStream); + +static int + SendReleaseStream(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_releaseStream); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeString(enc, pend, &r->Link.playpath); + if (!enc) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(FCPublish); + +static int + SendFCPublish(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_FCPublish); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeString(enc, pend, &r->Link.playpath); + if (!enc) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(FCUnpublish); + +static int + SendFCUnpublish(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 
0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_FCUnpublish); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeString(enc, pend, &r->Link.playpath); + if (!enc) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(publish); +SAVC(live); +SAVC(record); + +static int + SendPublish(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x04; /* source channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = r->m_stream_id; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_publish); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeString(enc, pend, &r->Link.playpath); + if (!enc) + return FALSE; + + /* FIXME: should we choose live based on Link.lFlags & RTMP_LF_LIVE? 
*/ + enc = AMF_EncodeString(enc, pend, &av_live); + if (!enc) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +SAVC(deleteStream); + +static int + SendDeleteStream(PILI_RTMP *r, double dStreamId, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_deleteStream); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeNumber(enc, pend, dStreamId); + + packet.m_nBodySize = enc - packet.m_body; + + /* no response expected */ + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(pause); + +int PILI_RTMP_SendPause(PILI_RTMP *r, int DoPause, int iTime, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x08; /* video channel */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* invoke */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_pause); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeBoolean(enc, pend, DoPause); + enc = AMF_EncodeNumber(enc, pend, (double)iTime); + + packet.m_nBodySize = enc - packet.m_body; + + RTMP_Log(RTMP_LOGDEBUG, "%s, %d, pauseTime=%d", __FUNCTION__, DoPause, iTime); + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +int PILI_RTMP_Pause(PILI_RTMP *r, int DoPause, RTMPError *error) { + if (DoPause) + 
r->m_pauseStamp = r->m_channelTimestamp[r->m_mediaChannel]; + return PILI_RTMP_SendPause(r, DoPause, r->m_pauseStamp, error); +} + +SAVC(seek); + +int PILI_RTMP_SendSeek(PILI_RTMP *r, int iTime, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x08; /* video channel */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* invoke */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_seek); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + enc = AMF_EncodeNumber(enc, pend, (double)iTime); + + packet.m_nBodySize = enc - packet.m_body; + + r->m_read.flags |= RTMP_READ_SEEKING; + r->m_read.nResumeTS = 0; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +int PILI_RTMP_SendServerBW(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + + packet.m_nChannel = 0x02; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x05; /* Server BW */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + packet.m_nBodySize = 4; + + AMF_EncodeInt32(packet.m_body, pend, r->m_nServerBW); + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +int PILI_RTMP_SendClientBW(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + + packet.m_nChannel = 0x02; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x06; /* Client BW */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + packet.m_nBodySize = 5; + + 
AMF_EncodeInt32(packet.m_body, pend, r->m_nClientBW); + packet.m_body[4] = r->m_nClientBW2; + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +static int + SendBytesReceived(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + + packet.m_nChannel = 0x02; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x03; /* bytes in */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + packet.m_nBodySize = 4; + + AMF_EncodeInt32(packet.m_body, pend, r->m_nBytesIn); /* hard coded for now */ + r->m_nBytesInSent = r->m_nBytesIn; + + /*RTMP_Log(RTMP_LOGDEBUG, "Send bytes report. 0x%x (%d bytes)", (unsigned int)m_nBytesIn, m_nBytesIn); */ + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(_checkbw); + +static int + SendCheckBW(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; /* RTMP_GetTime(); */ + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av__checkbw); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + + packet.m_nBodySize = enc - packet.m_body; + + /* triggers _onbwcheck and eventually results in _onbwdone */ + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(_result); + +static int + SendCheckBWResult(PILI_RTMP *r, double txn, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + 
packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0x16 * r->m_nBWCheckCounter; /* temp inc value. till we figure it out. */ + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av__result); + enc = AMF_EncodeNumber(enc, pend, txn); + *enc++ = AMF_NULL; + enc = AMF_EncodeNumber(enc, pend, (double)r->m_nBWCheckCounter++); + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(ping); +SAVC(pong); + +static int + SendPong(PILI_RTMP *r, double txn, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0x16 * r->m_nBWCheckCounter; /* temp inc value. till we figure it out. */ + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_pong); + enc = AMF_EncodeNumber(enc, pend, txn); + *enc++ = AMF_NULL; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +SAVC(play); + +static int + SendPlay(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x08; /* we make 8 our stream channel */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = r->m_stream_id; /*0x01000000; */ + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_play); + enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes); + *enc++ = AMF_NULL; + + 
RTMP_Log(RTMP_LOGDEBUG, "%s, seekTime=%d, stopTime=%d, sending play: %s", + __FUNCTION__, r->Link.seekTime, r->Link.stopTime, + r->Link.playpath.av_val); + enc = AMF_EncodeString(enc, pend, &r->Link.playpath); + if (!enc) + return FALSE; + + /* Optional parameters start and len. + * + * start: -2, -1, 0, positive number + * -2: looks for a live stream, then a recorded stream, + * if not found any open a live stream + * -1: plays a live stream + * >=0: plays a recorded streams from 'start' milliseconds + */ + if (r->Link.lFlags & RTMP_LF_LIVE) + enc = AMF_EncodeNumber(enc, pend, -1000.0); + else { + if (r->Link.seekTime > 0.0) + enc = AMF_EncodeNumber(enc, pend, r->Link.seekTime); /* resume from here */ + else + enc = AMF_EncodeNumber(enc, pend, 0.0); /*-2000.0);*/ /* recorded as default, -2000.0 is not reliable since that freezes the player if the stream is not found */ + } + if (!enc) + return FALSE; + + /* len: -1, 0, positive number + * -1: plays live or recorded stream to the end (default) + * 0: plays a frame 'start' ms away from the beginning + * >0: plays a live or recoded stream for 'len' milliseconds + */ + /*enc += EncodeNumber(enc, -1.0); */ /* len */ + if (r->Link.stopTime) { + enc = AMF_EncodeNumber(enc, pend, r->Link.stopTime - r->Link.seekTime); + if (!enc) + return FALSE; + } + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +SAVC(set_playlist); +SAVC(0); + +static int + SendPlaylist(PILI_RTMP *r, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x08; /* we make 8 our stream channel */ + packet.m_headerType = RTMP_PACKET_SIZE_LARGE; + packet.m_packetType = 0x14; /* INVOKE */ + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = r->m_stream_id; /*0x01000000; */ + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, 
&av_set_playlist); + enc = AMF_EncodeNumber(enc, pend, 0); + *enc++ = AMF_NULL; + *enc++ = AMF_ECMA_ARRAY; + *enc++ = 0; + *enc++ = 0; + *enc++ = 0; + *enc++ = AMF_OBJECT; + enc = AMF_EncodeNamedString(enc, pend, &av_0, &r->Link.playpath); + if (!enc) + return FALSE; + if (enc + 3 >= pend) + return FALSE; + *enc++ = 0; + *enc++ = 0; + *enc++ = AMF_OBJECT_END; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, TRUE, error); +} + +static int + SendSecureTokenResponse(PILI_RTMP *r, AVal *resp, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[1024], *pend = pbuf + sizeof(pbuf); + char *enc; + + packet.m_nChannel = 0x03; /* control channel (invoke) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x14; + packet.m_nTimeStamp = 0; + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + enc = packet.m_body; + enc = AMF_EncodeString(enc, pend, &av_secureTokenResponse); + enc = AMF_EncodeNumber(enc, pend, 0.0); + *enc++ = AMF_NULL; + enc = AMF_EncodeString(enc, pend, resp); + if (!enc) + return FALSE; + + packet.m_nBodySize = enc - packet.m_body; + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +/* +from http://jira.red5.org/confluence/display/docs/Ping: + +Ping is the most mysterious message in PILI_RTMP and till now we haven't fully interpreted it yet. In summary, Ping message is used as a special command that are exchanged between client and server. This page aims to document all known Ping messages. Expect the list to grow. + +The type of Ping packet is 0x4 and contains two mandatory parameters and two optional parameters. The first parameter is the type of Ping and in short integer. The second parameter is the target of the ping. 
As Ping is always sent in Channel 2 (control channel) and the target object in PILI_RTMP header is always 0 which means the Connection object, it's necessary to put an extra parameter to indicate the exact target object the Ping is sent to. The second parameter takes this responsibility. The value has the same meaning as the target object field in PILI_RTMP header. (The second value could also be used as other purposes, like RTT Ping/Pong. It is used as the timestamp.) The third and fourth parameters are optional and could be looked upon as the parameter of the Ping packet. Below is an unexhausted list of Ping messages. + + * type 0: Clear the stream. No third and fourth parameters. The second parameter could be 0. After the connection is established, a Ping 0,0 will be sent from server to client. The message will also be sent to client on the start of Play and in response of a Seek or Pause/Resume request. This Ping tells client to re-calibrate the clock with the timestamp of the next packet server sends. + * type 1: Tell the stream to clear the playing buffer. + * type 3: Buffer time of the client. The third parameter is the buffer time in millisecond. + * type 4: Reset a stream. Used together with type 0 in the case of VOD. Often sent before type 0. + * type 6: Ping the client from server. The second parameter is the current time. + * type 7: Pong reply from client. The second parameter is the time the server sent with his ping request. + * type 26: SWFVerification request + * type 27: SWFVerification response +*/ +int PILI_RTMP_SendCtrl(PILI_RTMP *r, short nType, unsigned int nObject, unsigned int nTime, RTMPError *error) { + PILI_RTMPPacket packet; + char pbuf[256], *pend = pbuf + sizeof(pbuf); + int nSize; + char *buf; + + RTMP_Log(RTMP_LOGDEBUG, "sending ctrl. 
type: 0x%04x", (unsigned short)nType); + + packet.m_nChannel = 0x02; /* control channel (ping) */ + packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM; + packet.m_packetType = 0x04; /* ctrl */ + packet.m_nTimeStamp = 0; /* RTMP_GetTime(); */ + packet.m_nInfoField2 = 0; + packet.m_hasAbsTimestamp = 0; + packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; + + switch (nType) { + case 0x03: + nSize = 10; + break; /* buffer time */ + case 0x1A: + nSize = 3; + break; /* SWF verify request */ + case 0x1B: + nSize = 44; + break; /* SWF verify response */ + default: + nSize = 6; + break; + } + + packet.m_nBodySize = nSize; + + buf = packet.m_body; + buf = AMF_EncodeInt16(buf, pend, nType); + + if (nType == 0x1B) { +#ifdef CRYPTO + memcpy(buf, r->Link.SWFVerificationResponse, 42); + RTMP_Log(RTMP_LOGDEBUG, "Sending SWFVerification response: "); + RTMP_LogHex(RTMP_LOGDEBUG, (uint8_t *)packet.m_body, packet.m_nBodySize); +#endif + } else if (nType == 0x1A) { + *buf = nObject & 0xff; + } else { + if (nSize > 2) + buf = AMF_EncodeInt32(buf, pend, nObject); + + if (nSize > 6) + buf = AMF_EncodeInt32(buf, pend, nTime); + } + + return PILI_RTMP_SendPacket(r, &packet, FALSE, error); +} + +static void + AV_erase(PILI_RTMP_METHOD *vals, int *num, int i, int freeit) { + if (freeit) + free(vals[i].name.av_val); + (*num)--; + for (; i < *num; i++) { + vals[i] = vals[i + 1]; + } + vals[i].name.av_val = NULL; + vals[i].name.av_len = 0; + vals[i].num = 0; +} + +void PILI_RTMP_DropRequest(PILI_RTMP *r, int i, int freeit) { + AV_erase(r->m_methodCalls, &r->m_numCalls, i, freeit); +} + +static void + AV_queue(PILI_RTMP_METHOD **vals, int *num, AVal *av, int txn) { + char *tmp; + if (!(*num & 0x0f)) + *vals = realloc(*vals, (*num + 16) * sizeof(PILI_RTMP_METHOD)); + tmp = malloc(av->av_len + 1); + memcpy(tmp, av->av_val, av->av_len); + tmp[av->av_len] = '\0'; + (*vals)[*num].num = txn; + (*vals)[*num].name.av_len = av->av_len; + (*vals)[(*num)++].name.av_val = tmp; +} + +static void + 
AV_clear(PILI_RTMP_METHOD *vals, int num) { + int i; + for (i = 0; i < num; i++) + free(vals[i].name.av_val); + free(vals); +} + +SAVC(onBWDone); +SAVC(onFCSubscribe); +SAVC(onFCUnsubscribe); +SAVC(_onbwcheck); +SAVC(_onbwdone); +SAVC(_error); +SAVC(close); +SAVC(code); +SAVC(level); +SAVC(onStatus); +SAVC(playlist_ready); +static const AVal av_NetStream_Failed = AVC("NetStream.Failed"); +static const AVal av_NetStream_Play_Failed = AVC("NetStream.Play.Failed"); +static const AVal av_NetStream_Play_StreamNotFound = + AVC("NetStream.Play.StreamNotFound"); +static const AVal av_NetConnection_Connect_InvalidApp = + AVC("NetConnection.Connect.InvalidApp"); +static const AVal av_NetStream_Play_Start = AVC("NetStream.Play.Start"); +static const AVal av_NetStream_Play_Complete = AVC("NetStream.Play.Complete"); +static const AVal av_NetStream_Play_Stop = AVC("NetStream.Play.Stop"); +static const AVal av_NetStream_Seek_Notify = AVC("NetStream.Seek.Notify"); +static const AVal av_NetStream_Pause_Notify = AVC("NetStream.Pause.Notify"); +static const AVal av_NetStream_Play_UnpublishNotify = + AVC("NetStream.Play.UnpublishNotify"); +static const AVal av_NetStream_Publish_Start = AVC("NetStream.Publish.Start"); + +/* Returns 0 for OK/Failed/error, 1 for 'Stop or Complete' */ +static int + HandleInvoke(PILI_RTMP *r, const char *body, unsigned int nBodySize) { + AMFObject obj; + AVal method; + int txn; + int ret = 0, nRes; + if (body[0] != 0x02) /* make sure it is a string method name we start with */ + { + RTMP_Log(RTMP_LOGWARNING, "%s, Sanity failed. 
no string method in invoke packet", + __FUNCTION__); + return 0; + } + + nRes = AMF_Decode(&obj, body, nBodySize, FALSE); + if (nRes < 0) { + RTMP_Log(RTMP_LOGERROR, "%s, error decoding invoke packet", __FUNCTION__); + return 0; + } + + AMF_Dump(&obj); + AMFProp_GetString(AMF_GetProp(&obj, NULL, 0), &method); + txn = (int)AMFProp_GetNumber(AMF_GetProp(&obj, NULL, 1)); + RTMP_Log(RTMP_LOGDEBUG, "%s, server invoking <%s>", __FUNCTION__, method.av_val); + + RTMPError error = {0}; + + if (AVMATCH(&method, &av__result)) { + AVal methodInvoked = {0}; + int i; + + for (i = 0; i < r->m_numCalls; i++) { + if (r->m_methodCalls[i].num == txn) { + methodInvoked = r->m_methodCalls[i].name; + AV_erase(r->m_methodCalls, &r->m_numCalls, i, FALSE); + break; + } + } + if (!methodInvoked.av_val) { + RTMP_Log(RTMP_LOGDEBUG, "%s, received result id %d without matching request", + __FUNCTION__, txn); + goto leave; + } + + RTMP_Log(RTMP_LOGDEBUG, "%s, received result for method call <%s>", __FUNCTION__, + methodInvoked.av_val); + + if (AVMATCH(&methodInvoked, &av_connect)) { + if (r->Link.token.av_len) { + AMFObjectProperty p; + if (PILI_RTMP_FindFirstMatchingProperty(&obj, &av_secureToken, &p)) { + DecodeTEA(&r->Link.token, &p.p_vu.p_aval); + SendSecureTokenResponse(r, &p.p_vu.p_aval, &error); + } + } + if (r->Link.protocol & RTMP_FEATURE_WRITE) { + SendReleaseStream(r, &error); + SendFCPublish(r, &error); + } else { + PILI_RTMP_SendServerBW(r, &error); + PILI_RTMP_SendCtrl(r, 3, 0, 300, &error); + } + PILI_RTMP_SendCreateStream(r, &error); + + if (!(r->Link.protocol & RTMP_FEATURE_WRITE)) { + /* Send the FCSubscribe if live stream or if subscribepath is set */ + if (r->Link.subscribepath.av_len) + SendFCSubscribe(r, &r->Link.subscribepath, &error); + else if (r->Link.lFlags & RTMP_LF_LIVE) + SendFCSubscribe(r, &r->Link.playpath, &error); + } + } else if (AVMATCH(&methodInvoked, &av_createStream)) { + r->m_stream_id = (int)AMFProp_GetNumber(AMF_GetProp(&obj, NULL, 3)); + + if 
(r->Link.protocol & RTMP_FEATURE_WRITE) { + SendPublish(r, &error); + } else { + if (r->Link.lFlags & RTMP_LF_PLST) + SendPlaylist(r, &error); + SendPlay(r, &error); + PILI_RTMP_SendCtrl(r, 3, r->m_stream_id, r->m_nBufferMS, &error); + } + } else if (AVMATCH(&methodInvoked, &av_play) || + AVMATCH(&methodInvoked, &av_publish)) { + r->m_bPlaying = TRUE; + } + free(methodInvoked.av_val); + } else if (AVMATCH(&method, &av_onBWDone)) { + if (!r->m_nBWCheckCounter) + SendCheckBW(r, &error); + } else if (AVMATCH(&method, &av_onFCSubscribe)) { + /* SendOnFCSubscribe(); */ + } else if (AVMATCH(&method, &av_onFCUnsubscribe)) { + PILI_RTMP_Close(r, NULL); + ret = 1; + } else if (AVMATCH(&method, &av_ping)) { + SendPong(r, txn, &error); + } else if (AVMATCH(&method, &av__onbwcheck)) { + SendCheckBWResult(r, txn, &error); + } else if (AVMATCH(&method, &av__onbwdone)) { + int i; + for (i = 0; i < r->m_numCalls; i++) + if (AVMATCH(&r->m_methodCalls[i].name, &av__checkbw)) { + AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE); + break; + } + } else if (AVMATCH(&method, &av__error)) { + RTMP_Log(RTMP_LOGERROR, "PILI_RTMP server sent error"); + } else if (AVMATCH(&method, &av_close)) { + RTMP_Log(RTMP_LOGERROR, "PILI_RTMP server requested close"); + RTMPError error = {0}; + char *msg = "PILI_RTMP server requested close."; + RTMPError_Alloc(&error, strlen(msg)); + error.code = RTMPErrorServerRequestedClose; + strcpy(error.message, msg); + + PILI_RTMP_Close(r, &error); + + RTMPError_Free(&error); + } else if (AVMATCH(&method, &av_onStatus)) { + AMFObject obj2; + AVal code, level; + AMFProp_GetObject(AMF_GetProp(&obj, NULL, 3), &obj2); + AMFProp_GetString(AMF_GetProp(&obj2, &av_code, -1), &code); + AMFProp_GetString(AMF_GetProp(&obj2, &av_level, -1), &level); + + RTMP_Log(RTMP_LOGDEBUG, "%s, onStatus: %s", __FUNCTION__, code.av_val); + if (AVMATCH(&code, &av_NetStream_Failed) || AVMATCH(&code, &av_NetStream_Play_Failed) || AVMATCH(&code, &av_NetStream_Play_StreamNotFound) || 
AVMATCH(&code, &av_NetConnection_Connect_InvalidApp)) { + r->m_stream_id = -1; + + int err_code; + char msg[100]; + memset(msg, 0, 100); + + if (AVMATCH(&code, &av_NetStream_Failed)) { + err_code = RTMPErrorNetStreamFailed; + strcpy(msg, "NetStream failed."); + } else if (AVMATCH(&code, &av_NetStream_Play_Failed)) { + err_code = RTMPErrorNetStreamPlayFailed; + strcpy(msg, "NetStream play failed."); + } else if (AVMATCH(&code, &av_NetStream_Play_StreamNotFound)) { + err_code = RTMPErrorNetStreamPlayStreamNotFound; + strcpy(msg, "NetStream play stream not found."); + } else if (AVMATCH(&code, &av_NetConnection_Connect_InvalidApp)) { + err_code = RTMPErrorNetConnectionConnectInvalidApp; + strcpy(msg, "NetConnection connect invalip app."); + } else { + err_code = RTMPErrorUnknow; + strcpy(msg, "Unknow error."); + } + + RTMPError_Alloc(&error, strlen(msg)); + error.code = err_code; + strcpy(error.message, msg); + + PILI_RTMP_Close(r, &error); + + RTMPError_Free(&error); + + RTMP_Log(RTMP_LOGERROR, "Closing connection: %s", code.av_val); + } + + else if (AVMATCH(&code, &av_NetStream_Play_Start)) { + int i; + r->m_bPlaying = TRUE; + for (i = 0; i < r->m_numCalls; i++) { + if (AVMATCH(&r->m_methodCalls[i].name, &av_play)) { + AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE); + break; + } + } + } + + else if (AVMATCH(&code, &av_NetStream_Publish_Start)) { + int i; + r->m_bPlaying = TRUE; + for (i = 0; i < r->m_numCalls; i++) { + if (AVMATCH(&r->m_methodCalls[i].name, &av_publish)) { + AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE); + break; + } + } + } + + /* Return 1 if this is a Play.Complete or Play.Stop */ + else if (AVMATCH(&code, &av_NetStream_Play_Complete) || AVMATCH(&code, &av_NetStream_Play_Stop) || AVMATCH(&code, &av_NetStream_Play_UnpublishNotify)) { + PILI_RTMP_Close(r, NULL); + ret = 1; + } + + else if (AVMATCH(&code, &av_NetStream_Seek_Notify)) { + r->m_read.flags &= ~RTMP_READ_SEEKING; + } + + else if (AVMATCH(&code, &av_NetStream_Pause_Notify)) { + 
if (r->m_pausing == 1 || r->m_pausing == 2) { + PILI_RTMP_SendPause(r, FALSE, r->m_pauseStamp, &error); + r->m_pausing = 3; + } + } + } else if (AVMATCH(&method, &av_playlist_ready)) { + int i; + for (i = 0; i < r->m_numCalls; i++) { + if (AVMATCH(&r->m_methodCalls[i].name, &av_set_playlist)) { + AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE); + break; + } + } + } else { + } +leave: + AMF_Reset(&obj); + return ret; +} + +int PILI_RTMP_FindFirstMatchingProperty(AMFObject *obj, const AVal *name, + AMFObjectProperty *p) { + int n; + /* this is a small object search to locate the "duration" property */ + for (n = 0; n < obj->o_num; n++) { + AMFObjectProperty *prop = AMF_GetProp(obj, NULL, n); + + if (AVMATCH(&prop->p_name, name)) { + *p = *prop; + return TRUE; + } + + if (prop->p_type == AMF_OBJECT) { + if (PILI_RTMP_FindFirstMatchingProperty(&prop->p_vu.p_object, name, p)) + return TRUE; + } + } + return FALSE; +} + +/* Like above, but only check if name is a prefix of property */ +int PILI_RTMP_FindPrefixProperty(AMFObject *obj, const AVal *name, + AMFObjectProperty *p) { + int n; + for (n = 0; n < obj->o_num; n++) { + AMFObjectProperty *prop = AMF_GetProp(obj, NULL, n); + + if (prop->p_name.av_len > name->av_len && + !memcmp(prop->p_name.av_val, name->av_val, name->av_len)) { + *p = *prop; + return TRUE; + } + + if (prop->p_type == AMF_OBJECT) { + if (PILI_RTMP_FindPrefixProperty(&prop->p_vu.p_object, name, p)) + return TRUE; + } + } + return FALSE; +} + +static int + DumpMetaData(AMFObject *obj) { + AMFObjectProperty *prop; + int n; + for (n = 0; n < obj->o_num; n++) { + prop = AMF_GetProp(obj, NULL, n); + if (prop->p_type != AMF_OBJECT) { + char str[256] = ""; + switch (prop->p_type) { + case AMF_NUMBER: + snprintf(str, 255, "%.2f", prop->p_vu.p_number); + break; + case AMF_BOOLEAN: + snprintf(str, 255, "%s", + prop->p_vu.p_number != 0. ? 
"TRUE" : "FALSE"); + break; + case AMF_STRING: + snprintf(str, 255, "%.*s", prop->p_vu.p_aval.av_len, + prop->p_vu.p_aval.av_val); + break; + case AMF_DATE: + snprintf(str, 255, "timestamp:%.2f", prop->p_vu.p_number); + break; + default: + snprintf(str, 255, "INVALID TYPE 0x%02x", + (unsigned char)prop->p_type); + } + if (prop->p_name.av_len) { + /* chomp */ + if (strlen(str) >= 1 && str[strlen(str) - 1] == '\n') + str[strlen(str) - 1] = '\0'; + RTMP_Log(RTMP_LOGINFO, " %-22.*s%s", prop->p_name.av_len, + prop->p_name.av_val, str); + } + } else { + if (prop->p_name.av_len) + RTMP_Log(RTMP_LOGINFO, "%.*s:", prop->p_name.av_len, prop->p_name.av_val); + DumpMetaData(&prop->p_vu.p_object); + } + } + return FALSE; +} + +SAVC(onMetaData); +SAVC(duration); +SAVC(video); +SAVC(audio); + +static int + HandleMetadata(PILI_RTMP *r, char *body, unsigned int len) { + /* allright we get some info here, so parse it and print it */ + /* also keep duration or filesize to make a nice progress bar */ + + AMFObject obj; + AVal metastring; + int ret = FALSE; + + int nRes = AMF_Decode(&obj, body, len, FALSE); + if (nRes < 0) { + RTMP_Log(RTMP_LOGERROR, "%s, error decoding meta data packet", __FUNCTION__); + return FALSE; + } + + AMF_Dump(&obj); + AMFProp_GetString(AMF_GetProp(&obj, NULL, 0), &metastring); + + if (AVMATCH(&metastring, &av_onMetaData)) { + AMFObjectProperty prop; + /* Show metadata */ + RTMP_Log(RTMP_LOGINFO, "Metadata:"); + DumpMetaData(&obj); + if (PILI_RTMP_FindFirstMatchingProperty(&obj, &av_duration, &prop)) { + r->m_fDuration = prop.p_vu.p_number; + /*RTMP_Log(RTMP_LOGDEBUG, "Set duration: %.2f", m_fDuration); */ + } + /* Search for audio or video tags */ + if (PILI_RTMP_FindPrefixProperty(&obj, &av_video, &prop)) + r->m_read.dataType |= 1; + if (PILI_RTMP_FindPrefixProperty(&obj, &av_audio, &prop)) + r->m_read.dataType |= 4; + ret = TRUE; + } + AMF_Reset(&obj); + return ret; +} + +static void + HandleChangeChunkSize(PILI_RTMP *r, const PILI_RTMPPacket *packet) { + 
if (packet->m_nBodySize >= 4) { + r->m_inChunkSize = AMF_DecodeInt32(packet->m_body); + RTMP_Log(RTMP_LOGDEBUG, "%s, received: chunk size change to %d", __FUNCTION__, + r->m_inChunkSize); + } +} + +static void + HandleAudio(PILI_RTMP *r, const PILI_RTMPPacket *packet) { +} + +static void + HandleVideo(PILI_RTMP *r, const PILI_RTMPPacket *packet) { +} + +static void + HandleCtrl(PILI_RTMP *r, const PILI_RTMPPacket *packet) { + short nType = -1; + unsigned int tmp; + if (packet->m_body && packet->m_nBodySize >= 2) + nType = AMF_DecodeInt16(packet->m_body); + RTMP_Log(RTMP_LOGDEBUG, "%s, received ctrl. type: %d, len: %d", __FUNCTION__, nType, + packet->m_nBodySize); + /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ + + if (packet->m_nBodySize >= 6) { + switch (nType) { + case 0: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream Begin %d", __FUNCTION__, tmp); + break; + + case 1: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream EOF %d", __FUNCTION__, tmp); + if (r->m_pausing == 1) + r->m_pausing = 2; + break; + + case 2: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream Dry %d", __FUNCTION__, tmp); + break; + + case 4: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream IsRecorded %d", __FUNCTION__, tmp); + break; + + case 6: /* server ping. reply with pong. */ + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Ping %d", __FUNCTION__, tmp); + PILI_RTMP_SendCtrl(r, 0x07, tmp, 0, NULL); + break; + + /* FMS 3.5 servers send the following two controls to let the client + * know when the server has sent a complete buffer. I.e., when the + * server has sent an amount of data equal to m_nBufferMS in duration. + * The server meters its output so that data arrives at the client + * in realtime and no faster. 
+ * + * The rtmpdump program tries to set m_nBufferMS as large as + * possible, to force the server to send data as fast as possible. + * In practice, the server appears to cap this at about 1 hour's + * worth of data. After the server has sent a complete buffer, and + * sends this BufferEmpty message, it will wait until the play + * duration of that buffer has passed before sending a new buffer. + * The BufferReady message will be sent when the new buffer starts. + * (There is no BufferReady message for the very first buffer; + * presumably the Stream Begin message is sufficient for that + * purpose.) + * + * If the network speed is much faster than the data bitrate, then + * there may be long delays between the end of one buffer and the + * start of the next. + * + * Since usually the network allows data to be sent at + * faster than realtime, and rtmpdump wants to download the data + * as fast as possible, we use this RTMP_LF_BUFX hack: when we + * get the BufferEmpty message, we send a Pause followed by an + * Unpause. This causes the server to send the next buffer immediately + * instead of waiting for the full duration to elapse. (That's + * also the purpose of the ToggleStream function, which rtmpdump + * calls if we get a read timeout.) + * + * Media player apps don't need this hack since they are just + * going to play the data in realtime anyway. It also doesn't work + * for live streams since they obviously can only be sent in + * realtime. And it's all moot if the network speed is actually + * slower than the media bitrate. 
+ */ + case 31: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream BufferEmpty %d", __FUNCTION__, tmp); + if (!(r->Link.lFlags & RTMP_LF_BUFX)) + break; + if (!r->m_pausing) { + r->m_pauseStamp = r->m_channelTimestamp[r->m_mediaChannel]; + PILI_RTMP_SendPause(r, TRUE, r->m_pauseStamp, NULL); + r->m_pausing = 1; + } else if (r->m_pausing == 2) { + PILI_RTMP_SendPause(r, FALSE, r->m_pauseStamp, NULL); + r->m_pausing = 3; + } + break; + + case 32: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream BufferReady %d", __FUNCTION__, tmp); + break; + + default: + tmp = AMF_DecodeInt32(packet->m_body + 2); + RTMP_Log(RTMP_LOGDEBUG, "%s, Stream xx %d", __FUNCTION__, tmp); + break; + } + } + + if (nType == 0x1A) { + RTMP_Log(RTMP_LOGDEBUG, "%s, SWFVerification ping received: ", __FUNCTION__); +#ifdef CRYPTO + /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ + + /* respond with HMAC SHA256 of decompressed SWF, key is the 30byte player key, also the last 30 bytes of the server handshake are applied */ + if (r->Link.SWFSize) { + PILI_RTMP_SendCtrl(r, 0x1B, 0, 0); + } else { + RTMP_Log(RTMP_LOGERROR, + "%s: Ignoring SWFVerification request, use --swfVfy!", + __FUNCTION__); + } +#else + RTMP_Log(RTMP_LOGERROR, + "%s: Ignoring SWFVerification request, no CRYPTO support!", + __FUNCTION__); +#endif + } +} + +static void + HandleServerBW(PILI_RTMP *r, const PILI_RTMPPacket *packet) { + r->m_nServerBW = AMF_DecodeInt32(packet->m_body); + RTMP_Log(RTMP_LOGDEBUG, "%s: server BW = %d", __FUNCTION__, r->m_nServerBW); +} + +static void + HandleClientBW(PILI_RTMP *r, const PILI_RTMPPacket *packet) { + r->m_nClientBW = AMF_DecodeInt32(packet->m_body); + if (packet->m_nBodySize > 4) + r->m_nClientBW2 = packet->m_body[4]; + else + r->m_nClientBW2 = -1; + RTMP_Log(RTMP_LOGDEBUG, "%s: client BW = %d %d", __FUNCTION__, r->m_nClientBW, + r->m_nClientBW2); +} + +static int + DecodeInt32LE(const char *data) { + unsigned char *c = 
(unsigned char *)data; + unsigned int val; + + val = (c[3] << 24) | (c[2] << 16) | (c[1] << 8) | c[0]; + return val; +} + +static int + EncodeInt32LE(char *output, int nVal) { + output[0] = nVal; + nVal >>= 8; + output[1] = nVal; + nVal >>= 8; + output[2] = nVal; + nVal >>= 8; + output[3] = nVal; + return 4; +} + +int PILI_RTMP_ReadPacket(PILI_RTMP *r, PILI_RTMPPacket *packet) { + uint8_t hbuf[RTMP_MAX_HEADER_SIZE] = {0}; + char *header = (char *)hbuf; + int nSize, hSize, nToRead, nChunk; + int didAlloc = FALSE; + + RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d", __FUNCTION__, r->m_sb.sb_socket); + + if (ReadN(r, (char *)hbuf, 1) == 0) { + RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet header", __FUNCTION__); + return FALSE; + } + + packet->m_headerType = (hbuf[0] & 0xc0) >> 6; + packet->m_nChannel = (hbuf[0] & 0x3f); + header++; + if (packet->m_nChannel == 0) { + if (ReadN(r, (char *)&hbuf[1], 1) != 1) { + RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet header 2nd byte", + __FUNCTION__); + return FALSE; + } + packet->m_nChannel = hbuf[1]; + packet->m_nChannel += 64; + header++; + } else if (packet->m_nChannel == 1) { + int tmp; + if (ReadN(r, (char *)&hbuf[1], 2) != 2) { + RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet header 3nd byte", + __FUNCTION__); + return FALSE; + } + tmp = (hbuf[2] << 8) + hbuf[1]; + packet->m_nChannel = tmp + 64; + RTMP_Log(RTMP_LOGDEBUG, "%s, m_nChannel: %0x", __FUNCTION__, packet->m_nChannel); + header += 2; + } + + nSize = packetSize[packet->m_headerType]; + + if (nSize == RTMP_LARGE_HEADER_SIZE) /* if we get a full header the timestamp is absolute */ + packet->m_hasAbsTimestamp = TRUE; + + else if (nSize < RTMP_LARGE_HEADER_SIZE) { /* using values from the last message of this channel */ + if (r->m_vecChannelsIn[packet->m_nChannel]) + memcpy(packet, r->m_vecChannelsIn[packet->m_nChannel], + sizeof(PILI_RTMPPacket)); + } + + nSize--; + + if (nSize > 0 && ReadN(r, header, nSize) != nSize) { + 
RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet header. type: %x", + __FUNCTION__, (unsigned int)hbuf[0]); + return FALSE; + } + + hSize = nSize + (header - (char *)hbuf); + + if (nSize >= 3) { + packet->m_nTimeStamp = AMF_DecodeInt24(header); + + /*RTMP_Log(RTMP_LOGDEBUG, "%s, reading PILI_RTMP packet chunk on channel %x, headersz %i, timestamp %i, abs timestamp %i", __FUNCTION__, packet.m_nChannel, nSize, packet.m_nTimeStamp, packet.m_hasAbsTimestamp); */ + + if (nSize >= 6) { + packet->m_nBodySize = AMF_DecodeInt24(header + 3); + packet->m_nBytesRead = 0; + PILI_RTMPPacket_Free(packet); + + if (nSize > 6) { + packet->m_packetType = header[6]; + + if (nSize == 11) + packet->m_nInfoField2 = DecodeInt32LE(header + 7); + } + } + if (packet->m_nTimeStamp == 0xffffff) { + if (ReadN(r, header + nSize, 4) != 4) { + RTMP_Log(RTMP_LOGERROR, "%s, failed to read extended timestamp", + __FUNCTION__); + return FALSE; + } + packet->m_nTimeStamp = AMF_DecodeInt32(header + nSize); + hSize += 4; + } + } + + RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)hbuf, hSize); + + if (packet->m_nBodySize > 0 && packet->m_body == NULL) { + if (!PILI_RTMPPacket_Alloc(packet, packet->m_nBodySize)) { + RTMP_Log(RTMP_LOGDEBUG, "%s, failed to allocate packet", __FUNCTION__); + return FALSE; + } + didAlloc = TRUE; + packet->m_headerType = (hbuf[0] & 0xc0) >> 6; + } + + nToRead = packet->m_nBodySize - packet->m_nBytesRead; + nChunk = r->m_inChunkSize; + if (nToRead < nChunk) + nChunk = nToRead; + + /* Does the caller want the raw chunk? */ + if (packet->m_chunk) { + packet->m_chunk->c_headerSize = hSize; + memcpy(packet->m_chunk->c_header, hbuf, hSize); + packet->m_chunk->c_chunk = packet->m_body + packet->m_nBytesRead; + packet->m_chunk->c_chunkSize = nChunk; + } + + if (ReadN(r, packet->m_body + packet->m_nBytesRead, nChunk) != nChunk) { + RTMP_Log(RTMP_LOGERROR, "%s, failed to read PILI_RTMP packet body. 
len: %lu", + __FUNCTION__, packet->m_nBodySize); + return FALSE; + } + + RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)packet->m_body + packet->m_nBytesRead, nChunk); + + packet->m_nBytesRead += nChunk; + + /* keep the packet as ref for other packets on this channel */ + if (!r->m_vecChannelsIn[packet->m_nChannel]) + r->m_vecChannelsIn[packet->m_nChannel] = malloc(sizeof(PILI_RTMPPacket)); + memcpy(r->m_vecChannelsIn[packet->m_nChannel], packet, sizeof(PILI_RTMPPacket)); + + if (RTMPPacket_IsReady(packet)) { + /* make packet's timestamp absolute */ + if (!packet->m_hasAbsTimestamp) + packet->m_nTimeStamp += r->m_channelTimestamp[packet->m_nChannel]; /* timestamps seem to be always relative!! */ + + r->m_channelTimestamp[packet->m_nChannel] = packet->m_nTimeStamp; + + /* reset the data from the stored packet. we keep the header since we may use it later if a new packet for this channel */ + /* arrives and requests to re-use some info (small packet header) */ + r->m_vecChannelsIn[packet->m_nChannel]->m_body = NULL; + r->m_vecChannelsIn[packet->m_nChannel]->m_nBytesRead = 0; + r->m_vecChannelsIn[packet->m_nChannel]->m_hasAbsTimestamp = FALSE; /* can only be false if we reuse header */ + } else { + packet->m_body = NULL; /* so it won't be erased on free */ + } + + return TRUE; +} + +#ifndef CRYPTO +static int + HandShake(PILI_RTMP *r, int FP9HandShake, RTMPError *error) { + int i; + uint32_t uptime, suptime; + int bMatch; + char type; + char clientbuf[RTMP_SIG_SIZE + 1], *clientsig = clientbuf + 1; + char serversig[RTMP_SIG_SIZE]; + + clientbuf[0] = 0x03; /* not encrypted */ + + uptime = htonl(PILI_RTMP_GetTime()); + memcpy(clientsig, &uptime, 4); + + memset(&clientsig[4], 0, 4); + +#ifdef _DEBUG + for (i = 8; i < RTMP_SIG_SIZE; i++) + clientsig[i] = 0xff; +#else + for (i = 8; i < RTMP_SIG_SIZE; i++) + clientsig[i] = (char)(rand() % 256); +#endif + + if (!WriteN(r, clientbuf, RTMP_SIG_SIZE + 1, error)) + return FALSE; + + if (ReadN(r, &type, 1) != 1) /* 0x03 or 0x06 */ 
+ return FALSE; + + RTMP_Log(RTMP_LOGDEBUG, "%s: Type Answer : %02X", __FUNCTION__, type); + + if (type != clientbuf[0]) + RTMP_Log(RTMP_LOGWARNING, "%s: Type mismatch: client sent %d, server answered %d", + __FUNCTION__, clientbuf[0], type); + + if (ReadN(r, serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) + return FALSE; + + /* decode server response */ + + memcpy(&suptime, serversig, 4); + suptime = ntohl(suptime); + + RTMP_Log(RTMP_LOGDEBUG, "%s: Server Uptime : %d", __FUNCTION__, suptime); + RTMP_Log(RTMP_LOGDEBUG, "%s: FMS Version : %d.%d.%d.%d", __FUNCTION__, + serversig[4], serversig[5], serversig[6], serversig[7]); + + /* 2nd part of handshake */ + if (!WriteN(r, serversig, RTMP_SIG_SIZE, error)) + return FALSE; + + if (ReadN(r, serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) + return FALSE; + + bMatch = (memcmp(serversig, clientsig, RTMP_SIG_SIZE) == 0); + if (!bMatch) { + RTMP_Log(RTMP_LOGWARNING, "%s, client signature does not match!", __FUNCTION__); + } + return TRUE; +} + +static int + SHandShake(PILI_RTMP *r, RTMPError *error) { + int i; + char serverbuf[RTMP_SIG_SIZE + 1], *serversig = serverbuf + 1; + char clientsig[RTMP_SIG_SIZE]; + uint32_t uptime; + int bMatch; + + if (ReadN(r, serverbuf, 1) != 1) /* 0x03 or 0x06 */ + return FALSE; + + RTMP_Log(RTMP_LOGDEBUG, "%s: Type Request : %02X", __FUNCTION__, serverbuf[0]); + + if (serverbuf[0] != 3) { + RTMP_Log(RTMP_LOGERROR, "%s: Type unknown: client sent %02X", + __FUNCTION__, serverbuf[0]); + return FALSE; + } + + uptime = htonl(PILI_RTMP_GetTime()); + memcpy(serversig, &uptime, 4); + + memset(&serversig[4], 0, 4); +#ifdef _DEBUG + for (i = 8; i < RTMP_SIG_SIZE; i++) + serversig[i] = 0xff; +#else + for (i = 8; i < RTMP_SIG_SIZE; i++) + serversig[i] = (char)(rand() % 256); +#endif + + if (!WriteN(r, serverbuf, RTMP_SIG_SIZE + 1, error)) + return FALSE; + + if (ReadN(r, clientsig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE) + return FALSE; + + /* decode client response */ + + memcpy(&uptime, clientsig, 4); + uptime = 
/* ---- tail of SHandShake (server side of the simple handshake) ----
 * The enclosing function opens before this view; the fragment below is kept
 * unchanged.  It logs the client's uptime/version, echoes the client
 * signature back (second half of the handshake) and compares signatures;
 * a mismatch is logged as a warning but is deliberately non-fatal. */
    ntohl(uptime);

    RTMP_Log(RTMP_LOGDEBUG, "%s: Client Uptime : %d", __FUNCTION__, uptime);
    RTMP_Log(RTMP_LOGDEBUG, "%s: Player Version: %d.%d.%d.%d", __FUNCTION__,
             clientsig[4], clientsig[5], clientsig[6], clientsig[7]);

    /* 2nd part of handshake */
    if (!WriteN(r, clientsig, RTMP_SIG_SIZE, error))
        return FALSE;

    if (ReadN(r, clientsig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE)
        return FALSE;

    bMatch = (memcmp(serversig, clientsig, RTMP_SIG_SIZE) == 0);
    if (!bMatch) {
        RTMP_Log(RTMP_LOGWARNING, "%s, client signature does not match!", __FUNCTION__);
    }
    return TRUE;
}
#endif

/* Send one pre-assembled chunk.
 * When the chunk carries a payload, the header is temporarily copied into the
 * bytes immediately preceding the payload so header+payload go out in a
 * single WriteN() call; the overwritten bytes are saved in hbuf and restored
 * afterwards.  Returns the WriteN() result (FALSE on socket failure). */
int PILI_RTMP_SendChunk(PILI_RTMP *r, PILI_RTMPChunk *chunk, RTMPError *error) {
    int wrote;
    char hbuf[RTMP_MAX_HEADER_SIZE];

    RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d, size=%d", __FUNCTION__, r->m_sb.sb_socket,
             chunk->c_chunkSize);
    RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)chunk->c_header, chunk->c_headerSize);
    if (chunk->c_chunkSize) {
        char *ptr = chunk->c_chunk - chunk->c_headerSize;
        RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)chunk->c_chunk, chunk->c_chunkSize);
        /* save header bytes we're about to overwrite */
        memcpy(hbuf, ptr, chunk->c_headerSize);
        memcpy(ptr, chunk->c_header, chunk->c_headerSize);
        wrote = WriteN(r, ptr, chunk->c_headerSize + chunk->c_chunkSize, error);
        memcpy(ptr, hbuf, chunk->c_headerSize);
    } else
        wrote = WriteN(r, chunk->c_header, chunk->c_headerSize, error);
    return wrote;
}

/* Serialise and send one RTMP message.
 * - Downgrades the chunk header type (0..3) where the previously sent packet
 *   on the same chunk stream (m_vecChannelsOut) shares size/type/timestamp.
 * - Encodes the basic header (1-3 bytes depending on chunk-stream id), the
 *   24-bit timestamp field (0xFFFFFF + 4-byte extended timestamp when the
 *   delta overflows), body size, packet type and little-endian stream id,
 *   as dictated by the header type.
 * - Splits the body into m_outChunkSize-sized chunks, each continuation
 *   prefixed with a type-3 (0xC0) basic header; over RTMPT all chunks are
 *   gathered into one malloc'd buffer and sent as a single HTTP request.
 * - For invoke (0x14) packets, optionally queues the method name and
 *   transaction id so the matching _result can be dispatched later.
 * - Caches a copy of the packet per channel for future header compression.
 * Returns TRUE on success, FALSE on sanity/alloc/socket failure. */
int PILI_RTMP_SendPacket(PILI_RTMP *r, PILI_RTMPPacket *packet, int queue, RTMPError *error) {
    const PILI_RTMPPacket *prevPacket = r->m_vecChannelsOut[packet->m_nChannel];
    uint32_t last = 0;
    int nSize;
    int hSize, cSize;
    char *header, *hptr, *hend, hbuf[RTMP_MAX_HEADER_SIZE], c;
    uint32_t t;
    char *buffer, *tbuf = NULL, *toff = NULL;
    int nChunkSize;
    int tlen;

    if (prevPacket && packet->m_headerType != RTMP_PACKET_SIZE_LARGE) {
        /* compress a bit by using the prev packet's attributes */
        if (prevPacket->m_nBodySize == packet->m_nBodySize && prevPacket->m_packetType == packet->m_packetType && packet->m_headerType == RTMP_PACKET_SIZE_MEDIUM)
            packet->m_headerType = RTMP_PACKET_SIZE_SMALL;

        if (prevPacket->m_nTimeStamp == packet->m_nTimeStamp && packet->m_headerType == RTMP_PACKET_SIZE_SMALL)
            packet->m_headerType = RTMP_PACKET_SIZE_MINIMUM;
        last = prevPacket->m_nTimeStamp;
    }

    if (packet->m_headerType > 3) /* sanity */
    {
        if (error) {
            char *msg = "Sanity failed.";
            RTMPError_Alloc(error, strlen(msg));
            error->code = RTMPErrorSanityFailed;
            strcpy(error->message, msg);
        }

        RTMP_Log(RTMP_LOGERROR, "sanity failed!! trying to send header of type: 0x%02x.",
                 (unsigned char)packet->m_headerType);

        return FALSE;
    }

    nSize = packetSize[packet->m_headerType];
    hSize = nSize;
    cSize = 0;
    /* unsigned delta vs. previous packet on this channel; for a type-0
     * (LARGE) header `last` stays 0 so t is the absolute timestamp */
    t = packet->m_nTimeStamp - last;

    if (packet->m_body) {
        header = packet->m_body - nSize;
        hend = packet->m_body;
    } else {
        header = hbuf + 6;
        hend = hbuf + sizeof(hbuf);
    }

    /* chunk-stream ids >= 64 need 1 or 2 extra basic-header bytes */
    if (packet->m_nChannel > 319)
        cSize = 2;
    else if (packet->m_nChannel > 63)
        cSize = 1;
    if (cSize) {
        header -= cSize;
        hSize += cSize;
    }

    /* reserve room for the 4-byte extended timestamp */
    if (nSize > 1 && t >= 0xffffff) {
        header -= 4;
        hSize += 4;
    }

    hptr = header;
    c = packet->m_headerType << 6;
    switch (cSize) {
        case 0:
            c |= packet->m_nChannel;
            break;
        case 1:
            break;
        case 2:
            c |= 1;
            break;
    }
    *hptr++ = c;
    if (cSize) {
        int tmp = packet->m_nChannel - 64;
        *hptr++ = tmp & 0xff;
        if (cSize == 2)
            *hptr++ = tmp >> 8;
    }

    if (nSize > 1) {
        /* 0xFFFFFF in the 24-bit field signals "see extended timestamp" */
        hptr = AMF_EncodeInt24(hptr, hend, t > 0xffffff ? 0xffffff : t);
    }

    if (nSize > 4) {
        hptr = AMF_EncodeInt24(hptr, hend, packet->m_nBodySize);
        *hptr++ = packet->m_packetType;
    }

    if (nSize > 8)
        hptr += EncodeInt32LE(hptr, packet->m_nInfoField2);

    if (nSize > 1 && t >= 0xffffff)
        hptr = AMF_EncodeInt32(hptr, hend, t);

    nSize = packet->m_nBodySize;
    buffer = packet->m_body;
    nChunkSize = r->m_outChunkSize;

    RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d, size=%d", __FUNCTION__, r->m_sb.sb_socket,
             nSize);
    /* send all chunks in one HTTP request */
    if (r->Link.protocol & RTMP_FEATURE_HTTP) {
        int chunks = (nSize + nChunkSize - 1) / nChunkSize;
        if (chunks > 1) {
            tlen = chunks * (cSize + 1) + nSize + hSize;
            tbuf = malloc(tlen);
            if (!tbuf)
                return FALSE;
            toff = tbuf;
        }
    }
    while (nSize + hSize) {
        int wrote;

        if (nSize < nChunkSize)
            nChunkSize = nSize;

        RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)header, hSize);
        RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)buffer, nChunkSize);
        if (tbuf) {
            memcpy(toff, header, nChunkSize + hSize);
            toff += nChunkSize + hSize;
        } else {
            wrote = WriteN(r, header, nChunkSize + hSize, error);
            if (!wrote)
                return FALSE;
        }
        nSize -= nChunkSize;
        buffer += nChunkSize;
        hSize = 0;

        if (nSize > 0) {
            /* build a type-3 continuation header in the bytes just before the
             * next chunk of the body.
             * NOTE(review): the extended timestamp is not repeated in
             * continuation headers here; this matches classic librtmp
             * behavior, but the RTMP spec expects it when the 24-bit field is
             * 0xFFFFFF — confirm against target servers before changing. */
            header = buffer - 1;
            hSize = 1;
            if (cSize) {
                header -= cSize;
                hSize += cSize;
            }
            *header = (0xc0 | c);
            if (cSize) {
                int tmp = packet->m_nChannel - 64;
                header[1] = tmp & 0xff;
                if (cSize == 2)
                    header[2] = tmp >> 8;
            }
        }
    }
    if (tbuf) {
        int wrote = WriteN(r, tbuf, toff - tbuf, error);
        free(tbuf);
        tbuf = NULL;
        if (!wrote)
            return FALSE;
    }

    /* we invoked a remote method */
    if (packet->m_packetType == 0x14) {
        AVal method;
        char *ptr;
        ptr = packet->m_body + 1;
        AMF_DecodeString(ptr, &method);
        RTMP_Log(RTMP_LOGDEBUG, "Invoking %s", method.av_val);
        /* keep it in call queue till result arrives */
        if (queue) {
            int txn;
            ptr += 3 + method.av_len;
            txn = (int)AMF_DecodeNumber(ptr);
            AV_queue(&r->m_methodCalls, &r->m_numCalls, &method, txn);
        }
    }

    /* remember this packet per channel so the next send can compress headers */
    if (!r->m_vecChannelsOut[packet->m_nChannel])
        r->m_vecChannelsOut[packet->m_nChannel] = malloc(sizeof(PILI_RTMPPacket));
    memcpy(r->m_vecChannelsOut[packet->m_nChannel], packet, sizeof(PILI_RTMPPacket));
    return TRUE;
}

/* Server entry point: just runs the simple handshake. */
int PILI_RTMP_Serve(PILI_RTMP *r, RTMPError *error) {
    return SHandShake(r, error);
}

/* Tear down the connection and reset all per-session state so the handle can
 * be reused.  Re-entry is guarded by m_is_closing.  While still connected it
 * sends FCUnpublish/deleteStream for an open publishing stream, posts the
 * RTMPT "close" command when a client id exists, closes the socket, and fires
 * the user error callback if an error was supplied.  It then frees the
 * per-channel packet caches, the pending method-call queue, the read buffer,
 * and the playpath/tcUrl strings owned by the link. */
void PILI_RTMP_Close(PILI_RTMP *r, RTMPError *error) {
    if (r->m_is_closing) {
        return;
    }
    r->m_is_closing = 1;
    int i;
    if (PILI_RTMP_IsConnected(r)) {
        if (r->m_stream_id > 0) {
            if ((r->Link.protocol & RTMP_FEATURE_WRITE))
                SendFCUnpublish(r, NULL);
            i = r->m_stream_id;
            r->m_stream_id = 0;
            SendDeleteStream(r, i, NULL);
        }
        if (r->m_clientID.av_val) {
            HTTP_Post(r, RTMPT_CLOSE, "", 1);
            free(r->m_clientID.av_val);
            r->m_clientID.av_val = NULL;
            r->m_clientID.av_len = 0;
        }
        PILI_RTMPSockBuf_Close(&r->m_sb);

        if (error && r->m_errorCallback) {
            r->m_errorCallback(error, r->m_userData);
        }
    }

    r->m_stream_id = -1;
    r->m_sb.sb_socket = -1;
    r->m_nBWCheckCounter = 0;
    r->m_nBytesIn = 0;
    r->m_nBytesInSent = 0;

    if (r->m_read.flags & RTMP_READ_HEADER) {
        free(r->m_read.buf);
        r->m_read.buf = NULL;
    }
    r->m_read.dataType = 0;
    r->m_read.flags = 0;
    r->m_read.status = 0;
    r->m_read.nResumeTS = 0;
    r->m_read.nIgnoredFrameCounter = 0;
    r->m_read.nIgnoredFlvFrameCounter = 0;

    r->m_write.m_nBytesRead = 0;
    PILI_RTMPPacket_Free(&r->m_write);

    /* release cached in/out packets for every chunk stream */
    for (i = 0; i < RTMP_CHANNELS; i++) {
        if (r->m_vecChannelsIn[i]) {
            PILI_RTMPPacket_Free(r->m_vecChannelsIn[i]);
            free(r->m_vecChannelsIn[i]);
            r->m_vecChannelsIn[i] = NULL;
        }
        if (r->m_vecChannelsOut[i]) {
            free(r->m_vecChannelsOut[i]);
            r->m_vecChannelsOut[i] = NULL;
        }
    }
    AV_clear(r->m_methodCalls, r->m_numCalls);
    r->m_methodCalls = NULL;
    r->m_numCalls = 0;
    r->m_numInvokes = 0;

    r->m_bPlaying = FALSE;
    r->m_sb.sb_size = 0;

    r->m_msgCounter = 0;
    r->m_resplen = 0;
    r->m_unackd = 0;

    free(r->Link.playpath0.av_val);
    r->Link.playpath0.av_val = NULL;

    /* RTMP_LF_FTCU means we own tcUrl; free it and clear the flag */
    if (r->Link.lFlags & RTMP_LF_FTCU) {
        free(r->Link.tcUrl.av_val);
        r->Link.tcUrl.av_val = NULL;
        r->Link.lFlags ^= RTMP_LF_FTCU;
    }

#ifdef CRYPTO
    if (r->Link.dh) {
        MDH_free(r->Link.dh);
        r->Link.dh = NULL;
    }
    if (r->Link.rc4keyIn) {
        RC4_free(r->Link.rc4keyIn);
        r->Link.rc4keyIn = NULL;
    }
    if (r->Link.rc4keyOut) {
        RC4_free(r->Link.rc4keyOut);
        r->Link.rc4keyOut = NULL;
    }
#endif
}

/* Refill the socket read buffer via recv() (or TLS_read).  Retries on EINTR
 * unless the user interrupted; flags sb_timedout and reports a 0-byte fill on
 * EWOULDBLOCK/EAGAIN.  Returns the recv()/TLS_read() result.
 * NOTE(review): a clean peer shutdown (recv() == 0) is folded into sb_size as
 * a 0-byte fill — confirm callers distinguish EOF from timeout. */
int PILI_RTMPSockBuf_Fill(PILI_RTMPSockBuf *sb) {
    int nBytes;

    if (!sb->sb_size)
        sb->sb_start = sb->sb_buf;

    while (1) {
        /* free space remaining after the unprocessed bytes */
        nBytes = sizeof(sb->sb_buf) - sb->sb_size - (sb->sb_start - sb->sb_buf);
#if defined(CRYPTO) && !defined(NO_SSL)
        if (sb->sb_ssl) {
            nBytes = TLS_read(sb->sb_ssl, sb->sb_start + sb->sb_size, nBytes);
        } else
#endif
        {
            nBytes = recv(sb->sb_socket, sb->sb_start + sb->sb_size, nBytes, 0);
        }
        if (nBytes != -1) {
            sb->sb_size += nBytes;
        } else {
            int sockerr = GetSockError();
            RTMP_Log(RTMP_LOGDEBUG, "%s, recv returned %d. GetSockError(): %d (%s)",
                     __FUNCTION__, nBytes, sockerr, strerror(sockerr));
            if (sockerr == EINTR && !PILI_RTMP_ctrlC)
                continue;

            if (sockerr == EWOULDBLOCK || sockerr == EAGAIN) {
                sb->sb_timedout = TRUE;
                nBytes = 0;
            }
        }
        break;
    }

    return nBytes;
}

/* Write len bytes to the socket (or TLS session); returns the send() result. */
int PILI_RTMPSockBuf_Send(PILI_RTMPSockBuf *sb, const char *buf, int len) {
    int rc;

#ifdef _DEBUG
    fwrite(buf, 1, len, netstackdump);
#endif

#if defined(CRYPTO) && !defined(NO_SSL)
    if (sb->sb_ssl) {
        rc = TLS_write(sb->sb_ssl, buf, len);
    } else
#endif
    {
        rc = send(sb->sb_socket, buf, len, 0);
    }
    return rc;
}

/* Shut down TLS (when active) and close the underlying socket. */
int PILI_RTMPSockBuf_Close(PILI_RTMPSockBuf *sb) {
#if defined(CRYPTO) && !defined(NO_SSL)
    if (sb->sb_ssl) {
        TLS_shutdown(sb->sb_ssl);
        TLS_close(sb->sb_ssl);
        sb->sb_ssl = NULL;
    }
#endif
    return closesocket(sb->sb_socket);
}

/* hex digit -> nibble (0-9, a-f, A-F); the macro text continues on the next
 * (line-mangled) source line. */
#define HEX2BIN(a) (((a)&0x40) ?
((a)&0xf) + 9 : ((a)&0xf))

/* Decrypt the hex-encoded `text` in place with the TEA block cipher, keyed by
 * the first 16 bytes of `key` packed as 4 little-endian uint32s.  On return
 * text->av_len is halved (hex -> binary).  Algorithm per the reference cited
 * inline (movable-type TEA block page). */
static void
    DecodeTEA(AVal *key, AVal *text) {
    uint32_t *v, k[4] = {0}, u;
    uint32_t z, y, sum = 0, e, DELTA = 0x9e3779b9;
    int32_t p, q;
    int i, n;
    unsigned char *ptr, *out;

    /* prep key: pack 1st 16 chars into 4 LittleEndian ints */
    ptr = (unsigned char *)key->av_val;
    u = 0;
    n = 0;
    v = k;
    p = key->av_len > 16 ? 16 : key->av_len;
    for (i = 0; i < p; i++) {
        u |= ptr[i] << (n * 8);
        if (n == 3) {
            *v++ = u;
            u = 0;
            n = 0;
        } else {
            n++;
        }
    }
    /* any trailing chars */
    if (u)
        *v = u;

    /* prep text: hex2bin, multiples of 4 */
    n = (text->av_len + 7) / 8;
    out = malloc(n * 8);
    ptr = (unsigned char *)text->av_val;
    v = (uint32_t *)out;
    for (i = 0; i < n; i++) {
        u = (HEX2BIN(ptr[0]) << 4) + HEX2BIN(ptr[1]);
        u |= ((HEX2BIN(ptr[2]) << 4) + HEX2BIN(ptr[3])) << 8;
        u |= ((HEX2BIN(ptr[4]) << 4) + HEX2BIN(ptr[5])) << 16;
        u |= ((HEX2BIN(ptr[6]) << 4) + HEX2BIN(ptr[7])) << 24;
        *v++ = u;
        ptr += 8;
    }
    v = (uint32_t *)out;

/* http://www.movable-type.co.uk/scripts/tea-block.html */
#define MX (((z >> 5) ^ (y << 2)) + ((y >> 3) ^ (z << 4))) ^ ((sum ^ y) + (k[(p & 3) ^ e] ^ z));
    z = v[n - 1];
    y = v[0];
    q = 6 + 52 / n;
    sum = q * DELTA;
    /* standard TEA block decipher loop (runs the rounds in reverse) */
    while (sum != 0) {
        e = sum >> 2 & 3;
        for (p = n - 1; p > 0; p--)
            z = v[p - 1], y = v[p] -= MX;
        z = v[n - 1];
        y = v[0] -= MX;
        sum -= DELTA;
    }

    /* decrypted payload is half the length of the hex input */
    text->av_len /= 2;
    memcpy(text->av_val, out, text->av_len);
    free(out);
}

/* Send one RTMPT command as an HTTP POST, bumping the message counter and the
 * count of unacknowledged requests.  Returns the result of sending the body.
 * NOTE(review): the User-Agent/Connection header lines end with a bare \n
 * (not \r\n) — this matches upstream librtmp verbatim; leave as-is. */
static int
    HTTP_Post(PILI_RTMP *r, RTMPTCmd cmd, const char *buf, int len) {
    char hbuf[512];
    int hlen = snprintf(hbuf, sizeof(hbuf), "POST /%s%s/%d HTTP/1.1\r\n"
                                            "Host: %.*s:%d\r\n"
                                            "Accept: */*\r\n"
                                            "User-Agent: Shockwave Flash\n"
                                            "Connection: Keep-Alive\n"
                                            "Cache-Control: no-cache\r\n"
                                            "Content-type: application/x-fcs\r\n"
                                            "Content-length: %d\r\n\r\n",
                        RTMPT_cmds[cmd],
                        r->m_clientID.av_val ? r->m_clientID.av_val : "",
                        r->m_msgCounter, r->Link.hostname.av_len, r->Link.hostname.av_val,
                        r->Link.port, len);
    PILI_RTMPSockBuf_Send(&r->m_sb, hbuf, hlen);
    hlen = PILI_RTMPSockBuf_Send(&r->m_sb, buf, len);
    r->m_msgCounter++;
    r->m_unackd++;
    return hlen;
}

/* Parse one RTMPT HTTP response out of the socket buffer (optionally filling
 * it first).  Advances sb_start past the headers, decrements the unacked
 * counter, and either captures the client/session id (first response) or the
 * polling interval + payload length (subsequent responses).
 * Returns 0 on success, -1 if the response is incomplete or malformed. */
static int
    HTTP_read(PILI_RTMP *r, int fill) {
    char *ptr;
    int hlen;

    if (fill)
        PILI_RTMPSockBuf_Fill(&r->m_sb);
    /* need at least a minimal response before attempting to parse */
    if (r->m_sb.sb_size < 144)
        return -1;
    if (strncmp(r->m_sb.sb_start, "HTTP/1.1 200 ", 13))
        return -1;
    ptr = strstr(r->m_sb.sb_start, "Content-Length:");
    if (!ptr)
        return -1;
    /* +16 skips "Content-Length:" (15 chars) plus the following space */
    hlen = atoi(ptr + 16);
    ptr = strstr(ptr, "\r\n\r\n");
    if (!ptr)
        return -1;
    ptr += 4;
    r->m_sb.sb_size -= ptr - r->m_sb.sb_start;
    r->m_sb.sb_start = ptr;
    r->m_unackd--;

    if (!r->m_clientID.av_val) {
        /* first response: the body is our session id; store it with a leading
         * '/' so it can be appended to the POST path directly */
        r->m_clientID.av_len = hlen;
        r->m_clientID.av_val = malloc(hlen + 1);
        if (!r->m_clientID.av_val)
            return -1;
        r->m_clientID.av_val[0] = '/';
        memcpy(r->m_clientID.av_val + 1, ptr, hlen - 1);
        r->m_clientID.av_val[hlen] = 0;
        r->m_sb.sb_size = 0;
    } else {
        /* subsequent responses: first body byte is the server's polling
         * interval, the remaining hlen-1 bytes are RTMP payload */
        r->m_polling = *ptr++;
        r->m_resplen = hlen - 1;
        r->m_sb.sb_start++;
        r->m_sb.sb_size--;
    }
    return 0;
}

/* give up resynchronising after this many non-matching frames */
#define MAX_IGNORED_FRAMES 50

/* Read from the stream until we get a media packet.
 * Returns -3 if Play.Close/Stop, -2 if fatal error, -1 if no more media
 * packets, 0 if ignorable error, >0 if there is a media packet
 */
static int
    Read_1_Packet(PILI_RTMP *r, char *buf, unsigned int buflen) {
    uint32_t prevTagSize = 0;
    int rtnGetNextMediaPacket = 0, ret = RTMP_READ_EOF;
    PILI_RTMPPacket packet = {0};
    int recopy = FALSE;     /* TRUE when the FLV tag didn't fit in buf */
    unsigned int size;
    char *ptr, *pend;
    uint32_t nTimeStamp = 0;
    unsigned int len;

    rtnGetNextMediaPacket = PILI_RTMP_GetNextMediaPacket(r, &packet);
    /* the "loop" runs at most once; every branch exits via break */
    while (rtnGetNextMediaPacket) {
        char *packetBody = packet.m_body;
        unsigned int nPacketLen = packet.m_nBodySize;

        /* Return -3 if this was completed nicely with invoke message
         * Play.Stop or Play.Complete
         */
        if (rtnGetNextMediaPacket == 2) {
            RTMP_Log(RTMP_LOGDEBUG,
                     "Got Play.Complete or Play.Stop from server. "
                     "Assuming stream is complete");
            ret = RTMP_READ_COMPLETE;
            break;
        }

        /* remember whether audio (bit 2) and/or video (bit 0) were seen */
        r->m_read.dataType |= (((packet.m_packetType == 0x08) << 2) |
                               (packet.m_packetType == 0x09));

        if (packet.m_packetType == 0x09 && nPacketLen <= 5) {
            RTMP_Log(RTMP_LOGDEBUG, "ignoring too small video packet: size: %d",
                     nPacketLen);
            ret = RTMP_READ_IGNORE;
            break;
        }
        if (packet.m_packetType == 0x08 && nPacketLen <= 1) {
            RTMP_Log(RTMP_LOGDEBUG, "ignoring too small audio packet: size: %d",
                     nPacketLen);
            ret = RTMP_READ_IGNORE;
            break;
        }

        if (r->m_read.flags & RTMP_READ_SEEKING) {
            ret = RTMP_READ_IGNORE;
            break;
        }
#ifdef _DEBUG
        RTMP_Log(RTMP_LOGDEBUG, "type: %02X, size: %d, TS: %d ms, abs TS: %d",
                 packet.m_packetType, nPacketLen, packet.m_nTimeStamp,
                 packet.m_hasAbsTimestamp);
        if (packet.m_packetType == 0x09)
            RTMP_Log(RTMP_LOGDEBUG, "frametype: %02X", (*packetBody & 0xf0));
#endif

        /* ---- resume handling: match metadata and the initial keyframe so a
         * restarted download continues at exactly the right position ---- */
        if (r->m_read.flags & RTMP_READ_RESUME) {
            /* check the header if we get one */
            if (packet.m_nTimeStamp == 0) {
                if (r->m_read.nMetaHeaderSize > 0 && packet.m_packetType == 0x12) {
                    AMFObject metaObj;
                    int nRes =
                        AMF_Decode(&metaObj, packetBody, nPacketLen, FALSE);
                    if (nRes >= 0) {
                        AVal metastring;
                        AMFProp_GetString(AMF_GetProp(&metaObj, NULL, 0),
                                          &metastring);

                        if (AVMATCH(&metastring, &av_onMetaData)) {
                            /* compare */
                            if ((r->m_read.nMetaHeaderSize != nPacketLen) ||
                                (memcmp(r->m_read.metaHeader, packetBody,
                                        r->m_read.nMetaHeaderSize) != 0)) {
                                ret = RTMP_READ_ERROR;
                            }
                        }
                        AMF_Reset(&metaObj);
                        if (ret == RTMP_READ_ERROR)
                            break;
                    }
                }

                /* check first keyframe to make sure we got the right position
                 * in the stream! (the first non ignored frame)
                 */
                if (r->m_read.nInitialFrameSize > 0) {
                    /* video or audio data */
                    if (packet.m_packetType == r->m_read.initialFrameType && r->m_read.nInitialFrameSize == nPacketLen) {
                        /* we don't compare the sizes since the packet can
                         * contain several FLV packets, just make sure the
                         * first frame is our keyframe (which we are going
                         * to rewrite)
                         */
                        if (memcmp(r->m_read.initialFrame, packetBody,
                                   r->m_read.nInitialFrameSize) == 0) {
                            RTMP_Log(RTMP_LOGDEBUG, "Checked keyframe successfully!");
                            r->m_read.flags |= RTMP_READ_GOTKF;
                            /* ignore it! (what about audio data after it? it is
                             * handled by ignoring all 0ms frames, see below)
                             */
                            ret = RTMP_READ_IGNORE;
                            break;
                        }
                    }

                    /* hande FLV streams, even though the server resends the
                     * keyframe as an extra video packet it is also included
                     * in the first FLV stream chunk and we have to compare
                     * it and filter it out !!
                     */
                    if (packet.m_packetType == 0x16) {
                        /* basically we have to find the keyframe with the
                         * correct TS being nResumeTS
                         */
                        unsigned int pos = 0;
                        uint32_t ts = 0;

                        while (pos + 11 < nPacketLen) {
                            /* size without header (11) and prevTagSize (4) */
                            uint32_t dataSize =
                                AMF_DecodeInt24(packetBody + pos + 1);
                            ts = AMF_DecodeInt24(packetBody + pos + 4);
                            ts |= (packetBody[pos + 7] << 24);

#ifdef _DEBUG
                            RTMP_Log(RTMP_LOGDEBUG,
                                     "keyframe search: FLV Packet: type %02X, dataSize: %d, timeStamp: %d ms",
                                     packetBody[pos], dataSize, ts);
#endif
                            /* ok, is it a keyframe?:
                             * well doesn't work for audio!
                             */
                            if (packetBody[pos /*6928, test 0 */] ==
                                r->m_read.initialFrameType
                                /* && (packetBody[11]&0xf0) == 0x10 */) {
                                if (ts == r->m_read.nResumeTS) {
                                    RTMP_Log(RTMP_LOGDEBUG,
                                             "Found keyframe with resume-keyframe timestamp!");
                                    if (r->m_read.nInitialFrameSize != dataSize || memcmp(r->m_read.initialFrame, packetBody + pos + 11, r->m_read.nInitialFrameSize) != 0) {
                                        RTMP_Log(RTMP_LOGERROR,
                                                 "FLV Stream: Keyframe doesn't match!");
                                        ret = RTMP_READ_ERROR;
                                        break;
                                    }
                                    r->m_read.flags |= RTMP_READ_GOTFLVK;

                                    /* skip this packet?
                                     * check whether skippable:
                                     */
                                    if (pos + 11 + dataSize + 4 > nPacketLen) {
                                        RTMP_Log(RTMP_LOGWARNING,
                                                 "Non skipable packet since it doesn't end with chunk, stream corrupt!");
                                        ret = RTMP_READ_ERROR;
                                        break;
                                    }
                                    packetBody += (pos + 11 + dataSize + 4);
                                    nPacketLen -= (pos + 11 + dataSize + 4);

                                    goto stopKeyframeSearch;

                                } else if (r->m_read.nResumeTS < ts) {
                                    /* the timestamp ts will only increase with
                                     * further packets, wait for seek
                                     */
                                    goto stopKeyframeSearch;
                                }
                            }
                            pos += (11 + dataSize + 4);
                        }
                        if (ts < r->m_read.nResumeTS) {
                            RTMP_Log(RTMP_LOGERROR,
                                     "First packet does not contain keyframe, all "
                                     "timestamps are smaller than the keyframe "
                                     "timestamp; probably the resume seek failed?");
                        }
                    stopKeyframeSearch:;
                        if (!(r->m_read.flags & RTMP_READ_GOTFLVK)) {
                            RTMP_Log(RTMP_LOGERROR,
                                     "Couldn't find the seeked keyframe in this chunk!");
                            ret = RTMP_READ_IGNORE;
                            break;
                        }
                    }
                }
            }

            if (packet.m_nTimeStamp > 0 && (r->m_read.flags & (RTMP_READ_GOTKF | RTMP_READ_GOTFLVK))) {
                /* another problem is that the server can actually change from
                 * 09/08 video/audio packets to an FLV stream or vice versa and
                 * our keyframe check will prevent us from going along with the
                 * new stream if we resumed.
                 *
                 * in this case set the 'found keyframe' variables to true.
                 * We assume that if we found one keyframe somewhere and were
                 * already beyond TS > 0 we have written data to the output
                 * which means we can accept all forthcoming data including the
                 * change between 08/09 <-> FLV packets
                 */
                r->m_read.flags |= (RTMP_READ_GOTKF | RTMP_READ_GOTFLVK);
            }

            /* skip till we find our keyframe
             * (seeking might put us somewhere before it)
             */
            if (!(r->m_read.flags & RTMP_READ_GOTKF) &&
                packet.m_packetType != 0x16) {
                RTMP_Log(RTMP_LOGWARNING,
                         "Stream does not start with requested frame, ignoring data... ");
                r->m_read.nIgnoredFrameCounter++;
                if (r->m_read.nIgnoredFrameCounter > MAX_IGNORED_FRAMES)
                    ret = RTMP_READ_ERROR; /* fatal error, couldn't continue stream */
                else
                    ret = RTMP_READ_IGNORE;
                break;
            }
            /* ok, do the same for FLV streams */
            if (!(r->m_read.flags & RTMP_READ_GOTFLVK) &&
                packet.m_packetType == 0x16) {
                RTMP_Log(RTMP_LOGWARNING,
                         "Stream does not start with requested FLV frame, ignoring data... ");
                r->m_read.nIgnoredFlvFrameCounter++;
                if (r->m_read.nIgnoredFlvFrameCounter > MAX_IGNORED_FRAMES)
                    ret = RTMP_READ_ERROR;
                else
                    ret = RTMP_READ_IGNORE;
                break;
            }

            /* we have to ignore the 0ms frames since these are the first
             * keyframes; we've got these so don't mess around with multiple
             * copies sent by the server to us! (if the keyframe is found at a
             * later position there is only one copy and it will be ignored by
             * the preceding if clause)
             */
            if (!(r->m_read.flags & RTMP_READ_NO_IGNORE) &&
                packet.m_packetType != 0x16) { /* exclude type 0x16 (FLV) since it can
                                                * contain several FLV packets */
                if (packet.m_nTimeStamp == 0) {
                    ret = RTMP_READ_IGNORE;
                    break;
                } else {
                    /* stop ignoring packets */
                    r->m_read.flags |= RTMP_READ_NO_IGNORE;
                }
            }
        }

        /* calculate packet size and allocate slop buffer if necessary */
        size = nPacketLen +
               ((packet.m_packetType == 0x08 || packet.m_packetType == 0x09 || packet.m_packetType == 0x12) ? 11 : 0) +
               (packet.m_packetType != 0x16 ? 4 : 0);

        if (size + 4 > buflen) {
            /* the extra 4 is for the case of an FLV stream without a last
             * prevTagSize (we need extra 4 bytes to append it) */
            r->m_read.buf = malloc(size + 4);
            if (r->m_read.buf == 0) {
                RTMP_Log(RTMP_LOGERROR, "Couldn't allocate memory!");
                ret = RTMP_READ_ERROR; /* fatal error */
                break;
            }
            recopy = TRUE;
            ptr = r->m_read.buf;
        } else {
            ptr = buf;
        }
        pend = ptr + size + 4;

        /* use to return timestamp of last processed packet */

        /* audio (0x08), video (0x09) or metadata (0x12) packets :
         * construct 11 byte header then add PILI_RTMP packet's data */
        if (packet.m_packetType == 0x08 || packet.m_packetType == 0x09 || packet.m_packetType == 0x12) {
            nTimeStamp = r->m_read.nResumeTS + packet.m_nTimeStamp;
            prevTagSize = 11 + nPacketLen;

            *ptr = packet.m_packetType;
            ptr++;
            ptr = AMF_EncodeInt24(ptr, pend, nPacketLen);

#if 0
            if(packet.m_packetType == 0x09) { /* video */

                /* H264 fix: */
                if((packetBody[0] & 0x0f) == 7) { /* CodecId = H264 */
                    uint8_t packetType = *(packetBody+1);

                    uint32_t ts = AMF_DecodeInt24(packetBody+2); /* composition time */
                    int32_t cts = (ts+0xff800000)^0xff800000;
                    RTMP_Log(RTMP_LOGDEBUG, "cts : %d\n", cts);

                    nTimeStamp -= cts;
                    /* get rid of the composition time */
                    CRTMP::EncodeInt24(packetBody+2, 0);
                }
                RTMP_Log(RTMP_LOGDEBUG, "VIDEO: nTimeStamp: 0x%08X (%d)\n", nTimeStamp, nTimeStamp);
            }
#endif

            ptr = AMF_EncodeInt24(ptr, pend, nTimeStamp);
            *ptr = (char)((nTimeStamp & 0xFF000000) >> 24);
            ptr++;

            /* stream id */
            ptr = AMF_EncodeInt24(ptr, pend, 0);
        }

        memcpy(ptr, packetBody, nPacketLen);
        len = nPacketLen;

        /* correct tagSize and obtain timestamp if we have an FLV stream */
        if (packet.m_packetType == 0x16) {
            unsigned int pos = 0;
            int delta;

            /* grab first timestamp and see if it needs fixing */
            nTimeStamp = AMF_DecodeInt24(packetBody + 4);
            nTimeStamp |= (packetBody[7] << 24);
            delta = packet.m_nTimeStamp - nTimeStamp;

            while (pos + 11 < nPacketLen) {
                /* size without header (11) and without prevTagSize (4) */
                uint32_t dataSize = AMF_DecodeInt24(packetBody + pos + 1);
                nTimeStamp = AMF_DecodeInt24(packetBody + pos + 4);
                nTimeStamp |= (packetBody[pos + 7] << 24);

                if (delta) {
                    /* shift every embedded tag's timestamp by the same delta */
                    nTimeStamp += delta;
                    AMF_EncodeInt24(ptr + pos + 4, pend, nTimeStamp);
                    ptr[pos + 7] = nTimeStamp >> 24;
                }

                /* set data type */
                r->m_read.dataType |= (((*(packetBody + pos) == 0x08) << 2) |
                                       (*(packetBody + pos) == 0x09));

                if (pos + 11 + dataSize + 4 > nPacketLen) {
                    if (pos + 11 + dataSize > nPacketLen) {
                        /* NOTE(review): %lu with a uint32_t arg is an LP64
                         * format mismatch inherited from upstream librtmp */
                        RTMP_Log(RTMP_LOGERROR,
                                 "Wrong data size (%lu), stream corrupted, aborting!",
                                 dataSize);
                        ret = RTMP_READ_ERROR;
                        break;
                    }
                    RTMP_Log(RTMP_LOGWARNING, "No tagSize found, appending!");

                    /* we have to append a last tagSize! */
                    prevTagSize = dataSize + 11;
                    AMF_EncodeInt32(ptr + pos + 11 + dataSize, pend,
                                    prevTagSize);
                    size += 4;
                    len += 4;
                } else {
                    prevTagSize =
                        AMF_DecodeInt32(packetBody + pos + 11 + dataSize);

#ifdef _DEBUG
                    RTMP_Log(RTMP_LOGDEBUG,
                             "FLV Packet: type %02X, dataSize: %lu, tagSize: %lu, timeStamp: %lu ms",
                             (unsigned char)packetBody[pos], dataSize, prevTagSize,
                             nTimeStamp);
#endif

                    if (prevTagSize != (dataSize + 11)) {
#ifdef _DEBUG
                        RTMP_Log(RTMP_LOGWARNING,
                                 "Tag and data size are not consitent, writing tag size according to dataSize+11: %d",
                                 dataSize + 11);
#endif

                        prevTagSize = dataSize + 11;
                        AMF_EncodeInt32(ptr + pos + 11 + dataSize, pend,
                                        prevTagSize);
                    }
                }

                pos += prevTagSize + 4; /*(11+dataSize+4); */
            }
        }
        ptr += len;

        if (packet.m_packetType != 0x16) {
            /* FLV tag packets contain their own prevTagSize */
            AMF_EncodeInt32(ptr, pend, prevTagSize);
        }

        /* In non-live this nTimeStamp can contain an absolute TS.
         * Update ext timestamp with this absolute offset in non-live mode
         * otherwise report the relative one
         */
        /* RTMP_Log(RTMP_LOGDEBUG, "type: %02X, size: %d, pktTS: %dms, TS: %dms, bLiveStream: %d", packet.m_packetType, nPacketLen, packet.m_nTimeStamp, nTimeStamp, r->Link.lFlags & RTMP_LF_LIVE); */
        r->m_read.timestamp = (r->Link.lFlags & RTMP_LF_LIVE) ? packet.m_nTimeStamp : nTimeStamp;

        ret = size;
        break;
    }

    if (rtnGetNextMediaPacket)
        PILI_RTMPPacket_Free(&packet);

    /* overflow path: hand back what fits, stash the remainder for later */
    if (recopy) {
        len = ret > buflen ? buflen : ret;
        memcpy(buf, r->m_read.buf, len);
        r->m_read.bufpos = r->m_read.buf + len;
        r->m_read.buflen = ret - len;
    }
    return ret;
}

/* 13-byte FLV file header (signature, version, type flags, header size,
 * first prevTagSize) used when emitting a stream as an FLV file */
static const char flvHeader[] = {'F', 'L', 'V', 0x01,
                                 0x00, /* 0x04 == audio, 0x01 == video */
                                 0x00, 0x00, 0x00, 0x09,
                                 0x00, 0x00, 0x00, 0x00};

#define HEADERBUF (128 * 1024)
/* Public read API: drain any buffered leftover first, then pull whole media
 * packets via Read_1_Packet until one fits in the caller's buffer.
 * Returns bytes written, 0 on EOF/complete, -1 on error.
 * NOTE(review): the `fail:` label has no matching goto in this view — in
 * upstream librtmp the resume/seek path jumps here; kept as-is. */
int PILI_RTMP_Read(PILI_RTMP *r, char *buf, int size) {
    int nRead = 0, total = 0;

/* can't continue */
fail:
    switch (r->m_read.status) {
        case RTMP_READ_EOF:
        case RTMP_READ_COMPLETE:
            return 0;
        case RTMP_READ_ERROR: /* corrupted stream, resume failed */
            SetSockError(EINVAL);
            return -1;
        default:
            break;
    }

    if ((r->m_read.flags & RTMP_READ_SEEKING) && r->m_read.buf) {
        /* drop whatever's here */
        free(r->m_read.buf);
        r->m_read.buf = NULL;
        r->m_read.bufpos = NULL;
        r->m_read.buflen = 0;
    }

    /* If there's leftover data buffered, use it up */
    if (r->m_read.buf) {
        nRead = r->m_read.buflen;
        if (nRead > size)
            nRead = size;
        memcpy(buf, r->m_read.bufpos, nRead);
        r->m_read.buflen -= nRead;
        if (!r->m_read.buflen) {
            free(r->m_read.buf);
            r->m_read.buf = NULL;
            r->m_read.bufpos = NULL;
        } else {
            r->m_read.bufpos += nRead;
        }
        buf += nRead;
        total += nRead;
        size -= nRead;
    }

    while (size > 0 && (nRead = Read_1_Packet(r, buf, size)) >= 0) {
        if (!nRead) continue;
        buf += nRead;
        total += nRead;
        size -= nRead;
        break;
    }
    if (nRead < 0)
        r->m_read.status = nRead;

    if (size < 0)
        total += size;
    return total;
}

static const AVal av_setDataFrame = AVC("@setDataFrame");

/* Public write API: consume an FLV byte stream (optionally starting with the
 * 13-byte FLV file header, which is skipped) and send each FLV tag as one
 * RTMP packet.  Partially received tags persist in r->m_write across calls.
 * Metadata (0x12) tags are prefixed with "@setDataFrame" (the body size is
 * grown by 16 = the AMF-encoded string's length).  Audio/video tags with a
 * zero timestamp and all metadata use a LARGE header; everything else MEDIUM.
 * Returns bytes consumed, 0 if the input is too small, -1 on send failure. */
int PILI_RTMP_Write(PILI_RTMP *r, const char *buf, int size, RTMPError *error) {
    PILI_RTMPPacket *pkt = &r->m_write;
    char *pend, *enc;
    int s2 = size, ret, num;

    pkt->m_nChannel = 0x04; /* source channel */
    pkt->m_nInfoField2 = r->m_stream_id;

    while (s2) {
        if (!pkt->m_nBytesRead) {
            if (size < 11) {
                /* FLV pkt too small */
                return 0;
            }

            /* skip the FLV file header (9 bytes) + first prevTagSize (4) */
            if (buf[0] == 'F' && buf[1] == 'L' && buf[2] == 'V') {
                buf += 13;
                s2 -= 13;
            }

            /* parse the 11-byte FLV tag header: type, size, 24+8 bit TS */
            pkt->m_packetType = *buf++;
            pkt->m_nBodySize = AMF_DecodeInt24(buf);
            buf += 3;
            pkt->m_nTimeStamp = AMF_DecodeInt24(buf);
            buf += 3;
            pkt->m_nTimeStamp |= *buf++ << 24;
            buf += 3; /* skip the 3-byte stream id */
            s2 -= 11;

            if (((pkt->m_packetType == 0x08 || pkt->m_packetType == 0x09) &&
                 !pkt->m_nTimeStamp) ||
                pkt->m_packetType == 0x12) {
                pkt->m_headerType = RTMP_PACKET_SIZE_LARGE;
                if (pkt->m_packetType == 0x12)
                    pkt->m_nBodySize += 16; /* room for "@setDataFrame" */
            } else {
                pkt->m_headerType = RTMP_PACKET_SIZE_MEDIUM;
            }

            if (!PILI_RTMPPacket_Alloc(pkt, pkt->m_nBodySize)) {
                RTMP_Log(RTMP_LOGDEBUG, "%s, failed to allocate packet", __FUNCTION__);
                return FALSE;
            }
            enc = pkt->m_body;
            pend = enc + pkt->m_nBodySize;
            if (pkt->m_packetType == 0x12) {
                enc = AMF_EncodeString(enc, pend, &av_setDataFrame);
                pkt->m_nBytesRead = enc - pkt->m_body;
            }
        } else {
            enc = pkt->m_body + pkt->m_nBytesRead;
        }
        num = pkt->m_nBodySize - pkt->m_nBytesRead;
        if (num > s2)
            num = s2;
        memcpy(enc, buf, num);
        pkt->m_nBytesRead += num;
        s2 -= num;
        buf += num;
        if (pkt->m_nBytesRead == pkt->m_nBodySize) {
            ret = PILI_RTMP_SendPacket(r, pkt, FALSE, error);
            PILI_RTMPPacket_Free(pkt);
            pkt->m_nBytesRead = 0;
            if (!ret)
                return -1;
            buf += 4; /* skip the trailing prevTagSize */
            s2 -= 4;
            if (s2 < 0)
                break;
        }
    }
    return size + s2;
}
diff --git a/LFLiveKit/Vendor/pili-librtmp/rtmp.h b/LFLiveKit/Vendor/pili-librtmp/rtmp.h
new file mode 100644
index
00000000..d415e16c --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/rtmp.h @@ -0,0 +1,356 @@ +#ifndef __RTMP_H__ +#define __RTMP_H__ +/* + * Copyright (C) 2005-2008 Team XBMC + * http://www.xbmc.org + * Copyright (C) 2008-2009 Andrej Stepanchuk + * Copyright (C) 2009-2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * http://www.gnu.org/copyleft/lgpl.html + */ + +#define NO_CRYPTO + +#if !defined(NO_CRYPTO) && !defined(CRYPTO) +#define CRYPTO +#endif + +#include +#include +#include + +#include "amf.h" +#include "error.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define RTMP_LIB_VERSION 0x020300 /* 2.3 */ + +#define RTMP_FEATURE_HTTP 0x01 +#define RTMP_FEATURE_ENC 0x02 +#define RTMP_FEATURE_SSL 0x04 +#define RTMP_FEATURE_MFP 0x08 /* not yet supported */ +#define RTMP_FEATURE_WRITE 0x10 /* publish, not play */ +#define RTMP_FEATURE_HTTP2 0x20 /* server-side rtmpt */ + +#define RTMP_PROTOCOL_UNDEFINED -1 +#define RTMP_PROTOCOL_RTMP 0 +#define RTMP_PROTOCOL_RTMPE RTMP_FEATURE_ENC +#define RTMP_PROTOCOL_RTMPT RTMP_FEATURE_HTTP +#define RTMP_PROTOCOL_RTMPS RTMP_FEATURE_SSL +#define RTMP_PROTOCOL_RTMPTE (RTMP_FEATURE_HTTP | RTMP_FEATURE_ENC) +#define RTMP_PROTOCOL_RTMPTS (RTMP_FEATURE_HTTP | RTMP_FEATURE_SSL) +#define RTMP_PROTOCOL_RTMFP RTMP_FEATURE_MFP + +#define RTMP_DEFAULT_CHUNKSIZE 128 + +/* needs to fit largest number of bytes recv() may return */ +#define RTMP_BUFFER_CACHE_SIZE (16 * 1024) + +#define RTMP_CHANNELS 65600 + +extern const char PILI_RTMPProtocolStringsLower[][7]; +extern const AVal PILI_RTMP_DefaultFlashVer; +extern int PILI_RTMP_ctrlC; + +uint32_t PILI_RTMP_GetTime(void); + +#define RTMP_PACKET_TYPE_AUDIO 0x08 +#define RTMP_PACKET_TYPE_VIDEO 0x09 +#define RTMP_PACKET_TYPE_INFO 0x12 + +#define RTMP_MAX_HEADER_SIZE 18 + +#define RTMP_PACKET_SIZE_LARGE 0 +#define RTMP_PACKET_SIZE_MEDIUM 1 +#define RTMP_PACKET_SIZE_SMALL 2 +#define RTMP_PACKET_SIZE_MINIMUM 3 + +typedef struct PILI_RTMPChunk { + int c_headerSize; + int c_chunkSize; + char *c_chunk; + char c_header[RTMP_MAX_HEADER_SIZE]; +} PILI_RTMPChunk; + +typedef struct PILI_RTMPPacket { + uint8_t m_headerType; + uint8_t m_packetType; + uint8_t m_hasAbsTimestamp; /* timestamp absolute or relative? 
*/ + int m_nChannel; + uint32_t m_nTimeStamp; /* timestamp */ + int32_t m_nInfoField2; /* last 4 bytes in a long header */ + uint32_t m_nBodySize; + uint32_t m_nBytesRead; + PILI_RTMPChunk *m_chunk; + char *m_body; +} PILI_RTMPPacket; + +typedef struct PILI_RTMPSockBuf { + int sb_socket; + int sb_size; /* number of unprocessed bytes in buffer */ + char *sb_start; /* pointer into sb_pBuffer of next byte to process */ + char sb_buf[RTMP_BUFFER_CACHE_SIZE]; /* data read from socket */ + int sb_timedout; + void *sb_ssl; +} PILI_RTMPSockBuf; + +void PILI_RTMPPacket_Reset(PILI_RTMPPacket *p); +void PILI_RTMPPacket_Dump(PILI_RTMPPacket *p); +int PILI_RTMPPacket_Alloc(PILI_RTMPPacket *p, int nSize); +void PILI_RTMPPacket_Free(PILI_RTMPPacket *p); + +#define RTMPPacket_IsReady(a) ((a)->m_nBytesRead == (a)->m_nBodySize) + +typedef struct PILI_RTMP_LNK { + AVal hostname; + AVal domain; + AVal sockshost; + + AVal playpath0; /* parsed from URL */ + AVal playpath; /* passed in explicitly */ + AVal tcUrl; + AVal swfUrl; + AVal pageUrl; + AVal app; + AVal auth; + AVal flashVer; + AVal subscribepath; + AVal token; + AMFObject extras; + int edepth; + + int seekTime; + int stopTime; + +#define RTMP_LF_AUTH 0x0001 /* using auth param */ +#define RTMP_LF_LIVE 0x0002 /* stream is live */ +#define RTMP_LF_SWFV 0x0004 /* do SWF verification */ +#define RTMP_LF_PLST 0x0008 /* send playlist before play */ +#define RTMP_LF_BUFX 0x0010 /* toggle stream on BufferEmpty msg */ +#define RTMP_LF_FTCU 0x0020 /* free tcUrl on close */ + int lFlags; + + int swfAge; + + int protocol; + int timeout; /* connection timeout in seconds */ + int send_timeout; /* send data timeout */ + + unsigned short socksport; + unsigned short port; + +#ifdef CRYPTO +#define RTMP_SWF_HASHLEN 32 + void *dh; /* for encryption */ + void *rc4keyIn; + void *rc4keyOut; + + uint32_t SWFSize; + uint8_t SWFHash[RTMP_SWF_HASHLEN]; + char SWFVerificationResponse[RTMP_SWF_HASHLEN + 10]; +#endif +} PILI_RTMP_LNK; + +/* state for 
read() wrapper */ +typedef struct PILI_RTMP_READ { + char *buf; + char *bufpos; + unsigned int buflen; + uint32_t timestamp; + uint8_t dataType; + uint8_t flags; +#define RTMP_READ_HEADER 0x01 +#define RTMP_READ_RESUME 0x02 +#define RTMP_READ_NO_IGNORE 0x04 +#define RTMP_READ_GOTKF 0x08 +#define RTMP_READ_GOTFLVK 0x10 +#define RTMP_READ_SEEKING 0x20 + int8_t status; +#define RTMP_READ_COMPLETE -3 +#define RTMP_READ_ERROR -2 +#define RTMP_READ_EOF -1 +#define RTMP_READ_IGNORE 0 + + /* if bResume == TRUE */ + uint8_t initialFrameType; + uint32_t nResumeTS; + char *metaHeader; + char *initialFrame; + uint32_t nMetaHeaderSize; + uint32_t nInitialFrameSize; + uint32_t nIgnoredFrameCounter; + uint32_t nIgnoredFlvFrameCounter; +} PILI_RTMP_READ; + +typedef struct PILI_RTMP_METHOD { + AVal name; + int num; +} PILI_RTMP_METHOD; + +typedef void (*PILI_RTMPErrorCallback)(RTMPError *error, void *userData); + +typedef struct PILI_CONNECTION_TIME { + uint32_t connect_time; + uint32_t handshake_time; +} PILI_CONNECTION_TIME; + +typedef void (*PILI_RTMP_ConnectionTimeCallback)( + PILI_CONNECTION_TIME *conn_time, void *userData); + +typedef struct PILI_RTMP { + int m_inChunkSize; + int m_outChunkSize; + int m_nBWCheckCounter; + int m_nBytesIn; + int m_nBytesInSent; + int m_nBufferMS; + int m_stream_id; /* returned in _result from createStream */ + int m_mediaChannel; + uint32_t m_mediaStamp; + uint32_t m_pauseStamp; + int m_pausing; + int m_nServerBW; + int m_nClientBW; + uint8_t m_nClientBW2; + uint8_t m_bPlaying; + uint8_t m_bSendEncoding; + uint8_t m_bSendCounter; + + int m_numInvokes; + int m_numCalls; + PILI_RTMP_METHOD *m_methodCalls; /* remote method calls queue */ + + PILI_RTMPPacket *m_vecChannelsIn[RTMP_CHANNELS]; + PILI_RTMPPacket *m_vecChannelsOut[RTMP_CHANNELS]; + int m_channelTimestamp[RTMP_CHANNELS]; /* abs timestamp of last packet */ + + double m_fAudioCodecs; /* audioCodecs for the connect packet */ + double m_fVideoCodecs; /* videoCodecs for the connect packet */ 
+ double m_fEncoding; /* AMF0 or AMF3 */ + + double m_fDuration; /* duration of stream in seconds */ + + int m_msgCounter; /* RTMPT stuff */ + int m_polling; + int m_resplen; + int m_unackd; + AVal m_clientID; + + PILI_RTMP_READ m_read; + PILI_RTMPPacket m_write; + PILI_RTMPSockBuf m_sb; + PILI_RTMP_LNK Link; + + PILI_RTMPErrorCallback m_errorCallback; + PILI_RTMP_ConnectionTimeCallback m_connCallback; + RTMPError *m_error; + void *m_userData; + int m_is_closing; + int m_tcp_nodelay; + uint32_t ip; +} PILI_RTMP; + +int PILI_RTMP_ParseURL(const char *url, int *protocol, AVal *host, + unsigned int *port, AVal *playpath, AVal *app); + +int PILI_RTMP_ParseURL2(const char *url, int *protocol, AVal *host, + unsigned int *port, AVal *playpath, AVal *app, AVal *domain); + +void PILI_RTMP_ParsePlaypath(AVal *in, AVal *out); +void PILI_RTMP_SetBufferMS(PILI_RTMP *r, int size); +void PILI_RTMP_UpdateBufferMS(PILI_RTMP *r, RTMPError *error); + +int PILI_RTMP_SetOpt(PILI_RTMP *r, const AVal *opt, AVal *arg, + RTMPError *error); +int PILI_RTMP_SetupURL(PILI_RTMP *r, const char *url, RTMPError *error); +void PILI_RTMP_SetupStream(PILI_RTMP *r, int protocol, AVal *hostname, + unsigned int port, AVal *sockshost, AVal *playpath, + AVal *tcUrl, AVal *swfUrl, AVal *pageUrl, AVal *app, + AVal *auth, AVal *swfSHA256Hash, uint32_t swfSize, + AVal *flashVer, AVal *subscribepath, int dStart, + int dStop, int bLiveStream, long int timeout); + +int PILI_RTMP_Connect(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error); +struct sockaddr; +int PILI_RTMP_Connect0(PILI_RTMP *r, struct addrinfo *ai, unsigned short port, + RTMPError *error); +int PILI_RTMP_Connect1(PILI_RTMP *r, PILI_RTMPPacket *cp, RTMPError *error); +int PILI_RTMP_Serve(PILI_RTMP *r, RTMPError *error); + +int PILI_RTMP_ReadPacket(PILI_RTMP *r, PILI_RTMPPacket *packet); +int PILI_RTMP_SendPacket(PILI_RTMP *r, PILI_RTMPPacket *packet, int queue, + RTMPError *error); +int PILI_RTMP_SendChunk(PILI_RTMP *r, PILI_RTMPChunk *chunk, 
RTMPError *error); +int PILI_RTMP_IsConnected(PILI_RTMP *r); +int PILI_RTMP_Socket(PILI_RTMP *r); +int PILI_RTMP_IsTimedout(PILI_RTMP *r); +double PILI_RTMP_GetDuration(PILI_RTMP *r); +int PILI_RTMP_ToggleStream(PILI_RTMP *r, RTMPError *error); + +int PILI_RTMP_ConnectStream(PILI_RTMP *r, int seekTime, RTMPError *error); +int PILI_RTMP_ReconnectStream(PILI_RTMP *r, int seekTime, RTMPError *error); +void PILI_RTMP_DeleteStream(PILI_RTMP *r, RTMPError *error); +int PILI_RTMP_GetNextMediaPacket(PILI_RTMP *r, PILI_RTMPPacket *packet); +int PILI_RTMP_ClientPacket(PILI_RTMP *r, PILI_RTMPPacket *packet); + +void PILI_RTMP_Init(PILI_RTMP *r); +void PILI_RTMP_Close(PILI_RTMP *r, RTMPError *error); +PILI_RTMP *PILI_RTMP_Alloc(void); +void PILI_RTMP_Free(PILI_RTMP *r); +void PILI_RTMP_EnableWrite(PILI_RTMP *r); + +int PILI_RTMP_LibVersion(void); +void PILI_RTMP_UserInterrupt(void); /* user typed Ctrl-C */ + +int PILI_RTMP_SendCtrl(PILI_RTMP *r, short nType, unsigned int nObject, + unsigned int nTime, RTMPError *error); + +/* caller probably doesn't know current timestamp, should + * just use RTMP_Pause instead + */ +int PILI_RTMP_SendPause(PILI_RTMP *r, int DoPause, int dTime, RTMPError *error); +int PILI_RTMP_Pause(PILI_RTMP *r, int DoPause, RTMPError *error); + +int PILI_RTMP_FindFirstMatchingProperty(AMFObject *obj, const AVal *name, + AMFObjectProperty *p); + +int PILI_RTMPSockBuf_Fill(PILI_RTMPSockBuf *sb); +int PILI_RTMPSockBuf_Send(PILI_RTMPSockBuf *sb, const char *buf, int len); +int PILI_RTMPSockBuf_Close(PILI_RTMPSockBuf *sb); + +int PILI_RTMP_SendCreateStream(PILI_RTMP *r, RTMPError *error); +int PILI_RTMP_SendSeek(PILI_RTMP *r, int dTime, RTMPError *error); +int PILI_RTMP_SendServerBW(PILI_RTMP *r, RTMPError *error); +int PILI_RTMP_SendClientBW(PILI_RTMP *r, RTMPError *error); +void PILI_RTMP_DropRequest(PILI_RTMP *r, int i, int freeit); +int PILI_RTMP_Read(PILI_RTMP *r, char *buf, int size); +int PILI_RTMP_Write(PILI_RTMP *r, const char *buf, int size, RTMPError 
*error); + +/* hashswf.c */ +int PILI_RTMP_HashSWF(const char *url, unsigned int *size, unsigned char *hash, + int age); + +#ifdef __cplusplus +}; +#endif + +#endif diff --git a/LFLiveKit/Vendor/pili-librtmp/rtmp_sys.h b/LFLiveKit/Vendor/pili-librtmp/rtmp_sys.h new file mode 100644 index 00000000..880457c3 --- /dev/null +++ b/LFLiveKit/Vendor/pili-librtmp/rtmp_sys.h @@ -0,0 +1,123 @@ +#ifndef __RTMP_SYS_H__ +#define __RTMP_SYS_H__ +/* + * Copyright (C) 2010 Howard Chu + * + * This file is part of librtmp. + * + * librtmp is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1, + * or (at your option) any later version. + * + * librtmp is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with librtmp see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * http://www.gnu.org/copyleft/lgpl.html + */ + +#ifdef _WIN32 + +#ifdef _XBOX +#include +#include +#define snprintf _snprintf +#define strcasecmp stricmp +#define strncasecmp strnicmp +#define vsnprintf _vsnprintf + +#else /* !_XBOX */ +#include +#include +#endif + +#define GetSockError() WSAGetLastError() +#define SetSockError(e) WSASetLastError(e) +#define setsockopt(a, b, c, d, e) (setsockopt)(a, b, c, (const char *)d, (int)e) +#define EWOULDBLOCK \ + WSAETIMEDOUT /* we don't use nonblocking, but we do use timeouts */ +#define sleep(n) Sleep(n * 1000) +#define msleep(n) Sleep(n) +#define SET_RCVTIMEO(tv, s) int tv = s * 1000 +#else /* !_WIN32 */ +#include +#include +#include +#include +#include +#include +#include +#include +#define GetSockError() errno +#define SetSockError(e) errno = e +#undef closesocket +#define closesocket(s) close(s) +#define msleep(n) usleep(n * 1000) +#define SET_RCVTIMEO(tv, s) struct timeval tv = {s, 0} +#endif + +#include "rtmp.h" + +#ifdef USE_POLARSSL +#include +#include +#include +typedef struct tls_ctx { + havege_state hs; + ssl_session ssn; +} tls_ctx; +#define TLS_CTX tls_ctx * +#define TLS_client(ctx, s) \ + s = malloc(sizeof(ssl_context)); \ + ssl_init(s); \ + ssl_set_endpoint(s, SSL_IS_CLIENT); \ + ssl_set_authmode(s, SSL_VERIFY_NONE); \ + ssl_set_rng(s, havege_rand, &ctx->hs); \ + ssl_set_ciphers(s, ssl_default_ciphers); \ + ssl_set_session(s, 1, 600, &ctx->ssn) +#define TLS_setfd(s, fd) ssl_set_bio(s, net_recv, &fd, net_send, &fd) +#define TLS_connect(s) ssl_handshake(s) +#define TLS_read(s, b, l) ssl_read(s, (unsigned char *)b, l) +#define TLS_write(s, b, l) ssl_write(s, (unsigned char *)b, l) +#define TLS_shutdown(s) ssl_close_notify(s) +#define TLS_close(s) \ + ssl_free(s); \ + free(s) + +#elif defined(USE_GNUTLS) +#include +typedef struct tls_ctx { + gnutls_certificate_credentials_t cred; + gnutls_priority_t prios; +} tls_ctx; +#define TLS_CTX tls_ctx * +#define TLS_client(ctx, s) \ + gnutls_init((gnutls_session_t 
*)(&s), GNUTLS_CLIENT); \ + gnutls_priority_set(s, ctx->prios); \ + gnutls_credentials_set(s, GNUTLS_CRD_CERTIFICATE, ctx->cred) +#define TLS_setfd(s, fd) \ + gnutls_transport_set_ptr(s, (gnutls_transport_ptr_t)(long)fd) +#define TLS_connect(s) gnutls_handshake(s) +#define TLS_read(s, b, l) gnutls_record_recv(s, b, l) +#define TLS_write(s, b, l) gnutls_record_send(s, b, l) +#define TLS_shutdown(s) gnutls_bye(s, GNUTLS_SHUT_RDWR) +#define TLS_close(s) gnutls_deinit(s) + +#else /* USE_OPENSSL */ +#define TLS_CTX SSL_CTX * +#define TLS_client(ctx, s) s = SSL_new(ctx) +#define TLS_setfd(s, fd) SSL_set_fd(s, fd) +#define TLS_connect(s) SSL_connect(s) +#define TLS_read(s, b, l) SSL_read(s, b, l) +#define TLS_write(s, b, l) SSL_write(s, b, l) +#define TLS_shutdown(s) SSL_shutdown(s) +#define TLS_close(s) SSL_free(s) + +#endif +#endif diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate index 1c23949a69ba311b6b22e8c3b8deb7997671db8c..8c56515479a5e5e0b477cfba36979203fcc09534 100644 GIT binary patch delta 29 icmaDA`YLn-GasvyRA%hA%^ZAeydWm`%gw^l*ZBdSZwbBt delta 29 jcmaDA`YLn-Gaswt{KcKmH*@f@@q(BZlQ#=XU*`t^w9gAJ diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index 575000bb1915dc25f45c97cfc55023f7a6cf1fe5..924f4daa027ebdaf67a54fbc984be95dc91d9c00 100644 GIT binary patch literal 123847 zcmdSC2YeI9x5v9PEA2|Vt1e5jXr@C#?<6F_MFt1AST-#oiZKS97))$KLPC-~>Am+3 zr1#!?umAMkdnLWsH)o}lcdcT{cJ6!c-ut-$bIxzh^s+NwFJIT%($P6-(#ss-D93Xa z&dLb`$N;){#D?bfj+VByi$}CA-@Cb~vt!AK#`dPwE$qkjBRbpGc{y_A#n*epT{w}m zaX!w^6>y`uG2B>gJ8pY!95M;k?OX@9 zAGbfZi93h`?ojSX?kMg!?s)EG?iB6}?i}s{ZZmf&cO`c%cO!QzcPDo*_c-@7_YC(u z_X76{_bT@(_Zjy&_XYPQ_Z9aww~hOe`-%IV5JCx0EW}Dg;v#P1AqAw63?d`RC^DK% 
zCR4~%GL1|pGsq5PSF(Uqk}6V7YDg_@rR!)X-H#qX527kPj2=mkp~usc=&AG!dNw_e zUPw37OX%hFDtaxwf!<7Sqj%DK=>7B|`Y3&ZK285eU!X73*XWz{9r`}~h<-}HpkLE( z=@0a0`WyX|{=*aA!i&7bJ9szm;{$vlKad~7593GjWBBd)3H)S!8b6cYf!~Rr%a`)y zd<9?0*YI_G1HYKxo!^shB%lIq#YxwK=oA_J#JNUc#`}ha>NBGD2r}$_2=lPfTSNS*ixB2(@5BX2{ z&-t(TZ}{)|pZH(-Klr~boQ1at7Q02UxGY{vfhA-aU>R%~Y8hb}ZQ0H;-ZIHD)iT2} z+cL+pi>1U8vCOwDuvA-iv(#G_S(aLsS@yCtSyozFEUlI{OS@&gWq-?omV+%|IoxuT z#b)k!6eJQp**Vt1Z`AZnWHDx!rP?z21H?^-^vd~Es5@}*^)mX~9 zb+~nub*y!qb)t2Ob-Hzybw}&Y)?#a!b)I!sYn8Ru8nrI8F0t-mjaiplo2{#@`&idn z_qBFf_p=^gJ;}~5Z?oQM zy~ldL^#SXH)`zW6Sf8;zYkkA|ru8lB+tzoiA6Y-PeqsII`h)dH>rd97t$$npvHmOY zf=#dslHeB#gn$qf1_;{=<#wC_TBA!+8gaF?5pg1+t=9F**okT>>KTy?1$J7wI5+W+J2nZvV>ujs1K3PxfE!f7t()IEj}8$u239OY%wuQb-yg4VH#VBc##NcG7rhk~CGC zA%9HktqoS>YnoTi+qoTHqt zT%>GKE>*5ju2!y7Zd7hjZddM7?o}R89#$Sxo>ZPuo>N{_UQu3G-csIGK2SbZK2yF_ zwkh8!KPtZ{zbk(!|2n9{>aaOvhtuJ4_#Ht<*fGda8;BI*xH1 z?>NbEs^bjD*^cuZ7dkdOE^%D$xXN*@;|9mgj@uk}I_`1Y?|8`ZsN)I8(~kc+UU0nZ zc+K&q;~mHQj*lFlI=*mx?fBO5gX3q%Z;n45|2T=$;uM{d)8TYGea?Wh&^gdK#5v44 z(mBSty>o(dvU8eqrgI19PR_Z`QfIld!ddC8an?B-oQs{iJNI-pI#)PXIrnz1ajtWA zI5#*qIyX5FaUSYC!g;jwIOmDZQ=F$e&vKsYyuf*}bF1?*=atTDoYy;Va^C8^!+E#! 
zKIenZN1Ts4pK?CyeBSwz^Ht{?&bOWKIX`rM;{4qCmGc|t_s*Z3zdHYL{_Wyiyi0J| zU5d-)^12FKA=d!cVAoLB2-j%WcCPWRNv^4`8Lru`Ij&t?C9a5TzH5Q2+O?aj-nGcJ z)V0jDm#fLO($(T>b+x(LUF%&NUAMdLcHQH8!1bW(QP*Rxr(93Fo^w6#dfD}g>kZeN zu6JGUxju4z?E2jGg=?GZ8`lr6A6>t?eslfh`rA$1)GfG0x9nEjZnwu>;10OM?g8#0 z?jrXH_el3x_jc|H?uqWH?rH8>?%D30+&jBV+@*UGBTx_qgwM-{-#H{j~cT_p|Od+;6(ya)0Lj-2H|7XZJ7eU){fXEFP;z@Q5Ct z$L}fd4D}524EK!iO!7?jO!4gO*~K&0Q|zhoRC{VXyL_bDQUO&mEpSJ&$@G^E~c(!t(8?&K};&i7V$tGrQfgLilD9^Pf%eZB484sWM-y?29m zKkxqDjot&i2YNSo5Aq)DJ=%MW_gL?7-qXAncrWx`5q1vG)`27v8VE+q~a+fAId}qdwjz`edKtbNf8L z0$;$loo{>JINx~R1m8s8B;RD;6yH?eEMKi}H(#AE>Z|uP_!jyW`4;<@_?G(C`S$g- z`#OA`zV*HhzWsdr`!@Ox@u|KeeMk9D@tx{B&3C%*Y~NMBt9{q_uJv8#yWV$$??&HE zzWaRl`yTK;=zH4tjPDiSTfVn_@Ay9Oed3+z`_%WP?@elG3_7CwF`G@+)`zQD(`X~7(`=|ML@bBm^_LunQ`D^^O{@wg_{-}R< z{~rEj{zm^Q|7!ml|9bxh|9<{M{Hh=P$M{e4pX1-+zs!G?|2qFo{@eU_`S0^T?0?+< zwEsE(EB-hAANoJ?f9(I-zs>)r|1baF{(t=c7H|dL0$+i@pr9a75G)846c&UFiVB7n zj4jx%U`oN%f}IL>F4(1DK|y6feZd|Dtp#fe))urCtSi{RU}FIk99nR6!HET@7o1yg zUcvbVTMDiyxT)Z-f`jKvYZVn6$+!DAWa8Kag zzu) zBj^kUg27-YSQH!@92OiN91)xo+$p$oaF^iRU~#Y{SQ;z~MuO$RdBLh+bub#-J=hf7 zH`pHR2zCb72R8(d3?3CcI(SU**x+%&f!@(zm&jnuzz7_l+_*rmU@SEUI!QVpekSF8~`9l6sK`0OkhC-pjP&iZ+8X6iI z8XFoHnh=^CniiT7Di6&I%@0+Cb`32ERfZOZ7KIjvmV}mt_6)U#)`ZrE+Cu9>>q8qt z2ZYoRgpLj!A37m)O6b(kX`!=1=Y`G>T@bo3bW!Ny&~2gHLwAJk4BZvFJ9JOz-q3xa z`$G?f9t}MidM@-@==IPWp*KTsg+2;>9Qq`*E%bZnkI&7!jB8T zF5Fi5P2sm;Ygh=2VO!W9mcnva2|L2hurC}64-O9rj|h(mj}4CxPY6#5&kXMvE)JK3 z=ZEXU(QtjZA-p8KPq;O_CcHM>7G4+LH{2fX2p%w=1?+)J+zBhbd_>u6F;TOU$hTjao75*UnVffSV*WvHOzc-9nyS}yc zCC<)CoXjbli}Nn3j!xL8d1J>?_TR+bb>WDn*2a#GEu4dMs#N8-aBj|{TEOa2ca6FI z6=jh{ZSDJXtZQs)E^BLAzovO@XUsjKxODooDJ3&X$4@PrIeq-JnWg39XHT6xY5e5! z;9m>a+iT2%EtAr zosktSoo(&WPWID|^0xMdy2_3{G_&St)9U6m%}gNv*Shs<%bGhnTh=yqvQ2! 
zqqDSa&AQfRCRmGm`;JbQ&A3}hV@LA}&9S9(V_oyQwsw|(mtGW$TGp;;+pi<$4%Ia` zF`sx4`oCffcD)!Tanrd`mvEE0Dcn?Un(9A~pPp35!ZMqR}fb0u6USH?xS za&8_spR3?@Refqe4XI&upgLGBQirJ{RAyMoRdLl^4Oh$U#?^6AuAXa9$Ee$>+pFW$ z@#+M1qB=>PtWHtKzQiqN&3Xy9l-r%#0~_{oR#UsP|F$=`vYOq{ta56B>W{g1*nj-~ zSeoP4HMg%EzeX#v9VSj}*XnYY*2eW~n^s4gJMcuOu6gB7GgmfEYHnOUb4BCqNiT9S zZZEEpTfQ{jOV&27XsqG+EI`(s8w13!QKQgdCIKmvuDkk zGRdgds_7N>LT~nznJZ^6pEPr-NpD|GZ$R(#nrE$OYMN!z+o0(U>V;m@=}a#^dz{Lh#*Mo8Vs&i1N1Vx> z6?6NlqgCrW8<)2>m$k6rxNU7?`^K%@ncUf2(;n5)uAk`|=W^%8HS`}`$X#Uq!4__- z`3IMAmz#fZ6?e7y2iI}en}2WJC5-2l}9Suz5-=*Xc2L*~*sIW;T*cZ0KN<#>B=IYg*P$tSql&V-Fid zyZ%(hh8*+XCTIiFIyOL0H1_c4H07BwcX2vpHfim{PM6I}=D&668f9PBWM{|Rk#w?^ z<)UBa=6>74{lfjKM$`prwJz`n_c!O=!u`qprIxGn zws8M&|ElxV3U$|IOr~N5Tg9}jY-w(fx!vsVs94pwYHdsBdYt`-b``lP!7jJqbDQUK z_&@QH;%p*p!4V;mII$7gm^e|0L#tr1p29O4==vA`j-i#?%t-yI);p6BMli#wjT)0l0XbOzzeBM74KHnLZEak~ z7Rp6h!!2Sxm@T$CHx`w$)%dEmcGlfu?n1p+nT@o68<8=eWi700H*T!b7N5fnW_lV6 z8(Y`28OUHY$Y5pej&T)Zq3SBtY-MK zJ;*Y$Cy9~0NF!NJn#c;$OjeRrWHo6ady{=gD_KL`U572k9j1$p*3?*`I7A z2ap5FCUOutm>fb>!Ujch7&)9AL5?Ixk)z2mw<-1G$mh zL~bUxkXy-ZR8Kz<}Yk)O#g~qzsW!3U&>KJDdnk!TB$%q zYNK{4QJE^#L7mh^-PA+9)JOfafCgxghG-!T(*blK9YhDyA+(4NrNiiOI)aX*qv&Wl zhK{A%(e3FtI-X9T6X_&6nNFco=`=c>&Y&~tEIOO+KzF2b=uUKJx(l64i)jfhrDZfi z%jrBipH|Ra=>l3wt7tW?p|x~3T1TU_o;J{hbP-)lm(Znjce)2%M)#yKx)*Jv%V`r` zL7VAHx{9u*Ep%_SNo`hFs;ku1YKywJx{unbu2I*jZR$F8U$tHBP&?K2>IQW`b$@lE zdVqSMx=B4qJy<n+RZmk- zSIZR&s>gDPc>XqtM>ecEs>b2^1 z>hW%76>dopc>aFT+>h0YeIc>fP!+>b>fH>iy~i>VxV->ci?I>Z9so>f`DY z>XYhI>eK2o>a*(q)aTUa)fdzk)tA(l)mPM4)z{S5)i=~P)wk5Q)pyi))%Vo*)eqDU z)sNJV)lbw<)z8$=)i2a9)vwgA)otoG>bL55>i6mo>W}JA>d)#g>aXf=>hJ0w>YwUg z>fh==>c2oZAgn`BAUqHY5GxP?hzP_6#12FPA_Gx?IDj~TxPZ8Ucz}3;_<;C<6aWbT z2?7ZLDFhM*G62XxAcKGm1~LRl5s;xkh5;E4WCW0rKt=%>4P*?Eu|T#1vOSP-K*j@^ z0AwPNNkAq8nF3@gkZC}s1DOG2CXiV`W&>gC!ySRl0kRX2oq_BEWG;|mASFObfs_G> z04WDD56FBV6+m_cvH(aWkSZY6Kx%-nB@vsd*qjpuQV*m7$U-2CfGh^G1jte#Y@FT$ z$TA>%0*L|H3rHi7MMq?G#%%i5Pp|n0$(okJi8PPqDPGlsJT%@|Vq%sn#sg9LJSbAl#x=6I9 
zp{_KN%*b)*Fgcl28cUWKt0h)P71h!D;>yZceet}c`-$kjeO7nOTBN>oeypahEK;Wj zdI~xglK2dtTz+iY6kSqfteDd?nAMc*YBW0w%|@nCn_paq)mIlOt*NSw#>y&^0i262 zgA*<_>u}4->ZhckqOvTuu(+%L~wUKzG}DrKPcjk-8{r!FsMPM#G8S4Ym3! zV{w$$RM$tU>$U$Dv*L_J7gdxe8{yWp5hbdyq@upGrYu>{m!a+G?g`C{R7dKH>m%4W z$}1`>s^_t!x}2^=r@7sol7&-IU0T^t7KzP|6tjwo#>(qzs#x=?sALT`-q=h9cnzve z=&n{=TU%LCT8!N~7Fo=4i^UTwuB~8=uyToBPS>OFlw^%eD%kt+7zq|2@7GA^Tw-Z5fz z4VY+cas7Nf*gMd3L^scx`uUMMt#so_mRA~j#og#JyqgCr!-W-C8dxpy3QAVzedrJ* zWzFka+cx5*vvWR3_`% znrJ;MHa2!eSqbVsFQHG^$&?1%?6oLD6Oeo604nG9A*6so2PF24VsQHnQH&8 zOKL-~HeKl+-=jw;jYqV8N!M)l6Iu;7S!K`KujpWI&e2G1u{I`}BK`vnN1K!@YUkHf zM{3v@m#NJEP7}_;EDk(}4(0~EsF+QK*?f5P8Aj$eH?CmkfHsy;QZDOsPjeN1{;wpVq;uSO; zn8A>(1?u$H$h)#hM(bEbGN} zWieKdpv6Fgg*LIXwCZbW z*nwBqtj?F9rJ+5>mjIPzCZh-%g}cku;{wQJHy`Z`?KZh|t16DNYL2l5ZiKDwaW>T> zU4SlzVq{H4DTP+U|z7GrMeAz3tXe!|W0b zmrWuK+7e1zf?=0p!Lats)Vlg#J&KIz5~+(M+e&#IYv_w=>K2$={37%imewO)!%NKe zOMAA*xip!rWoU0`>MUDq*hG_9G=h6GZE=@0-wVwRCDt`xXiYs9VS7})2Aa^;&;s-t zFxBkJZs|wkyGS-Gv+~z^8QaU%R_K~7XkOS=98p&7iB@WCe646^7>CSargqYXriLb{ zH9l5l73E7}h9w=}-Yw9E>SWcW>*njb1-huXPFu=$>j3+snV~#c@f+H9ttrw2(RL)M zZEV`7ag{d4CZaAZ|LCe@R7k zah<;F-~oNZMqf6MvN4Da&D!LrSIOaMS7fw{ERK{m)N6a`~N(x+0V|;l9(wO zt^Tw#q-1}-C`)4T+DNRX^!nR^4yF=_w}z@>b7Q>}?M$trD;Hfs>8e+t^(bSWu`S29 ze(@PT8k=9v7MsQO#k$Yc=rh{rqmSJB`memElC6D=yHox;bQ)%K>Iz?XH&*10Xm9Fs ztkw)CNilY7hi#wBE4s$`ThL{QF$vuzo|I|Pcsm-0(it1qBX^fB7-K5931Iz7>n(aZk!p5wU|7dJj9#V=y{L|jORU^e z`u<~RI+{eA@yS+EZDZ$ZJmafegv-6eWYfTwbLJW6Npu?7&8e${rkkXmL6>3OT;h#F zPscO|K8N;0y4lx7mZmj+5sl|`Gmb3QPF(QBu(W}laj_MtA+T7qp@f~KX;YP9LH0^I zrNr4yeoC*S(!g#pv7$`WHtU;ur+Eu4yH|l$=vm6=U3Bu1dhO;OJ36Yzb6B=5OO8Mv zpurS_0ZuY8?T~|=-r!Q1Et1&8QH;|%^DQr~sBBA*9huoSK9934L}{fS&^B}_%Ip#!8g=XMda#Z*lpChC zA2WuI<6U$KThLbN%DQ_ zM^Utq-6d!_pt+sxlPlTwD#ku3uv%c9j~%YpS1e^+56@iLSu6Wf4J*&`%9=%bD1V{$ zE-BjDq2j#ewax9?Ee&0{EK<@iPpb-?sc?N!8Ce)fmhZoLi7lQrgKac%p~n>2n8Xh9 z@$f7WnT4X*oLnPIoDgfrpQV_|;(7X>!D2+mmE@_Wbum3_T2Dw8l@s-*<)&A*giS$g#?W@BrFul3Jn~mj zo#^&;c*btrRq1N}Jn|Q>dhIBb4GQzvc8RTHLF+$ 
zD`5x3_*qciis7Yx@R7%tG(ktnE+ESCo>`0*|>Jq`o>l4>=Y}glznQ9RYckG zEPjH*kA3V1dWjUH!sL|tF)JjxZ*jCLwji>E9h{Wu?q%qnTD@lX(iqNw4Q%jF9+~O> z^U!}>56PFZfrlj>UBc!|{Y$iESM(g;gJ(sxwp?dRcf-+}r3zhl=)qMxz|xNS^uf9N zDW9blby7R3IrE9KV>1{VMr)IYL6#^wr%pF%o!QvSx+j~mYHKR+>y&ofxFlA}zDOoJ z%EDaq*ov2RdDcnTqAGE0R+d<}#-glMbnP`POHglS4~5hvgNkh7W!}MB_CS%IsyD4? zESKaHWvhJ&W@jqp4b|q=%5oG*?e1yIxVAXT%Cr0Q(2NSH^O9L1I-lLeU~^nU@(A3r z8vSEgwiJKI`R%G_DgD8(C^ z-dXoW|IGbH_s20OQ8T5rwQOrz8evnOVG-GhI+?2_gH9bgc+w-;4;7YXEMBG%|9FI7 zsp6~eN?g8Fv#DD<7{CEB@!_8Rf!!z2&x7N~2FXr!0H%|ykz z6il724UN?#7_S^{sMQ47aX4EXCHDx&pxVwkhZk2f2FTW{dRC7|t<>de_Q;G{iz3Cz zaytpdQhNfs@V>6StqC7l;kA#1cVhRVWw0tbFB+*%PM@cu&Tc6g?52YqiVc!%t%z3) z*v`ywxX9WxyQdXpvpQ?XiQzcePR>C2)Y@3o*pBy7ifY?hTbkItDXQNtbx6Ptf8!2O#l`4{Ta2_h8)E;?q``#ijVQkR5Wc5;nwnEh?vWog7M+El|pmXC#&zP$$y6k}P2# zP%$#|L6_xbl&t8Tq@K?-vbXgiQe0G|k-QTn%X$|nt{_wMeh(_<)94M&r=kjP*0E7T zyW*CreLrf~_b%G%hRRCq>sEaYPT%;~G|@kVDMa%~A=z9LS4g!3IUGvMSpuvp)UkW8 zcuz_1507F3`LwoV0`W1SE>hpXhF9Y*ZaA*+-k?DDZT?Mcpk< zU7CGlVV_40g_BI-f0#l(BUpwM*lbo`gXNJ_egT#9>3$iMt0Rl>EKIMumr*wLbSCqF zV`$mzW^?@3W`llAY(Dg~>^`^Nw^O_I`4N`0_&AoX6Ox#m>d5>-9@7cSG}Iyn2TR` zXJZw+r@>b8$(Hjqil#2(GdJ;M%W0@yP|a?cvLK7+vHLxGpZOLA%kwAzL!V)%coBBp zNgK2ccf2h>^dU%XfvZ=Z9^}s`*sK21P^}5Fi6!}&#PS=eO)pt&xEWtqQCHu^ytLgqD!jHuP9@;1CoMzThUvzRYJY2E!rGieA=$4 zjz65KDKBRiUuw$PQG02f{_(-;K+V*XnJgv9j%OwOB> zm-0&48E-M$3U+;Cv-(gr>lP`KY^bt(zTMPS^fWC}L2rtpjj@d$~B3a@C^DCNUJ7XJTc90aGzu2j= z_MoM{rnE-?+-e=t>&OlJ#$+jJJJa|yrT2_sm_XJU#8mdl$P>p9?0!J}^RB5vN1|xf z^=*1l_QZ>Q%4f@tGIj|gF098n1|_pjN+wCe45H;8A1lPz50mR7>-H$PINPLT5G-n3 zyP}94|L6yRT4&LO*%gHNZbf@t5uMN87Z{f1)(MzOL!VPguAA7(pMB)V($Uud*2(>o z1UnJJ`-NqA{1anili`5MIt`P^wS%TDEBw~W_CXb8?5j;Fer06ib=Ps4btWcIrad0S z*YAz3ZL7MzpN%g{K3mh?2C~DD;>sF)@|C!$%PuWdumz>@dX{wuRGi;i#mZuK_&>j* z3O_O#u5(#;Le(AeRE>Z5Gk$5a&PAP_d)CoU4B3;i7&`&M+eyjSFM7`?MXfnGYq7zm zEsl~=Db-`EK*bq7$7ZTmwsB^Ud6HvmC2GvfQv*9F`|6Pt zsX>t`_(@i~Pg7exuc(}ToW+AI!>wFwQQEZ{_V1?a7uGuTpU{&(4vOp=I&;Qr%X%R+ zpzo53wVjNl#ei^ekOTWJ9A)ISjmz5_+gGG*>+H5W``Vgp<7|Z%-II=N&4>52@xgQQ 
zxtnzj3e1TzlwV6zQSsWw){X4G7@k|>9~qr5en7u*(Je^2y~@1x@WE&$yVR3- z*Tv?_#7~k(h}J_;xvH9542R+p{#gV8ss4*p8v=C6@IvlwI5>S$3lngOA{IKU^uZFQ5^2 zAsN3)=;d-HrV{P9RM-N^`QN3@~YAmEWb}vUS znVV2L+n~{NwHl?_qO-JC|I})|6~(LjtyC7W2TS@r1?wHCJrKE#;~74)mJ>Ok){`1~9a;^<7`!~q3i^X zQU3%2wydVX9SR}Lcp;Drx_F1eKw&uJ9SVbl z!NL%sNEj+G9@vFIE&_5fkj+500NHvOnJ?q6;b`o|L zb`j?Je`%Y`Okh0rXl6jlkVg%)9NVIQGYSRaIkQQpb8)yDjX&pE*v2oDI6snEgT~pD;y^rFPtEpD4ZmmESw^o zDx4;qE}S8pDV!yoEu15qE1V~sFI*s8C|o35ENm9G2wR0qgiD3Xgv*61ge!%sgsX*X zglmQCgzJSHgd2sMgqww1gj%sXcr|>78TJUIz^Z07CoX@^of44Kn#dMF(ej> zVR3*sP#h!<7KeyM;!tszI9wbdjuc0Uqs1}eSaCaXdvTmNUYsCK6eo$3#VO)cahfXNj}L9mE~QIpR*@&f+fOT(MXz5lh7~F(Q_W^ThdLg}AG@K&%w2#A>ldtQB_? z83ONeAXfmn63A6Rt_E@qkZXZl2jqGnHvqX2$W1_Q2679KTY=mL`K#PHv04)Vt1~dY+ z9Oyit^MO_X-4*Bppp`(YfK~&o0a^=mH=uPuqd@C{HUM1+bP>?SK$ier3Uqg%djMSq zbWfl$pnCyr1iBn(6VMevn}Mzbx(euOpe;c62D%T>R-kKut_9i#bRE!rfwlwf0NM$3 zJWH0eUOY+koB< z^bVkR0=*09-9YaFdN0uXfZm@Rw?*yoL(ddP?YWkyy==&eW)!t|3q7D2MeT_~9%x2U zdybF;no(@V#dw}*Mp1im&>J+PsJ$>?8OasRD6Yl$dW~ijwRZ%)Kr@Qk!+{>rjN*P6 zUK%rY!el_8iQ47r7L?~*P!v|#0WKT`>D@LMr*LzF` z!%S#KQM=h)Wb!hA@4BHGMeWk|s7%&r(Tt*YBRg#J!57c?=)s6)6t!#C>FrDfqFtUI zoK_EuLu1Y)&XUCYvA6q8wq`V=_)vWKG9owi=n$o$3q6WvBTYg*pczH&%JF!UOY-A% zb~K}?T`Vp%xu!ugirVeqK_;^dXhu=H?mNO{8vh!b$Sk|qz^?Bg|3%NSc9S=h#>0qa z6ko$~8E&%5ie?nwLqfNUtnJ1djCSowXK{ML4 zYmkO)uuJ}_z{Ny+w`unqhjszrQZ0>WMw@nt(V(4;*=EzOCT2l1+O#W%2Bu&dG^4E$ z%OO8BqirDiWJNRDhM-9{G^1@8TI7Oew2eedLwk(hC@_=hY}!>h0~arDl!9ioX*b{u z?KTND$%tmOY1i2d#mH{UXwNMTXhxfMAI$*9%YtUKX&1|8cC`uCfbvE&+O%V~e9(-x z9k3X4M>E=X>e=3irLxUMdqY!a*~)@uw3VW{p~Sl8ixf1YO}nFIXaRZ+7?)|bif-v= zM>Eqiq$2WrFZ!K{ML4yF(_D zo57m3kS>OmO}i!33pArmyA+f&n$f0R^D%|g^(@}}P|~Jd<1v*<7jV!(vaxN%GBK5V zmtA~}V_9X3Ja%^wpEa9$#3uC31I=hVBuiqZWDKvT^_lL_EQ!Ty!wj~t9f1y}5{N&s z?}lcyX*Xg_t)i<0x`NWf(C)jKNP<108Ex8ymORmnHtnKH?r27vb_d1O=Mqcv#4q_s zMKjvY!V=06&1gFpje9~f+Act=e9(-xi_ybWpgp1)ZClYNUo@jlyP{$0J(cQHCX0*MUF?0kSnb3^3H_$d`G^6cpG*5lY-XCa2+k2>;im>Pvn$h-Q zUSiqNjJ8iuEcG>7hR6~p#OctCw$D+iPiRKlS16JCHZvs{e4UhB=_PSF-O!A-Z%{4u 
zsbY4u%xFg2_o$ajGD)ka)4xn;M%zz$&PB7%biHXhypoLrLOL{~eK0DeX?f|D z^gXRPnxX0Qqw}VEMl;$+phBdR7jnd%nGS!M*9Nv z>lZYmy&9!b$6`GM_Ta^UP{@X6wC{#WnLBG5B@>#_UXT8n`;G2znDEo08SRTuCv&xA z&@rGH?MqQ%Y0CR)Lz(mon$f-t)5+XPS)|?4NwmoLISqSB#-Qnl0oUiBy-=^uXhwSz z%Jc)8(Y_LeQlHNo$}#zvDkn6fy#)nRr|V3D@mV_)n$g~hYW;&|w6~#F>T)%EWQpq3 zUX2-EzT4YTEVUilPDM*IFKpIRFUc1WIRM*D%NmfCHz zsC7d#+7CvB)H+D7&;y#$4k(tDJJLHeqy2CcPTkoXh%1?ddqgwZk3#Kyz-I9tf~&t+ zRy3piSX9nf*Lk8D?I+|LCl2~rls%vs?I)vhYS+o3bWLbR`)R0?6(-WHmJB7C9?fV! z6D6|(axzP@$FfmGP^NMN?dS9%()4IX`}ruD4@b(7`Lt+8`$edjPop;~>ZG0YXh!=M z)UHpNtut4-K|3cjqy17$AuEZbTg@e#YZvaUPNUqWM>E>5zy$JXZOH^Op&9L0qhwaZ zPNtZX;Gq;Wqy4&ki@IB!x^zY~qy0uqA)gT}LkjHV!;EIM--62dbiWMBW;CPyc9cy$ zoyn>7rJ@<_cl9y6k z37XOVBqsHrpc(DYpmeW#eP%SH{W%m(UB+i_;>nhi9?fWf5e2h??7EFjh7mghn$iAB zAA;1_hx+zj-%P!Zg1zc5Y0!-Jw@@vgDb-MhT6o>jjP`f?8lMr(X#b$E#n%nZX#W@m zQ}?!pMq@Zv(3W;t(Tw)bdd#jFewY=_X#X;|=#rJt1Desk4fV3NXmfNqpc(Dop=RpI zOqLSNfM&G+h)P+fKGQzj(A(m_hA=&v(f$j{X5AuXlFg20wEvF6S=U>cgtc98l3SMv z&1nCtZwctj7`Dhj=3^?F(f)6KMU!l2J)s#T+Uv-3Lo-TNOd#tFVk-M&k`o30Yc!+e!BqZJG^6D2pCq!N8Kod5 zk!uI-1)5O`V*fmz_h?3GI{Nnx%_z-6-~SxVDD8;J^dFj0+8Oow4b3PO zqg;QW8Kp8*>K8PlG!Lcv2hAw$idwx!GfGwH-Ul?JREq-rfo7DVsFa2sl>&K;&$8^K z(g=9Wj%Jh=qIkcc8KosCl?F4{Yc!*@2P&qa0rsGn3C$?QQ18D$GfK-*ISo3jhe*3Y zAJd^3rDl}wE1FSSjVgUXGfMlQLYm4*Lhq6*zMRmE(puE(6YZ}%no-&pCDZhj+(XQa zW|TTnyr0pG(taqKrqARaaW^!hbO0*$H=0p82vz%3S>4f$lG^7$)1Vop!%#I%M@$xI z&!D{wXh!Ksl3B@#KSVQ1Ct(8r6`D~x71jGzt?b%sl3!^+GfHQm zbhbgG=N4m>W;^jNG^2DjivKrgM(I4%&IQdVU5J*M(2NQ4oOH3YIgUlte{hL(6*uZq z=~C%3>2m1`iQ#r00{Sq}M}R&G^s!5&tEFqCYo+TXhTVA_=o3I+0Qw^E{~+>hyouu& zS<%A4=Q`Oi0e6i5u(Y+YgRu(#e+o^^2HDfg@eb)O22_+7e(1@~62lKYl?A9M-OqrE z(gV_iK%WNsToR}#Jt95M=!Vjx(qllM0s8C~=?U&%p#M`V`iZBgNBf-gqB$X=NG~&t zB7I)ND88N>Mp1f$(FCP8fig^(`Z5_zhL9pomfn%xmEL0*MfxhxSAa5r7_fuebQs0T z@@Qv!bK{x?Eu9l9%PZNhk7a)+tltXf%9F_;j1LZ;TV0nmKBoCE`$;0Il@<@4Tx}P+lZ2mY2v&<=y2y*RgqcDY0Dl-J7}ybLOxPHN8cyli!y=kUx|^l0TL|kw29`lRuZgkiV3_ zlE0R>$=}G|%HPS~%Rk6J%0J0J%fHCK%D>6K%YVp!%74j!%m2v#Dx5+Ts_=?Mu_}Th 
zDmKNgNQ$f|ibHWKF2${Q6tCh_{7QimP=ZQGDOAGB0A-*uNExgQQHqqI$}nZPGC~=t zj8aA`W0bMVcFOiZKLyIhh|ht30rX3tY=HP0=r*9=0R0x|cR;@f`U6nbm45>IGtggv z{tEOrpuYqC1L&VXSug$@=s!UJ1)c+*08fGEfwur}1zrGN1l|U`9e4?N8F&SF2k=hd zUBJ76_W-vs>4z~2J=t-#*~{O!Qs0sNi7-v#{Lz~2M> zy};iG{QbZ`0Q`f%KLq^4z&`@~qrg7~{Nuns0sND|KLz~Lz&`{0v%vok_~(Fs9{3l4 ze-ZeXfPWeISAc&N_}74c9r!nZe-rq(fPWkKcYuEv`1gQ+ANUV|{}A|(fd3fyPk{dv z_|Jg<9QZGQ{}TAGfd3l!ZNPs6{I|e=2mJTI{{Z}t!2bmN&%plz{I9_O2K?{9{{j4; z!2bpO-^n3c(N2kT#3?G;X>eAYqM{x6_JC7Vw3FRDaEgj{MwF|E?bI+&oT8$g2lf|EQPIu? zdxuk0v=hPJ;}jL`1TZh0qN1Jf^#-S?XvcT?;1m_@Y%UL+qM{wW1x<7}{4QT>q>?Dw zN!mb@QF@%Bq8*TBvNX~I746V#Kn6_%PEpa0u*PIC%!E@^v=gc#lUHV(qN1HNjml(Q zTNABk7dMdC()Dyk(GHKoCZ7bu&xBJ{v}2(3cBTT+&U6N+)$5K^RJ0=+zsc5&Q&hBr zmO_(-9-RTFsA#7wBTZ&K;1re3crY{Gl*Yq|Q&et1tKlZAtT;vGW^^z&=Po=JcBsVnY~?mI z9BoqW5vQo!nI@cUI7Q_ibTBvQo^XoF{pgcBPEmOX?eoDYDvzQ^CibD>_g5-UpkXdJ zMdj&ik_Mcj@;@}p4X3EQfVKvBSvs7e@-iA3uyj4+6qVP|#lQ;8hEr7DL}LTSEHh3~ zc?V7N#3?H8qf>8iipodbvXKp^sCoTBnYH|=aVMdfQW&Vo}^zC{BA2QUpz zQTYLF^1~@AKci1poTBm@nqI$8>(P2S5L%U6a zNHXFS9U{6Iijh5#?v7J*NN8h#*=4~gIvi-*N1UR=jUxHr6dgYF$Q`HX2=r`k#3?!o z(caM1v*Q#U1JT@2VqNn^3Qo~61Z@p1K(7JgGR-lpTl(2?ijI+J-Xl)YF$T>H<4|Ut zqGNkBH8jBtI7P>VZh>aUDLN*13sgr>m~o1ZX=r9B&n!4a$4s;}q1}4IDLQsQ7ZcVm zXPlyAC-gOe^U~rJ9dpsn1kN)664|fnjZ{b3FG)-+B^;$_ZKAIktm6;^-LxD{dx2AQ zRG?$dI7LTgN=TV-ijEpIHI+#hB+!6Ubkw1psocBldcY|<8qhZnoT6iKmc&?zCUyag z+L(${bnKobv3PBmaf*&T(ZN&#X>p2nG$j!o#3 zFHX^M2zr@1bS9jl<4`os6Q}4n0-f^0DLRfum)voRj^ogNIEgklwl}S2zq7n^HT%WJ z47*%c%zp83G?g#tI1wFkzJ z%E-b-%>Jxv_@iZRgJ((0we7acFw&-Mcop?> zXrx&BiH4Aozvy@a#Zoy}DTURIzvy@y{rbRPbi9WGX()Jk@E08)qDJZq|CHe4-PQQ& zB^`g!@d?VMV%<{Yuwc9M7agCYb6@$3j;~N9hi;A`WaTe9zCoo_=2zOxq~R|*zDNHY z`gpu-+3XriSH5~5{|VL7^vU$h!!-Ow$FCVmCaXd^{-Wa#R7%tG(ktnkU=x4Q@ptA|Uhx;53c98ufXqSn%wKf6 zP$#vcnspLGUw8hZ(~Hij(@k3E9QcdQf?V`+=Px=#sMkmSqH_R>^i;j+Hp|Yzxg@WD zvd+L?bPh$4)b5_PjMMTLog+{obzU+nr1BS?qtUNl_>0c%P%3pS)?Tbnb#O z{lH&zmY`7T8+k)HCSOeD#9wqqP%w46&LkLbw3+ye&iSa;Km0}K0@O-fu4a!cQJoqP 
zz`$R0R-;&IPso?Q=-dr;{xklfvmWJBYa;;&$&0bED3%p2(v1;>myZ>>iT4--wTCPclHLXN+#hR z`HRjb)Xs-q7VjZi*jf3D&XuT~v99yvFFITDjWb!eJ@6Nut*D&Zb#f?O6Mxa!hB{e^ zA>C@pP?G8Si_UhG%u2z@ESZ_V=v?22NYnEdo%^F?KCmc5=F{>Qod=>~K8@a}n4Q1q zJQ%g>Q)cVTRc_GEiNELsOd%_7q+87;n`>_TMd#s|Kt8Q4nLsA~qVp(}%*xTp6mya; zl)_(h9-D7bcZ*Y(&d6VMo`5OjGlFGEfqe`z^B0{bqjEmoFN3m~zvw&-Wm8XQa%z34 z{6*)PeawGG{-X07RL*AvPv$=jf6;k9s`awd<-}ifUW6(9pYa!+TQIr*6aJ#}QcUVU z;V(L`KlBOA3(v>y{)0qeXYl}8Vmji#%`3!2Np3Gz^!3_LG=X0o(b?P(i z!wtPH8-LOHBFbjnB4v`z&R=xCg2GwXTbYD&<1ad2?^^=;GKMWO((@OcZ{=4s$#&Kg zf6@7FuOrWmzv%n`6UaJ)n94pGIbM`!mv7SY7o8uYXx8;@deO}MMdxQInRQY!NgCR8 zdj6vGOBDRC`HRkNn96_3Uvz%gKS^ZaFFJq3By#Pbz2Gl8f58NLP5w*6Uv&PCiv7)B zbpC~^edjMa|3#ht;V-%>|G-{( zu?L9ycr^_vr{jd3p zuGN^zf5=~S?Sl#YSNuiST2${_wX$ok>G+GTeNj5wpwV-SF-o(Yco%=s)rsQ$%wJ4^ z+*})6`%Fh{xzGFgyw82Un4Gg` zcIH2`v$He1Cv`T}*<5E!ovn2qs`GH2N9t^=v%Suvbsnp;qt4DckJovk&XdyAPMW$) zQ$J}MDorD#X^b?Dm!?!{nk-FIr72UIZj`2cX__fbv!!XSG|iW$o26;7G%b^+mC|&l zG~FXj_e;}yX?j?i9+#%w(zL&*fh-Xnwp-= zH$(a|lQRn1g@xtjC8bQ8n3j{2>Fb=4l%JI{CDE6go9@pV>PzbwmzEL{86T4nA0HXs zOy9DXwqft2+$kL+qGRLYqN5{Y;v!>W;v?hYx-gfA^6#upP_K2y&cnW|2I zUfEX^_VDFS%kyVf{6fMjejAbQo6+5$m6zks$nfQ~%`Bh%AtQN(6HsNzmwxMES(0Jd z`N_;$phd70UX{*-CHytY%yC;g(4bTD&7|8COhS(Cfvr=^kU3Mrp7Qoe@8 zfqysU$})V`NVeug^PIoA{^O%#;$tHd;v=J?Vj`m>BjUr$H<3W~M@A&Y#>YfdsQy=t zJY6@@-1jdQ_3H1Z4EwVN@TReYI)6(oRcKo887W&BF#g4?SJJdf>ZwA)9~%kV{+>2l zA?eSJq#YB@j|B3p-PMqul9b0`o*Slrp={fv?Cdb@fbwPK<%ad}P4{K^vwb;XgENxy z()>A@xnb#pr}(pcgA&_jm%LPNtHd7t%59LFIxQ@*AUDsK8P-2NIVUNnAWZKzX83ca z<%ae0=VztryKbxZI@+sYy?sfkzMR~!fr&%&v-h*8R_06c5`VU6xp8r4}8J7?q5t9&?5D^_27ZDW^8x@mKNgv5U5)q#i zUg045#mH1$MrM^2xspz-l7pmTq06@lvoO-X{@*WjL{fBYOj<(uquw|%Tvh1U$jS@t zi%(7|Uud&pWU9fxaCrHXr}|Q8vPMyj-n&m$n!iF7o3AwzZdm1);+A|7Zn;xJ3J=rd z@O$s1oYWafIliv>spdM&gbCZ;stz0|r;peVgq@(oH9t9}l#G|K_pV zb;O?@yK`I$QRICAyQIh7|4G}0w`9rv>_B`01nj4m#o{=7!L07_AIgD4r^zXLz zUGawYZrhY{XKQnPW1oC$_P;y$iSVVQMMwVIZ9F0=GA$vd!r;f;*vM3CCY%3aAOdaM z7?)LSdo7IALjV5rdsJL(LKMAMcx*&eTy%JJLPTukM{#s&QgVf(IMm2f*xz$JROq!L 
zjie)(!u*TD(|bkQjJA3Cwh?Ef6gAPD{4b_NUo+r_!NdDxCQbH*m7T&8{rNd5zOcYM z`WY|z>VclQgR$e7zp-O4KaezcHPVm!7Y33kN!fY%IX>E%e*AVfey!NhdKt+k{B5UT z>h&7IcT1c5mexO zJG$M2bJ8;zSM>0wuM2dVr0IqY<`i=(MM;yU-qO@ZFG}D4YEgoZp3GeJp;45C zxH+Q=qr>CMmnGYrua)ISbB;OJoF`5FrHRTJC{2Sln5Ua(7}YXZnuh!@sFtp~W7qt= z)+kGKLTPo(Gv88W5f+(mr3kl4Q=&8t(~B^?s3kSrbzki+iQ`9W^%@Z!5kF^iVN`r< zn~=z|N?vKcyUJ8oneQRh`=n{4G>y_z9c`q#_rZO!mrGKm;fyYfPl#$05*1sT*R|%2 zTB?;E1)I#9jc|{Zrg4FA>#gy?mn4wH;pTdiwKX< zl1qpzP3{fzyH(cLd*=6z9m`kDEwDs%U>`5Th^PMWf$ z$*(7uZ8WO=Df!zrk1dRd(;H}XIL#^|Au5KPmS*!O^Y2=^mG($~ScG|qg;}v2Y0A~p z&C}BjKDzU6S8KgKBE!S!86x5$%k>PF8kXy-D!Qe%rH)bb>C!Z#e9>orcCp{aafOlb zkyK1%Y<%hQWN}*RYROgBR4w%^*OSEt(o`T#v-IQ&^(^)q)9tPMPmM2(2v5*@t*G$0 zxH$wODmGT@nak2{YH3wvR$E)z7+IYoO*fU#>YV2qc9WH6HOdlSWpW9Yc4U<^SCKSH zJ-K;$Ry)h3ahb!7Lq94aiaLsp*4k!i9a*|rZm2TV-j+T_CKpK4!U~z(KmO*6V+#|a zDSjjumVp|#47Lo{Qmd@rwv4chB#)z|=@w~Pq^EYP(USHZzv-GcOZ(4Jg)v$d%fdL( zl3Hb&K1&*Da^TRHF45Cms;AlIhn|Bc+&+#P4fHIE9pqIV_tkkC2pQi)E|jAq&&M4@lE$X?jqa z)@-nBvur1ak4e*7XGQy_S8J z{nE5Snl?()CTZHd!E(TIkkULaO$w+Fv?5H_m zIir>3q~&GHDa&bTdPJJGNz-;|dUS*3tmPbKIWJ9A&JMjSJO8_7X>n)EvoR$Dl;}up zIH(=AWrcai@}X9kcP;N(-nV=pO;1P@D;Q5n)6*L)A6Y&&TI@5@v@6hJ|BC~Pt}kU+ zvwTLyB$S?GE#F#xqzIMwjF&AxQ6IlZ(;jKss~2IPaV+m{bmHRY^kQ-GS}ztEsf`FD zW8*l zM9o&4XjW{sSgq1Tn?GD^wOd)qc|n>^R#@V(dW|cyL)>^4nBvdyPiAQ+HmYl{gxKiF z(Ab#xo}tkZUE@N##&z!*8WZ0=CZT6^M6bB0m~xlI)&@pGN3KdJp<9oHI6AV>=;)~K z{1e$TG(Nm*cxbn7@!evgdvuSFPGAB+znZf)vNqw5a#urtzlw}d5qO-IL;ZPIk2NYs#~L(+8otTo0OD{3kStZ~-(iDnDmMH$$vzb|WY z-V{?X2y`mV3L{+Cik0f5F(QleIHnHQT30&;I_&SC(kK zwTrc@NU8FDz9+56h+$WLDiFQy)}C7QdPvjD>#STip86}%>#IetpERAWGN`3wDQB`Tk9CDH>A5{K(kE-z@Wj-3ndOP}6kng*k?A>H zsB3@a&R+hFK;xQdwi+*Y~E+LLp z)P%@{i14(ih`5C4)Rd^m;Ym4}vC$)pwUyDve`4E22BJIHI-kF^W-YQx>pW@tP?|oH zrjOTK7g!fsZeT9d82iyb(wX!b%m8x=uf5TYiask znl4Kdf2byq^*gPs*{`?WWxdo=yfq3k&K3Uo4pRzt}<*a#0ntqU`A2~C0 zF&haCZC6D`_FJE;ETQMEhpdOC=_hIWS(<(!q0V1dDAgJ3F;UZc-1^eUfr)Kde@hx< zsg{(?GGK0Ac=)-XYSm4O+P=f6Cev9~qtT3NQnIizx{3BBnf#f+oMBB^l(&kYo7Zj& 
zu2IMAty`~2)7BBual@Nr6i&ms<+yHpCP3X04!@|kK zNUb-aWO`l_vy)PM-NJ>rdeOBc7a0}EWN~CGevgS|!yf4zoBk}0!r~b1#dxMst>q$< zYQ(I#U8_o8pJ=W#FtMMnz?Z7`WYop!j-7_(`2BfBL86AR2(M@?LPeyA6Fo&gF;t8d z$-*yg6uBZ_%oR6_W#V4(fOt@>73;-Du~}>tkBMi*L2*=^7O#qT#RuXe@kvlnkQ!tO z@&+{wY7!J06dlwds83M;pn*YygN6r<51JI@3(5(~51JLUJm~hI%|W|@_68j^qFgP= z_^;?(^52sG1Tk*;)i`CIw7xukVB&!Myd*mE9yGu8)|1v#B850>zXgtxGuGGm8;AB{4Py@(0xcnNF`{K_1>V~);Fwg2K5f=t#u{*8Aqj>R7~0rL36b} zrO}@s{KE>e>DvFvMr4bPn*Vu9+D3_a1=_jkpKPg3QR`ctv3|sX__6g9t=Z@CD$`bj zT0F96!$zr8H-hz<^&duDe8tw!M2g>BM+~DrQ$>;}5Fz}VCqf3+OzcgKw-YfU-29m_ zjQWZ{@hEB+RoDJp7&GhE@5JS;b?&fUvi@NG(Rx|*w*D--3+A4~rBXvG*Gi?PRIV#( zB$YZv?xH4I-(7m+kS$0b3@X?B{fjt3`U+Ul(DHjT)@PMk|EXI-Y{nX`Qu}XQbt%6v zWiu9P6(;`ww&i+Ob@U;%&1DM_&9>OwHjm9~t81%gyWUpc)<7y&so11qmx@CwPN}%0 z;+BeMi>;xpk*%?D;hAyQoK^BE0uaujVP+l)^x(Pids&fWshVG?Vq79Rq-vd zyf7$-MQ>l~NbODytzR3Klw5X~6@!LO+J`EAb0QZMJ$gk%bPbJ)ii!)3j_lDbG(I9C zCbUQQxUR9?;(LZi$M^Wt1bRewkB*5<=oT8qs$(c4r|{6O3EjGd_UhWLSGVXMk>N4j zS=4G%J_knr%W^n8EB(fNU!PP)IO%EWK867e*jyjV_^QaZ*`p<>$6=V?pOKuD(@yDRM=J=Ah z^5OTyDgGG)lcuLnHik*guH5X$(pgrXv0-liqym3_9$!+Zg{9J(9-hZ?d|rW;WFq5- zJnc>_Bkj_3jonqKstPTM8+OtsXH`gHoR&-vUvmEB$y&VyvTE!s-IuF1SJLRkGOl*b zH`e&Ie^Rn9BXFFIwfRJ|t+sKt@wN%JiMC0$BwMmA#g;0S7E);`l~z(|EtNJ>36)A) zsf0--d@Es`tc8`pPP1j$GHqFePl+&ss>L-)Dg{!RC6)QwiN&miFOW;4l-hNm5j*X& zVPYb&Oy!mlE%w^R##>yWj4%dP#_x&w$(d|DrrZb3Ha^kAmzI>D!QW#h4o31JB`*!u zKF&Q=S$T}_b9}k{VBEjOMQ$KNrPVPeL$AeQIX>3BjZS2iZ8leywnC{yuCvXNN|aWd zrpEqD>b$C51d@<8u140`=1C=boo#_sVv1T08<3Qq6?lJH$7u~w8@Xs>iLpi=_1x2G z7=tuzpi{A`wCM=_$m5+p+#RQlRGAwI%mq~PUfsZZJ!ew$+PJ)|ZyOtUZ}`yuq@v%! 
zno=@;X1E@j5}K>ukq`*LBS!Xm z>RV*eY|36H`}!=+ptQhe%ifd9D5><3%2=uNrUmiUrN;e=94r+A*2Ax**|xOHVnm&< zU+^1E;zUrrO}3M^mu;tPr)_6!XQk3tDs-Piq>?C=VNw~PwH4cW+XYIr$@ZG<^)Y36 z7_R3|ul@c~8Nk6@dY^FFeOsAkWAtWw%l0M&-b}A9HN+?oj>M8Zf%_E~?7k54`$yP;c#1!}XCf#W9!M z@2SlU@cnwJxq%HY85@o?HXNqE6~y104{Bih+4hSVBbCuo8B-+wra_`U`nCOGC%wuh z$%^ekTG%RkOaG+3y4}Pdw79ZmlvP8;!?D<|XvzKM?Z<@D>86$pmYS8e!hY#_ZH?nU z(0JP#w*qMsNae@4KN}HhuPv2Hv^#pULgPjfZ730`t!Z&v^_;*GkKJl_ zF~4lL+3j|RosnL$R8pkEdCj-M?zVe^dfORhq)8=1ucxe{mZSRn()9E4=%MM8r|3V8 zF*B**~r?5A&H`jL8*xtn6)ZR=glch36D(O<0y1_gosHeS^HgDHe zZ*tSLktBaEZ$wf~y4K+_bn?y6PJ6jSlV<3%D|F%BitmPa58>{%Eg=K3>y^x512 zq=g_*)DHGe+UA@9{OjzT09aVp&Zi@Wl}1PFsB?{7br+Y5?LF*0rE;TGrk752*?Zgj z32U*vkDV?hM=H6+_Wt$(QpuA_K9i^Y#!^IPZ_ z&VkGgvyZ$Y-BEUqw;58I$qZ>}%{K0G%QF=|H^Dx!!t*osWIM~Z>+H0d!gY3^RA&EK zDbux5PPI>y3ZsOgvQlQ*Z@eNmIrdzsFhrV5ZfH=(LD0C%;=U1C?gC6#4G%p@>`S$W>R*nS)PSYlslU&ekG+E*$E?6)h= zj$+a)BQ?jDHG(Nc`o*y+TGHHVrq3OOq?X)yR#oRRmPyON5KY_HGO4VP%EEHzO#7Yo zyNq+DRF+ER=F(G~(b(?egk`_q{(yb8{XzR0J7bkwq_RjVw@PKPR2W|^x$=ZndVUJ@ z-W5+BfmbV@GXDJ4iYJP|wiV9`XM-v^Ayf#f{aFILQsH0aS*a}7!m*-A7{xrKb%T^_ z0i`iI@uT(^jmebKQ+b6;hl`Dqx~!U*YXnn6w# zsp!W%eQjW6urz)CxKtG%G49qZDVrn6aSa1{M>R)vhsmKhR7bF*hEzCC)=8yUD(j`P zK`I-ivT3v9TJsP`E!1|@ahRpDSs&qU(f{2qmFJ~$$e0c(x$YmzN|7--OYE*cnwg(# zoH@8c>6xXS<|vn4>+$p@Nnfgdj?j7}V-khWbLa;~16O}k>+%A{Ha?}T#b_54+7$s~ zWKL39#*opN?fi2Qh8V^Ba0o)9L=Tjh*WkkwNN_L)NZg2e5AFb zt@e>NL0m&`E9zhD2-AMrUev#;4@5h-p=y0lEk~>)u86Vqqf&W{U$y`7A?<%HN3l}u zo9xdi7{M*SlYO~?eRXlLRJy_Twe1og=elJ$S=5r)xxvxPaRaY$(eSubc5AQgEowPN zAGpoXW;6mR3~&r4w?VaRA8XqTzKK*)iNP>dH;VXq!A)w#j%0C)Z7+ z@}6UoBgv8MNYOT6y#AC_o|eipQrX43W&bNFS4ruH>(7&n=j4)>J~^MYE3WZImfVj$ zvScwNkeM4Dd3t6Xx!NA~kl&IhwH#A+M|xo6S&lhcRTOF!u#c)J+c+_OP-0-K`HqEU z4T4k9vqgOaZ{6y+jh@WWn95)0SR$1JWkp`@SgF0$hDu)NVB(288UmlX%dtv(i;M5) z*6F#UBdKGwmB6!CnZu?t{t0|wwPQ_L4mf%bm*rr+VRGQ}t)jIp|4BUuQaS55Uq!V9wz%l{i1|Us8;&;}Z#mv}yyJM+ z@t)&-#|MrNrSg(gPDtgXR9=?KDXE;6${DF}mN_StR~~Y3W_Nswe>gtZCbUEY$JbIh zuaDDT)&KoeD*w=aU{qX7Dqm=wd=q+}(%!nXpJ8DuBPl!AmwH2vKR=rxxONJ>db7k7 
z?eW{e{@iq}*&1uES8t`A2TIofOIEs!HRjT72WaPua_cCiZrEc)E;Nx5|OZOgw= z`oTa(1G68w`m7L(cV`^GabL=_j^9}i)E`3V&V!I#BV5o2B~Ibw8EtM?DOn=Z3z_K4 zTkovqtga10u6~Q<(!f?{orY!txESM%bVfO&2`od-L{__V$zxaUKFQK%M)XzK z{^?o1ZUkwX);NlU*(8+@%+-v9W1Xx|xJs+AVkFi(W1aC@MP3!7bIx|o_RbFEprf;s zv$L}cLHqMX?OF_CK9b7EToGNptZn3_yR%10?2DbG;jrpL!jj98eArz(Dh-F&g(>Nrgp9Zn1I>atl-|Csnxeb*4C{(rG(Wojzxp zbFy=alUWQp#c!nYtyCBee=n6wo6JL;89_by*YC{M2ZcXqog_0HY8|PXwO&%|BeZ#k zL0K6EeYgx{{ymQ&mwvUWH4GQ;>jPT-_B8FnPe1DEqg*|@-B8*$lx%PGE83VZkR}g3 zXOUjF48C$eU+JhY64kfVzNSIDr-xl@^T7ImEfA3c=Uj&Q&RNbv=WOR3Czs?uO69Ut z7}ftQm0vbGi=0wh33PJB`>Rxb(-r`w8l;s>yK?F|)0dL3_250Trl;rl^~PTE7a@&N zD7_a`RDm*97=*Hh5O}?72FFEefmWuhUjCecz8Qm)@;Qf;Op8abUGJpnzCKKQq$g!$ z6a>=gK6tn>0iZ>+~kA7AeCy9DR94;`gw^ z?qr>)tXXesV^`|?R?1jFq62dD%D_pPP$P{Na)zB=cPlmNy`!GsXl+Z z@n)su65jckbBFffYovNDA09PqidJ?>YUDt}9g*!mrnV zt}j)6CQ}X34h8Kn>zR||&$;@F&B9xyV6|_E7e9N^BB%c;_`au56&N*m!;ZRs?DU@vUD=T z`HS=Sin9~WKU^FUO{CgXyVBGabxS)#W3sPcQu*l(m*T2XF#*>#u4|>*T&gXM`%#U# zJma%>w5jmfIxa>l<)>j?HWv+}l~h{?c3!%gY$TA~r9uK8S3Sn&F0YF=5-QcU#af?f z+$_P31bWS0aRSgLa*dnAsKL6WTa@(P72fR__D>#nzxsKtz4~LZCs(QwyrR#MoKkGs?k!7k!q||}Cfw~;$npm+J+uydA&hP~`N_8==+351Q(p-~WQ(Wm%?Jm_GQtc_#UQ)ebqidQg z!QxZJqW{ zW&*FC&eJYTOOJ@sbA+~#RLPuOph}mzmNTq%EtBftb*>ds9r7o0bFMpGOtTcb?sDBN z)uB>NEOy=FVpuXvs>83GYSUM31A}MR8dtIQ!Fycm^sPr|KW@-}9H|BE>Xl$)BFweL z^|0}|hxCm{X`kDs|HuQ*<<^}`Hs9fT!r1(AeVZ}b=1=KAj;(C--L8Gc=6m&R#%Y`1 zqyIRpsO5+ej9|1|y33vMqT+aru)^G?>yYb6S^Qp*DtDR$LVL{hl6LSCtHgD#6H=X2 zI;-P4iW&~yXz0Pa0j`oxfy%XBTSX*G^u7tHB+itQl%%zmg#%H|9{RXum)0o&gd^qx0SR7 zcU`wOTkozX)x33X#;U9)8|$T&%+|Xbxf_?X4r4CJ-AtPck}9jk)nSLGa4&yppomF-n?&go64lbR@V(u!C)i`%2`fPW+ zJHg$~-QL~7&Cr2vn=!;(sTN69N_E~QXSTbGcAf3+Zsyczmg;=1iD`ZIgZ!(tu_~{0 z|I?S)e|~`-&lmjF>v4T(?;hh^vChrK=Ax@Ea|2nP==KTgM)zPco$O9=r;=ek z=ZmF!n^c!ww|4}q>(_1W+>M~<$V1?Gw{@WM!JSvqM%lD_1T-dvBBUxFaiQ*>Z9D3|$vs!QP`^XJP`_QO5B%E~>h5`5 zsJrL87q}O?3GAIx0Y?SIIscx3)7Ovaf+uYmTkMiFg z?w$P0lg4xrTcyg!c)p#VACW3go$0$+I>cT^3)t45+^D4fybZu7IpA|AAJ#R`#YPz?3DGAK 
z^)r~qB`$CBxIG?^*HhP1&vU(0pO@+}sh*PRd8xi3)%Qw|cTb3?X;sI7r%Ni%uBeWvzp5JL{%!Kki1AlJ- zqh?02PBPt}p)W}olc8-h`F_JbSs~gEL(;QSGPrLtH6$&^pZPb}UgbM)7txNBq5c4A zNdNvL286`5jR+a&r^1YzP(5+_tZGTOJ^cT@+L%IZ7|nNZg{UWbMpYU0(Vj6xeVkNJOZA){^;ZH> zpQuHBl2p%B8TFLPqRv!-9(DhJ>KG{LBNF*i!wmZUlH-8L%m1O{po$EXMxXnHSxE@A z`3%pERYpI@lS}mTrFucC3~A|sE(W4MQ;U9qRJlK;iqq=s%A(I~j2``e{i|fljbPHW zud>%}2n)ONt2_1E#M=^U|Nlg+165lZ*?AQryU??^%E;d4Swdu&N%c*szN1I>-9Thl zYLVsI=B+9tdsn&09^l^G10JT_n9L%w<|hBn*Yy6C*bd6i%g*OZ_5X)r8`xdD$dW$e zfeNu*9PGd5Zi~f*m4o_L6xz6^yu3ek)_(yJcB;3d|;ilp6@8=Y~vS3}p0_PwJ%}l5YIAtK5Z!XK#gdKhLwC zgW4QrA8nn(^PE&a50v>Z*BhQAo)1Fud{j1c`j7k+I5YGBf7a%zt*jk=>4?UkSf$aIn^RZOF zlj`>smhS$1VeX$V?6vu;101bh13CKI^D~#eo^L$gdcN~~@44jp!Skc%vgaqMayfEY zs{HV?RDY4`uTuR@s=rJ1kFB0xJii(bqk4rm$XktRvS1;DwYxR+xmX$8N(Q%+!R__w zvT_f38n@FKYoq$zCEP#p-+u~~YZYy6(73t7m!qw@mOj+sts};GE%rq+m`DF*aM0C{ zVtDIlk79UT+>^=%+@`QX23IS0(}maL_1cbl>+zIN@HH}6zw08nI)VB(9})D1@Wi&a zp|_C?R%CE6hjz)7inpmgr4p%tt5y3SnVd*0g-gtT!uSJVV>C>y;4&F{2mfnss__}r8&N8^Rb{YFGJ-zGg?(Inu z#?!mrUNX2&>B5A!k9Pp~`FZ<#`+57zV6zNn@BBV6sHbt9O_IS58SIq7E*b2W!5$gxmBDp4dsDorUZ1xq z|C>_h4jEie2463O8%kKT10Z;5!`dx_L zS>8hP5bqorTwez3_pk(q=uhkp;yVd@54id}3CkZi^SuiK2Tn5?+(?@R)eju~?BKms zI}8?kZ<+{#QK=8OZLl-m>$z z_ck9c2@!O=1}w)E8PJ?1@8@$_o%@8$0DJQ*CLos8w8@+V{OS?~EP zwjSkuReK~cI8NJo;n50Pzu|pL>$TpL!3pdB`NNpr_q`ufSm5-2?ERF_8_z9zKa;^7 z0)_cPE6kVPuVipX8QiI?FyDGFU6H+V&nN~$^t1Q3D>kb5K;pnFzG|s%^}43voKW+8 zeA|)`HQ+%bv_=ftf$u)5eGo6;9Nxgk_!8gY5`M(5_+5xPq*bRnyy%L77>|iaLJEAC zjC4%HEX>ADkXV3Quoz3R9GmbwPJ#WKN#D$kQsx`Lvt;Jsn1D%0Mk>h($cevuz$0;ASiWd9ageGW)7Vseld6;_R3LV(lQ-4$^i|M-JMAgErx~6~vmSc7%hr;2^G! 
z`$23S#MbdJ9s%)nJc_;8hy6H!6F7;NaT;&oZM*~G=J*Of;4*&3uR`!-f?%~-IBTFL zs2`^n^-v!nAm2{n;UpeT^6VtPPWH(iox;gJo$Sxa{#+bWF5>8l0&(DOG~r4>dvpYC z&qdpFah$jYVxZtBO~6YSefyK}Dw?aN(^jo6H>AO>#g#Z7+P z&sa=CGEzZ(d8Q!~eq@7s z^UT9a+ynCE*@o?S6g#jRL4DWd zxUAa*&CwErK-ufwj%V;HNUxqMMEwrvgf8fY9_WSM=nKkHpLo_E4C=Z*bzPrhqW&mQ z-}T2~0w`;J%37bY)~BrXIY#PFMFz4!yzA$Hc2J)-SbrvFf%4bC2}PKP1-KcDz;RT6 z36^06$bbDiaX0S4eRu#5Vl9fX0h>TP>pz4?Kz-JK3_I}zp29QOjlI~919%RHa0Ewj z3@_m%PT>sB;XGc&>v#ii;T^n(5AYE_!Dsj!U*cS7 zhG7IoVGPD$0wy6DsYt^VOhpE=kc}MVVFqSl4vIkgZ*Vhi#S$#X?YIl~;C?)ab=ZK- zcnBP;4IabecnZ6)7aXe%p2J}r1$|0`lb}Coa1IynI^M)P;23N05kAG|_zK^GW4pm+ z{DR+w2;tZZF@fVQ$-2x$uHA*3~^gOCUi{}B4B5Mm$F5yU;D zJBWEm9}w>lj@J-k9WormIb5_yRu& z(NLfonjjRNLD?Hp=7yBD;TYtg5VVhmfaWWR;4$uZ0KZR#O`5RNV#+0e? zXF@co2kN*qXj)A7sSJNl35A3h$ zNB9bqwV4YcXoL>v3DRhm20w1Z3fznPu?x?E`f5fR&1lEXh*$ILLA;u`0Qqe`2IEkG zd02pTcnGwG=G1X>^4|PakWO>bX-+yVoM?cCpxw5h94&@{&$n2L<#-sBrv>qAK^iTH zU5j^xXjuam*uZhoG7i*1%ZZqRsUUVOSAsfdNjzE}!BKpPAMukAt%yad)}S4=V*jni zVm!!0EAr5aJha*d_TTCmT);bc&$wi0O*?2^6KxO!^3j@@v?eC4iAihHX-ztFHd_8(ps*MoWu z?}i>oMJD_p58>n?oIHf@!68t`;nYp|Kk$VR5#%Gn1!5D?6@Aekb8s_gBM}?19mF@{ zEGS3BTOh8H!5|)y)N5oHkdMeTu>Z&#u@d*;0qnwaARm#`OC+Bs1R|;#!VrOE5WgtO z5JkI;;&V|Cg8WDExhV1<^(nr^_d-Nd{%AfQ-2y`~7UMAww_z!s#C{yWd!X#mls%>f zEU=*+x`Vomp$=kZfHo1c0o(8>UdF4SE@Mr&4s}4;V<~%VH%tcYDK-apVl~#_Eqn~} z9!Fl|C~F*rk0Z}<zya0jTn_78$%r#;6``_F~wKw2G$b%zeP36#0Rt)Tzt@GK7E zM)rqjF%Xoq`)J$?%GZ4Z z=pVWh9ap8!iyXzEi;aeRDy*^xXyOrC)W_0^9Z*gA`EK ze#D_4ap*@J`u!n9{~BPQ{l|hl^=F^`_u&YR3Ne5<4WQi&XbJW+fHpILHZy?t2fU2a zLJVwy2vGk6=ixRi#cOyE9|$psZ3eN;AhsFAHiOt^5Zeqo1-2V>QHa5`%fao@5$o^} zsFNX-V+ipWLYaq526Z(g2gGa0VZ0#3(CgrY8%dao4D1B)7)qHEsmsJ`P!mHznG?rj zEw%~%BLefXPT+YzClZH%C8 zj97+MxEEjHGT7h9-WUQhIg-4LB;O;S0BvwoFv#~P-XApv<4_FpJ8GK{qietd8>S%- z({U0PKzzr9f&GtZ2Z_a40^T3<4ydQG4bTFuKssYdXDsQAeG>aY8yrVo#+hM7KMcbN ztN>*icR#4paqM&4MIpvhcjKwM@zmY;T+GHzcpfi-JWe3333YKjDBA?u=>+O|!e%@M zVlm-6{0ibRF%s?35u`bhG$)eg#C@P$PCP2aBrh7HDM)Wp7P9d)D95Ddgh-++NoKJB 
zB-%{URAgW;Xje%u2$5V3H9`AI?tuXqggdbsYw$6?#UqjE7vsg!9dWt#e<5Ysw#!dXzxXKj0^C{#+0re*tELIQeNW{)Het|7}jpYr?9<2Ag2w?UozKg1^> z_WmzH?ERFyFX3gJ!7F$b7x5O}#RvEppWzF9 zjqmUSe!{Q#Lx>xzK|u}FL~U4LhYMa@j}SCQGqgk-gdq|!h(~*LLRa*_4d{yj7>q=W zz-WxaL?pw9DVT;V+=x8Pz%0x`5$5A&+=?Ywj@xk;?!ozVp%{))7>fx=LMkR>Dl(CcTues+W@9eqVIdab zHY~$R+=*4V538{j>#+%2@dzHpPCSWcum}5a1jIAvGkgc)lS>S8DSNIPk!TOfm79VL zQ17`*a2MEb?m-;GOCYX!#4}Gt8^nOv2gE-_rjRT-v z_wWyVA;b&^>LUcy(+uio2K$>Ku^5zf2C_iFyk-*nnZ#@6^B})7FX2Za3hcNZw6lT^=n0O=f+-;H1+<3((kURF0@5ki zf!)}P4?sBzz7k>}yEoYX>_p_C5OZ)p zXa}=5;RT!m`It>iW)qXy#AFWX%psjQq%)^KhGQgVpa}CoIp#cxr|}Vp!JKb|xXFfk zpk8n4hJF|TKW2hh+_VDsg1p@H1Sro<&*BqkX?!Qd+?t?n<~k7y>UM5C$j97CNCq*T zOM9Gq3nZAU1$8U2q#7 zt^s+FSo@}SOoGhZzqV&yf^S6J{DrW8x27lncoF{Kz!$CfpW|*0CAmP4B|1LdYyj}}rs)6>j=sJWU9_>(oc_8nL$m=4?x`_H;M4lIs z=SAfC)<$TJP;^Is48(ja!7@CF{WySk@fkiBVljDLOqmxqKsQiViw7VZ?0az`9>7Ly z#&Mj(c_D72o!mw{xh)cTAV#;%1#!LY7|81q%Cv-iEOCN7Eg1=7x`Z^BkmeH7TtYsU zT*8k+ENu(wZfOEIc9wGNEWHJ!wUk&dy&%Lg4=D4pCZPXVHU{HxJ7^cnR)hR4dmEH> zc~g+?a`L)78Pkvn>S6f~JdSVi3w{$~1?5@M9vx8xj++&?;W<$L6(@vPSql!hKs#AU znO2Skd00svR+5L6r|~-85aM590|ug|G1wei7o%R)|6j{FsSZpnP}k#$J31%6aGaLfjP%%6C_1&_CQojPIhH zchN@fdI2vAakq*(An$k6mhT>dL_CIF*dxR$fonj1SKR<&v1%w*;9d}mRn);M+S@AX z`JOP)w(n_&LeQ@7AqMvx#c7-s;$Aa6s0+$+FJ-xxvfR55N5HoC{vpJDHE=EZV>m{F zHg(^2kjMLj!29=8SND$vF}$A`-oF()K^z~b4&HyDHmH{ehJku{;6ZE#+dgm!zY9T? ziq*tnHE~!?99G|l^W-2o9 zJYE9zzxH}GLkr|#4(5Vw*0Rl7wpqtE>)2);+pL=gwp%wHw99qp@v0ET9ncfhNipRp zCLYC@mfC#924ueU) ztfCGqNJkEc@59Hy{vUoth)3$7DVl@#A1MU&^vG^JkHbQ2Bb{xevyF7N4aR8D2Dg!y zZ4YA`KEhY{Mu_dLL7BG4f;!#KKDSQ?b+?_m+fLm*S{p8SFaeW69v>yGN1wtoploa| z9;2Qg>xOYO3na33bVGT|3EI_@AL3&no+6#6Narcid1@gj=Tj^24nD;{gm^j*T|hjZCO=P; zpQp*s(;U;!kl$x0+cV?f12KK(X&k_FLhQ2RdNe>b3P64Dx(~FQT^sQ%UIKC5brPrW zCMd@)_P^^(P{+Hz$B#nnt_B4)P!rU_ZVPPajx3PR-K4pjxbAVIDO#X4+9Co`xB-JP z9Ms<)>SoV)Oobn`&pr8=g*liD31YdY7)L<6-t#Ih;w`+74?+I-P!D^)6Jl>2kdM7} zQ6C(qdm|Bpc(g|++ydgim;CSLnA^J<9bbON{pZB*Z=kNP8b? 
z?`wkQpbhNn3XbW0lyzTU3;<=`mk8R~KHBTPG^8T~lzSiL-j|1a@g!(7`-%I0Vz|FI zhF};UmP@DXTl2R_GF_!gIN8Nc9n zAr1z?gkW5Y>tKcrPIyocw6%kcK%E~XzXz%FgVgy!>ii(_I2Z@o`@xQ&T_5a@Ug(4V z7=)o1j!_tk2}lBEJUAJ&kAs=WMlPnK0JA|GIXDl*;@~3O24Zq>CGNy3(Ebln#|PJ9 zJvM=QK1e+ud=xuD+z&p3J=l+fID{8)3>+^9PvI=+j}E>D>gnLycn=@q6VN^meu;1J zJ$}T`_)Q3^Njz5_Dy~5-)PWTaxKS7N(GX409IX(Fa6};%3Fv^%=!Tx?jeZ!2AsB{{ z7=!VcgcPJ99U1T=2l<$ZLfiz21-J!^u@ozC2kyqbcmQiqjE&fWhp`2t3g2x)I@DqV22A{T#pbm zMl-ZT8-yVeF^ETdbV673zzyh&0T_%#jKFA&!$c&*hbfqbEZm4Z%)l(nK@sNTX55M; zSdQCq7w*CRco6Hb0h{p7(5Qlpj?M`Vh^bILl^M@*zaL8 zJg5uedYE_~9*#mR0I@ktdpS%yIZT-jQ{RWF=fmXb2=#b``a9AT)XxzcXD`sknb0=k%K3(9|!Oaeg^wK8i97`fK+6{59;YC z^>dW{9X*BDKv`cTb}tgM7m3-6%@79CcyTaBV=QK2J{Dp<9s$SAi=^@5dmvuNi2X6* zb<7I#d#nd~Aq5$rogKRkcYG)Zg5QLA$&H3+j6N8OVIX!d6@of=iFmy97jR zI6*8<(0)$PeonCe6FqSQ$ioTpaDqIXSOxZfq8JBp3@_m`&<;*q65^y24M09l5|fj} z_u&ET#|t19FTagX zKweH&2jw|c8*w0wQ{6BI)Xga$W`nvtbqmPHsYkH`#Pk&H@zi-xrc=M-cOgzU1MTEA z?c{V{Bw{$GgLZvdVh;|1GM)YcKY(NQ%=O^&XIfwYMqm`k!x{2$hCH0v0``CAahw4A zKSTYTVgF|p1cQ1#+Xihh5F;@f4EF!ZSQKIbZpL~%0`l<+_3{dz ze}#HEZ-xhT(I3R`JY_gfyFAb5&MyM_KhNjRlmGK)@dn-!;sWKrz~?Vm(E&Yi12T}0 z8CZiYcnB|nvR|O=7rwOiDEpAZFnB!`}LDT zT&#{-sErtKEL@}=UnIX5ry>LND;HOTa$o#EE!_uH*M<5&@XtrnGIwUCfC|V^Oc4j7 zfPf4`Z~!tC7q}-XqrLaEou%!)=WXv@ws($9O|x8?nQ7|(<@x>ZIj?)qJ%0N<&+~qs z59cnvSHElA?wU{8joNF>yf%_J5*Ur22W!iD5E-w1hP}vU?U5i@mrWshU1z3sYOK2% zJ*|`Fx*vmJy=SiX%=MnRULWh_zrKklc!8JrF9?2a!LfMO&l9L%1!n&FKEC89)&{`_ zd)?qJHbkMn4O21ehWqi{4SLafY|_IfJ#4byP4>H~n(MfYm3+cCd=~_pThgA6jKO@HtGE)m zZI;_+&)vM4EkW>W5`!2*9j9|9?)6vi{jblni~Sr3f-UCTGK_TQV$Ln~Jcs$Vyn&xh zTV=e}oLk+<)&fq&`?U31^uF~&zQp_VTQX@3=R7V)zrTHeEd1Yb1lu~$6}xH+uPjswwE~=1i!oM-(~RoD8^IDa#nCVX8GMLznkTc4D#{WKhEVcu3$Ca@_i6& zcT?N@p~vm#WaJvk5^d<#4?zj+tzT+C~Wyd$z%g*j3;Ilg$I0v)tl*3Lr z?3BZ~=G|{rT?S*&YOcj-&|t|8o@& z@DO`B#E~G_<1>4FW{=P8@tHk7v&U!l{DIHy*%t(Vxy!%GspM7O!%p^^W3N2+ntAU9 z*wx;vk=Nc0IP$*}8Nfi!;$kl4Q{?ftnfBS`zFx$$5Hs&PjaPV=_k-Y{Hgu*7)y!lz zkMJzdW0(7z(*pDCub_%sxtn{j_x=0v*?&jjHvTQ;VV>c+AUJS5olx&UEelwLUJmH{ 
zz~@15&OVBzbtwVb3}2IfuXCN8I2My&Os66lSrQB|M6mj{FZhJ)-83eL)x`kW32Ka0@F~ z&sMetVR#Bd7)lf8aRG1h37-aGvvzc$E5(#kiDx$R%x0e1><4~fQxG;EOeR@)Uh~Vj zlCN09x*$9zh9uNK<}5DeQod&c8-uV#6!9c7l{w7gNnYgTAZ*!;*0jZETKY^&pK19V zukl6@9@~=kbflV@%*Jz$^_*ip=eQP_^SBO7V=nV~fj5yyEB&<6Pb>YjItTr>GFz*) z{EAGEPbG^SZe$gAvYSI3=135>4pFc5XvQ#}GAfwJWc1OxmRabh^#T^Nlm<@YO!U(F zd@e#yt*_u}mU9C)V@Iu5qL9nQz_z8enO6I*5i)a_9czsWFXVFW@{_cw&rPj zE4Op>v-)l;-?n@BI|$pU(N2waYP3_Mof_>{^CN3m$A%zmuebKYNGFqQ)M&3pdo|jt z(O!-AM?brRUC5`sd^+f>!zc>z{0?I|9E2U+V#l_~r=xs2R%3S^-BQPe)ZrN&ef9*o zp3saI*#8Mrm_`jV*}`9#=>+>Y!9F^TK)p_Grqeh|vDZ!y^C*w=6h43AG1%FO$Dywi z-{LdA;48ij!p?)qA(wpQ)%jz-M_!%f<@dD1lTId%1bWk#$9RcXc#SuMu*HK>^6xR*l)K+F61h%MbF*z-0fEG<~|-o&2Dzp?MXhyP52$?(C;;e5y{9P z!an?da~M&=1S(N0Lam6|%tOtHI+k)K7o%>3n~1odhj14WPw+G^V^TC$UoA%5UKCT z%P@bWej?S4+{r&d7^PlRThxnEFG{^A^`c^kBatNfk&LXOWEG`el=m%4y(smf)QeIt zY6qnKMU17ANlalnGmv+*oTJa;a<1nl^c=0{XgxwLu5Y(T%!+u4P_qxBtqI0$1xjzQls`i^OXeq*}hy^9${D*BGeAParR=sQN= zF(pjEoy06<1$v74oFCYTx-tI-VQe#6a2)m;Yp=2P8QYC0dJ>EGK31LB*)*^U_Z0gi zFY+GV%~-pL{f6)PiA`+5tg&jwnl<*%AnawPUIWQyF8b^x(_R;IJ+ka2%U<^1%l>=m zzn3g~$+DOI_p<+9uOic4KL%l3fZpSVFaqyOoDAZoV0UqL7pM2Qc`T!m(@`_-T+ZhT zZb8ksCwPIEd5t&me#EUtj&Wb{E%J;rXPi0XcCar9=a)+(&|EC%BP>Q|QM4+)F|> z@<^D23=$S1g9Ptf!lm56N}flq31&<9gZ&)ja1bVj979WDP$yB1L^TrC@VnY!Vi98) zO9^Tw)-V%0PHez@iDz;S=Wz{cCq9gs6V04x=0r0mnmO?!+=Y9*XJpo<$2!541LVd#|(YU(C0_i@-rK;)13Y;il2Ou~+{42>#*;k5bI2{}4c_4-fd0?gU}JIv94E4y*`{r?NXj4;H(B<{c9ty1AQIjH49y zn^K8cQmT=C%0jNgz6W+9hZ&e*;O)G?o4ARAAMy#GVV;5dAE^I<`}h}m4m=WsgPLJ) zgXB8M-Uiv*pjhHbM9zcyF#vfFlJ_8a4@zeQe%1_<#h@C_;Az;|;6_fvGY6lI83$j;C0x!`T#G#X{(3n0H9lb@Y7c3S z8HNlaj}l}$M5aR~Ad4so+?GSe|@G43(F z4m(M|1ev9~?R53i<(BRy)9=7drr*Z{m?8ar^pMe#9>^-AH|k}0KQr85#t7VCMji!B zVh;9{VNV%LsK@>J-hnXV2J9$9%?vd&?&5Kt;dx%9#Z1u%2I#U*_K&4#E-b=|v`0s6Ao|({T?Y>}Q0!Biz)86}-xa ze9Wh)F+z5)iu){3bWXUG$S^mdMyuvqp&yTESJzM#m9qi`c zAk2;>krMQfZ4cS9$bJmZ%9cg;8@!F1$o>F%WPi&K$UJ);o}2AXviBhO>;oJM!W`M; zG^06f=t_5@v6CEko#U=^x;arP 
z(~)!DWn9g*T#q^OR$z|2J9v~Ad70OElXoyro_X?Qm-hodv6l60z|QhEC@I!6fEjm-&7+=6kpD&6RJi{0q4R`^mST zd~@YrhgtIP<~|?z-#^55oN%$9Ga{B`I*e+S;L{J+?Tc?w!!o`Tl2qa%J+ z6!atkvlN_yT@?%=jdU`}!hQ-$D8ozz)l9)VQBX@A4K#5E=W;$5VZMSJxQlyvfQPZG zf+u*I*Lf2&6_}~OOa&kEF{}BJ_58vne#5K<+u6y%ARN^JGmScluGrZqJM(=h;ixzg zNunPk$>&sx7>oHv+1aQ{>}-^GY1A}ou%A&+vWC4ucxrcKa_S7sca0 z*!Ad^w8M@^pNKq0%VV@WM#nM)^N$|Q7{)UZbB>r&OMj-DZHH+L(Q5|lj$b3bfTl5m|;<-g%@FQ|8+J?M~ z4h3OxKy%t)zGAhC%~ae2J1(~4;zZ*xu@!PEBpCB9~<1uC%(+@Sqq+`Z0Ipm?<7`q))P8Dj7nanigJmxCy zV|5UYHP=|r8k>w~jm;#RTnf}BlpyvQrO&RhJ=;UFA$ zEXUK94s_xqVu`0WeQ^)t^gd3n<7%13T$XY!7jQ9h8Fv%6ayxn$C!=wnuqg<~H^Yv` z$6z<(6G=j5;|DMZdm2BC4921Ec$tlt+4yRvQo~HJ~opW+R^=Z_#PF=I(2 zo?YUZC7H;wq>$0L_mT?SL5Z6$F<*%rD49(iOK=M%=W`LVF1Z4Emv|pa%vy3M_i#TC z@dz*QDtak-n-BPyPx+jU9PMvEhl0@mj36v+&T+KHJ(t=^X?GIPf2mt2O=kpdp|k*V zlol}+d6YKbW=qe;ZIs$esh&&yyehqt$N3}(%hV`~p+BBg=2^azC@d>R?qzZ>lY5!m z%c}AGvKpH3{Iat+mkUs%O#Wq_UnZBb<=lW=5o1~ccc?N zNunR_uUy9EsSG2JQH*8`<0(TX<)^a}?^L;6mha-fAgpLb8`^UMojI8Z5>cx{tqQd& z)T&UcLamB)-Q356Jj}Da$$NarCw#%ze8&%LL>`rU`G*7O zxvCj0I2Jpoibt zzbf}vbvK^1-^$?HpI4|=WW~q9I_c2e^x0s>IO;x$6s$bZ|cI00r|0=UgG|NP@ zOf<{H6S3!sCt=qUW^5>=d9&tHu5XK@drEE z9fXthIazO$M_}KR?RxU%s55yL>P%K=@&nl6Wc4OL%l|O<L!@|yY;?s4khK{(C5O>2RDPLtEL&gfxUFB0g( zDGVcnEJmW{v^vhkuBN@r2mHi3Hn52;Y-2mSkmEFUr>i$zz3J{}y8D@aBI-^z`*gEU zH~aKhGGXEhbFG&Ud%N88$5gZ(P!@C-yp0pQ%!SP(jK|in5)L~Ydp8cTs7vZ zF;`78Dag1cmjVhIO9>OGU?P*zLyfF!^igvb=C8ScOSyup(O=C!=%rQV+*K-rM@)kd^BM4`N z$aRK!XQVTZQp%~sOf$?h<840SS7bR;&olKm)81z855ifVGfV%o^gqk&8%Q0?s#?^uHrr(=Lw$THQwhV zR`WSu;pgh?_58wSwxa*pd-xl3%sv=|zBewMBab*_ z1!i0jh0iYNjoBC2*@6@XlS&pN$>&s-(!gn)!C9;g!i8gy!NN)=Vvh^!SVkkK8EEV>Id7d^>yyui!6!P~sY2Yi9d7j5KMeq#r_`3v{7NWP2ZySM|L zI0^e$Y#)oGaZihrk?rE43`f3;b1>Ut884o}B|MM%i@)Uu*03J8ym&KP`5iqh)Ua~@Z-oEz|d`%bfP$)h~M)7Zh1mw2C# zSj}hXWr=xxH(9vkXfJ!) 
zeeq5$%Oo3nURFRM#f(F5%O+5RewJO%D&(;2dp4n7eGA&r5qqt-*Lrp9)vH&pUOnF- z7S^j*Zx{8$7)cRkt#@1Xvzf<2>Np)U*PqWtT*{5y!foiI{t+JMDW2s;ychK!@(G`^ zk0U|Y&jqf!R!r{OvKGJoXr*7fE#I8!AkDrA>2#D z6S$WK{WLt!*X$0$#!kqfF^zOa;QeaML+*|8Zd9*Py~asQMjnleQM<8$Gtfh$du_Y` zchGn%tGJ7M(MO{`8uigAmqxp4bhC|awsAF|^A&EU@i+cPMvedQZxA-Mq!n#w&k5LR zQxq}8kw_op*JPF^vox8d$sA3ExSu8&HdSN4P4?SlzfB7;Pm_6?>bV3nH2FP%qklW5 U+5i1xM2r9X$N&HLVbcx&4~8WQRsaA1 literal 118716 zcmc$n1$Y}rw}y9SrClv(SC(W+4s#l2YST83EhkP)wd_zqbsQye15RxxZQ8WmQ)XsP z(x%*$nVFfHnVEa%NLqPifhF7h@9lH{Chj@koMG6R*Gt!}Zfb2CKmH|-aFpXY8)xT) zqF0JuT{L7vq@}f~dF`Sh%}e)*G`6)a9@5a#xUz};xNb;W^Ex+2j=Xq#f$%*iat_YJ zdAUMvIJW~gf*Z+=;zo00xUt+gZYORgw==g3w=1`h+mqXiTgI*Anz%LGTCRm_<@V$D z=QeQ%aljqQ9myTV9mgHdoy?uWoxz>MUBGSTF6FM^uHmldZsu<1?&colp5&h5p5vb9 zUglomKH)y)KI1;;zTm#(zT&>-e&l}QekX)b!V?>@6Orh|MG8nEDI$HzP%?}RClkm- zGKowkQ^-`ZBbi6$lL}Hvsz^1dA-j<}Qcw4#>*+?ii5@}^rAN@C>2dT#dI~+Ao<+~4 z7to98R=SN|L9eFU=?(N|dKi^>e=L6je=>g>eAe}sRWe~N#We}R9Qe~o{We}{jc|A_yT z|APOT|BnBW|AqgZ|BL_EMs0SR!=~6YTY=4M^V@pa`q~EA2HS?&M%YH%#@Qy?rr4(2 zcCzhaE4G!|=Gf-hDs45kkZpl&v271q*tXObv8}YNvaPl4YiqOZXFI@lkj=0iW;@b$ zjO}>aNw!mMXV}iRooBnyw%K-x?K0bywrgzH*>1GmV!PdTm+fBL1Ga~4kJ+BIJ!5;` z_LA*Y+Z(pGZSUDWw0&ax-1e32TiXw|pKZU{{{r{j+i$SnY`@KZ zr~MxL{q_g#kJ+EHKW%^0{+9i1`#bh`?eE#&w|{E?%>K3gXZtVqU+upMoInIB2!bN0 zf>ZDd0ij6fCG-~h3*&_G!USQWFiDs!Oc7=XGsT7C5^+y)Z?REaE;fm)#b&WZTrch~ z9w;6xf_S)ilz6Oof_Socns}yoj(EO!k+?;?RJ>fgO1xIQUc5=XRlGyITf9$vP<%vu zTzpD=R(wHxS$s`=Q+!8!U;Ie?RQy8xTKrD@QT#>xUHnV@*Fhb2hr^*bG)IBM>+m~z zIr=&VI0ie0IYu}}JH|OCI;J?LJ9cvH;wW~MI_5a$IVv4Bj*w%4W3gioN7%8{5pk?^ zta7Y%?CWTA?B_VZagf7s9OgLEag5`5$4QP;9cMVscAV$9(6QNZiQ_WIm5ys1*Ew!< z+~T<1ahKy>#{-Us9gjJlbUfpD-tm&-RmU5Sw;k^}K6HHI_}uZ8<6FlMj-MUBIsSC~ zBN53aiIOZiC70xpd{U9rN9r#Pl7>nx3+F6<N=HaXOUFqkN~cJtOJ_;vN*72MOIxLF z(iPIx(st1OFR=}ze$>3-=U=~3wk>1pXX=|$-k>2>KX>0Rjq>0{|L=}YMw>3iuX z=~w9w>2H~nd0CJpS(SC!Ef>lGxwqU;9w-lyhsz`7G4gnMk~~$OA*R&<5_wN~Z@E!kE;q@mUN7%2A1EIzgM7Grlzgmwf_$=kntY~wj(om+ zk-SyjE?*~KFW(^FDBmvMA>S)MDnBMaEMm-rTB^=b=c$!yjT%xHsEgG-)Udi# 
zji@WtRq9%GU$sr$Pdz|ANHx^M)Fahn)Z^8Y)Kk?n)U(y|)C<+k>Lu!B>Xqs>>UHXk z>MiQ+>Rsx+>I3S->SOAY>ND!|>PzaY>Kp3Y>U-*k>L=>w>R0Nw>JRGA>Tl|w>OW56 zv^hnm>~uO^PLI>)EOPd7_ID0)4t4I}9OWGAoZy`7oaWroxwCVYv&32EEO%BotDUvZ zdgmhN?#{iO4bElG70!K}Yn^p!t9FNWw|1ZQp!SINxb~FxtoDNTvi6$xruL5Z zzV?y!srH5Twf3F%qxOsTyY`p%uTFKl?$8xo(+hO3?$>+ief0tQV11ZALLaS<(9e(Err`b#X46 z%kGj~vP*O6E|1IW3b=|~eO>)rgIt4M!(BVLM!UwiCb%ZLrn;uNc5=;h&2kmHDqU5s zYFCYGH&?AI7YlUm2tIf6EwZXNYYk${9*8#30T}Qc&b{*q7 z!*!b+hXh*R8JGTo1V(c0J;H!S$l+CD(hd_gx>jzHxo) z`p)&e>u=XTu73--0=2+dpcV8g=v~mKpl`v5f{_KI3Z@lIFPKrVV?kNLoPxOpbp`bW z3knt%G!`r?h!iX@XenqdXe%%Zpy1Gg6AMl%IJw}If(r^REV!uPs)DNvt|_>-V0*!J z1=kncP;gJdy#@Cb++Xl?!7~NV7Ccw*e8CF^FBZI8@Ls|D1s@cAQ}AuUcLm=U{9f>P z!9Q;5=G~H8c57~*ySKZ)dx(3adz5>edy0FSduR79?p@s>cb&W5y}-TDy~w@Ty~Mq{ zdk^=X?!DY$_ulS(+^gKH-D}+I-G{ghH@FXVALc&XeT4fY_sQ;4+^4$FcVFPX(0!5n z3ip-ntK8e&H@Y8mKjePc{fPTf_ml2t+|Rn7bHC(%-~F-sbN3hSZ`|Lye{}!kad;$; z>`^?b$LY~Ly2s@y@c2AqJ>xv%Jrg_=J(E0>JySeWJ<~kXJyo7+PmO0cPpv29sq@r( z7I+qV!k#8itEbI#kmoSZ;hrNr$9PU~7kN(foa#BvbC%~k&-tEljmp8FP>jLzjaR?d9#~9poM69qAqGo#>t9o$Q_No$1}#Tkf6bo$syiR(cnEmw0#g?&00jySI0_ zcZGMgx6QlWyTQAkcYm+p1@EEWqrJy?Pw<}RJ>7ebce8hkcdPd*@73OGyf=Dp^WNvZ z-}`{~3Gb8M*S&9e-}JuaecStv_dD22Hwxb?{HXAY!mkT|E&Q$U_rgDX zHlN)m_++2cSLiG94fGB24fT!mjq^?NP4&(2?c|%~tMTpTtM!F^b-sF^*SEm8%op)3 z_pR`)^fmeR@iqI_`S$g#_igkY;xl|l`Hu4)?>oVFijVkC^_}TE*LR-peBTAW3w<~F zZuZ^cyVZA_?{?oEzB_$)`R?}J=X=oii0^UVQ@$5`ule5dz3=6ea;6Kp6$$zN-NdNKvll`aoukc^#zsi5L{~G_b z{_Xzj{MY+$@Zada*?+tLF8@9L`~463pYT8Ff7bu5|2_Zv{tx^g`akl2?ElvPo&S6P z5B^{LzXt39As_}E0V$vb^nfQ&6zCNg7#JED78n^A6&M{DADA4N5||p87MLEG5vUH- z1a=G5210?lKz(3AU}0cUVE4dYfrh}cz>2_{Kx^Q@z^1_Afg=JZ1kMPY88|C&cHr8; z_P}+4>jO6gZVcQMxH)i3;MTz1fqMcE1)d2!8+b18eBkB4r-9D`p9j7Od>QyE@O9vu zz#oA>1AhhnF5-)9Mb4svB6pFm$Y0dEs87*=qJc$2i-r}AEE-ibu4sJGgrX@$Q;TL7 zl@yg0)fDYkR9h4(sw-Mjw0luQ(ek3TMa@O~742WNsp!z6ql!){I;-gXqRmB@68wr=5^oMupi-@>`L0>cJ&V$2J>yyd0Ah0QIiTGur+MoOC-*RP4JZ40}G6qihy zJaP8ak};D?r%f3%d0I)?m>H8Mj2|m>a?kHB`e1wuiy?7}#C`+izg|-59}QncWg; 
zShcRXX>D6;absK4hK9DtvaqXHX=HiB`qgd0Wle3(Eul8{)7G-)mipR?);*$Tkx=8x z$eIWfi2b#8{o2wH2NaR1!e#&MWq>=f=f-wIp#vqx+FrJ&j4ZDP!z)j>PagzSP^NN-_$lXS$gc8hV?5V zm60{gEgQ|`!mepCx9I;)s%dG)6M)eAbr^I<|0J8RYgo*aRd<@SLe1-28Y3kvqOfat zHp7bM6=pDIwr6qkxnWmw#oTPJge&EOTp2fqo6D7R^9+ySGXh30qmR+g7+?%Ch8WDS zf~(}JxN5G3+l{N`LR=kJZ|q=mn`7$E=AK*^cAJwM6T3*VPT{*EX&U zMOyLHq&BjA=V{9u$445LPFvP6WBdzTnA@9c;Fd0l^^&y>Ya)wce~h%&SA|#wZ(;wr zaZy#MI(pn<4QfR+0Do5km@skrlo`{fPaJR6TOHLa>Vn>kiPM(PSUP^%q$It4qk6r& zrWcvMtg&%=lHP`>Uf(Y0HBOkYeA1N0WyyL6M)mr4L2t&yhRKoXGnOw;iqDAZ4eWwm zIsNV1{=rvAX zzI4L$h8a_sUTpR_l{<|acJal=h**y}lRGQy@>GQ?*S9q+T^%WHV#9Is+J=^mTe&m2 zv$@7SszU8QGd0fT&WmZ7Ke&*)DES9lxUI=Q*v4I!{DUjGtCD|kEw?@S2RCpxCja0T z?$+cV+`-+M{DXVA2e@HdxqG?$xciOC##CeaR_;OWA?{&gM<9m-eIR+Tc_Lb_Q^Ky& zImv$;8y%3=u>pFVwTC|&Rh|}h z1=A~s+Sm-ZCTDG{^h;6c8DUp(I%zf^u40G7$w4OntvzJ3`d;PU;f8JFUgKWp-r(Nk z-s0Xib~0ufI~%(gyBf2$aqn{Paqn{#?ouM7(W7Q)IueYjhgZaO`I#4eJ_~ zHZjYlNNY?!y?H3In$4V%W#(j7(cBnaEU-Y*1s+$Qy}oJnvQVUrEiG1Ll{Yh5*U+*e z5??dK^H~%NG>w#5?B8(TalLECG%at6w1izQ z_IFgQY*?|jsck*Z{sY^KJSo9;x52X_vpD>}*hq0U5w_rn5Ko*qh+<8gsKjYh7?sAn zn!&S9iTcGHiJN%#N(?Z?EiDZjx3ZskxkfhCNB?Rn`iMW)ouhdsy-1(Uq_?pf5aT7% zkMt)4IE4%(gUDbqWN~Y>UfEE!xUFdoTLU(%S-1PL_^Ou8cP zGIN{RGcg+B7!)g@X;?thjon!RnPKfjX0x75W|E!BE@W3SixeAs7<(Ff8DV2@qhT8< zA*Ce9{+UC9#!{ovSjPT|u>WUG&l(`>w`_^kwy}z>l1iIck8Idj8C^3Lnf+-%v|SH~ z8?sKwwh|e0m?SaSVrHhZpkej;B6mlv#jhs%-AZL=Z$l2r^axOWKoKG$w7m|y} z#bh(tLbj4i$faZ(xr|&+t{_*EtH{;l8geb!POc-@lN-p5soJII~n zE^;@yhullB zZTs* zrG?Z-{WL&}XfN8E_Mv@gKiZ!Tpabb3I+zZjL+LO&obEtJ(2;Z$9Zkp3v2+|APbbic zbP}CRr_iZ%8l6sO&>iVcbSB-I?m~B^vuH7$O-pDg4bn0?ht8$tbRL~gD`+LHqSds9 z?nY~Ah}O}1x_~aEi|AsygzirFpnKB2XqfIz8|YHnNSDzFT~1ffm9&ZOLs!w&bPZig zn~mkhN~6ix$5>^oHr5zxjb>w=v9Hl$v>I*3dSipJpRvEO(Kx_3(AZ=gWE^Z9Vi*P( zhZ=_&hZ{#2M;b>NM;pf&#~Q~O#~UXYCmJUiCmW|2ry8djryFM&XBuZ2XB+1j=Nji3 z=NlIo7aA8C7aN<6Eyh;k65~>1n{k&bZ#V!MM@5$++3L z#kkeD&A8pT!?@G9%edRP$GF$H&$!=szBO0~rEjD3D=5h6C9F$Os@Kfs6t&8ps$RV}XnVG9JhTAQOR10x}uM z6d+T9Oan3<$P6HCk+>6(nLu_1vI~%1fy@F@3}iNt5+J2OfV?_6E`bWGRqF 
zAj^P6fGh{H0?0}rO+fYmvI@v*AZviE1=0*;9guy2v;b)Z(gtKbkPSfg1F}DmjX(|n zav+dRKn?u1Zf}y%FTRDVlYJ+9vi_CD2MW@jTr(or57DKyh zC|q8}JgQ6UOX|Y2>#IsDf~MyQ@r=Zi3sx1+t_X&!tHPy0mR@PNHW;d|uPq5CGIBCH zOh_b^#*!t*YKhfRc~z*cxS}FlS3D=-ej2)u%Ic0;3)Yp)4OiEe25Zeg&qT)}5}VKy zi;+#6LW?V{6?0Aov+CLNtY+t<+0ZmxTc&n!ivRaIo*W56Fd5bN{X2&YhkfA8fIAtvM{P^S<{P#mZ*?h z(SInxBgFxsCbqp;yJFKO%xXAPK8Gb-RaahD5v*kYPPp8GE~7KLm>nZrTaSs>6xYo) zgS{I)hjj3)uA3XIjh1dK$+8Mduec9A26ymaWw@XmO9QJVRzZpCd=MS{q%^W_b@N6% zBWE3XT$tTu~RSt->0gQ(IkMV@CXVHp}XTtdW?OPoZV+44Rdew(~3+ z?vTN-CRkfp9>Tn_>cUDhtUI!YpO$atk~Gt z6=Ef5`n-ldy^?&)aVn`u-pp*5R3Pu5UB9$?SR7%j5i{-g(bk(}Tgs}bTO){%2E~ySJV#9W#wCaj$=L8dj#TV!@CdtKY!XY+7qbVD3N`ni^OIXmw zmF4Z7@9Wf{Li5XO%<YByjnz_Xx*3Yncnx;RZ>5wGT=zljQqeF0X zx-va}L61Nhk5Jv>_Sx)rv>Kdbl|5^Jp+jEVHrCW#8Hivxj7`))>nn=*|MQDSQaly zUP1dI8SLY>(N=4AN?t>c-ei8HWo=~j0JdjczdEvM6KmaU|Ux0>vG8nS8 zK&{ytd2cq!P+hgzQg}Za_RAodJel&n(AG~Xn%1t0j_T7FpQ0frGU^MP)F=mrveAvexP}4pX@x#z` zc)KZ^f9JC~qO?53mXC`=xZs;p!MXw#aiTItpwsAfCpH_0g0%~RtQDFG;rc5S#?>1e z>=R2+esqUyuyso;^X3R+t;%s|Jg_~PRLk&e)~(G_o!CJ;xG265X3MkL)fKT(-ZY+q z#-4U<7D!cfxTZXzHXRLo?FN{kFq^OH%BvDKJE6_6b{j0b@WS%Cxoo6iSud_F4YP`^ zWeciitEw6w?A5}=|q$!H!L_39{BhYO%2yGpdPwA;ket+F`8syWOSxIwnM$Jx}3v<6)) z#mJgUw3Jw^iRKhSn||%d&kqJ`*kYr60j?2nJyu(luw8(*)7ouWz3oAlsvwHLwzGEiJ&Tfux#U z)gk>*Y!}I9Wmf*tUdHxvHRYz}S~M?eFOCqa_IN9`Hokq)%rXuoizT&_HZ-*~!D!=S zRaRcMIBZ$c@%wcMw7x1)b?LhK0UZKeSX>)j%68}g2cemzJX!Hu+ICG+qz2j!B{dC= zs~T2B$9UX6ERL_i*(NtS`B_(#{9))a*6LDH8^p$4UR74jwjOLhjU|Yi4(30*ysEg? 
z+;#9rqHiy&FPlf%7{rF==;UWs$uVd*z-kv<6fCK)i|(Zpn;O&lc(m@D!J4&@_I*8n z5}J;(nzl#H${vUK*anmx60pHBvCZaBMaO|w$3&Ud%%6iIN#%M*N=WT}vBUg!Hku}t zNqb1v!RI`*ODgwvyVyX;vKn8WvkhBPkGK$h2V0}J_}16e)U$Dp9j+87`q}0zi6tcy zt-t6QQldX!k|nWNZNyhoX8m1;4oM{tYYmmf$&K|&v`cCg?YU?V%2d4ut%q6jjBPo# z^^48$q43-?w%9DLD>i+uL!aSRA9Lh3*MDWz6>RNe-JS9`qSGL&Q+xQPyR{;3LHndW z$7;=Tk`!ilc-Z#2th{}Uza3rrTaz$dVo4<}8t+2mKssaVdgNZT8j@rc?YkIhn5_&_ z+r$ItkyM~H^|RSgLt<^5rfeQYpB=2J;3k0et7vaA(+O6wlLN~-?lJUA>d*^o*|@~Y zJ*DqIiKfFzD1wiq2GlgPt;93FiiNn`i%&N7Y&n-a<2-{-LpwONchGc`)br>vsDn$a zQJCo@&4DkWeg6*jwZSE6jbBCMnH`LSi=rnkcw$&m&(65miqsNVI8;BIoux&mD$9cG zjdV)!vzz>s-bST99b#fd8Lw^DH_cA-9$I#+f@q;L0f4m|B1E(Gsh8~|JsM1f6%XjpAbinKT9x^#dFL(gH1-IDV;0fSvHQ1Y|FvEVH8_V*lbRe zn3bDEVpS3?Ew(Vh>W}Ui&8)dlZFZh&(Jp3YE!qs>!uZ(n@LFhNBb3Y8CHi*M?e4aH0hN8gK+yq$d zu)Ss_>tM6l0Wp3SjDEvjUFR~2H;hBcJZ-gK-ubs6>PZ_ z|B_E+YAWj4Nn3p=Nof`;rD=KTmCQYDNs>lM`utQhtX;9bVFf!o#R@86 zpBlsEA$B~ApP=w#ANzq>B4wyBA*Ftj72@5uI8+&)A6(21PD)Mpa&%9v-emWZFwTJW zZ17JUnVJ3-=s&uXUu2b-GFG%*I~UJ=v61Q(cZ2AvWVP>I+?2_gHA0wcrqi|gbGVC z7B5qXeLTXiRI$}}1ukEz*wh_87{CEB{^6eef!!xC&x2#f28m8}2&R*{lcuH<&v*Rv zA+gpp<31GiX5B#&a|Zm1Gl&z5Zbr{|A)BW|MeC z>?;7P<&>&D4h2)EYfEEI5{y+&bf}FAvg2^JI7;jhPDHg`at<%1W(|<7SIw-Rf?BD| z)$Ea3wH5}86XkX~ilz1hcKv-_OLHSWxx#B7aqsx!F8 zEUqw^$Xxbq61Q3v;rNmxp>_ePrFPpaYBj-9c3G)BQ3n^JLTVkPSE!2~Y?wjdc}B3d zwz@VE&Q=tgpHg;lu~ybMi&w86usXgJvSzYmZjjw_V>g`e?pKJde3S0u*tVf?>drm~ z;o{lsfRNP=)&o;5O`VahK<$b=f{wN6Xx)@nmqfpM#dZ$36JZ;uL~&e=${FiAO>tP1 zgOxSxWAyCm(#2+P*`9Bl*hZsKma$uW(G_NK34SePha9YgEpgs}%Bfu^htg#Wl+yMy zlABQ{*tL?J%|4)FWXT6zw%bs$ylawXKGVqF*^NkXQISUS9+WKYTBNvwOwIfKsF+Wq zw=|#fO1xReMvdqdw^Z$iP`j>c(N@)0R7AgS)m7v4jekuO{iB#dD325p%{6|7G+0F?YTboAjgoV^60>>O%dQWx5nwL+ zXp3P|6NqhX9Q2=Ki==y z&7#^wwm(3%E_S*QJFZ|Iy1Jg-=QjIxYPbFvQ)tMu0mo)_w)l?UgJK`m%o4%IkXUr2 z&i9{Ta(i|yIWrUH4l()x0x#d;wj?#FFEOdgJd=vfewc^Y@*=u953^B=or_h~nU~^h z-=K7tdOf=_7rXAx#wvDCgRSHfE$4d_OwSTNnL|2GP zS*N}<+veD&Ao{HvFXff6Gu~ph6>R^;W)Glj)-6&d*>Gun?6pKN%-Zza>Qem3P+~qj 
zu=hsctm~~z!m&Wj@wm9I8b5onwE(-sP}lyw-rlc!37E^6xbWiW*)dxvl_yGkV17lD zXlHC=%np)b^A|f+jy`Cqt1hWFKeyV4bUAX%zA;fs(Vb~*nlgLFa7-ZU43bp#iOA!} z5bSt*|(@>2HIrUbt- zvhlk8IL$s26DW;79>gc;4Xc}1w0}PvGa&J7E&4W)9flNFRO6Gc_)T4QX{nqoD6QAC z?7O1k+^#BC6tlzsx#gAkk;!tM%RU=bcg#~Y_TkU^rOh5hon1QDF;5KHld>>70m0iz ziPtY?&zOr^GjrBrgH3I09AFbAyGB}Gkr-^|qf(b^!TP1tjI9b4r*RxY-v=(QU5-;=Ii*cYPz z*v|ZMP-NH8nKNEnHVa`1`YtYC+s3$B3=bDWF0k*yA;w|bxU{*UWm(#`&ThN2udRtT z&Q|ELCV-cKOX}HU6LyP&-T6smaZgNUcGr?&jkAuuVGLKbU$M9Eje5mh(@WFj8&Pg% z*W{9pYVFHWY3Hsfb!b~nD78!1q&haS)u=V0I#eA0@XwmoY$V;Zk6y7-L5o`!zKVC|=vJdLz3phUeD!N5<-l9nf#wv{wTh4x5Ujj4@9Zb zRH^8tHdYAbt*z_vdqgZuGnZ_8z~GDc#u5L(`ceCJM*G32I6H5}%3x)6Eqippl9&@6 zTg?ao^(yk#!v~`k>{3tsT^E}x<3C9pA=(c|<;w0V$BydR+-V)Jjza0_c}vI7Q_ZX& ziz<`zR+&>`o|4*6K!r4w5vx|#vJ(rmF^Pp)qLQPFv*J2-%ftL$WIq}8f~gVZu2&T* zDT|IpW+qQV$uvDBcgfh=-`o~iPcZFgqWGNd#LG4kxQe!XEw`V8qG|d}?h!}tKDOWP zvY(HNbGs8}`~ZbrGfaGsvtNX&-Ks2HBS+7i&C1%+?LgzJ?~-cvVcAr@6jjr7#6*FH zB8}@?7!);S9AfWx>TB`cX7oZ1-cySTw~xb@qjY(elI>iYU9e(@XKcsN{u0Z670NE^ zmMpu`iNOc)xgV~S*%#0tyO4}uCCqZU7E=lJSSswDcg;ff!m53}b3G4fO{m_zYPA+pExVUvmdvdvoo&$Qyjrc&Y|&X#V}5G2-+|&)Jyt3U*n=hWo`U^u z)b4{6$1w~h+_W~bY}2Ni>e5jBF;_cu#C{)I`qNm(paF~)OT6|6wjif2O2e@~WPg~w zMTr5&7`F_8yd9x}TkVh7ALSZ5Gh^=^7>}=`a~$Djf872=jDpABB4)&!na0o9U*d*c zYJb-Loc($G3-%X*90BA=AV&c?8ptu1+F!Q6Vt>{Cn*DVk#{xMH$mu}N0DAxb1GzB^ zD={7D(Im}vET=Az8#^z@59}W?6r=q^AjfaEe+=Y=EHFm<=M2VZ|HA$ykQ0HNn!qvI zzp;PMh=}%Y?cV`83CPJ?>_2dffptoc5gE-||K0vqazaG4|6?!?)oI4O!Lv?%ElOX^ z)_kHQJb`EYK*0v&OeC0&W$Y0Na8WWTvGY>s zgQd_H$VKfKMPYz2oM9A&fx;kRurNdzDhva1F_6tbwgA}*o^X!ZO=;GT-q3i==2RSd+@wf&Lh4G*h|XHVcfHc3A>0ChUYm zX$rq7elV1vB{BA*urr73#elGju&XdjC>CZ5B|@nX6v~7-!d#(Tm?z8^Duha*N~jiU zgx!Q%AtclZ^}+&Sp|D6;EG!Xr7xobL6!sFr!rnrIuvBOimI)DIxv)Z5DKrWD2&;tE z!Wv<%&@8MI_7z%$R-sK;FKiI@6ZRK23I_-W3Y&z3goA}c1VaGfP~kA)aN!8yNZ}~q zXyF*)Sm8L~c;N)$MByaiWZ@LyRN*w?bm0u)OyMlyY~dW?T;V+7eBlD&Lg6CeVqvqe zMc67_B3vqL6D|`j7p@Sl6s{7k7OoMl6}Ai43D*lZ2sa8h2{#M32)7Ei3AYP(2zLs1 z33m(k2=@y23HJ*R2oDMm2@ea82#*Sn36Bd;2u})62~P{p2+s=73C{~J2rmjR2`>w; 
z2(JpS39k!p2yY5+32zJU2=5B-3GWLZ2p*(lNJJ{~qD{1mf+&g(Q4(d5{URdK zDQcoFy2Jv}EqX++SSb2LzZejU#9m@=v5(kS>?igY2Z#g3LE>O>h&WUnCJq;O5J!k3 z#ZlsDaf~=t94C$!Cx{cpN#bO2ia1rACQcV;h&zfqi8IBW#a+Z*#aUvpI9n_cOU0mA zCe9J(isj-walTj~R*F?(wOAwWCf14}u})-z!WBTS1acLStASht@sKyC(d3y@oZ+y>-!Aa?+{6Ubda?gnxXkb8mL2jqSr4*+=($U{IL2J#4yM}a&B z}Gt70CXVGK|lus9RhSH z&|yG_1Kk1W2%sZ@jsiLw=op}5fsO+@9_R$16M;?wIvMB`pi_ZP13De(44^v#-3jPS zpgRNI1?a9oX8|n+IvZ#S&{Cj5pk+Yk0G$i89Oyit^MO_Xtpr*Hv>Ip)(A|L60u2GJ z16mJs0nmj&7Xe)ibP3Sif$jlxPoR4N4Fla9XamrtKpTNB0~!Ik9Ow$5D}gov-3RC@ zpsRtd0lF4wGthNF_XXMlv=wL@(Dgt!0NoGh{y;YZJpkx|KsNzB27#cR+qnT6Mmi)2q#%qvRbb!eC`+EKg_y>dl6inpM3G6~Ou*-MLd6mLhL&e4wI zU778Y3glk2%MI-)K7h8_(2nB6XwfO!QG5)|@|y~zgt0wY(2fofP4h%MI%IU}3fj@( z?2rxiNFWv17;ot#VZI;NrRwDvZ^8c^P7N5_sRk`LO^u`_z) zj&^j+>fGLnwQ`i8y``zMY-K?^I?B-8Qey4%MGD%{QI58j7GTzZ^)8B|qC@)G(TJVslw4-D94uQt6@Ps>bfW6SnQl6~%Ep0oA zmgi_d+a%0fXJ|*qGIU8o4Cah>bgV$%B#2*Hw4-Amv`d2cWkEYS)}VC~3C?28T1Yzs z%drkkyMT6dw4!6qXh+9}l#tq=)h9o+bZkV^q%vs-7FtL+j!kHnRPOC|u`!Niwf$Li zQja(Uee*y&Iu6Z}SW+^U*VpDucSM%NVzrSB#&8^s4oM{td!pX~?dUiT?UGtWdkM4$ zWrlGgS|^bRJ4HJ>PC=hM(Tt&bjcm< z=(ri}2a{yvq{*rYr2>u}x1mFhEJ(+lXxtMlNXI?tl-R8<{1qd4u^=7yqf#y`NXJ8H z*|E;pz5Ut*eJ4%%Jc>?PSdfk<(4gxqNXOIYpAQSt@f>>O#)5Rbh_-pLARVuuORg+P z$LpO~C$S(MZ)FS}$x2BqNXNTqn==d2@d28rKA!Ih7Np~2)J|n)bcqG&_$)86>?}yf zmnfF{{4GOd@gwDQEJ(*UsMIYMq~m*(NPP^N5)3|vN-PZ%e4Y+0NXJj8mijg_yIN)z zq~llAOGT%o)iZHpNi0amA9>`jQx>G-?>zE{UtnsZ;1PVMmzD)7aj2I=BgN8>H-wBV zNQpV zSy_OsL49zkWvsuI;-Av zn`LQkF3Fpptns%3BGRt|3sRboBB|XyZ5gL!K}uDqkUB3VE2Od@rQOi4M_7GIvrIX@_*;EiwZOQd*9B-DW{bO(@d?EJ$fJ3Z=f+x0GYz^;S+S zNU0eGQ>W`pg0WdU6AMymLA9P?K}zdUD|NY=J+gRpMxUu!p4vqkL*@#9<|QvLGcuwbX8#MXdu1QaT(JQtKeSLMJRp=_nM- z3OeZ;3sO23g;RI-7Q{;?;Z9kQ(g~=Y4@oW7LvZyM&dP$6PDbU7b)6>*QaUZ)IC0RA zM%f7qQaTfrQ@c(MrJKZpl+Hn&tn`u&wPY#D^ejl}e3Z;e&#Vd(awnlDP4srWJRBJ zsJTRQZRe}iMzOl-S&-7Tm_RDJNZavK}vU^az5QJgK{zpQo0*uQ%`4dYJI6JNa?IqG%QHz5mf79r^|^2DLsxU{LfgB(o>k+|AYl8J&Q^GCoD+m1(fbmug}ba zlwL;B)Mb3;CZ1?H=~h-T165Na@q=7GDPzr1S*}rtWPmjmC1W5MA13WkE_`cbeT~ 
z+G17~r1V{G(IqOO6BeZOBkE;s(aF)}z=D*1LCw^YnJgukfdwi3j!IdlzNCG)rMJa? z?_qitr1TfcX5AuXlFiP7l>SBGtm~~z!qHuD0+g4D1u4_+C15UN*dhZVlc_97*`8m~ zB-&YLEJ)eW<;ZhmLCOjykaY%0D*Hs_@nZ-R*_Z_Plr>nwbSD7ocR;NhwLv z(x#(rB!vYjdr|PeW)CW$O8NVzX2k!uI-0t-?efC+S&LYRgHDGx@) zo@PPH!%(&REJ%3->huf?QXY+3J;8#M$DvY}Yr*;@I=-lqC!%8aSdj7*)aWJ)Ql5?? zU1vecJE4EqSdj8A==-0uAmw6Arsr6Yaw+Qd7zQjf48CDZhj+(XRFf|L(H@g8PD$_Jrnnm&_z#2r|W zvVn>{&4QEG;RBKxtMUTIAKzmSJ!j&G80VOT?% zk%1oDEHg6D<5{qV@|_H8DBmSBT+kChpG{y5<@@9Z8EsI$Uw#1SlR%%^B0nUvLVFtM zGse6<8OgGIS!8Wn)AFWBOW5UNe_z1Lqq1Se+NQSk%OdPqL3??Hu(k0+TjfvWPvy^mz6SJlpl@J(SkZf-NLw5G zO{J}|mX=@|vbU=l!H3Db!ziRr{suGtEl>t1Gt6jykbh@PL-|MfC;4aj7x`EDH=u6; zeH-XIK;H%W9?vQSy1ELN5%yDNJrdn$V=VP$WnL0PIaD$A6JvRqlAtW=tmeUw$oYGsYG zR%uq&Df=odN~_YQtXDQD`ziY?8q6LCV3(A&Q}Za;S2ca=3DYa-?#UaYa<+1ga;|coa=vnba-njOaSL3wkek>mn&B&S1MO2S1Z>j*DBkU>y+!28yOg_? zdz5>X`;_~Y2b2euhm?nvN0djE$CSsFCzL0Zr%VFO)BpuavKqZ)KLq*_(2s$B0`ya$tY?1?^b4S0 z0{sf;*Fe7k$~xkAK)(n21JECV{si=ApuYh96)5Y2zXSaP=$}CU0{S=5e}MiAJO?}h zo&wJUZv);Aya2oiyaRX%co}#Fcoldj@EY(s@GjsBfOiA$0p1IIA@Dxn{lEu+F9N<7 z@V$ZW1AJfL`vKn{_yNEV1bz_kgMl9c{7~SB0Y4o09e^JJ{7B$O0Y4h}F~E-nejM=Q zfu8{UMBpa@KNwiz=wdZ1HK;k1;8%^ei87CfnNgr?!fN> z{GPz?1$-Fzy@784ekt&cz%K(n0{n8|R{*~f_$J`@0e%(mtASqw{953ffnNvwzQDHt z-wJ#i@auu!0Q`Qy?+^S&;12-)K;Sn4e-Q8o1Ahqc2Jis~{&wK+0RB$k?*jgA;O_zcUf}No{(j&e0RBPX9|Hbi;2#10 zQQ#i~{&C=+0RBnfp921A;GY5hS>T@o{(0bE0RBbbUjqJR;9mj$Rp4I({&nEr0RBzj z-va(^;NJoMUEtpX{(ay-0RBVZKLY+^;6DNWQ{X=X{&V2J0RBthzXJYi;J*R>Tj0L~ z{(Inm0RBhde**qz;C})BSKxmG{&(R20RGPen@}B!Yr-7agz64xoRv+ejzSZEb~d3p z7LD>?6RH!?AqO_0IvEY~WD}~>(5WkILUqS@Msj5nsym~@gs!p))miA?1va5tf{vZC z3DvR;X6!qeg-xiIqgh^TLbU>2a$^&!)o7aon^3Jq!!EK3)%vs%#cxt4K*~`zp}Gid z^JEjMyQ5Q2u?f|^P_1ihLbU;XyUr$5m!W@NY(jMfx^#t2sP2QF`LGGqHRzECn^0Yc z4xO+G)mAjhg-xh#$YyCp@u?fpG8yKUmQAQ`Lc@I7gz6#al`ET2Jru2zQEr{F3DqOe zr*k%;dUR&Hqyjk(?Q&xiswbjtHa4Mp3R-l^CR9&Hv%J`Z>RIU0B{rdYZfa0z*o5i@ zXqFS3P`wyU^I;RJThXI4Hlex=t@2eXnNC!0{+o+g}ZY(n)0 zbVzQ{ov{hko6#qCHlcbO+ULV2RPRKO9N2{FJ!qH-xv>e=N72?o 
z0!zmxRG&a23*)VGHlg}7y5z_vRG&j*3ll6en^1ibP4i?Es;{6^SJ;H=>m9O@jZLV& zg~k?OTXr^~`fdm9Y-~dH12oRUCR9I00}E6y4VzH?3~lmb6RKaLPgXXe`VE?7V-u?1 zqeU)kLiH!Kw6w=`Y(n){G_rujBG;nZ*;K~BYQmDkxl62(8fZB z%fcpf@@U&lHlb5Mk$l*MP6<77XA?Tr&h4#iLZ^=QmZqMaP3Ux^xuwL~=Zh3Jp|cQe zEiJ&T0V|2k8R(FHb~d52H=1|KCUo{gGs`%XnN8>%h^Ce%n1M~`9MU1s>}*2k@D72R zScYUap>rgfS;{jDo6tE1ZId`@ov{g>l9KKkatCUjP1NsN^!yV}ft{c@a%vgj)|_5#r?sL9bKK8gw3&PN^%XLFciklnZLm zc>-E?tb$HZgU*xDDGO@Qc^VpY9X04Y6aDi+4LZ+3kK9m$&hybWFVvv(B6P_WHR#;Z ziFFcc(0OUb&{I%@&dbp@XVjqcDl|`ho81$rLFcuoor+{g;d8|H-wC+LFZE_mWp&rDXb2tLFcpR*A3L5^92-01ANN^HRya9HBuk5rvxACuGR-0 z=}?2t*HA8%V3s0>1=|rd=zJ5MyNeohzJn?`baMA8 z4LW~Bg)XB8oxh-aYV~#qAR}tf`8)b|#-dAy8g%}Jo?Su>I{!u2RN75)(4C_OHHtc^ z9W_}eKJ;}&4Qh6DPMvPjI_H2I)Ev3!<&GNE6x8b`YEaWqq_gTxxA)Zwa!KC&0-XUh zsCiK&wY#S+GP8r1rtRO(o4hG2sEVgq0{)Sxy1l`?nM zG)hURL2WSlXYMzqzh%NtiyG91p-$#%$)ICF4QeA$VMz+S$WkU95P-24;z&1$9-$Q1 zvh{M5Gaq7q2)Ca>#SU1^BO7frrjxmovPe6m6K|0jP=ney)ay2CP@9M{J%Ae2rl3&j zb8t&JCLRangc{VQqhRWEok=j(XfvS(wVhC{XHbLME~u5dT+JR?ygH+A-z}&?tr*2p zdqTdbL9G;Z{xj5|HV5TXYa`CW$P+cF%|o@+Zkt7|18PvKM1|BkNUzWdYEY{|v8)`8 z4mcbfm!im&lDWZ>`QZ@b2$#%dzsRk)E?CKsU5QQ~LgCb%y@eo>Nw`zgptbpW3|+8+7FnJC;&P=i_+l~cP;4yBuf8q}7ePF6TZhg!0fWO~$~ z7D36Zz?aODnNfq<%5FrO9yO?~Ldkr%NS4f}MGb0eQ8AxJZ&l2W8r1eh?YflNI&+m< zv~xlYYHgT8R#$<{)0n$l94QdDFThtxm zG^I151~mgy$Y%u0kU|P-P&*8j^XYyWl#@||+L0)mdODL+>q|uqYR7al{~1w(+VQBI z&j_B#e;U-Fb`q*}vD4**8q`k36#i$ZLG27o?tg+B)Xv7F{u9)ob{$+Qf9Z-YXjVPG9x3x5yyikMMEuCgJ8RnN2HK^U5TXcy^=ma&W z-GzEtTXb@CIiLo$dr>p>WF|`qW-ksV`|CZs~2=P=ngTD4TVQlu0%_YEXL& zg|n`=G70B~8q}WbUIOMaCL?N4dnUi4Nwl-hP=ng@U5-3A)S&hfCXjUoNhYdjlo2PD)9VmNuOpHK@Ifg8wyYPAPb_6a7D zYX|KDYEb(e6X-JJE)8l>`wA6%8a1eWi>lp64QfB2PS2nQwVzR|Cs2dhZ>ZGeTCjeJ zj^7s1{zS#@p$4^oP@|iuL7kvT*HMGI4gI@@8q`Jf{m)T@x{S&69BNQ^qF#@o26Y$8 z^#p2A_n=abpayjxO7#qCP%lEQE~5tZKIq;J)S%uU1$qKCs1HJ=G%Tc^Kn?0cQM^Y` zgZd6Al?I#E32qOcHbpOy;UCzet=gb@Sg(&l#WZxgP82ht2KBM1_g|m}^$Dn)28Gp0 zq&c7l^~osRUDTjH4OO~>8q{}0g*26sfVd@Ad^w>8^_@|#TlBh)s6l-eN~Y;4xrdk; 
zHK>=Mcn_ln^)eJq(`RyzxC3fXFGt0mMh)r}sM@W{>WCWDtGgX&8q}a(i>hflVxmAh z2ghYV4eIqM-Q%c1eG$t3*Qh~#cTD9!L=EbDVFLdZYEW-L_3l-xl?P}+4eHBKI@_Sp zd5f`1C!q%Q6)4`rsKNMZUf&0``_#8aTE>({mN%`9EZelHrn)o~p3SaIm4?~jk!iUG zE&XXMV=xn$Q}kx;x6S%G;Qvk_arEeSw-i)~c`b{5U#~6=#yM~L27SLI?4f>ueik=u zyQ`0WpuR~zNIzIVL^pKM57iIT57&>-kJOLSkJgXTkJXRUkJnGoPt;G+Pu5S-Pt{M; zPuI`T&jcF>HVQTy*aWaSz$SxD1)Byo7ueij^McI>wgA|AfvpeN`hjf#*am@Z2-t>! zZ3nQ81lwq^jRo6yuuTNpWUx)eaL(4x(a+V-)6ds0&@a?4(l6FG>s$1#`X&0M`ZoPC z{c`;Z{Yw2R{c8Oh{aSsyew}{3euI7^*rtQ67;NQWs{vab*cO3pcd#vm|6}hy;G?Ma z2JWB0&QOKz1%V*Fh4e-dNC>@2=shG$2!w1%0Yc58Nl_FlptR7dV5g&i6hXR57ZJsR zilQQ-yuaDWhJ@nvx$k{G_jx~`_qoqkV0Y%sIsfyYGjnEUcMWc77$ObBq`@N%nbI&) z8pcS&cxjN*Fhv@sOT$yr@QgG(Ck?b|o-{0xhUL<*P8v4m);S@1iZY^?=q>t`e>Yd; z+N8m6fN{d;0Ppwv1tg_pCh(0q!w042Gz<*P%t}ZcHehf@!fE z2@z4@!9}|NW4%s~3^2C+hfTfzyDI-S0nX|hDE}Ynr6Q^InO?GqQ`Uc2^#`O@em@l{ z_=;Yze`!<2+8s40v2de}#q~=0{||OA&!D01L{e67sy=$RNgM1b(#6J-dcluz zKkpw#Gcb{l^JGwL>x7J?Q3)CD=GjRpp13sc%SDbi#z*wRtNp*(ce{4z{D9%xXx59a zF~InSuipR7eYbh{zukMAclQP<{{*6F4>Vlz>!* z5{}A&yb>6js-3~y^M>|rgTz8dYhy)yn|v?vzkBi%>`ojU7V>ZBcyL0<;OOuoCqKr= z^h(v^a`!(BgfF-C^RlA3S5q%7;6HzS4~>k74rTNTiUcvL>$Bu-Ax0GP2W{fem;v40SvBnn0IAcp`h?a(i($Gj6 z8ZR@pHnuUgHMWz6CerY@G&GflX4YkFBGAPv@J0u|^Au-FH(Ip`*+1;;oN|KwWg7m8>dh}A{VL%R!Q)GI*ZvkX-q)1w#y(n8dKh~edl`F6LyR=UN<#~2h+AgtYwSl; z21o-;Gy~O6>n=_{&%HrbI`M?>ROsD6(){QjBR@TZS5k8B>kJrJ=1f&=2jU zp~Etx$C$2nOGjzw^najRns13%bZfERmayo8?ig##eP|P;aS}~nO8bN~bn!MJKDQ1% z+c@>QnxMK z9&t0jR1&Ax*r@2x`c*f`6+@&`mQ5uqbP0)I8M+0SqtMAybkf11U2L=}ux6gR!p~W3G zexw&SSQ?TG7q_L&@QEw>j*Scs3f78?4k;+^l<~|%`|GUnoL*dtGz=|NTvOTc{Y%>m zii-%no2r+MS07rtuZ`bO+;`HDDh=$2bbp$jR67#0*RSj|HaOCopkYCzRd94@I5jP( z=1<1ov~nLfBHc9pPPud3XZ*jTasy3BRM-K0%@A{#^q4ZH$n3kIbePn86 zYWC1lo10?vN=}l7$wex;qu;c%ea1$I(fknRcfKAswJ~+nN_)__ZR%v|OdT2Srb@#! 
zZ)s2JnY8`G38l^#jGsNnhHF*48^)fdeh)3PKi4GMnixlBNW)BTnX|lQHv2WMbN}c1 z(kYSN9uJEc-+OFGSQH~wM8R%kN;aiFwB+HYG`;OlOT+9!ZEwD_J}Lkt?iavs4N`N=S*Bqe(-2-nroV;*Yag)m|M7(;a*7pUj-fDpfjtnatm^V!CKD0HPO@~@WzBX~(F>&14Bn_{73x2~}aEbjNC|iE<_F_nQR5$^T)Q+}y z3;oITi>R4r`q^|t8s3tIck)cXntqdp&C;-|$V~#%ZGBSlHV;qvE)u7DlDRn$5!$?E zbVOK4Kty;{TtHZG^T>eak+E?B;Zd>S(Q#qHEh9t23r(ubetJRg-d9j`OpEA9_Q8O# zu+Uik6A~8?71TT^ASNa%COoV~Y*bh@Q%LVb#B4BgTw7vRq+!bva|vnKs_iQUKg8pk zYZUyRkoQ-D%w_nMAahx1*v78}of2jI{LSV4s+f&tljv=XH{CXK_0#Acot}`E)HNd^ zEi*AAB|WQGN?MY8bmxRD?zHU86(yzNZE4tk!t5|RDWS93Wv(#5XyWS;eT%hsrzK|% zp`=2ubx%mmb{koD$8j-03^Z3XSL##f?L2d3ky!WzeW@yDE=Aefw1{i(Nxrv5OU+fy z)kNY$-{-r&_+hGh*YZWLhPjp&y_(YS-V$?dX?XviM6bRUy#Q(0{h;WX{mfy$0KF>> zdx`{zcKfy*Wo{_UOU%q0KUiXB-nfrn)S`tQxnaa*Ic|Ft{dFhbYR$}Tgn5;@xjDuh zYi?nVGq*IiGPjn71JZC%8a|YUL(*_q8a|SSBhqkmmAS3(H?}o*Fn2U}GItjK(r_%7 zerLouE)6H7;iNR2$}OAQwpUg{MzTALE7QGGGUHNHlC|k4G1Xt@0(VNHyG>?~lnmyW znVEd8Zs9k4$u+=e)?aR(kvJqJi^(YS^MV)kLRuyaPf5+;t9uIkty6&hdc*L@um~<32ZaSiCq+dhM7blwBBLX@ z4;LL09UL?`G&nLkEGaQGq+3G9@QASP`qlYf`X3SXLwwOqFuVEPF7qIBqL~x)Po&{f zX*j*qJlLFU9wH5Aq=5n9ik2clbXD`jtdx-{Svg(Y>7InVXwurQvgF_);3a zk%k`%eyhzq(#$EuQu8SDXmgGUY2;GO1GHol++|&S3Ym%Waa*N%r(z5bF%T3G+dR2uQ@U_ zGwKC&Y50(eEH*EDP(jPhE6f~_zm*1#HP5oNPb=0UKo^}<<874 z#=c#=SA2G2qT8M1PC8*Sm$uklT8)Oeb25ALXZ}|6thQW3u9D|)=Bl}+a?5n@7~hiH zTP(>Bzj-oFR;uh@y2@i!tMPh#${06W9o#ZCAvu$OYa>>*W;ON3n;tRdmdGthN;mL+ z8d<-w)vb+ISd1q%>3Gdrwd?fOtD44&J@vo3wSu%|@@m(mZ2cj&6ZHd1HwX-(4n4Hd zgqC^h5}%%s=#B{z-2Ny{aUr3;O6G;s<=^2EEZ8E2ebbZ1UYHlIy%-hEFjOcqNqWrk z8rFT_>jRACJI1$j=eU!+BN=^htjXhDGd!NGTt87#m_!9pO$3M#5h>zCJJCh-5rc$B zj1ZY3TTB$w#B4E7yebxo#bT*gE>?=wVxxFhyeIaHW8zbBNqix`6j%NH{8T@aUj@I% z{Hpr}_=Wj3_G{zU-mjxyXTNTK{rm>{x&1Qyvi-*RJ>&PB-%7vDe%t)s)1zF>PybUi z&Hpq155L{g@V$P>+-Tm^uVZ|N?5qR^@)o4uQu9Xh>mrdjYya}?C2yLy^3#Bf8gHB5 zG3S^!o40t!jvuArI>U!F{3H!OuP~4Gi#6{s@AQl1rvVKYa|cNS=SaWu%YfeB2CVw` zdw$oPbjJ06u@H4)p;~`mlD1HMR*rUH`WH)TmsY(?9yjl2FFasAsO5YXuX4Gi{ ze(L*+`7cp(mHAimZ|0lk-_5tof0%EZ|CCBGsT7xrK`M$=RH>AZN=d1dT4lat5f(p7 
zF;UL^yYS~+NGUCqGEylk)yHyKL1C~gn_H(pN!WvPUC$`*ZFs)1lovW>aC_68)I+-` zuZ=%l69(OVo`!?_~93IdjHnMp{OjKM@ zSX7I@EuckMY*=_mbWDJDe<^@7oS=Z_(J?UrEt|)*j0tNI5)>ZG4UYPSYoOQvZVkJo zrHsgS-~HKDz95*zy{8=Rhg93>(hBs(p{vJ}Iw&EdO=kXW3H@9q|Gk7kF-6{bbnMv3 z2qsnmVWGj}>sJ{Y?7e0j92yige!O=#v6%fT=4l<4-$ez#Q)+Qos*0LxEKZBdQo-`5 zrJ|*hrLx7}QpNI^R34E^IjNMFicu;ishFi=k&0C+HmTUxSgHwszc@=xOD#)nOC8~F zsb{G#6$ibUB$Y{0nITn^R4a1ktNKf|N^TwPOk4k<)#g0E#1w^p#}Dz0>Xcv@T@*wx*#xbvEoqZL{3bGLfg1?B3SE7Dg*Qi*3X zQj*h(l+aguc&mkbP%_h)wVtEz8|pIz1Rl`>j#$4;A^_0j*?K4FkMHGfC3L|bA- z&DEBMmPVGwmL`_REln-WEX^%3QgKSfC6x+Nc~mMDrBX>Mm8HU-QDwEIg(XglDsgRN zX=`bx?Jx90V=bs!SO-dFv{Z7mJ*Ic6H#A){+}a5|+t(weoz)MBCxS^l6QD(0i?IF{ z6Nv8md5`|@`0PQ$S-N+j6^$0w;E~FN>{Nbym)7f{tCIgxXKi&Jnn}yz^gF|y$zSvb zbeMzsf>pQ^v~Bxd*Gf8R>1pX@=`G4w`iS0^ewO~62xtd*y_dD7bo9(A2$8qW&gZQZW-YhM+V+I`|W>sB+ydEyTtH>l(f4qa|yvakMA_tx9;8dq!K2T8d8ar zO7+|%zMoNlB#Q3R)12CBKl0qdqZQepU{~RkE89CG)JK7Azltj?BQ2vWqb)g>F_y7X zsVSAZQVEbspj3jS62k2Igk^$dA`Qy5NXw+&cdO9NTRExJl1go5kOkvhE!p3`{X+rAg)`o(JN1ao4gwZ)kKJef9cqn2Y*X)cu*sl+a|oFKJNNu`CB+Wm9eGUYF_RYrPF&pCjmC42u`FbDJgG`Owk zviXlHa2=?~>#f|L;qI)A9KGw#YF&3uDskFKpr7+uE?T}4HCI|bvs|)#Zn*{!FK#qPEN!}fcPQ)Q zd#_uU_LSlsP7?E{-SA*YdmNFJXT0MRm&^t1z~;7L!URsqln+ zQ>i?W+g7{5ytiUGC%14z!=k-6-h#B77!l#x9oV4pg$^3llGalCK|?AGC-J^9-rF74 za*Wv4@>ZkOWHnnYR!+Xy*}6%kyHt8erKePS-8*6z4A8z2tmxS3d$q`z`L{0?87X~B z78w>#JZMDh9l-fPQvT_D&4R;KyOgX3dYAk9ph3Jh*4m}WLbEVSo<4~Q*W(aPT3R{W z4xqUt@L2t68tqg!M7y!aBi`eE_qnW(TRHPuW^HP1W^Ha|H%gESgM6Y?l9pLpSmXR+ zt>mp+D#NrfePnK(p6%U(v$PXF3TFH$ID;uT1M02sPuvC(zGDIpV%ZzRP;;h}Z3&2ga{dcH#-o`Hhc2CGi(M~TowRexw&XF>^ zB#iQ2rDIZ4q3FB$`)qt7k2>>v5=GwhuFh3Z&R@MZ3p^RDZ!fZlw{&ec)1%nmuj1mn z`?WQ}n#h=BB?_ratWkbh&^Qp;AecN@l?|Q0s7Ox-jQi z)2tq;@c-#~))CeWsf>_H23OZ9?Cx%-9cH_C)vDbgAvrB2D?7rXINF_@u*<3m==!R;|o_x2+dXueFwMI>m%BUrNahx}6ZQ@KZ)066E;Nih8 ztsi|`nQ5I>uD`*$@otB1 zlFH=VI(p~5sa5nX>)TQxE>GUA=w|D-dp5Y;xnBnn z-dm+|QL27YEvALMN|6zxU@q67;EeX+i^L>kB@SuHESi(chm_)tB6;C^e6OHs1a9+#a=l zXT4_SkjTAyjuGD5^~wgRydjl0_3Iz`6a6mS!qLx)<72&_`^(PM)0Sg=T$(m+&@QW% 
zL*9FZ?j-L%s?P(pt0t_^IjnDRy?<3LiG0o0*V1m1XoqC&n99_IJtR#(Ro5?Y6vU>; z24mcO<|2QGqGozYg~sKOaP64}E=1fNmw&hZ!QD6OEvd{~V!bVu`M#Xi$G+y7S|RxX zwh3Et?hM=fEDNOaic~`LYzCVml~<**gi8qp=lI&Qv%XbI*~)6Gl=dqtl?A!&^K6f3 ze_NQ_{$VSaZFcT5_?5L;ZMNL@QehWe%s;jNv6A+`Iz9OaiaXhpk<*>WNR!=}zHL>u zRb$k*%(X0F^{UJxsG?Rx-ZEQtTMb@gmb6qVYqZzaF*B49?FnrQYpHL3 zl8micQ^B-jf2Kg%UoMkt$lQTUT*uP zwx?`QYyW&lDqFP7@oi#`?S*?g@g*&S@7`Txp6%6p7a?nTn@Btv2C?&vu(HSu#u(qo75vGTS%X6Ug?@%|;5W~Xw&`u;6y<95N~TBY=B zPn8P4T(CXueed-=@AbWc)!HPdG4jSHWO71S{edfT%Put`J=2}kD#MeVUNGXEwVl74 z>=&fM(+|FGzhwJ7|1`*UMU=69Y5U4{)%JD4ea<2`IHhtxDxXT_VE%`!ZQifAvgx~- z_PJ3$INC;gf`A(O%KzT>6SG^}54Im|*QN5IR1Qhy@KW2)wi~uzq(Y*ckjkmtw!XWH zoG5mT@62895a>j?htXE3s%KmfG!hr*;M7z8IafSFk^7 zuSg9l*(=-q?Ntcc-`_bd%a#3)rSb_EV(-6qtk{ya#;JUXRMxw+GO#{Pw9|X9DOO3FuhwCo*FBaAp>nsXyr?l{0#c zg6zR8?A{_=zg1{U`5A9-Zg0(iZI7|X+FRJ;>@DrgIX{!iC8>NamCI83 zLMm5Q*xT5dA{yJ;I~dzag%ilH7%DkERQU>krrIE>otiNMCJxh{bm5A-b{WHawnTdH zmiIK)`?Q#LW!gJbGA21%#I!0HM40(#7wq+M#9JH_&Q58mIedyWjT5(+R6axQy`NF! z{8Jl|w8^8kWE)Cg+gU7~Uz)FxJ?uQ~yWHN>-pk(G-pAfoDg^jzseB`qZ>93xa(jRK z0PU`yohjxuseI1_nBo5?t!3K9y|~ft#B6QYj!PSvlHu|08~ML9sGop($9-*1?rmt1 z^G~k7`Cf0H%5IdDqg6Mpr6;4KdsOFyY(~HQYf8Z^*E(UOyA3nWl!VmO9A7!HoxAC? 
zDXn4ow+8apsq8D;cRbSLnSb#qvwzTNzIPzVJ!tTt&_RQPymKu3Fnd~_J=H#3DnCf& zN2y#ddX`GU7ddOKQlmzdU_Ie7?AjwF`OgiIy-V#G_AHUujlI{Mr58Su+urt3Qu#Tz zW1(wDj5A{`>GtvV3EJgXM$$;B+>pv0zW=LW7E`3=4IUJ7{icDw9qCtbnSGLdvVDr3 zS>3Nv;f~>Lsr-5ODwBP>eTH`B{5PrG^jBxfdy#!LxA5$X*?aTsOYO_-%k3-dEA6YK zT3p+KRfAL&sj5;fA=Q#nEhW{`tJ#^?+Sl3F^XF^!jr`a9Jgizqs@w>D#Jh8slWKYI zUyQz8JT4={lX3r4%|sR!9Lu#C8auc4nlFL%DXDfZ(tDjJCPzP;@Lt5qzi?F0`}wOD z6sIl9YuXKGea3kI=FNpSub)a3326QlFCr-aCbNBqJ#8_0uV>PGh5xZf++3Xh#D;xe z;WrNI|L{fpu>A-p-TD>o?jB2~LoT?JP$?5FMLie8qm zpSNF-s#U5sZHlSgXf7BJwfm6On&k{Ed|ktS+5Y7{1$LK^|@_Wls+)<9ng2UiY z9IB&)qokvhqqL)pqpaf*saBP0HK|sYY7ME@lxi)h)|P4=sn(Thy)};V+Ffpk*5 z9X8?ba5$V&txuDaq#7XA5UI|Q>Rj#FfmfvZYHppvH`w(K)mmQAshrCHfAqI6yx9&XSpNR(ZUhuXz6Gr)ljL1Ni|%m5mJp@?r7s^>uBd_@8}@a zD5f)hg_$-{~DY&k1R;%g7ICu`u@NRZU6d0pLSFz zaz>}!XMgAwMPHW=a|~yS=tz}nvn7r+sW$(cOLUG52UlD29GQ+Rsm4e(HqSBAF;c26 z`2TyaxOs1!clK>%ykp|st>j9zWx+($G1)Pt$n|)~la3j8mz^oq)_0eE+A&*Sw#ZQ_ zG?LGa6}j-}c+Rmv8>gOkyx@4z@si_Z$6Uue$9%^tj#s7HPO9ytN``inYA307mg*By z?IP89sdioCSjZ@~7#>Y}LL4&h8n+xABa0dnZp!5|~+NPvQ7VX`sy` zwNe~AnIo#b@6Hh&yO|^U5_hj-pO%atNVV@0$9}2y`@7xakmGRCR6gqXL>p6%IgUF{ zI8HiFIXIUYAk~3VO_1s!sU}J_X$7OnX>CgAIA_-e5vjUKH0{q^CWu;!{r$}Be|ui| z&E+;;p)g}pUr z6dW$qwB?RFPNAKXIg3lxBh_^8ELP?2=6`<<$|r4-BKQKQ{5hzzG;>hL4QE-cx#|d7 z%)>%O=0@7AGbGF#8K;psrPCx;Zsr!5Q#x&ybf?4VWKOAOYjaAK0e-^2J*RX&%AC?! 
z(OJn^+37D;ju4}zI##OV?#?NlRlQR@briX-J&jc<=)qIU0hwG%>z6+-bk=g#W-h7b zNOcTr^~#@1hJ+c*<}cy6p%**UTdZ%jVEx7M`P=#*Oeek5{<|T6*kR`%oZRH6p0lB| z1v5-%BgZLc6X)a3rp{*0=FS*rtW+m^$Ac+SohsF7QhidY)1^8?sxz5kI$JthIa~8* zTW34|+rc|N%#!L;QhnO{_t{eAn->{0p4Ij=ZJ+)B<_t6cSY9y0boMGVcXakG{Eq>5 zXPC}Gh2L-&nPEDIIENOTLpvFZp39$MI{C=gQm4n6?i?Z2=cW3hR9`MQX?A8gM-@FE zwx4&BcUGyspxyW$HL&oE&N-f4r^q7__OF~ID$~iAw56Y4S7hmFPR?)&KR{tGG=cOb z%53Kx&b6G+IG>g3JgLqvGD-Z`)4;!B2`{lD%E9DUA~$zbh-X~vvaj`jdQJYoi@c=q0Rf) z8`Y1b%C!BWR4;33Yt*KA`UN_7hBkB7Z`kNlaqsm^F4q0`pK~tsAhxfw-gRyv$DEs` zx^jtgt5jEM&G{caF6i9pe2*gZ#|545OLcX@tw85q=K=1eI6rXib8-r@MyhM2x^AhH zyKc@yQeCeF@{o3H`d@xh(0QCkvYaQRxPBQs;^7+4XM5<)wg(3(D|A3lJj%tW#<>=d18Vy@VBM9MXCp-xCAjr_rRgQo!>_&o~FU!1=(BHxtiJ5uG7$PB@oy-x#b-)Au7&j)>x@$cNVm7&n>vhRhw=6 z!^ac7r%kR9^4=9H)x!j@$SFXu_kw8QM-g3-uEsnj!lQ^Wu7<8g-bWGHJ&#EB=rUIm z*W-S%u4Ym_Ce@E=g!(DZiQIkCP(MR1coNS$5Ne}feipjoTx}`Z)za0<)!NlYs>h|u z&UjL)r~WTJcj&8aH&+kFGgo)1ezN3WK9%R{>*~+a`h_mn0I8nNKS;XMwT^J5Y7Zf*XSE})oL=~d>q>WJ-m~<07bm)$<(}7;p1Qxt(mAd% zMdop?@ve!~PJcwul`GZHe6^db)ozN5Gqy`o{rqn2rn_d{v%Nx(1NtKLtn2xE7Ao}Y zU%@A5TysTl*L+KVsq%%hQspkssep@7f^MucUfas$Wa>8>xOP)$blUWIT9m@V#1OOz^!} zWF+vtSY-eAy|`E3*{}VJoxR8>6kHz_`h){s(=iiI;Q&6yr#ORiLRej> ziQ1@(`bfb@jKXM)!DP(CVywp&Y{fS00CBb+0`w)>a-y5MP@GRqz<9qB?>Rf>4mtJWD8Sv1kF}W{XE}^hJLRL^5cLEfpS& zzy#!C5~hH9+lYsac-W}3jr!V&hn?-%*^WID(HIZnXnz^R!Tt&sU=fyp+_RH=cJ>qd zn|K@Vf*9F%U>ANA!tp5DA_uF%wjJc1lYQ4oemPZ?LK!>)V&J4-oPi()PU_{PUQXKY zY=t&xhtBANuIP>=(0@+q>0Ac-%t_sxJFy2JU_aUeD zh6OfM26b~)1MzYZCs%V&7mlC8)eC*l5A=m=C5Vq}JJ`O9?Yr2%>mb;+i*373;S-$3 zXZT8p3S~iVR)|1T(C!K?K-(+O)(Qzogd54Azbg#GaE!q?OaOgefj+M=1=BDc#H7Lj z&@YeL(HMQvAC&dz5DW$R^JumZmDYlFD!qnHcmr?Y9c%`DU5WNpBF2^8!)`DZRoVx( zQ|Utx?@GkG5^b(Tn=8@gO0>BWZLV|{#JZs>tt=!1S3fCMDMjbx-?7>2`x5y(U~Mj;1dF&+~kF&R_wBxYb1 zp2jnngXi%gUdBAUf(2NFC0L5(Sc%nGi}iR7oA3tS!aLZEt=Nv8cn`a=7yEDkAL1~M z;22Ke6h6UeoW*&N`;{+)eX#P^_zpkdC;Wn&_yd1&t=A9ipZ+CK8jrvT3&?4I7b?OZ zRZ#=%PyY4L0Ko`DBpRX#njsb~(FX0&35+lP-Ov+#z<%PNfFvY?eZ_w`(vgXg$N~GH 
zKYilQ{^w7B__Oc%(-;1;K|lDv0Q$gxJ{Djxh`s*`5O@D|Am;vzU;f0~pK;ZnSo`k) zarWN>V(iaY;!k}2kAm3xe+=U4e-0P%Ij-O;z6Hl4|LeGc-*5|m3QBES(FFk zUlkjicodcK7^LLI^2t@><(HKn;gE+KCJ9NYoU>vK`1HI7?126;`$iholgiWCB zRcLb++FIohzQ8q*ACFPj$Ef3DRx|{C_!!%JED@yLm23T3rxxYZnvOYm9^`E;+EMERXzSk*qP8C@f%eoUezhs1HnFQ6k1?2x zsbIgTy&CjE?PEBDb0Btgii19=LpMQ_xREO=?IgBHq4t1zQUFuNR zA8fyFU9>?LbVUZpgSz9f5Gz4F>JpQ>#H21UsY^L^DW@Lg)T@U2Xn>9&x9jx+?Wp$@ zW@9^uLB0L>20w#-t?xorR7Xp60LNdS2ga2#iG9)AhZfV^%{3bAMp+SDK$6OfDL zVEqOgaS9jknGk{0A&@!*Qinjsgg~|**a+Rg_5cpbOP~%x z)FFsE1O*@h^l=b<6Vwj_Ks|zHfY=1Rfo<4J^e zI;cnRr(pZRmxTx^jSU-e*37Lx}Sc;P%ej&6Wl)MaOy-*{le<KaL#BYzbl$_4r=sybSMZAW!MCdOd` z@~{@`L7qgDC()(Q4#X(BJBVxa5>VG@+7!(;qOWmTfI2m70AkvZG8++lbFW+?o-$=9JsK5t?8U zX5neTdE+pTbj@XAeP;Hwhu%U8iF{rCvV!n0=CnhoM}(abm09C%@K?FSPJ^T!wvk2J3@49hpy-jmg&ec z9a*L$%XDO!jx5tD1}xX91IWux>+zZpoh_&c`lK`M=uA91)8@{nL0@(LLWn1b*AtOo zpLk*-reh{f;2bUp(WN?wM;F@Eg}&@E7UQu8w7JU>A>z$=6qS&ORHWezY{L#Ay859s z%7VV^+8cdAd%AAGM*M)AxFtk4`n_8xuxz(QAUC>^8-zl1FNHElz)-Ng?pv@Mdxhvh zU3yUO9yLJ@_Q(PC?!o&fsqdRjq!d-`JxCSxkj<4ar>qE|Dt0rBm%1Z=<8I{b{= z_)Cc1yx+SE=%?OuFdwgia(YuvZ_4RY3qc?U`%srYlQ0F_@d5S=(YF|AQ{P8GpY~;& zeLH}@>r3DDrSJM)!T0!4h<@RC9MrKNW%YX+&w#e|BTxI$&;4Df3S!aUjWiIC{>!i) zuYofAQ)Yk491sZdazH~o1;&B_FN5+1e1^}37+43iV_+C)%fLxs`vb|Df#+~Rh=c|p zuM(n>h4CQ265hs6yeGsU73E<B$A=8#CVKqpYvkZ$OS zKIo4Wkkdmv$UrtmgY69=2ZvDa6a%Pp3j13MZAqcOQl7zccoB0!E~ae2J0M;u#5IL@ zr0l^le2mjLhfDYZUxJ)Xp}j*(ApqpV(58q%9NMBisQ=I>&>JH`J%&!gR7?l)8~Q31 zArH&(J$@5n81)}U{|=*Xh8bZ&1yn*6R6`B0{b9st*gz2LVU#_LvWLwA{WRT&pio90=erMis48{ zCWx&k2ejWqj6KBILmWKB!9yH8#KAKMFW_a&#{w+IQmnvgtix+~9dF@XY{d?+zj??D z4{h`u#9`LJ@&zG)7azAP%k34ju6Xx}pbqqaWzc^h69s3R2-g2C^|4 zV=(~|Q}85a;%PjK=kXHA<@8rUE~k^r>C3SSYq0^F@Fw2D7Hr2Z?8XN;fI~Qf<2Z#+ zaTXVF318qVe1mKF5kKQs{EpkWBgBYeP*4(OP!1+o;XnmcLKRd)P1Hes1R@0Ch(aSg zj^=2AR%nY3=!|%DM=$in01Sc~Lof_!7=bK|!WfK4E+%6dX5cA2gXi!f=HeAB#1bsS zO02vL)_ylKg9-rYdzQot~4nN>0{DPbK1AhsT;Rgds zpfnzV5f<3tLPhwaDr%rM>Y)LG5r#-KL=!YaELx%s+M^S?pc{Ik5BehkNl3;}42K7! 
zK|C{F#c~jz3}TQ$+cOU03tR*3%Cw;}==aRW(F$xgGZW)50mLftftk*!Cvxslj=R#x`M_G`E+4T_);*gz!43HPu zFJd7UgL-G5z$qa{I`J5)p)2}%sRYftDZ^W67Vf#7wlD1n0^67Lk7WCD2!?`wmCs-fHen04 zfjUU)AgRNok}!cjorbIyretCqCg3#?zbUj~3VAt2 zTkl&Trc(c@tT&bVPmMrRGzaaU%KB5Yu?VZM21oEI&ImEB7|P%g1cJ6tqwUkWp+5%V zNjwYsavFUwZ4bzaY1eTJw}p7JB5HuXeDVqOMqkkOCu#eW&)`jvPfzZ|6?~5$g_uqr zO@9=Xz&g|2NX81#=INVo6!h8jvqH>JP!2}a1ofT~jKT0=1m=Q$VFr0TgZj=mj3Z!N znJGZKXHwspCb=ZM?$l>2-uw83n=jCnYLbGRVH3*_qy?DsD?(FJ|b547)v1z3bb zpq(##BE*Xh(7qR|g0bmEV*DcQe32Y^aShgi{prP@K;2)ehcK`|z4Sa@#X?-hcc8v6 z*9Eb7IUJc72V(Iueeg2*_A>oEm)xF9ZqKcR{vfaC5`(#G@h0BJ&$x}hgqTNL=FygU zv}N96tOm=@`yAimnh^5?5ru{zr{>QAb)5eNwD;+&R~`p3e1#ak@)TYKaeU=VXzyPa z;#Ka>7uE^LGrXbIx9ki1#A z5^QH7IkS+QS;YH`I-nC);5E?yG*K)rg)-=YffxjqS#mRr&b z4#3PS3=UoAPmG`3%ONrOgSj1r#UcgJZh^zQUh-E<_9?NLc zGWv4albC^{pv}w93bDL0YM~B17>zO5iTyYz#EM5?hZFSWioqBH+OuK{w&4#URvMs! zeqY%KEW2_o$c>fc#;UTg!GYlz3AVTD102CIAy!kD)zo`+2*zP5sP}5#U;Qc02(hLL zsPCG3n1*LC2VdhS+z?`IdvpWwUArD^f9<=tBgDFrC=K3U*B|uLx&>H)RiK=8l(UX< z)`y}o$iem0W&LbCivu{0lR|7L2imm31^RRY+uYC#^xX#fZUcR{;W}>Nwh*s1M_W+G z*C^|?d3Xi1?KSfBHTrpD4Kx6;*f;`XKs+{X#Aa*-Wp1R*jg+~m5y;C;Eio4)+NPzT zyiMQXdm&yA2km&h8EDJvv%&UXCud&2if@E?qanzvH{y_s86dyjcpo3)un=#W@hB?6 zjWnc#W!_|&H(BN_;_(*!^j1}*AOl$-uin~^gF?JbId4hli!^gGn|9oqJ8Q?v##eRn>VVmW>jf>4Rg#So8yNWdT@f$eP` zgYls3&67bsZ6=>KKL`44GqK$K3KoERZ>G#GHPIc37!2CCB^4e}=9Z}-wp-}?EiZxo z-9i~#R)JjHLLY8<1LWhDt=NHGpuJmY*H#7PKy0>F0qx()Hn-M80D=&LM(6?BwUxST zr7l~k%U0U7l{RgqO_#NcY);mIM^8?%47K6^{ zg}xX7Vz-TUZX1V*m;~CpZ93?eZM1tEZQZsIORyC5#kQ4LjkP$2YeHGiS7khg>R>F|RTsp37ct)ziU>rbF`9xH?TSNdv_nTcfv%u$cG0$7{Xie> zO2lB$N4v#kx#qk;Z-a`9*FO*RUo#zHeeIp z#5>r6?H~ZVcH;vaz#$yLah$@ZIExFogfH+F$dg_4)vh1$Gsu-)zvDLU2=QJqC@6_C zCYN&}isEBawq~n21T3is_hz*_eYD@G|CO0TyE^ zR$w*O;WfODx9~2uVh7&C9_+(G9L7X~PH!;{v+jkE}E@pss?cRhfpx<|&!DX=BJ>}s<1rXOg#B)zqjKO3O zn?2;q9`a-lZQ4VB@1dXfP^Z20<6io2Z*|Z=ds%;PI}qo+#CPumEW#>~hkN(q7>L6M z3d(`J_#hY!(HPYG0}n>vO>DSp6HDnkPG{#AP;N7ezTu4_Fn?=Iza3X5U&F!P~QWwh{GTZ19^5} zCY}fTQ3(2j#m!1@Qt&x51!0%*@c;&+fT4$_E& 
zf;v*ad?f9rT6oi2oeAE;P7z+CJqh~P>uV6bq0I~S!0=@!uIZ_O?=g1?70A(C$ zj$WW|jwE3$=-VUHKs}CZz(x?$Bjn?eQ=m;p{=lC?9IXNJ;`Q*`W=1&`|PobVEtpY&>r2;1JvOdbvQ;Hj;#dSKlVBff$bloe~z*J;|8dpUys*A z06L&MdV)F}rw+%d!|`QU5Bm5xeRKRIJ_hxmS>gn-IZ+p(2*+S}K#rW4jh8`uPizD2 zII$1J^~8@L9w+J7lXXBnPIdy@KiL~&Fd0*khqa&{C+U}ytbdYzIaMA`R6sirzf-i~ z6nS}y^-etr>VJy$PEr3;$8i?th4`5Ef6V$Ho6ra?&=SLtg^^f@l~|1rLEAs3?H^yq zE!-C3lWM3B`tlR{;FEzMCq8)^FJUg;#e1MHKe>iqaZ`v-Y5S+N{nL8r4D#vIJ{XT_ zn2voo3hI8Ex}K)3r|JLG)cG`ZK24oZry~brF$eSUDvsd{&Ixg*ILe|N0uhCVpsr_V z^O@mz7WCDbSFi(Y`^*7+gP-w>5NBPeis~Rw&XOl*bMZci(b>a7oFlI1oS?4fXwx~i zaV`ne>D+P<({q%0jxx_t=6ULIJ{EBp2m0>(lVIODPm-NKCBy~Fx zaz)U-%eBx8#O*S1yG*&4U&ovHU5GD=!GOon3T-eN5|gnVA7DRd-<6UmjY!bWD>0Y^ z+IQt?Fg{!%##d?aD7#Hvr zh{aX<;41lcm43cTZeJz0uTIASkk?m9<1zF{GEzWWzNRf-)0S__zzUZArUSa8 zC+1@*mV=!7<~FF~x1GTI-_lp#t_CsumKc8fn-Jd#5XbMH0Pla-7xc?_d7xjuV}b8} z0n1*CMSIZJYsBFiakxetu3g1-u+8sVfI5B8Hosqi*RV;5ABfWr>snIFiR zA9(+Vcd< zR2?-j6VHP_`H6P?L_Dab__;IatDpOTc>TNq>=Qr#iaSEw@Iza~qZ?iU@wh>oZqS!E ze!>kQerW{S{7XyBz;k#3XK)!;g!r{CLJ^M9keCem^4DWHA;fRA=eHWDg~9M(1nBqQ zJ^;(!w1eEZNp9R6g>R_j@oJqA{q;@6`ME1t151{}I&t7VqDRMI2^<`rdj8 z*KrHCh4`Z@`e6Xx!7dQr+fK0k+tn}>naBq3-#!5P>Ccid!2-(plXCu~oIe+0CCI@) zsmq_g;}0SJ3PTe#MGk1wUsFJz{>3)`+5`ITFZ%8deRroX+(^bcyanob=MUj03{Vk= zj_8bMF%Pfc9IoIi;pg`#s-q_Q!i{9Cz-!nf{E8JvS(F3i73+asSd7(Jiy!ej*naW0 zh(|Xp#|CV~x43~{g`Xh+5s1QgOv7{>#c7-sehSMdETgcDG6}QrG*03?E($-jE^?PDz4*a;aB<**x^D&RL1|)(tU?zS+IQoe(v(qg50UdMggTj1w|1AWeEVdf3{H57zrAhU4UhUbt+K1GzE*YGkbsb&Ux4tFErjmRRr z9sP&PBzytKa}tX=9s3Ah&I;@$`~uc;6<2d@C^#a9qex{K>72nzRKDlnuVou`>{tN~E4=0@N z^kOD$bl~swpO4x4o2|du`kSr4*#-pkz~33rhkodDKs%kx<5(VG8!z)JJ9&$D_yoHe z@Fm}{kMBZ3L>TTO!oDIrD`F9Pj&P3=XR!vi5n*=`*CEFUIY!u3g!e8&j}dx|c!)<( zFT$(?J$GORIkXV8G{-s1zl z;A_6c?M45{&-@h%Vh*P--nW<(>@~(-V=~CZUSsSvMm{kmOlB%IG|Lcg$uUU>h&|*qSUtzeI`$^+Y# zJN_-+VK@4Y*LVD9=r{hCP>|rgO9-b2`c62K0mP&41bru@kcvA=n8YIVlyEC|@i6Kp zyvDn{&qvs6g1si#XTlHs%&+{8_kNJw4pMiJdV>;4V+3|O$W8~@>7W9NDa9QQDyNe9 z*w>(qZ0B{}!#)Om&rclSAZq!(b1?WY%sN=j!Db!YmuSrA`_IAP37Bv2R4SN84GlEY 
zMkmK`EGM&!3s{SLAAB8p8hi_zxE=RD_z|}9I8ULs#6!_nVmHDVkJ^c9C#s#Oc494c zoPt`3XLBxUCaRTqH=dpN7*Fsdc9G}?65l``iN7O*#D79Tk_?jUIB6hB45I|OCYdei zOx#zJcOvN$F6T^m+Jl-&cAaF`$zAA%`I5WSi$27o zcCvRM+04miPBwG0nUl?&T#wv+zdJ~ld$O#PPvvypX1w#*K5Jxi_&l@@pyBk_S zF(pjL3`5N@)C@z-Fmyf(IG&TR)1lY%3I{_$${~1uiuWNU5j9d|l#yf}A|@iY zlqzPTMv59KYNV)O#ztYA>m7C)Ey8*K-|eq{=l_uBmcOm1n9vQ{|cZI8Wi0QlCZ5 z)NeyUT5rsn){a@yJUeX}_La5@y{D;>rr$L0RGPn=wjKSZz06yD!#?zy_7ew?aoQjJ z9SVkpkoT}Ibfqu-7{EZ18Ol+l;qHc=&r0+#Yz^=6C110b{h?rZHYLbrcs&isXt>=B z@8lScV-a>T+)jpj?(hp)jol14-|#EAl6Bn1OQB%I5tMNT8!-O}vyL#|2sfS{V4n1D zgkhHSRECp5CRxZfJ&$~fC}A>H}g2}@;)E(348b}6pZRhckF%CIP^YB9;0MEs*-Bv zvIH}aGV>_CjoQi+JdGP1^*jedL8iOP>`5Q`5kVCC%S<4Vbkxc$pcpkXry&1K`DY%7 zoH8%vZe)-tgG?D@${=$GyD>}V$9%>W>~z z5mih_cB5ypk{u39)5OA9kXeqnPbrV7&{+xB4=rjyG9n6+fwi&a{m~FOfvt`?Hb~*CSmR?`{V{?m^fvJdhn|Ac}Zw~^yEa@FxkeT2W8MzlW)ELt zk9qs}o*(%m6!=fu!T3Xw?Rfhf-w!>Hk0BoK;rO8(jsC|EV+=*;fBaM`n9fYhF@84f zoWPkZX9ef6l8d;C^<2kBWIz5^Ht`a2ozNY-o#2iplp~)BO|;U%JY+RtA!*A#gs6a3aY54274_$4tp#-nZ>yK zLhonca?a*FF2^nlujX3JRcNllE!>0s6xvUrxeB*pmco~Lm7ToB+w8%f3hk+IAKziN zA~O{ohW?8ph$5B*%u_T1^AwF{EaULGQ#6HXn5C!|yDDm?jZWq#bs1d%?xI-fa5ulQ!rog5|(0b#bzu%2Xhviv)EpW zKMn;G)t{I|1%7toTJFbO6ZKkh2yUyyE=t1DYe_F+h$Rj?DH+BX^jV_M5`C8FvqXO- z_E542d6(#+L=PnwvYIQniuGKFJ1V&i{gph0OiMmRh9!T6f>OPd>ZMdKrQOj>sq9K4 zNh6yAiYcWE`II)WkkfI$rR!La+bvbI^akv>^cn26^h4AvJ-|PqU{Y6l(g%B*6oGn^ zMw5%0lcvy#T}(3HB+s36H;=L%cQVPHO!|PY*vD_7U~)hgx?!HlYD_lMp6iiLVv!>epRL`2)MkmK`9PZNZs0pTCjlQPd$-Qjl z37*CtroPK=>|*LC?BUB$P!>fZ@-G{~NbI3(EOIKF#8fJn#$xndrq?pJUv?1};|9uZ zVH0*=rjN2mcnp1%>7(omz6%BA-7r_VTICZ_qg;)0HOkc}uc4ND+*)}vC!uz^n=3yP z?_s&j%JosMk8*vK>!bWq?%+w>N4dR}{}Bo*dU7-q@aziDtgx2~d#SLGiiMoO66~d7 zDd%G!74D$o3f5xI3UgMNv*LE_zv3QbULo^}Z9K;dyvz=EMeuByb?4xoBsSGEBQ4}x*{a4PRiB{$^A9GYLVg>T3bQ6{Cw(gAZf+WgfQVAkp# z>8ntTNO3kg@#wNVOH4pL# zkMShic$S^K!JF)2KXx!f_A_KZqdx<2&okl}gx+W9eTLp==zT^Z6Deg1a+y&{HQxUj zW|=XI*)(u2xAQjcXJ#amkl{=_nW_JoFQTuRdYbteU+^{G@(aI)f?7S+>aA99wR)=6 zQ>~tAy=b>LjZm?ee_4=>ZfBl85<`S-BBR6sj&$5%Zc!%A5&X@d* 
zUDlhm-mJ3^=LmYx8+B*rQARr#u$E2S&KB;$PG&#IBe?O|ZhW>ZXYb-&-sdAeMeW(< zpKbox=AUi;**}GXIcm-s#Au3{!||AFj#=h-_8iZg^9;}P8n5#vAMha`<4)&%!#)mR zzB#{#f(98kgwY*YH^{o7AF(7cl%pBO2y!VvFAb&0prMj#YG`0Si#U-}Si(~5reOsa zaWxxoUkx|2nLD@(*)=?lxf*;%HN3zMUc=5B%+_Fr#v>_2{l->0n8&fWhsF~)8F$gB zheka#UdrWM$vW0^E!QK@##^w*Mz`5$XN|kq!ylobsVmXA>n8IyHDK1J1=vZ`NqCQ% zPQzSHTQE!0eYk@ryK35sd78}9^ah^a|s78$h2pydI~*75^?hl19CF1W$gFx+5kFZ9qF z&tQ^CVFGTg)vjCh)T*ae`M2t$RUfU3I1xFtUdU=L<}$8jGw!QZKdt&{eTLV0lU>+B z>xb-RKYDEaDHODM7uxbDK^|>-XwyTR4BFgQ+fvTr9L`7Ww#&JatC4@(1~wv(wr#wI zdTsCX5ual3ZR)nE+xDNj2lx%|XnO>i*n9i&sM)S&yPEB4w#&SI71v|Nc6)9AH}~@p z?yg9R9p>m*!=+q~+&XSy6Ss3W_woQ{>v(~eF;mCu z*k^}1I=TG7&R$nxS(R-hod2 zcj~`$At&JOJI}^GI_;zL3a;X6++OEq?qCb|@HkIlC!Nn>C!O!Gn-BSzgZzmd%ndk{ iBj`yV`Z0u5GD83NPnSdf`{%&-{`=?u|2vpF>VE+(Aw;VH diff --git a/LFLiveKitSwiftDemo/LFLiveKitSwiftDemo.xcodeproj/project.pbxproj b/LFLiveKitSwiftDemo/LFLiveKitSwiftDemo.xcodeproj/project.pbxproj index 3ec30eb6..627df507 100644 --- a/LFLiveKitSwiftDemo/LFLiveKitSwiftDemo.xcodeproj/project.pbxproj +++ b/LFLiveKitSwiftDemo/LFLiveKitSwiftDemo.xcodeproj/project.pbxproj @@ -7,6 +7,7 @@ objects = { /* Begin PBXBuildFile section */ + 768D63AD7BD5B7D2959CC88D /* libPods-LFLiveKitSwiftDemo.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 051291DCF26E653942D6929D /* libPods-LFLiveKitSwiftDemo.a */; }; B289F1BE1D3DCD3000D9C7A5 /* camra_beauty@2x.png in Resources */ = {isa = PBXBuildFile; fileRef = B289F1B61D3DCD3000D9C7A5 /* camra_beauty@2x.png */; }; B289F1BF1D3DCD3000D9C7A5 /* camra_beauty@3x.png in Resources */ = {isa = PBXBuildFile; fileRef = B289F1B71D3DCD3000D9C7A5 /* camra_beauty@3x.png */; }; B289F1C01D3DCD3000D9C7A5 /* camra_beauty_close@2x.png in Resources */ = {isa = PBXBuildFile; fileRef = B289F1B81D3DCD3000D9C7A5 /* camra_beauty_close@2x.png */; }; @@ -20,10 +21,12 @@ B2C8FACA1D3DB8B3008D44B5 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = B2C8FAC81D3DB8B3008D44B5 /* Main.storyboard */; }; B2C8FACC1D3DB8B3008D44B5 /* 
Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = B2C8FACB1D3DB8B3008D44B5 /* Assets.xcassets */; }; B2C8FACF1D3DB8B4008D44B5 /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = B2C8FACD1D3DB8B4008D44B5 /* LaunchScreen.storyboard */; }; - D76381970CDF6883DA800952 /* libPods.a in Frameworks */ = {isa = PBXBuildFile; fileRef = C703895C713844AE5F37BC53 /* libPods.a */; }; /* End PBXBuildFile section */ /* Begin PBXFileReference section */ + 051291DCF26E653942D6929D /* libPods-LFLiveKitSwiftDemo.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-LFLiveKitSwiftDemo.a"; sourceTree = BUILT_PRODUCTS_DIR; }; + 19A8327F404723C1C4B67B2C /* Pods-LFLiveKitSwiftDemo.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-LFLiveKitSwiftDemo.release.xcconfig"; path = "Pods/Target Support Files/Pods-LFLiveKitSwiftDemo/Pods-LFLiveKitSwiftDemo.release.xcconfig"; sourceTree = ""; }; + B16B5A0921F8F93D139F7961 /* Pods-LFLiveKitSwiftDemo.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-LFLiveKitSwiftDemo.debug.xcconfig"; path = "Pods/Target Support Files/Pods-LFLiveKitSwiftDemo/Pods-LFLiveKitSwiftDemo.debug.xcconfig"; sourceTree = ""; }; B289F1B61D3DCD3000D9C7A5 /* camra_beauty@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "camra_beauty@2x.png"; sourceTree = ""; }; B289F1B71D3DCD3000D9C7A5 /* camra_beauty@3x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "camra_beauty@3x.png"; sourceTree = ""; }; B289F1B81D3DCD3000D9C7A5 /* camra_beauty_close@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "camra_beauty_close@2x.png"; sourceTree = ""; }; @@ -40,9 +43,6 @@ B2C8FACE1D3DB8B4008D44B5 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = 
Base.lproj/LaunchScreen.storyboard; sourceTree = ""; }; B2C8FAD01D3DB8B4008D44B5 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; B2C8FAD61D3DB9D6008D44B5 /* LFLiveKitSwiftDemo-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "LFLiveKitSwiftDemo-Bridging-Header.h"; sourceTree = ""; }; - C5E86117C8AB61338C12909E /* Pods.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = Pods.release.xcconfig; path = "Pods/Target Support Files/Pods/Pods.release.xcconfig"; sourceTree = ""; }; - C703895C713844AE5F37BC53 /* libPods.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libPods.a; sourceTree = BUILT_PRODUCTS_DIR; }; - FD5AE5787FDCE4BA8C28E2EE /* Pods.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = Pods.debug.xcconfig; path = "Pods/Target Support Files/Pods/Pods.debug.xcconfig"; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ @@ -50,7 +50,7 @@ isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( - D76381970CDF6883DA800952 /* libPods.a in Frameworks */, + 768D63AD7BD5B7D2959CC88D /* libPods-LFLiveKitSwiftDemo.a in Frameworks */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -60,8 +60,8 @@ 0044BAB3BCE83EE63FB6F37C /* Pods */ = { isa = PBXGroup; children = ( - FD5AE5787FDCE4BA8C28E2EE /* Pods.debug.xcconfig */, - C5E86117C8AB61338C12909E /* Pods.release.xcconfig */, + B16B5A0921F8F93D139F7961 /* Pods-LFLiveKitSwiftDemo.debug.xcconfig */, + 19A8327F404723C1C4B67B2C /* Pods-LFLiveKitSwiftDemo.release.xcconfig */, ); name = Pods; sourceTree = ""; @@ -69,7 +69,7 @@ 656DB83DA3620FA72C6B7CF7 /* Frameworks */ = { isa = PBXGroup; children = ( - C703895C713844AE5F37BC53 /* libPods.a */, + 051291DCF26E653942D6929D /* libPods-LFLiveKitSwiftDemo.a */, ); 
name = Frameworks; sourceTree = ""; @@ -129,11 +129,12 @@ isa = PBXNativeTarget; buildConfigurationList = B2C8FAD31D3DB8B4008D44B5 /* Build configuration list for PBXNativeTarget "LFLiveKitSwiftDemo" */; buildPhases = ( - BAC96C6840291F2B616FE902 /* Check Pods Manifest.lock */, + BAC96C6840291F2B616FE902 /* 📦 Check Pods Manifest.lock */, B2C8FABD1D3DB8B3008D44B5 /* Sources */, B2C8FABE1D3DB8B3008D44B5 /* Frameworks */, B2C8FABF1D3DB8B3008D44B5 /* Resources */, - 7490D167BE18C3C7DC2FE381 /* Copy Pods Resources */, + 7490D167BE18C3C7DC2FE381 /* 📦 Copy Pods Resources */, + 5683F74F0FF045A7193DAAD0 /* 📦 Embed Pods Frameworks */, ); buildRules = ( ); @@ -199,29 +200,44 @@ /* End PBXResourcesBuildPhase section */ /* Begin PBXShellScriptBuildPhase section */ - 7490D167BE18C3C7DC2FE381 /* Copy Pods Resources */ = { + 5683F74F0FF045A7193DAAD0 /* 📦 Embed Pods Frameworks */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; files = ( ); inputPaths = ( ); - name = "Copy Pods Resources"; + name = "📦 Embed Pods Frameworks"; outputPaths = ( ); runOnlyForDeploymentPostprocessing = 0; shellPath = /bin/sh; - shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-resources.sh\"\n"; + shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-LFLiveKitSwiftDemo/Pods-LFLiveKitSwiftDemo-frameworks.sh\"\n"; showEnvVarsInLog = 0; }; - BAC96C6840291F2B616FE902 /* Check Pods Manifest.lock */ = { + 7490D167BE18C3C7DC2FE381 /* 📦 Copy Pods Resources */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; files = ( ); inputPaths = ( ); - name = "Check Pods Manifest.lock"; + name = "📦 Copy Pods Resources"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-LFLiveKitSwiftDemo/Pods-LFLiveKitSwiftDemo-resources.sh\"\n"; + showEnvVarsInLog = 0; + }; + BAC96C6840291F2B616FE902 /* 📦 Check Pods Manifest.lock */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 
2147483647; + files = ( + ); + inputPaths = ( + ); + name = "📦 Check Pods Manifest.lock"; outputPaths = ( ); runOnlyForDeploymentPostprocessing = 0; @@ -348,7 +364,7 @@ }; B2C8FAD41D3DB8B4008D44B5 /* Debug */ = { isa = XCBuildConfiguration; - baseConfigurationReference = FD5AE5787FDCE4BA8C28E2EE /* Pods.debug.xcconfig */; + baseConfigurationReference = B16B5A0921F8F93D139F7961 /* Pods-LFLiveKitSwiftDemo.debug.xcconfig */; buildSettings = { ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; CLANG_ENABLE_MODULES = YES; @@ -364,7 +380,7 @@ }; B2C8FAD51D3DB8B4008D44B5 /* Release */ = { isa = XCBuildConfiguration; - baseConfigurationReference = C5E86117C8AB61338C12909E /* Pods.release.xcconfig */; + baseConfigurationReference = 19A8327F404723C1C4B67B2C /* Pods-LFLiveKitSwiftDemo.release.xcconfig */; buildSettings = { ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; CLANG_ENABLE_MODULES = YES; diff --git a/LFLiveKitSwiftDemo/Podfile b/LFLiveKitSwiftDemo/Podfile index cf2bcce5..0602d100 100755 --- a/LFLiveKitSwiftDemo/Podfile +++ b/LFLiveKitSwiftDemo/Podfile @@ -1,4 +1,6 @@ source 'https://github.com/CocoaPods/Specs.git' platform :ios,'7.0' -pod 'LFLiveKit', path: '../' \ No newline at end of file +target 'LFLiveKitSwiftDemo' do +pod 'LFLiveKit', path: '../' +end diff --git a/Podfile b/Podfile deleted file mode 100755 index 3d1f0976..00000000 --- a/Podfile +++ /dev/null @@ -1,10 +0,0 @@ -source 'https://github.com/CocoaPods/Specs.git' -platform :ios,'7.0' - -target 'LFLiveKit' do - pod 'LMGPUImage', '~> 0.1.9' - pod 'pili-librtmp', '~> 1.0.3.1' -end - - - From acefbb6e242adc82e47dd99e8bf2d40fd0982005 Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 16:19:36 +0800 Subject: [PATCH 22/39] add carthage --- README.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index c1a34844..a4aaedab 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,13 @@ LFLiveKit $ pod install -### Manually +#### Carthage(1.9.7version after) +1. 
Add `github "LaiFengiOS/LFLiveKit"` to your Cartfile. +2. Run `carthage update --platform ios` and add the framework to your project. +3. Import \. + + +#### Manually 1. Download all the files in the `LFLiveKit` subdirectory. 2. Add the source files to your Xcode project. @@ -54,6 +60,8 @@ LFLiveKit * libz 5. Add `LMGPUImage and pili-librtmp`(static library) to your Xcode project. + + ## Architecture: capture: LFAudioCapture and LFVideoCapture From 11185e3a4de2626e7cd804ca04b2ad474901c7e7 Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 16:42:02 +0800 Subject: [PATCH 23/39] add iOS encoder --- .../UserInterfaceState.xcuserstate | Bin 17738 -> 18429 bytes LFLiveKit/LFLiveSession.m | 1 + .../UserInterfaceState.xcuserstate | Bin 10986 -> 11298 bytes .../UserInterfaceState.xcuserstate | Bin 123847 -> 124674 bytes README.md | 28 +++++++++--------- 5 files changed, 15 insertions(+), 14 deletions(-) diff --git a/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index 1eeb98fc49a942faecdcd4eda90ccebd4d20389a..1db0ebd35c5b5cb79d95a62828c4d5a404f9edcf 100644 GIT binary patch delta 10465 zcmZ`;2V9fK`+sk|2_z5#WG4_HkPt}7h9p>(x)<&}kP(pzq6}v>_o_H>8*8;%E8?oZ z+PbQBwbrV2x9--}VXfL~tF~Het^HquIDWtW!-sd=-SfTgJ@-7%J$LVsTJZN=Fi(nk z$-I>LN+UQizeP5`gehgpnacS!?fkzmzcP22yUac2A@e8m1n>YKgn@7%05KpINPrY5 zK|Ig`14so%kOtC$1#|$NK^M>i^aOoDKQIX7fkIFOhJq4M28M$QPzgqZG2nGD0dQav zmIC2aPZdrb82SLl5-A zY}f?80^7m%umkJ}d%=FNKg@%JVG;DfVXzdAfMcM4BAf)L!P#&QTmTotrLYdJhW~-@ z!>#Z`xD6hGN8vGe9G-w*z%SuRcnY3|XW&_Q4xWeK!SCT!_yfEJAHYB0L-;3r1ph)T z;vqf?Lop~8Nl-k}q6CzTQjr^ZkQZgc@u&%Ej#{9Ws1<65diha5)F0)c!KeaNqAFC4 zMxoJY3>u5Zp^0cAdIK#&Z=%I$33>}HMa$6JXbakkK1AElcJvY2fp(%@XgAu2K0}Am z5p)KfMd#3YbP?S~zo1{y9dsAnL%*Tl(R~as#0X<7!cka;H8>8(<0S0YV*^gbCTzwT z*p3}I3uohIxH-)BQ8YIY5~mfgU<&u(QuWIto~vj^CN?C0zk>{<3}_5%AY 
z`y+dcy~jRe<$tpO@K|0XPsEGj$$1K%l9$L!;^_h(pjQwF_<>*0MBx|Y6#13|)S6+S z8ed0)$VXf9*ct#50@rvc0W)742;=Jl9r;OvPB8*T$P8hI)-lB-j%XX+Ut@-?An~V| zPE0OS#ta{n+r3kDRenLKFQ=j+e{4N1D`dJhRF7ar4tQ4G-B(r5j9@C5u03DqprL9va6#05UNv z0RsT(L~>UG1Tg7Lx&$Uf_K6n)(MxqvKuo%k?!?(e*L-SfV39!E5y+VQIv^*#i24k( zpE%VXF)1O# zNEs<7BS|Hxeh)MS%|LUI16~C!KugdHv<7WJThNY-CWK5O)5$!tkSrrBNIh9k-Y3ef z!5&-F32wu*W!iyU!VoDLoF{%Y@SRu^DCbG6h2=%Q3QNbat`MHxe1 z6zm=nEXWhLXcV;lzk+3DFG}_aNfzaavl=A>yTu~OOKk>*+6)PNn$#|^A|{ew5PBa< z-+P9E;?T=$fp?;N@=HU{rSv)ddH?wpMWgd8d^y!cCI3!-WJqc_m1-Lh=puw*RH$G? zo;aIwvIYit1d^9VJ3drj@vq(d|2iTesj9#g@jyXM=y_D0xLK%&-Qs9-$Uwn#Fq@h^ zm;q|ROfZX#A!ErnGJX}91L&d$=9AaS1TvBM1BtN{{GFS9Gkb8iKI5&X1iQ;NA*|1M zdNUXD9Nf2=t z*bVl8kI4*DOJr|0Uq1ilMGT?SXcH{e_H23bVjB#T#p@4;0Hb&V_`Z&9eF zBr#8{=u%NqmR~V8V2&N>@0{|aLmzMNAeh@tLESHc{tAA7uA&z-bRRsRpbyF0WH|+W zr-5J72TfP1LKEn6IwsKhv%ZA&1fqW-*mD)ckPR`cBP$yi1}9L@eq5vu4c2CJW;V_l zjDXP;1V%y;jDljaimWDU$l6sf2F8XIwvN0TR2bE;-`~31`J50Pr`z!iARf~A^}i+w zlVJ*&2My#ug`Hs+vV-g*d+K2~*d6vDpOAy(=+mGmYKV%kZ^&792JXvp_=7^v zy8|`ytmtQP6BdR_KMuSi&yt4x7!H9$>5>GC$=*6xLOuo-=XP(`D6kT8OxG6+8d^<;Q}V>Jb}d_VEH7?Z zURYh`8(!71yfDA2qD}%W8d&W*z(p?tnYtF1Q=+fgi(9$Z2whoF(VTdGZzc znp{{1KV`JkeD=cw@F4u0(UOZKf|}7Ka+zEq-;jtvQ`JVlr)X4Fk#AHa==_+?n}qUzRh=ljXKsJ$8@HI>cei@;HkM9rm926=lI?vrAB_Z^JFq zWN*~SSMXBM48MjK;6?Hs`JP;@hnL|M<}~?%0}ROtMjmUsKreMkvun(-)$m7n9o~R9 z;ZNim`I-Dm?vX#9r8e+3yc-1j1^x=}kRQo)a-$yJgTGO*o8+g!6ZJdV$B6mw76>4S z5V=KelV1Y6H7Y*}N0I;2NQ9y&%pGz!NJ-aR%!Gd3ya`VSf}}|KKSC;`MjG-P`JLRS zLe1_p2o*93DUeAxg%TMdN@G*e5VS=valhwS12 zO~TpWtUZe!C=_2ffb+!mr#Vo3U`b*`SSZ$TfEP?my3>Rx6j@L{Dq-|%Q2{DMMaYMS zpkg$X17REp=YW6%LJmZ5Ad&;3wdgf843#n>R1`FF4n%Q4%z?Rz)RM3jE~<)d@+N0k)k)8c?wuLkD8oit@&Bfe_G!o zzo27&fv>cY<#Mzx$nqVu0@b0Fs2;6CtI-;?mIE3N#Bm^=16mFwa3GNbNgU8|Ao*P| z53LXJoQ2*;AE1qB6Pd`-WIe=_QUzT(;N!p$4h#=^rZ~u{rLQ2oy13XEq8iwuUzFUh zG=FS)bybfFpRbY39z>IzI`lCI=yfj#3;|QRz14nnFj#tk1F3cBa}F3uVvkPwCBqw~ zj-oG^n0j;!9Y-fPkj8;@4w&lEmsF9bIA9Jc4|ERHrx)dY6`ZB7Igs&umM)v@F zT|wWVZ_#(?dvuip77kcBVB>(D0}c*2IpA80uAv{%b#w#WL_Y<~GdYmOfou+R;y^Qw 
zZVB=OLre?&x)Id{r6q+KWac=<9&esVq*R`V?dA5 z-&Du{aKOz0Pf)jB;(VHVZXfgiq@HIni+PMtg!woOhhqU2;s`V=&^EPuWP1+0!hwz) zXhL-nSeKgR!(t|X6^_O+I2KboY|4RGInas&ZK-0g94n}!-oz@b?$@q}u6rdzN_-VP zLn~%zsLeT$lP8w`%U-9fObkPj0BdnVpq){RR^r4!8)KsXDJ<6EWUS^u3l6lTC_O`) zA2F}7)`(^WYg>orET?bB)*Yz=JCsz3nZ_2Dh8FGqyT$3?7Lz#ux?>m9mK1QH17RAN zxUnZs9GGX6V(ewQ2Id%LflrJq#!c`mfkQ?U#!YGIA!B+(A9Il}zqGWxFi)%x++{V& z?xD$QK{s|GC&#Z+^TCvPm%#jtM1LFHktSNWEpCU~;|?6?#)0k}=)r-Wt8gcri|n`y z2YPX!KY7f7fh4g{N8gaDUM0TKeY=$u4-Gx_>lku@PP7NA&!G2?C@l%qP=4tn)d zBgeh*fM9QZa9`XH_vb)w8a4WGpf3mdt)jLvi|IobWojvH0&Oid(Z!g?zB)|f-GDkY zD_A<#8t)&DN6=Dc88O%4ksKKGG_$}}^aub~<53*Q<3Pc)l@gD|ufMp_#}hE^U@!;r ziSyZDbj|4|FZgRDIvG!WNoX3T5wDN~MI_MCs_8cy&v|JBkLTk+kW}zY0x#s~F0~=u z!EXjhEyhbYKu0m`IjLp%otN-e;5rVJaNxDTU2B|j4Sx5f+V%K994O^LS-@b6i#~%l z;!SunBg9*nw)jK5El_6Ym^6kk94P0&_;dIpyn|Nm#Jlir+F%R*1dhjhu{m(j(AhtR z1LHU_;;Em7+-x8ItijDVFq#8YrB8jV!CVdnee5tkf{)^39H`(xB?qcFQ2k#%_ROUk zP5fmSX)N`>>>G{67aRP6&K-?%FL=zq`NK=&!#~gweuB;PN<-w>pbo|bU4|~w_-4@1 zKcq?_>INUWjeiLYPD{u5SL!Q+)6&8litplkf!k?%>g2x#eonLa@qPTqGYfmjfeDS1 z`B!i9bHHf-?uae3VdM?2sLyI`@=1S%? 
z+nwZaU@>uaD=F$$HMVPU;~4w0172>+zB-T{#DTXsdivpPY?aRrp)rUpU<=tImM-v1 zIk1caZ*ySzDz=y%%9gONao`;etmHsF2UZ2jGi5#dR#q33R905|DtlCo9pURgq`I`U zldrNezqlc&vLmUjvD7(M5cLJ-AY09jVn?%M7$G~BIlzu%$1`o&3G4)VN^NE*vJ)EK z0v9rurPnT=;_G;{8c1oZw%N`LjZFXwtKsF$ECk9SuwN!O2 zX(*ocf`;dYo&(ed3K~ZqVBeytGrN#|gI&bFNiBhfrS%-3vE@AuYzUdb`)k;x>@s>% zFpqtQUBQ75IIx56kmwxm;=t};9wec&+$ywj^%b?N@|9HvQ=h;e_T0c}yWSWT4F69J z@r9g-UB|Acn>O}c4s5JrX@{EvgB%+F2ka*5XzWG~Y_4NBb6^Wee7;&Xvfjq-X7sDs z?byifV0W^V$JWr=Naq z%PWRe4tlm8_v+!Rtg3A6=Ma089^7JG5Qi?0TUS1Vq)3g8zGP25AIE79?0jxl=h*WN zYeL|nL+iiDUVh&23J3N)Z}=U1_4%lO;OIHnGx+Q5O7 zlB;9yaA5EAs^8e)fjIj+2R^L}X@MmA=|ufYW<$?4L)!}W5&M{mgcebj# z46Ek>9^@eo?B~Ge960jK7kNBh_=~}dC*TP=Koi=7#MzLxKMOrMfrIYY9y~EG_9X!c zj|Q?s95_szFK7L_<}=BQagwLv#l0jMAMENV?J8jQ$bmJGACnk7IQEzQe;Hh!I4ZY$ z2j5sLoaE_whMwK4@~eCVFky_6Nn+9&7vp7`(WLnr{m}G?CZ7>BpNz#) znl@@^_Lqp=IFEiY8c(wZKmA@b8Q0)xcm`e3-@?oAa_nD$SJF>L>+pK|x#$Z1nT7N# zP@7O}8NrTa$5W^GQ_rqtXVLFF^XT`Th16Cb(oZxUczt;Nc>{Tac?G;8-bmh9UJY*= zZw7BBZ!hmK?+ouU?<(&G?=J5NpXKxU;d~)KhA-jE_zJ#?pUoe@_w%RoXYgnG`3w2W z_;vjC{0;mM_?!4U`Mdag_y_rC`Iq?D`1kmK^8X2wg=xcFVNJr?h4l*SA2u*7FRUnR zNZ8P@*TP1JjSU+gHX&?h*ymw~!;Xd>4~OAV;fiooxF$S4JR#f|o*r%vw}jim9pQt* zXNT_&zZQNk{C@Z!;g7-}hd&X-2>fb6f*?tdEYJ%a0=K{`Xd=iFv=Fosv=Q_a3>C~0 zydhXFs26M$d??s1*dh2>uvf58uwQUMa6)iZa7*y3;I7~|Ar^{+3SqKPFH9At2~ENh zVTI5yoFbemoGzR#oGY9!3<%#6)(N)>_X-aSZwl{3h$Hk7jtFOj%O8;y(K4b_MCXXE z5#1wtMhuD=98nNa6fq=XXha}lU&P%=Riq&@Ez%TeiL^yJBHKiEiR>BKJF;(N|Hz?{ zWs&8PBO^yej)@!>`Fdn+b-1kw6qHl8F=| zl_*_g7Fk3#kwer()K28j6?GAH6ZIDL74;Vl6cveviHN9H^rmRBXo+a4Xr1U&(PyFq zqR&N#MMp))MPG*?bt39Zu|O;rE5&MYoVZwAw_2Qd$09>qM4Z5rDlwnuEQ*gmoSVh6tHwdQ^JcFa1J#QhGspNqR;4t@L~857K{R zdYMx;K<1N`$*N?WtX4KlHb*v3wm`O2_O|RDS)Ht2wp#YSY`bi)?6~Zl>?_#?*(KQ( z*>%}X+0U}uvR`F)Wsl@o9xYeNHS&0Qf;>r{E;q|9a+};CcgefRN6TyE8|2&MpU98N z{b%Lp6iP*sB3YqVq$<)B4uwmRrSK@S6|X4z zDEx}qiVqbZD~>A8D9$OqQe046Qe0D9SKL(mthlZCRq;>>l|rRLsZ}N_b;=Z_NtvOv zD(y&UpZH~UwKk_T6tD^UKOEIsx+#2Ria9#N>Syg+N(OL zI;*;>hN#A>CaY>x(^S({wW?XFC8`aoy{dhx{i=hiFH{#*->Pn@?yE&=sX9a5Qr%VE 
zL)}Z=SKVJdP(4;XNnN9!rkN- zzO4R6{hj)O`l0%f`Y-k0>VGs58j(h`jjNn?`6CB2?BF^MEiN}7`NZqkXQhq@$Py3V0% zqHC&aqs!HG*H!68>HNA$x+%J;y1BYlx;46Wy7jvEbnojn>Ne}P>bC24>UQf6>yGM< z>%L58lVkkJl4M!3GPxkRJb7X=mpmnTT5@gj?BsdLf#j9RtCQCy|0j7v^2X%N$y<}R zB_Bw>nUa>$H)VXvn<<-9PNbYqxsY-><(rgSDgWr9p4Erxg?f=bS})Pd^(wttZ`IrN zF1=fyt#7Jtu5Y1lt#7OEr7zJ>(r?xu(Eo1W8~idulEGql)zHe&*3iL_Yv^j|Vd!n> zXBcQ0VW==v8%7((873HrVX|SWVTNI*VYy+u;eg?i;kx1P)Pz)LYO~aishv~1rS?qi zliDwJQtI5)1*vbOE>2yV`gZEd)DKd(rS3@Go%%`Y!PLX4$5OvYJ(>EqQED_BGmRc& z6Jt}qv4gRnaiDRqvCufgSYj+SmK!G;IpY-LG-IuCwsD?ufpMX+&bYyN$au;4Fin=0 zl-4$_XWF2&!D$6)MQKCQ%F`;+s?$cNjZ1qyttM?j+Oo7wY5UVor~Q}?)7kW}bYZ$E zJvzNbddKvh>3!1srw>ZcPcKX#kv=}1OP`YNpO#*mzA}AP`iAsl=_k|gr2lPVOvuDD zMVLe;wJF||Xi7F^n5?E(OwCO#Os!2_Oao1MrUFxusmwIlG~Kk^wAOUWblwcjLbKES zin+PDg}Ie^fVsq6VXiihF^@M-G*2_vnrEBmnirTCn%_3BHE%W_F`qYoYrbm!(Qm$K zzGc2|{=@vp{Mh^?gUN``$j%s)F*;*P#sISt>ptrN>mln=>$ldc z)*r1mt+%YdS|8eg4cquOfi2P&Wz*QSwj^7M&2Dqq+_r36TU!TPuHV+xHprH5E3y^a zM%pIYh;6d1#_6Inw%@kjvEQ@*Ksq={QsPnkQcDkTv}I>E7@gmrMXOgm&euAmE&sZYUk?c>g?+38ssW= zz2+)&jc`@EUU&IjlUy~f>8@JWa@RW72G% zr)18~oR_&Eb7|&>nY%O3W!}oXpZOs3VU{v$U{+q%(5#ZIVOdpK)mfvn-pX2=wKZ#d z*3PUwS^m9QpJg4)I-PYc>+7tGS(mfE&AOU(E$gv6-QC_j+)doG-SgZ5_agTa_fq!; z_fGd7_g?pB?t|__?o;k>+&{T*yYIMvb3btZ>A@bpN8pL{h&{0$sYm9~deS@|Pis#H zPp+q{rpAcF!E@d7ljpYQj_01|A20N>-Y~Dw8|hVf$a`7b^Kp~IKIE{_n$!CyX)M~Irpr49?b>E-T?C=FgZq! zb0=c-czY<=JHJyaK?yU0sbH$-PtEY&X6`U|nS0D1%%9A^%tHV`AP|8d5Ddg10?0um zNCL^g0F1x{%)kPyzyVT0E6@&f0_mU|$OPG-4;Tmrfx%!1$Oi?W2#f~f!7E@A@PaAe zH82Cb4rYTnU;*I367VKy1Z%-MupVpx8^Ica0na+C&1_6 z3vd#A2~L3@z>nYp_zCPzBX67HXgl>Y)*upbe(M zcCZud2D4x4-+uab#m67iBL zWG-1q7LmndDOpBVlGS7#*+TY^x5-{|kQ^c(kWa~H+y&GI(to)aJSp%1Jw#8?8}uf( z0be1NbS>3znt+(yyGbzs(Oi^3$K?w&Tr2+HD~RKs2o!@wOb|1iDXV8nX$u;An(?(q zGUcmj%xR`4lf_grRry&tJxA9S4lDO`sHrI&-@x}3G1*PSqnMh(&xUh6bq&lYrk2Uh z&C1~$yX8U*gUi=24NN1oP$!Mqtrw}F=^CS7%UohEGruxdn5(oi?LxcKZZu;p^BeO! 
zbDg=t+@#%UE-j|zw1Tq*T7ZEo43tF`^BmcY?Y?1uMJYe+o%zfIUp81<*ayGHr^oZfXJZ7HIp0p2*Il~0{*aZ*} zCTtBr0MjhmYYhkh0@|BqbFYZ=ydfa;rLiy|qdBxMb@bM@pO(n@KFwsJKrEwQ4-`NN zR6q@)K?@K=`_cY1j}D*%=^#3IJYI}Ibr zN$UR|N^2f^VSoL|{{bMUsHUpCymwVuWgWl!^R1oDM4Gp57L*O7F#6T2sm8Buz{!-Y zrm@X~ZjjbA=B-KmGF&N3e} zN0_7h(w-oTnZX>R!)XcC(~jB>@ApbtzE)S-Od<#LWAtl4Uyw^nY1tYe=nwMf2s)CR zA7tU`gCe}m6NiGrmq8VSa^LBDKnWNQN3VH3iZvjtVW@u`q*RvcId2iup>#Z#N@qn6IdzZf$1+x|XkZui7~6WlKn`T!j95jqS$1RsHq z>0COG&Zi3+!BOxDPe@NWx{xO{j3+dytuRu8IzV82|f5?rTMA=rgT?@&4Y;++0O!gx^cT1enD8 zQR~2*+lSZN+lM!s z?)`6gV_+WjW8FyKd5(1;Y%4uZcRoFDI0P0vBRZ7sd+Neqk&kXM-T&X{#>0_KbT`wa z_ns53;0d32PPiJ@Fk$s@6n(!Q*3yGqf;z72ILI<#8{l~O3Y-96g%jZ<=!KMiKo8Ro z=|}WqdW0UOpKO4WnHW$Hr^0D)I-Cyb>8CW9SB%f-F?yW;zQ)Z{?-*(?9#dEB8BNlMQesT*aKBU()N`JJF-MtYymA!gX*x+yFPiP4pD~ntn%rq!*usTDTSN^wHP` zx5FLuG(AJlHo#qQH&5dn{fc(*#XvoGDy{`LtHmq*55N!p&*a1KL--N>hJH)WQ;oJg zAAWtG?ATm?QqTa-;fPv$R5h4(BJ%E5J0eLU(Xr@xNNNB4eD2l9jkzR^_;GgGVOVh*eXOI)| z;eI`Gp;Y8XX{aS?g<8{x^bvhbpRj;o0bl`KkJ_Sks67*c+&<-J0b&7W0pU4)?9=|; zd*Trgx>9U>^l_$ghjbC%Uc8MWH$SX;dMGFd4Po>fQD2mc`l0?P4-G&A(I7OK1pzD& zut3OyKo*Et5X6FD7Km98vJvGoF=!|%M8i-K|K>p@ERaxsRLX*pEEvUt&A!&#EYMKR zAl_Z|EgY6vILuQXUm%t8&79Jzu~~&<%7z#6dFv3qqbY;!P~#~qkeayb@d|F0ejHb( zSEb4_yJVJ)@%SR}IA7jdT~jrp8G0?6=!0H|Mx!xkEE-yGsJlv{;#XhJ$oavrn zqlXXo)cC-iMGFvz7V;{zi0Ouwpf@>zH8U=O^5>yqLGo8<8CuQ{u0Si%Dt^KuR1aID zHK>6bZSB<}fdxq{P)nI}Xf0aDPg{>Rpp9q~+KjfKt=!J!9C17gTChORf@l_KxlluD z7qr87U^`hpiB_FKd(hkby1hI*?+(l;=6!!zNtvf6*B>=l5bNhCZoo5%HQB>^=m2-h z5X0kmpF3&L=w;p~O`kKqWROn@XQ0DOH_Gp-qfFD1kI@lM8Z0Mk&`~Da_ZHaH|0(*6 zTW>UCbd2v_Z#1-diDPtv=eZs=@V^Z#;0=Ih*hkT4a;JQ3EaAmORZXPMp|7|*wqzdF z*W4|e4Wn<-d9GKAp6~dM>zQH+LqDJke3U^yvcOo6eqw=%%S%k@d%GhZb3MAk z0t?mj?O9k>*{l=4qgzZ^1GL0m)Ttw*!?2PR?t8Vg#opw)UzZ~zuyA^#OI6Ijri1#MW6 z&Vug#zaCV_El676jV!9FFc(%=mwTGD*X&bYPf^`dv{;VSOxXXv1>zVS+jI+LL0cAd zq7~HPmnV+r!DAf@+P#q1<3ya~%j;Rtfd%b-nZ4m@YF`!FyrCJ}UQF$A3dmzYM>>+v zpZLVypWMHop4g4keCmmL_A*$|xmh>6HGydB1JRBJU7qPC?&#A^7IghjRs8=d-E)mC 
zkfw5nNvfoYewL4ZFBbIVI+--y9Gu5zg19fv#r<%97VrbTS&+?wK8<()9tiJZ-e={o z{8i;D3x-lnUZ$s{u3wpFY=Jbw=X0vd%lu8hLHn$EV5Z;Td-7goK%cVVrT)=oK^5Ur z-)_a&gG=ym7W9o|K`smWv7mn=s7EDC9uq@f;?cZTGApR59*<$cfTt-D z9?xI3@hg~Dkbx{1@+I7Vvt_J7f(v&)mYh6fb+}iiTI>w|r226#=}O1;x!!8+=e3 z@mdymSWx;L>IS^|rCqn+tt{Zl52ua+&pfxbec8~Lu0b%$A<&V6QI<)PlM!kgK zOMK=(rk?Xnt@cfIRjz(%?zi{{J`KX>@pt%p7SynymIZYU_(yyJ^NwdU3tnfz8@!3+ z+qIDe($E}FId8+hTEo*2Rv?Y$3hj~JjN01Kp4#T~`W4^c(;a*TU&Yt(Z}@k7odvvd zjAg+%7K~@XD=e6>4&TJL@NH0!@A`t%t1RGkYN}7Mru$-2fi&DF<8jr6{KcR+qs~)N z>x)nMd}qJ1+A_Wv)rUt?AeHzO+#_m&<&R+9-FohEYNnzY%tQQ`Pp$DI7EG+idIr6nx1I#BfKttKV|==lh)4)89TG%>2_OAg7EETrlm@=VfBg-AHk1ndK<=#k(85l*B&-NyKpqxe>&9qw(zFgD{z(>t@Mf0aX zB%bJ>uS#ISjOVM8iJ?iU`4chZTL@n)*+{JTDoG)B;vi1qBB_M;)-(Omp2Y&*Q0A~; zE(_+dU_J{LYy|bB6=_Y{@XvOnJ^$O$Cv)C*7P4TG?{zT?Hu|2MxGVN(PUhI-&GJ`h z{GN$qzwq9d&n^k?8kRgwFG(J+ykr33E&fdwtazrpWC$6`Yb41h1uR&~f@N!%Gh`U$ zef4tc=u=kQr*3@q@Mh*q$;g-I5^p)-ZDAz~HawR@HR0V(BN;_%NG+*j!73KK#RA@{ z>l?`!GM0=Zyqc|H0dLgnSg_uwPoc&Cac!&}R5H4}yr-wOws5$oZ{7H6&w$#|!^-@c z+{7&9-7sOzENGyr^UP858ktI_k?BkjnZX<(uah^JZe$jj#h0ra$ZRsJsm0xMt_W^? znoy8=WIlJmr6dhx0pYm)t`ts@n$A6NX}nEaEb-S`v&))V%U_W0D*t=4Sn5ebv%esX zEZFkYUy$`oc>~!%Hj+&&*vf((EZF^A?pw)@7d-*lNp`Vd8w<8mM*;U{%Qz)@hwOi8 z#(U%d3;3jK7k9R0tdbliAHOtw#7AHcPk;+)6$z3#Ruao?7mB>R9cJb5)jv;hhnk{N zK5MNxMNW{pT=NLPo*FF$TuTv}D@xHU78Jhv*nO$Jg|K=4<+w`I`Q9bQ9g? 
z^9nO=g}d{m`Yha=FV*+u1702;j?3^!T)`LXM`1qR;IViP-i+VHC;4J~II$3`m$cyv z>)rUGdM3%@%jpBiAYSJRNFkX?j*y!HQ31Mu#DL@gQ-CGF7SJ}JOF*B1z5)FL@&aZD zEDop-*c7lM;O&5e0mlPQ2b>M~D&U)d3jsd|TnxAza77>#7zCMuTtPoUo}f@rCa4rl z70eOL7jS|_f>naI1oeVV0`ES-2ZGN8UkQE?TohavLSdj#F4PJWg~>vr&?dACox)UM zy0D9|o3OiZx^SUzvG7gdGT}+#_rlAfnz3H&VZc;JP=tARHHZw1~7yeDErL?jReib6zEQJ5&)D~c02MFT{`L?cDj zqKTr(qSr*zL^DORMRP^-MGHjBMLR_ML4D{g6;(~!5|n0b#RN|*x24zWwzU7RD%6Au&*7Uzpc zimSyn;yUqo@dWWiu~$4>yjuLR_;c}j@dfeE;)~*|;@`yA#W%&b#1BG9NO*`mBq~H1 zq7G>h5*rd1Vhgc{I73oH(n4B=vQA0l9(k{Nt&dk zq_reN(p}O+(o-@}GDtF5k}nx4DVJ18sw871QzWlRrb?zu=1DloBFPfT2FWJL7Rffr z4#_^rQOR-1=aQ3>vy!hQ-$>3&ewJL7Jdz5eYH5-*S!$4)q%EZx(jL;D(q7VRX^u2k z+F$A|k(NqFNXw;_(rRgqv`#ukIz>8FI$ip@^tkjJ>3Qk*(jP-Dp{+yPhPDsw7&N5gyn|y533K`7Pcd7SJr3m%Zzi9g%$@`%-pVc2@S4?1JoP*+to9*%jHf za2#$3PYW*$9}!*?K0bU}_}uXM;avEl@Fn4^!`Fm2hOZ0X5WXq=Nci>eeX z4VTO1QF4vkAa~0<$UDh9%e%@mT}ge)hX2()j6-~Yt^@^UsX3$w^Vmj_f-F=9;zOz88uWJ)C1I0)pOK~)N9lm z)tl8@)!Ws3)$gkJs}HCTsy|VGrar0uUVTx0S$#!)O?_K^SN(_jFZF|H5RIclqobmg z(dy_H(Yok_=%i>vv?PoVk2TBV->Ng*!HoW*txO$Vn2*M7JDZ4T5oO(#ufO;=5ZriZ4drk5sLldl=78Kx=LEY+;jY|w1dY|;D~2jg&DK%6ix zJ1#$NXxy;4VsG5GxcB1Tk2@51SR15`(kiuTZ3}Ic_Eqgn?QHE_?R+h#U94TIU7_8q z-KO2C-J{*BeOJ3*dq8_o`?>Z`d`f&_{FL~&;`hgY6@MZAm-tKZzsBFw1?U2G!8(a9 zOc$Yx(y4SUbQ+yQm#RzCwbr%Mb<}m%b=7s(_0;v!c?an#b+78S=nm;_>VaOWPtv#3 zXX*>|!}K0~seYusQa?&xryr|-ML%0VPtWNW>EF~Z*RRso>l^j!^c(cM^(XY_^>_3S z6T%Xl2^k546DkrWC#*<#Ct-iWfrLW|A0~X4a4F&Ug!_rgM15jnq9M_oXiKywc1s+T zI3jVPH*sm=io~}P*Ceh@+>m%a@nYhyiPsW;PkfMsk^+;0lO#!DN$MnBQhHKGQfbnt zq)ACMX-d-6q&Z2ANgI<6C4HK7FX=(DHo0|jyW|eZ>B(J^dnNZt&Q0#0JT&=@3e_qseTtno>+{Ozlh^OzEbsrVLXLQ(sem(?HW;uc^XR zZK^emHjOn=(`?fM(?Zi?({j@)(`wTi(>taUrk_ohOjk_5nQoYFoBlREG(9l`bBH#x?| ztv9WAtoN*sYymczEy|{}scjltye+|&WNT$hw`JS<+WOfB*aqA3ZNqKlwklhVtw%>NZcF1-byQ@le`Mx<1vRHxLYj7b@v@@mTKDYH`MrYuNVn6e~gY0C1HT`7lC&ZOM8 z>+PNGeeAjRJo_MfzI~{@#_qLGwokRsu+Oy5wlA?a+Be&`*>~Fa*x$1sv>&#AY(Hv0 zWj}5I&VJYa!~q?|A#?;e#15q++7au}I`oc2htuKqI$Al}IyyKyIeI&?9eo^q9b+A@ zIhHt9gX4nZA7_X&&S`UI zIEOgLJLfnzICnaCIrliGv 
z*J{^B*A~}y*G|`Y*PYaWRAs6uwS8)CYFX;Y)VkEMspC^8r>;nCNPR!`j62fpc6V@h zbf>$!xO=&KyZg8Yxd*%R-6P$V?rL{!jeCrHoSSveaL;tlanE;i?$z$K?)C1C?rrX! z?mh0k?$6w(-51=K+`qc7x^K8|yYIRGOhakGX^ONKX|ZW>X$fh`X{IzwTF11iG&XHo V+AM#SfPt{-Nxo=$Hvgv0`F{s#Ue5pk diff --git a/LFLiveKit/LFLiveSession.m b/LFLiveKit/LFLiveSession.m index c0be5fd5..a3a27abf 100755 --- a/LFLiveKit/LFLiveSession.m +++ b/LFLiveKit/LFLiveSession.m @@ -15,6 +15,7 @@ #import "LFStreamRTMPSocket.h" #import "LFLiveStreamInfo.h" #import "LFGPUImageBeautyFilter.h" +#import "LFH264VideoEncoder.h" #define LFLiveReportKey @"com.youku.liveSessionReport" diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate index 8c56515479a5e5e0b477cfba36979203fcc09534..390dd66634d84f1e48fc828ea348ba7f4d324239 100644 GIT binary patch delta 6206 zcmZ`+2YeLO^MB=Xz3p!9cK6zbyAVLC^iV@p1R@XuglZr$1V{oQAQ0f@q&F!C(iBq& zk&Zw_rHgc=SE(XJKst#0Q24(~2*r>8yU*wLc4pqZdEfcYd-HC_=XIH3r+4VxAg4sB zR&zLNX0tf|mna`CMg?doT8Sp1RcI|*hc=)I=sUCtZALrLPiPN1fR3OO=nOiKenVH# zO>_%AMt`Cw=r8m)He(C6Vh;0Iz~R`2?bwTbI1-n|QMeo~k1OCxxH_(f8{ww7CH?@% z;tz2g?ug@YH{25^`f)P;7-!U@LiN89jMe34h(wej(ZAmQokhCN1Ne2=~5=d9_5lJK|B#jIv z!^sE&BtSkT6UhuRlPn}(ki}#L`GIUCo5*Ielk6h9$*RNGgWM+%$V2js z{7uc&LamgeJQZj-wNWSa(Q>prt>mYbX*8`z>(d7GecFnCKwH!Hv;&Q!DYP$5rTu7s zI)J9pbecg2(m^zn4yIXjH1$)U0Xm5W>1;ZOeop7od2~L_r;BL;T|(E;Z|GXOj&7z~ z=vKOe?xv^cX?lkKLeJ8R^fLX8UZGd%jrhp)AdrSQA-IK%$0_St&>U=<2A{zkI1E;( z3MuOzs07S4E`F&B{a_4?1tSyN)mE&j4+zRaqhMuC3at+fgL5@$x;juE>OncE8Z>F^ zM;4&DXdar67NCVt0V+c^s1CKjattj(Um*j^LAfX|I8ZC|mNqThqz+AvOCCHpH6y)B znw10>Xk%NIm@PwXQ65xb@l;AEv1^^0RqA7PvGU#jR<1`yY-A0n8I0A5O-sCeUo5K) zHK9_+b{R>7kD)Crl&xqR+K#fjC1vy(l9rsF72lyvLRM;8^5Cq*w1H3;q5<`?)(jTv z%1ZwV^(d-^YNMTKSMat@!n=cNy@&2a`-0{4mGS;yyuNO@amtvxb0< zB0Zb*D5{Z*MluX7B~DJFQ@yO^f@KUoV{ArJavnO3qF)M3Awh}NFX(L0V<=D0p=kDu z3T7Cz+y!(AMdhN45R;33g(i>^oM@=l_$s;{l3s(Rx#)Lj1}PoeCZ?tzL%QPG|3LQ; 
zpO0>%JLoPnhZfK>AKhnYK7{uentdT5_{7jV?rA8-|H1pAtd)%W2Zy1kLi8NHzzAbZ zFvTip1s_0bXaj8_7CtP*YOKLptV222fQ`@&+Cx|91Mff*B(vC77}r)1u?z8wv4myp zz)t7@anKRs7h^Z}AO%N2C+N(W)`dke!8FR>a>JoU-LvaP*REbE;@#*vbw?#+N7t!c ztx`m_QN^I(cW}k-v2o2)`(!cxBxVlJ!|$Nz6KD#W%5a{BKEsu96*LpgLT#ZpbSt4~ zaT0J1RHFdb#IM?$85zLNEB}O`7@{Pg(R8p;9UB-r!`Z=nW>8ieB;W)j-_3 zn0|@>O}`SN^{6~FH7PlxMS35W^UPk>s4hXRrCifa$bdVeY@C3$b&idz(mQc*ayRzO zOdgP!#cGEU>tm=}f<|}TBR)2caX1fmN6{s+UihQnAd4jQ3L#Un^xiDDxDTWTzp#|4 zkb(!GsKvN1PR0Fjf9MYbAPv$N<20O(its?lfFUgC088IBOQQ;(;E^HgVR$$mfwN&C z41!D;ycj8{E%u{Gmb$8t#kP%^^NK)tGM>t4iKoEOTs#dvVKx@tQFSJs zorh=PAPj?1058dgKgaVCpNHq-c`zJCe7pkHLD8iXuEyVlf?ETh z<}$9ZEiib5b9ukT8&FhE4y2Tt{eU-x%r?TfT)Y{^m#m@B_Rt!B#5-UDOn$wFU3l+X zQSQV0VIoXoQMPK{qD4|_Rz{{TCiSD@C`d_oa~z+5DKM3-dn)ht|9iVE9d#C; ze}(J?m<}l=dHyvN)@A$~%z)tQu&&`7Z%uI%--4Mi3liG-V#Y~tMRN~7eABc6zKI_* z+u4xdi%HLU%k(M!2Sw%M|KVr&Z}=SM!n}O^9KRq4=EE{r&VWi5dbL**D~ei5G(<~u zL{ALFNKC{`EU*9;!WZx*EP}5f2XY~ADN+be1QO1kb|SK0nUO0W7Q+(uEQGJw?^5Q~ z7JfD%ij>QNg5pGz3gjKOA-qg##vAh5 z#s7ozg^Eq;k)~|lBlSrG(vUPFjY$k?0xMu8tb*0B2EKu{g`^p2PFk=cP{{kF6|95x zu#LrMgrDIci?PUdh3iNB6szIgT+$zYc&$h$14D{sP$icPf{m{gS!8Hvq9L#; zmwW=7A*FMl#DQ7;hGZtco)oHREABB$O&95mw`QWHMXQkFX(+j3%EkUpp8GIUbvz%p#vNmZ0@yHk+dc`~+L` z$y_pzz3hbjp-H>8%uGy6{v;!_KRcjHQ23JMgj|kh%iRj^hFs>cKf7M9t$=)8Vz7+K ztAq?fe|En%SV`8Qr~)#&0$EMgkRfC(Q}2PjunG1tY3Y{52Z-|7P39$cPoqT z0BjBU4dKFqj7$A6u5)4uGNn=OA^Tn-vL6n;M&uwl9Kv-n{P+^r(hZTsJ0@w0r z1T6!fnWh>_YS1a}sQCfx82pvxU?k#9dS_>(( z4%~p7tR`SoEK0pKq^$1$!&@`@eskuo zCESKP%-!9W?pkgbef<7p?_nRcNG zv@7jKyTc=R41dBC_zRxG|KQozv?q#W(UNA2G%1xEM>p2Ld(M(ibF-4dq; zmm5Ndvl^pA=_hnp0FwZw0j$cWBWN}q8Nguy>;gK5j-})1cse0~ z)d6e@U@m}d0ql51JUW?v7IHa-PNmZVSQEh70M_Nx>2wC28Nm7gHUwC^$99aUpyKre zm%1nV7tlrjHRLOrLvsVz9KeqQ|RCLi6O{3XeW!J^k*#e0)!Tpc@0&9>8J%OUy^(dnG>lpop>r zMI5Eukb!QeKdyV^$sm5Y_+X(w(VabB9W2cgvl8=}Xcvm^#M)2xRvPyndI&`=VNmU( z`{^|LGd)-gm@|M~46^`s2e4-eosNp=QF@H^ghdQLs2RXY0GDBXR?QHKUe+HpVIA$z z)U4qhf)%}d@L?H$m2)hp^YlWf2Mpa%LL81MIC1=R07sNWc!~a65=?R)y%fyx*701! 
zvvZLN$jcLs-ee5{^-wolV>JlD``&Q>J$gSrE-NuB88Ff!8F^7fcCo348k6Z{4#^|S zND;f|ZYMj~O?MwTK-hLhp0InZlfFYMv5KoktF!v5P3zM4*fq8_ZA(977uh&Q=g#cX z8erGb2^4SP2%F)S-AKkVDELt*E_?uOkDdl>dO z>`Bb-3EDmedZlOYKodsJ-e)b(Ffix`w)qx~qDSdZc=mx=6iOeNKH_ zLo|Murj90BQ(x0i(^%6*lc?#d$xfRJQ|q)k zUZ?2F=zO}ex~jScx<&NQH>nG|b>!<4H=;!Gd=)cex=nv_S>z^AmhRTNOhAxI=LtjHbLz*GO zFvu|9u*9&`u-vf9u*R^~u)*+?VZY&L!y&^F!)e1WhI57shD%1HG1BO7U~FN0-}r&C zjWNO4*VxZEz?g0vXv{Qb8HXCj87CMg8K)Sh8K)a(8iU3;#ysOH<6h%=<5QDp@|hZ& z+L+=^olOa*Zl)flk4=3|{Y(Q)>863EDW2bb60bBzqzORBXe(ahIx>Auz862Q}bB!c=JT_9P?cBeDgx{*XCvB73Nju zjpohft>*3KUFKuv6XsLqGv@0StHo_8YkAjF!&1vq#}Z>{X=!cgXz6DeWEo)@W${}A zmU)%}%PPwn%Ua8N%eR)TmhF}umYtT}mc5pvmYbHxmKRpP(Q31bR@v&bx~);x^452( zm8?~))vU4B{?^Z|h1NCJZ>;OA+pIgRJFR=H`>Y47m#jCecdYlU53G+k%&9por{^pj z#|fN`^KrGg#@zc{JlB=$&h_N_aLHT>m&y(1Msnl0+1x_zYi>EWl3T;A<<@h1xD$Tv zBzKBC!(HQUaJRTS+&%6AujVa$IB(}A-ocmSEAdtMYJ4re4j;|e=Uecxd^i3hzBix5 zr}F*zG(Lk5@RRv3_@(>`eii=>zm6~Bx9~goo&0WoFMpCh%b({j@>lq4{O|lt{ucj8 zz=B%P3VOjJaDrbDY=TFqC{!0}3blp0LSvzc&`f9{yeFgx*}@`Wxv)jpCj2P;BXNs!a?D%@SAW|xGvlfZV9)AyTX0pq3~FE63&O039l92D!fbh!0@r*IpIa&2f|N; zpA0`8{%iQ1@W|HDb8v7e!fg zi5@XREHBm)>x%WnhGJu}iP%w$7ki7zVv5*L93ZBP*&>Laietq|;uLY3I9>cwTqb@a zt`m#I4dNDYoA{&nlekMfB3=-G6R(Qb#XrP5;yv+!_(D=iYDp`ZB#XpJf)pWDlqySA zrFW$oQY|UQFEy2#OD(1Mr4OVwQdj9CsjoCd8X=98MoVL)anb~7k~B@4Bjrg2QlYd= zS|P2HHcDHhZPE^Dr?gu-E}fFjNav&r(k1DJbWeI9J(B*E{*uFFy=;`tvLM@JQI_RO zaxJ;3{GQxOZY{TyJIEd7PI8i*Ci_RoemNkIktfQN<*D*#a<05e-YjpGx63=^o$_vZ zue@LWMLs8AkT1!X<*V}V@-6wcL**#rsN!hh80MJlC~&NG6gf6HesFAZY;o*&9CjRa z9Cw^?oOPUcTy*^5xZ}9zc;Kw=Z0u~}Z02m?9O<0ooaLPD{MciwkCbUt=IaXxiEbE#aM%jqibs_3fhs^+Ths_AO# zYUk?VN_P!(WxBFlLtVpMBU~e0qg~ToGhIQ~9M@deeAhzPB3F(p&$Ys}!FAYm-SxMd zb60RTb;r6VyJxulbKSY_Rqi79ckYeuE$*G}J?{PPgYLr~>hXB$cw#)QJ+Yqlo{pZ* zp01wmo>b33&tT6`&u|ZT#(2hiCV8fK)_X21W<^vyN+l&)iBXykV0cDIbUYVrKP=dz(4A<^9UL!n@kL)?4KL&b!gO#k=2o(0kZ>%zM&%+WU+5oDch= zeC>QAd}Dl5e6xLXeL22D-wNL$-x1$g-xc3=-wod_-y`2s-!tDoz88^Pq&?EbuB@RC QAtenEeNgf&{f+eeAMCbnP5=M^ delta 5909 
zcmZu!2Y3|K7QSVBncbP$-I>`kup4>}p@Z}$LIM&9B47+5KxhdeG-KSe^xhUhT1=yZ zfPexjAl1-|hzdwAQUnC#@f3Y`6GD0K?R?+P+;h&o_uT*d=iGbq-?`Oowu}Bme-69L zR?w{Hv2RYR_+SnyM3c}mvJQ?uL8dckz2T5vK(406Yk%;dJ~V&c+|%AO<`g&%iVBEIb>B@j|=^e~MS&m3S3i zjo08!cr)IDx8famC*FgL@IibGpTR%kpYSDo8DGIy@hyB0Kf(XPPYEW3ScsKy#74@K z3Zx>bL@JXiq$;UKs*@U|CW#@zdZZzFgES*8Nh{KgB#`dp9nyomOL~%Cq&G<>eaS#F zhzuj6NH!TqrjV&*Hu;1sBn709tR{)I}vKQ-!*zhkB`xM$yW&F0DuF52Q_KQ`(Hi)2_4|O`zTBJG2LVm-eK+Xm9!+ zO{DMBfpicZOoz}BG)RGl=vX?Aj;9moEIOOcp>ydHx|HV7Pw6VUny#Vi=_a~|?xp+a zetLi&qDSd5dYqo1r^B;IbKEn0oJ_>2o4V6nY@G$OVF4TmJH$ZhCXH6ae2d~n3=D<| zFcB5lyEDF53e%B^(&FDMr%+3)PR~$t$T7(dgh3X z6gw^Lv;oC3Wi94mwH_VYc1#_a(jhgoRJo;CS?B*Nw@0RfdQdgo+$gmynQCXTtN~Po zYF#_0^&Nf+?PZSbL;KMIl-09uTJne?DMK^6b?Mk6Gj&MH@XVwk>Cgxo14^_v2`@C3 z54%0G@g%B?>Y+pEaQK1IMf1^-@I9kSkD=pXyQw-p5pHT~9CHd~p@}S&vd|Q0%D`&| zEQ3sJt{+kDJoF(0(^fLfIdneJUa@`aHsOoZ6~1rc%=zdS)bKT#!*~&0O0-A4v@)@2 zFNdSe(fn0(9aYFf*Pvw{x&f^qH9X85-Rw5H8wjv3T(kv%&}1CVIB+61>&I>RAEm)=+Aun*z$R09}Xa)5UaQh zE{pxp6}mwJbT7nFxEv~jE5JL@gAwmt=0&(u1T)U=^V8DsKsG-`MnO+(We)HBdbTpQOxbI@EA3n|dMgs#PL zzztFD<+u@UjNiaba8uk2-h)JVACjOCB)>x0U@>8;Lx90Dtd*arJ`4;St@!# z$QNVbgyP}*{(nA3m`4DGZ*W!3?;|r#LRmN&?GO8GQilQ1s04w2xPP~}c*fX#+z&M@ zk)`5+iFPqAK6XTAQlG&oFX1V`sS$um5EvYZ?+_Ri4%+;chvAW^LLtt;!*M1a0YhLY zq(OQi9)(AveEb0ngHg=MaV&&Iwx*Rscw)qQEFOo);|Y)f!yyw!6rwUH7EeLtSomUK zB&#H;;b?nynK`I-?n@HQ!}A$6@y9Sa4=;c*%;vD&U2`$c$;V6ZQuqLZFt(I%xwrrc z`8W^fLl%6Pj|=fK$cB#?HJp6o;99)?HSGqx5dcEavqzWI{sS@-Qd34nHP1>%kzWZg zU*Ip_djU&}jEtl)1^5e8A2lqUa2x(A;@ox^pT{V}s$!y@Z5a}x2QsHE~J)r-;4K0y6=NYdH4WKe!jLtk+mJhM_>v}f4R2f_|$9Oe~(YY zRG7xr)>!Z;KjZVSX@9{NU6=nH=&O5r52&D(b3Mio{Oo(M!F4&o#(A`zJ=umqMu z4txr^kO%ot0EMei8R8{AqOx~c;%EO+j9JTIIjm&wRj``5MRAl# zZBmC7h3DjtPsuD`lDeoNOB2>8Mu*gYZq6z)X@qlfVQh)K$t&{a#oys9v1*Ifq$4YQ zqz!3H+L1R&d-4|P0H48n*Z>>hbJzr%SCCi|M>??#C_}oCc-R78z%J&W1&+W`=HmkA zRsKElJ`(asB7B)gl3*)~<#d-jm_quq_>q3FEsqRUP%DpQ!8b1zAF*1(6sw^|9tpvY=Za1Yf$@Z; zGb`CpC67#kosinOG_upkj2AA?gm0l|sbUV9hbrWgxy<9En^Vdf=gDqjI4}|IEgK3C)A0I zxSDQ$W>D18k~gF@Z*alB{#|KS2K^?A$Q>v 
ze9!ojq!q)}6khqAJbumM5ApHNAb+AHy8E(KW_*V#fLfFfKd{@~ORB74B z!g%PHNBwZ+MI2~3S|Oqkpno2%2v=V!s?eH|ZR5Xpei}_{74Q7l;5ti~cOf;Z#W!7A zx>L=+zP9r}N0c_8jaUrKv?1KgqmAKLw(L6Ia_yVb_DEPsThNxY6>Ux1(6+Q4eG_iO z9k>g>!9BPS58(He^ex2B88nv0(N458+pHe4^;zH%Jcd8u3H$E3Dc`HdQAsp~ahUd@ z$+Ry#g@41J1+*XSPY1wXA*_V3o5670HzaN-&RIs&XgVE6Gw5)57Q&hkHiWPx#GonN z$>>PRva*1VqNC{;_#6HSVN^h~=!Y~rgmDOy5T=nbSs{EOpoZ6~lgm$}Q~$TaG&-Hm z2w`mq>q1!19Ozj_DIcZ_{@3yox{xjkVPgoJLfFhK7ncb%&83C^Yq^Xrrz=9(8p2!% z+ZguE?icR^C>fP4Pf*#DbS*N|b@a1M&&s5cG<$K4r5otR-Y;rw>!i%20w($#HB4Zq zG1e*#)n>|4ayeV}7xYUyhHj(V;qMR@LRe$~gs>xooy%z!%BMSMZtm` zlQ<_2S%Ew+?(_%Nj<6EOCfaRr@f}jer1Wi(kvxFiDKbvbv-DiI_{^lt6u`)U6cj*J z**U)vYF2#li(L8&$ZE2lY$Th=R?CR5ov~iGK z?V2$XYmrvemeF?7_R-GK7HYrI9@L)K zUeI3BUeR9D-q8N4y{)~g4c^lcokpkA8FVI{MaSvvIzi{qxpaPABi)<2MBP9g=n8aS z>JIA8>h9}J`ttgU`pWvM`s(_&`cC=;eINY<{TzK*|FM37exZJ`eyRR*{XYHo`Wpsn z&>Hjxqrq&j8k!m2G;}j`H}o*{H1swk8j=jjhIB)gArv%>GfXf{GAuF_7!)IMSG9oNSzD z%rWK~^NoeZtH!$~YBHEqQ$Jl;IfJlVX#{Ec~+dAE6=`GC2|eAIl_eA#@}eBFH0e9!#A{LuW^ z{KOJvX>5tN^s*#ck}S!VbW6|@vW&A#uuQT{u}rhfuq?6USaL1-mO{&N%Sy{?%Ua7e z%Rb9R%Ok7ZTEp7NnqW<~4z>=prU$JV)=X=*HE0c4$5|&>Ct35YMbel@!l>&RiF+E0@3}a)Y^{TsoJ*WpX3A(cA~zByI{fjhn$O;+AkZTrRhcThDFe zHgUVS-P~SoKX-~d&7I+X;;wTyxm(;F?g96=4cUlIW23v7#QOKdr|Ew~-w*><#RV?5*r=?CtFB?H%l~_CfZ^_BHms_9OPA_T%>R_KWt*_G|VV_FwJKcpY!% zt-OurgS^T|@#Xo7d^NrXAI-<`&G~q~7oWms@T2%Kd=?MkhZyS{c6xemLIx=y%GxlX%&cHMB@a@}#=b3Jf9l#oOvtz?i)l0{M^ucS(T zDM~6Y#YlCf`cfn54XLTrL5h<)OYu@Ssk`)!lp+m~21-MuG-;Uhku*-4AWf2{OEaa} z(p)K5S|@FmzL2&CrR~zU(jIA_bU-SSzLzdZ*QJ}%E$N>0Kzb-WmWiyFjj~y`%Yy8X zU2>FMLyneXso+shr~Sh2@0Y)m56Wlc;Lq|o`GR~&z9Qd|AIgvAC-PJIPsN~E6&pK4 zxfEG(D_*6h(m-jWyrpzh;*@x$o6=qBq4ZbMm273KGG3XeOjBkkvy?eXp|VlgrR-Mr zD*Ki1l!MA)<*0IAxu9H9t|-@(8_F%^u5!<WhWkePvVGti=bPx8?Az$Opqf>O>QO7J_0%S6Gqr`J)XlI!m1!ROhKn)m-&6^(%F^x>wz=7OBV8Q|b@uPwH9q zs(M>}r2gek^7r)*@n`wR`ltBk`wRTb{44!y{Ga(Z`ZxQ(_3!oX_ZRsO`;YpM`%n6x z1iXQ^f%L$bKqxRZFe5NOur!b#*cUhuI1%_ca6WJ$a4B#ra5r!-@F0ptMH!;Fl9LP} OC2xAZ}L`pCTDvCPC-h06= 
zpkVL4_ud8Uy%+4gzI%3eHnTOmN&LU(=lz9v&hMT%x8J$*&B9g7TiQEDkKV!&j^`+6 z;RH_XQ`RRqchEu2ZS5_sE9VYsU3frqQ%C!}L5*!qOIz5F>j!nTu5xqaxXW*JS!+2f zXX89v0q5n0a>KZtxm~zjx#8RhZX`E~o5t%25uvF1$QO4nY)p@iMyG*ox7X6 zpL?8pf_svCnR|tMmD|E?<+gEea$j;^abI)Wxo@~{x$n5|xu3Y7c^B{H1N;trKYk~E zFh7jnl^@BE;m7ln_}%zv{B*vQFXt=xO1_4#;~V(7{674Cd?UYzU&0^2ui#hl?fgOf z8h$N*B!3itEPn!jGJhI>CVvipK7S#934b}ik>A8$&0ojg$lt=>&fmq~%Rj(B%s<9I z$v?wC&%eaK%5US};NRxo<3Hp-;XmiU;=kd)=YQgV<^SOSrW~bIq!P7L2X#{~Eu?*D zUpjyeqC@E}bOaqu$I*#&3Y|*#q$M;+XVO`;n(j^O=^Q$rE};9`!`WAhcen3B_pV2SrcKRLtk^Vw|r+?9ZExbjr*er@gv$!k;7Qdy)vZJNH zWuRq z4zzSw4z?U>IozUKj8<#fwgmUAuZEEicWwXC;XVcBfC#&W&oCd;jsJ1low z?z22-dBpO#d`PuTD+FooJn6ooe0FT4D`aXIf`jtF3!m>#cLF^Q{Z4`&*l=i>)oz z<s8iktv6V2w%%sF z(|V8fe(OWlN3Bm-pSC_{ebM@gb*uGt>s!`$tshuFwti;)(z@OHo%KiSFV^3!e_8*v z@ixI`vne*s=CT#o{I(+7j<)``fwm#Goo&Nyqikbs6Ks=hyW94#72C>eGi-a=s%*8k zkZrbYo^4-S*tXEtY+GtuW?N}H(AHr)*mkJxaGP#B+IF1nMB6F0(`{$j&b6(xU1Yn| zw%&GyZL{qf+x51aY`5C(u-$FD&-S405!>Uor)%JPUF8GiW92jDOJ%$Ao${md zi}Jhjm-4Tjw+nWgU9oF+m%YI5w-?!WwD-3Uv=6cGY#(kPWglywV4rN?-M)vt*j{Fz zVc*MMWv{h|?6d9j?EBio_J#Il`%?Qd`%3$P_73~O_CxK5+jaZV_T%g)+E1~cZa>R@ zu6>>TBKxKG_4X_5o9)-wueaZ1ztw(+{cihx_6O~c*dMn)Wq;QGg8gOt7W-@VH|_7( z-?x8c|J444{cHQT_8;s&+kdnFY5zwhsztS`vZ|_1)uZ~s ztDc~qte&Qxsh*>ruU@ELqF%0UR5z(ttJkSFs<)`Ot9PmQst>3StB9#6SXPYRBcbKLUCT+3SqAk~2wKi?Fc8GSE zc7z7nG1~FkN!qE}8QR&}dD;cq#oA@s2JK4iD(zbB2JL3;HtkOB9_@bZA?;D^3GHd^ zIqgO56>Y2by7rd#uJ(cUvG$qvrM6xBPWw^&Mf+X*OZ(TsI|PT#p*S>$+u?Nt9Ag|4 z91|V8Id*sK>6q>)a|9g~j=db!jv7bEQSX@RnCDpF*w3-h(d1a-SmtPTv^x%V9OgLE zakS%j$H|Uo9WOdwa%^#Ib-dwt)A6q3J;z6mj~$;ozHn@JeB=1R@uTBc$8V0m9Dh5B zlXr?vt5b2>old9A>2>;?Mb19Xe$M{RLC(R>ot?WlM>mCh>X-p)GbZ08*3KF)oe`#T$*E1WBxt1D$Qoc4vokwG*62IgfT8<2=@R zob!0+3C?qz=Q_`Gp6^`eyuf*(bCYwk^D5`n&fA@LIPY}c<$TQfxbq3;E6!J)Tbx^+ z+nldCUw3}&{KWaG^9SdT&YxVIi@11~=5n~4E|+UZS6^2@SAW-V*9g~0*KV%eT~l4t zTzk0obWL{^yQ*B(t{T@qu6H5Wa4mB!cOBwd<2uxJnCp1g39b`eC%MjZo$p%b zy25p(Ym@61*R8JGT(`R(c0J;H)b*n4CD+TYS6r{Uwz#&swz)oZedPMs^_}Z`*AK2A zT|c>gcKzb|)lJ_XPJecd@&~UGARY 
zu5wqq>)i9*VRw^zse7fn)!pu1<37v{?xWmCyU%y8b6?=T(0!5nV)rHPOWl{bFL$qZ zZ*XsPU*W#NeWUv(_s#CR-H*B-b3g8W!u_QCDfiRvSKV9OTix5-@4G*6f9U?mz1{td z`&;*q?q5AtkIf@_WRK!;c-$V3r@-U&1Uy4LyLfi>jPi{3jQ33TOz}+f?BS{Nggo`0 z2G4BI9M4?OJkNa3KA!zOhj`X_4)q-7S?f96bA;zekM04_QJ$kcCwfluobEZ_v(9sY z=R(g#o{gR>Jhymm_1xyU-E)WMPS4YxXFSh(p7T8KdBO9d=Oxd}p4U9Dd*1ha;Q7$= zrROWp&z@gAzZTE}OMy_J7H9>&f*}P%3x*Z!T(C>Qt_9-?#urR2m{w3)P*G4*&`>bD zU{1k;f~JBM1*;1VFF3W}+=A;1ZYa31;HH9`3vMa6wcxgb+Y9a}xU=B?f`<#9EO@41 zOTpHHZ3V9td{FRV!AAw37kpLlW5KTl|9Uwu_1e6u*X1qn`n>^fU++loDDPlo@XqvBd8@rO-jH{Wx6!-M+v08YuJRt}UF}^{@UHhz?-Aak zyhnSF@t)v4(R-5ja_@TY2Jc4i72Yeoo4lL7S9!1YUgy2hdyDsW?_J&pypMUG^FHr= z!Mn}-n)h|@``!<{A9}y^e&zkzyWK~97N6h~eO8~%C;4Qb;ePatg z^^J2c@J;gV?%UH>>YL%4<*W47`Wk%md<%Syz9qh;zLmaK-x}YczQcTLeY)=~-`T!% zeCPVk^PTTo=exjnp>LCKv+pY3)xH~jclqw~J>+}L_muBB-%GwNzOBA(zBhgE`rh+> z<@?&V-S>^}Ti>6)zkGlD{_*|m=lwRn>i7D6e!st;zrTNgf2e<$f4G0Ff1H1^f4aZe zU*g}(Kg(a~U*vE0FZM6-FZH+h5AZMZFZZwTuk;_|Kh}So|9Jli{uBKt`A_zr;y=}Y zw*MUeh5jr2SNb>kH~X*k-|v6G|DgXN|HJ-A{Ezw{^FQu?+5d|FRsS3QH~k;^Kl6X? 
z|GMBi|91cP{vZ6m_Hn*cDS!oon|;KCt=Lkou$?p!#kaCG6s z!bycw3(E?Fg;j;sg>{8<3l|hN7p^F5D?GUH@WSH?PbxgE@T|h~3NI|YwD7XRjfGbg zURQWa;hlvK7Cu?HwQyVEYlW{DzESvQ;n#)R3%@D+w(z^c?+bq@{IT$-!aoZC43GdH zumz-mBj60U0{%e1K>xt7!05o3z?497pdzq$pe_&!%nK|CtPUI$I5==fU`^oAz+r*4 zfx`nw1da?q;Ml+kfs+EK2F?ha7dSs~QQ+FZb%E;xHw11B+!VMu@L=Gfz{7z@0*?kB z3p^fpCh%e6iWU|%6)i0~ zu;}n2C_1+2xS|t_PAWRN==`FKiY_aLtfxvY6j`+WBA*zR`rpr+-G?d=;l zmD6-yryDpY=h7`8^wCF$odp$T!8xsM%i32pHZ_;EHmzRKys{(g98_F7Vf@&ViKQdP zl}(y3V*I4i@)1+UjTt>+OnGrxX?fYWVClpOM#(;|FE?Z}=jRH!09VBI;dbD5)CFDC zt-4K@bXiw6bN#sf+yHJTZXh>^8?4)P6)bOqwva+MS zxT&M%pvI2oMPX-AS@Yt?)yq4Ai&{Eb+d>`er|spfZ4Gsm?fXW|nnO)Xn^!b5f#_fB zROod-4RobJ=TEC+{-I0Q2{VpVh7;t@@&R_``yR9mF>_FUe$dS%nnP;)z; z^3*jio;GRm!if{cH;o=Qdcx>exLR&+u8s@MkM@g|jVqexM*rB{-cTK4Rl0%w=lZ$T zp_<6?kP+%^ZVor(^2_zYsKR`1pRm(Y9jaR0(YSDVb6E>(u&pZ_+tzI4=5zaU_4`(b zI)7$p?8hzShHT`*-2PmnUZn4!_ua@faf`TSy+4oxfWIm+0S81Lwolkuwzy?^Gkd^M z4ehM;j%r-AqGjc%%JNE<4^|xwIK z);3FbM5OzLoyF;-S;t()&e#%zO#EAC$VS=@=8oWoY~l{#)^LY%hjDAU!}S6BPWnK7 zkUm&v|KUhZ=YTtkJDNL2AF2=2C+HJ_ECten9c)CTNgCZG+P7-knh$DeUELn*JJBDO z;xHL@4ohP{vOUtqE7`a)vYj0)b{y2wd~nz~JdJ5M`m@r;RgDW%7g2jE+&++7ubJSRm;FkExfeZdtx4)ZD?wxFuQTjR&o7Y+KSC8@yuA8Hfg&M#@P0 zY1|puIZxMjUeBGW?}Ev6mCddXVFG5e1XOf1uV{ZIGWnd#t=qtz$DOb5s*lmf8v++{ zmvHV4+(q2Q`fz>32JTYsGJT{zN*}#|$y6+2!)eRnmgcsw)5-phidBtER~tGAy?Hu^{}=6$`*NE$V8=5Gzlysyq44Xt>-DkvIDK^O!0Bg3{9=x`aJTLk z@5jY$ZH=r8-@@I-)wAIz@>fIg4(?841i%UR9`3&N+`alFAd9zf4{#5153wIU%ss+A z$~`u(JyNf1M3~pnvVu)UjVo5|vnV#bvLQy_O`oDq2C}4=`u4!$`>TH$Ipp5$MyyY9 z&*4b(H1`bmtiHQGRiCzzd!BoNdr{v*-&3Ei7tW5h%GuGD5q9pBu{oHUWac)rAU+!5 z7!++3uVI2-*Na(#jIiF~KE$5=HT_H3`eaFKTcpSxOcg}CcBVb z$#61)j3lGTXflS3CF96=GJ#AalgMN;h3rOlCsWBZvIp6dOee*pgp`sp5+vnh2AN4J z$X;X?sU%gTn$(b5vNx$CAyQ8o$ZRr)%q8>4e6kPOmnQO-d6jG-Tgf)^8hM?( zLEa>9k+;b^NJmoFCz>B<TPCd{aF1t{doNZ z{Y3pF{bc-8J-8}*y?oAq1tTlL%Y+x0v2JN3KtyY+kYd-ePD z`}GI(2la>ahxJGFNA<__$Mq-lC-tZFr}by_XZ7dw=k*u#7xkC)m-SclSM@FWR(+fP zn*O@}hW@7hmj1T>j{dIxp8mf6f&QWXk^Zs%iTQ~yia0f$R;W z4oC<{J&*<CwL%CG)aod37~4^@h;^G#Z#<)KFVn 
zQ&%4>3)fZ_*O%ASRmD{Yp+lc!huOuI6=mUKbMZsba40EmZ*T6vM-GL8_2Js$(pllE z;_8a>V5mOK)(7F*x?p+5TqB%a&}n$wDOgp)V(4@Yg)6F=M@?BnX??h)p}MRxXn2l@ zJxHv$V0CdxWiVV*9WD#9^2)+>!B9;@U1>1>AfwS?OuVQxmaH&VORSD6szdd~m6hT8 z;u&%Gap=BlR(E`~V14P#a7|rVu+9i{B02_0blQ$DIo7TX&8sq3%#;jfH6?qQ&8DK+ z;52G8i|erZ>Vl;;RkfjTSw%d6J<+9a%%x@(?&etilr&USmW5{**Hsi(*Tu_)`ds<9SxVW~0HNwhyMxN@? zcWkn6sI-`wvKAI?qha<4K^8_$9cy}#(Bc&`2mJ>VJcQ^UYGIp*l}pyH#YYW?DrT^P ztLrQ3D}z<+-*K1u=rTN`i_tN{bq!c(ZE^ifBiIG#IVj1qrhaCyE|T47k>!=9Ua>!V z3{3K1IhX#Z(V~&a;ziN*HzRes4hmM9TJR+E9--G)mY;* z>S`Kljfh*aS=P*9jl{5Aj+T8gXjYlpPAeJ?%V1a=tgEUB;j^*o!b&sb+t90jf>&9v zwz6g(R!z9FCfZO8>(yvIB$IV*O{kuwjg4I)mO;bk5cDZZ@G-`zgp?eX*)Aa=N1$Eb zw0fA1FxH4sHlS@mf^8YAHq*Rv3|a&dER5*PE$nzS8=PQPRa{yVn#+dmc(&?`>t+Pw zgT+bcG9tmnXu=^jLZc}gaLR(SD@s|=#Z?uZo$u7tphB}MYK`&Tc)BytY{!HWN^5H8 zg==RPhgd(u=4qIoji!SVOe6o@kc36 zHs?^Vwm32-CPaK08V*fRuBe?^Qyr{fV_c@3Z%7l)>}(EKqC;YXo>R=G!fZZQq0dfa zW^>~rcJ_y(8q2vcB-b@mha1?kp)6Q#Wb#_HAC$p9W*cd>@lJUIdh{W)n%h=3FYnKG zrmL4XuU*SpHyioF)x}luQR!wh+#!P@TMN|1TjOonBt!Kz@s@HY8urZ~nKYT+gSLKB z*|Kt3WPFdVbl9kY)06S|_oLC^PNTAUl@%pyT&`mMtD=O>f%Wr}C+UaKrC*9mLtPyX zN=ZRHipD#djM?NE8}_jW)S8B(C(v|grzx9%XR$e=tRlpgkMlyf;G0p&x&o#+UKvlL z)9_9wHXDb6b+d!46&eNM`YRO1)f*e^JC~r(B|QdPw?rS_7-7s+`63z*=qx7HGF-yC zwUO0VlC*+uK(ZJVffDaUA^HqICb=>AH zv>DQAgUJiesi>dHMjH0$#dT$2RBGiMw#p3G&#R5get;$gohFer zXk?BuqW>5zb}(5)CU#auhsk6so!zP-9LqXh|MHY8)a{cfu=q{4Y$}InpUdHxvwH1b@hUS6Jbc9&7 z$6BeW@lh9=nZ}_+F>^bi1!!t&f|16@s;r`XUf8sxqyD5o8>-_~m!_N3qNG6Q6xT(T zvWXpl?ucfle6sYL+IDS1r2Wx$FsW^9TGqHEGREWfVP0$v&NjJ`$Z9Uk28Z(HS4(4A{QC(bT>^kTW^er;`vU!w^L2PJ_Onyd{?2LB(&33`L z!P18M$X+_WsWGgFqxFs%tXT``+}G1lXu7M}v@>d!dmQ4U8&Gygzy`cxE|UY!=rS8$Pw@Gt}&3jNHchue_#` zt$ob9QyN02oy<<1;T!Jeikyx134M;$n&~7d%-;I5?Q?lWe2k~_(50Wb2*V{>lzGue z_eJBvbjGIj2o0mvpaiQ(-^ED7Y-NzzCKjScLPBdBO4w0Dd~KX2H_hlX%v=g?0$9I_ z^cJI>U^P2A=v>FqrRbH=q36`Gaf#(UrSC68)1f5Pj9--WuWjsDif4S4b8xvAn`|1` zaxQVkS&2@Albkv`Xu3)2Ky=wD$tBt-jB*m@zz($UmtG~oroh6Xh7xv`7MZF#7i9F%bV{+aoBWgxN2MK-Vq&R`)i&#!MyJuyGPw#O zNzam>qtVGj>Lb^N*wIlvp2MNe9K|@TGvD&! 
zipmDIbT^_p5&Z{|+0jprqQ_IQ4F>Bwk)>VIKzs^%^iA-HSwyC`gz^1!v>lw;Had^9 zEktRh5ztxa(m%6HbZ9iJ&+WoG)KG4k*4AYV9ml)SJhq^%GL$bu+X0#5h|GVTL(ipX zUR}J3-R0_ESvk9^|D=&)`j6;eyl4@7gV5eT)X44+v>e*p#`ejTYTjXYjpl9gOGqioC?;PM*j%nz+zoifl|`2l;q-W<<6b#ir&OS?q*3a{O6}4_Q3J*fY>; zP-#NfN_duyV}lc3`WnOU z5%e3^wI2pi7sO#YHqROYkE6iE+yq$du)Ss#>tH49fEZs3MqcF6r%)%v#pRe47Efp0 zzKkk4bn{5(j2*nBZ*^NxX>4xsG*s8suy-t#H8r*I+`fkXIrQ;pZn1*X<=3dGH&HE3 zpIqJE(Ym5P-ppm!8{7JqFK=AJmP@fWDG57HGp zRd`l#9y>TGGu*#G_tff5bT19#4A{U1|M-!a;r})I5AUM*Qa13gqC@l8d}+Lirr)CH zh%P)Usw2yFwsbcgz0n`gb+;~DBL`TKV?JYWPCn(MKch}+M@@YASnk*i#)i?_&cmSJ z&^dLwN$bqUUe-O?lvP_(fp4cG$BpyCrR$mlD;6t?q;~hT zIj$`Zv3w>?4;C2}Qs<>ah0siP7lX}l4e=vzi;8~Jy2xI1I*Oek8}GQIdzsFPusBgF zbu2bQU~A6OU3WFsf~9lkX6$Sh4=QEutZ9^@4K3DLEk5+m+;0qj9CKndQ(9Zgwx*>) zHsy6LA}s;b$y_ZNbn4i_lM%@ds4zccdYMA>^^~U9vnS3Fglf`AC{B3lctsvd+yljLwv1i#Jv;hP01tf1ig4zgHdMpJY-^z6)tY5 zufc)Mh-VlIm1Yx)etN*J_xCT2&L**l*joTr%PCd6D+;Dg*QUmrAQ-Kj$WR**WXIub zab)ZfEF)2EkDSAcs+j|1>s8}X$Dmf~ay5HoX018F;&|T1qgZNBV3*uiwY4_k_a1od zBjz33y+j_cDl{V$tTv`k%Oup`l@Lo#)+ScVQP3(Gbv|@3E!9-@Vcayl)stm`L9C5X2sFvDov#8Yu z%h+Y5ig+DNM}^cnNUu;IIoL3Q!1Ih?U0qFGJe*P#o0XEgm{>dOo5jnQ_g@}c3Yj0W zbY_sAgK%*PJ0N7WgZ02v%Ti~g3e>L5Bj{+Gj?_(A zO=;wa%Hn4vmIl-bb}y49>;)=DmUz%*nTwJY-IFw)GmY#% zJ%|(+6=@{*L&>u4MT#rP)Msx*#e5pQsrgh?;mtZWYDBKMrD`ui?fULTTisAu8F{-^ zUxU*({xwbXOR$7c9wo$^YwQYXk4)39xUEdGWx>-SU^6lEnYx$OsEUi zH?ZN=vpW!Hz;2r!eqw26-btMW~CQk;e~d&$CHkD8JbFQFYv$Y%u0 zPy(CH>T56`api+hIiK#ALAg3O2hYNcs#}Aysi!lU2OLw&W;dIow>BG$V`9tN9zK6$ z3&#e==w6(i)W@IyNL0>e1dl&|tlzPlMRoDVJ_^;k+37;;xPo=)ng({C+vwY=-TGK8 zp)t<}9G%tK;yZQ^ioH}bGJ=gEzUWAu?@z$u7Id#T;~|V4V&nw`UcSR^Nor9iV^LLk z78RNO@EM}Zi^%3Y%tkGCE>>M{T#B=thSJ^Y_3Xx6^twA6tJpmawvvyxoHJ20bs3+z ziN{+`L-nj`cFU9nSv-T??=kw!IVf13M*>WJhMnRC*>$JLpl!P2Z8^UOK}Ht1M&%hn zUWkI->Msq|5kWSw#9xzGEk-kkRn7X$$H5${oLZorC{T`d-af8PCPxRf8X+3^*mya9Ki>D1YKoXp zOQ)O>-F3M|7gw8EQO0fu#08DE5^vErqF&Y(ofuto+ODXMemGN8Ud}GQ)ReQM_R>1z z<%8uG)J#2@$&x{KJTrQ3=gazdbho2Y)~PSewmG^fh`j5@OL?X2jJKF=1v}rdS?)sF 
ztXrf^vf;9Z=+_d#Fl*B@Ys&B?L#grMf#qHl&br>pBpeOY7>|qVYw)!fTMMvD4E3Gw z>n#uTtN>#f6BC{nIXh+xrHXjQAI>i|@pi^G#_S*|I)AZK<;Vvu^);n6#_LwgW8IG2 zv~P@ODY7$-PE$tDcoGZ9I)fzSJ|21O7=qmoh`!!UsL*FnH0$~{y(s&{i@nNc%Z@U3 z2_q_O#Q8i*W}TE0Buz6&ZCttCfy6nuV-1_L&cfhRje##hyODxs_-R~={lF?LsZ=@Pu1v) zKl7V5%O|L_N7p*Wi6Q%>EX+ldSEe2!XEbJk*mOXmJr*~dKbvGse@n3Sgmc2M@#BQEk2ij2iqvXT2Vwbe8F zm$R3%c#vhfm5VJZa;=8__k`;gmS53-WLN$;D6(tl%o(pO8%g*BedkrI>|jJ6Mx%;? z71(>>5W_yLS=idxwkT~|XSdzi+tzp+XDjq*6TnNrr48(36LyP&-T8?>;@?Ac zU%aw$`5Ja#49~6cj|>eKJ)mE+b}fEf6`ddPw3;oLirJUx^Vl7-Sc4V3s8o}KQd*VB z-Fy5LJ0=@xwee4fg+dgc)+2Ev&ORtrmMRsw)W(uf(cZoq-$z8lG@g=e4;bqWzj4Gr zuzu8eol)qEiY0j~Rt2kS>expItcV$rvDJuh0P0odt%o0sRQTLC)oM;s9lMufWM&RZXB#xSu2!=& zTXdGz8n0S~`6yo9Ygw7iK3FpDDF_Qtdk17Sj$rK4mX*zm)~>CsDGS9u=IVrw2>YX@ zKaFJ+8i1(E+k}N1kVY4w;RuTa#>R_b#TcCo$)CySz>UIUVF_2?l^Ofqfl>F8onxRj zp+z_#%DQ9UB4&h`8OAGx4n{r|T7^}@fkK;dSqY@|N@2BdkZ`bYh_D98 zDj){}ITXlYz;FJ4;4@|+BBsMR8a%n=JavQ5*mWL{5{_ZOMd4^5ZR>?&fwX5qE(#|y z$YE?n7Y|`k{g6u7%Wk^k$YOW8HlbkU>0l?ZWV45ZfC?fAgn@;(F^Cq z`WyT3Dr1j`gNqV*h+SvlKFq@XKv=CCuteb@;YkKd6do2H5grvD6CM|y0CF6VYsUi9k+c_(TRx%nl%AVHxc^kz)A&#Y;3N)?hDI zCo&$Rjblhvv4f#FEr}5pg?A!^#X{j-;XUDf;RE4A;UnQ= z;S=Fg;WOcL;S1qQ;Va>5VY~2+@U8Hj@V)SZ@T2gP@U!rX@T>5f@VoGb@Tc&X@VD@f z@UO^;MC3&(T0}tldtQGeb>%@>)FE)s? 
z#W~_!ah^C|+(+D3Tp;czhQi^qt^ipPn^izkRDiYJLDi>HXE zil>REi)V;uif4&ui|2^visy;vi|fP-#0$lX#EZpC#7o7?#LLC?;s$Y}c!hYSxJleB zUL{^FUL#&BUMF5J-XPv6-Xz{E-Xh*A-X`8I-XY#8-X-2G-Xq>C-Y4EKJ|I3QJ|sRY zJ|aFUJ|;dcJ|R9SJ|#XaJ|jLWJ|{jez97CRz9haZz9POVZV|VN+r-zz*Tpx)H^sNa zx5anFcg6R__r(vy55|fX=Kwhu$az4{2eJ;x1wbwYauJY=fm{OQQXp*Dy&TATARB;e1i}Wn zD}ihRvKh!#K&}RI4UlVrTnFTOAU6QH5y(wI*bsCJkXwP=284}2cL2E)$X!70267LO zdx6{sgpCvr0C^C|LqHw|@(7Sefw0l?aUg7vcoN7{K%NHj43KAmJO|`?ATI!U5y(qG zUIy|CkXM0h0kRdyHXy7cy$<9JAa4SB3&`6*-U0G1koSPR599+N9|HLZ$j3lF0rDx3 z&wzXmsVm0)8~`V}KtE{5asp13v-yiNH?+elqY=fZq-H-GQG9{50VA0De#4rvqOM zdj8Nkm3z5@8YfS(0?CGb_iR|8)Id@b;M178Px2>5#78-Sk;{2but z0zVJ<`M~c3{Jy|10DeEioXz;^(@8u){NKN$E!fL{arp}@0c*IM8Y2mT1)j|5%^9)Ldz_@jY8 z2KZxvKMwfgfj+M@1^82eKMnZPfjP4OXH{2hnvt|^d_&%MOI&^RmF zQT#Wunofo=CZZj!JR0SJcC-rUkOSJ$YD2?3(T-LHow|c|v}&;j$rbHrb)mzUtZc^E zT^Dv7z>YlGoj()W(OQ7+-9S59{pi>w+R<8+!Hk_ynGkB$9nmZ=w4=2@x}+=(qGXLE zw4-$(+U9_Ev<^YTZlWEnJEx5(d9e~{_}1ZQn~^aJ6dO;Q7&jl>t5L`%_Ku>6{AuvijuV!4f91iT0`iSE85XI8?6&bcqYtVTC}5e9{O~RcC_xB*)Aa=VYJH) z?Py(yw%O2*)@HQm676VRie`DC9j(jIrCVr6>&n!i(x4r!2clU{Xh&-Yn&yLcv>uEe zU7;PVhoV&;Xh-Ye=#bc)J5gQuemQoGXw}hhXntr%>(Ob#$%b~c9)}Kz4Z16|qxD4e z$sO%zJq7LaK|5McM~@uPj@Gl#Fc-9=_1tWdCbXk<9UA6_cC=oEwkD8TI<%wpQZzCl z^14PlTGyjXj%Y{g6=-Zis^M!-cCi2?46x zV58CAZ$je%odCI1OEcQhdTWw)Hs+i44m8e!cC_A&1|}vUE+~`Gj@J9oCO@>J^+EK> zigvUqlsBYU=Ezr=3Sy4 ztv{fdX&lOocC`MCrluyC0qto0Eh*6KXh-XxNrA?$@Pv~(z&~ha$|p;|sck3F@@xcc z6EJgKp&e}&bV)!A=8SfnK zjy4ZE=8Sf<`BFma{H#9lLrYr#O%rm`2`n^`aBMrET|(YF?V@8Gd(_U)qRl6v?aF)WNwUG$Munj|pgbYMK(N97<+IB^|gjUg+fzF_eFh-(v z0*SCow4-ed`s9grw2eon+|iD-?y+R;{lUI`sK6WY;MiKfY1!7k8_wiqWr0oDS=spY5wgUb0VL{qfp+{~kNLxGF=EZ`v9fU5qvLJ11y0A`QLE6@4 z3?0cz2`ot4k!YJU3(|HJnx}p|-y1AQ+p(yf%FO5%3(|H%USiowZKt79k64hlGf^V-W6+df@N=m6(lE~FNn%0T&Ox=*ZzHp-WoALz&PTmebV^!1 z12>kyg0x+j$Mbc`g0x+d$MfMEn7Rmf1i#Zu%Yw9Bj(RyXQp|p=A!KAh+BTwCDsw9( zSxGEN+a~nu0SnT0H43C5^5O4tGGkZqDKfAiZP%ejDs3Po_~;FH^A|Xn-DDP|?M9SK zMa-qhVMxg=NZT#w+*1~$?RHejp_^j}2Aw}G3(|HMDy8zr(mqTY7NqT7^v|J>M{^tb 
zDlQFc(^xD&fNE*_WP1K$bi0v2U$i}(Av0MO(y<_IkD*eUmX}`1*aRm=^JMzxF#uOx zvmkBHphCAr3f98H;_?ssDv@A&5->8$hS~BRESdbEj3iDGw z*f!;)S6Gk|#d0!tQWj}aIk6U*fdwgvsMli_q$HtCFR&mbI|`+KuW!m@{OhfpSdfwf z1yiT%OoGu_I};01a-&-BuplKbYNak$vqu)I&d6tKrcdpqLKI8w2}umcm^Z$^VVrD}84FVCi}IgP&jpGZ$i9e5{?37OiYjD8A%#}+WC;wqCEsxf8newNNF@GXRPZyS&-7WeB;DH zKN4jZEJ$f0DyMdx9I~6hf|RD9PF8wJQZ1P>nVtnHO-0G9M4rr&?CabRGA~m>h0>lq zh%`M5QYt~oe85$vhfm9bl!B<3Pop<08fc&NEJ$f4YS*XC)|so^q@5EBQksP&WJRAO z)m*%}cJkHgB3Rw@EJ&#u3&^Ln#S4gz38^edX>XLw%Jj(;bDVUP!h)3Q^G$V9oQ8Bp z7Nj%>OUP#g%TNM4`AB3zO7l@UpYE4IIgte^EkN1S)0v!FUn&by+P{a-Z;%gDSddZ^ zD(5qT$Dcn93sPE)YTfK~Ik6z67A)a^#)6cVV{!iz7NpdQMg1o%NU05_yVdJ6vmm9_ zD4M#A&)mf0Ehjw-QaS_$vy$_Y#wOE(^la4^aZuOTmEJz7Z zEuSgXl*34P$t+0en4ZRGW6KRWC zS&-5hxkVSRgf3W+(%Gn&wM8dJ7oE1VvLL1NP&4&pCQAl0upp%iP$}!wm#_~v^|t8m zJxtGnlrBcutXrf^ve{XX(q$-|b-k5III;_lgYq)5Af*jGE5KOButf$!CR16E(v|t8 zCf?4vVnIq*bvts?zA>JqR2HOkEf$b<21&?$Jo4Bvgn?{KfO|?eplH_hZFd|kDc#dMMPy+?O7~+CxpvTQupp&}uz+q; z2-C12rAJY*w^@+V6R6sA7Nqnv>hul^QhE-xdV>Wiy@*QPt_Aa(Xk$_J3M%%D1u1Ps zjUKWfrPooU`z%Q5E%fgm3sQO)egAV7r1Sw6(|asP>0{LEH5R1w8Orqr3sU+Lm3oB* zDQ!oo-eEyX-=S8wS&-6?=-vYsr1T34^acx3`W=2hcJECZs zK9hUINi0aYKPvV%3sN44sy(W#WEP}6q{o4#VL{3}qiULt7*A-|WWNk7NO?F)_c{ww z9)+_1H49Q6i>3UBEJ%3*7VuxOAmz!Z-m_{o0~<{&NO^aZ&NgUt-D1qrY$x8yf|U0_ z@m^*@#@KT5bh$VRQZ#;0CRcDnu8@OrxjaK=n4qhHzXtegfxqqwc`tdETq!eH(DlII z3H)6^KSqk%uqbaX#-9r=YGFY1k?pOk+nSmyTiQEDwy#>=(s58r^TF(*$8{GGiUxm<4Mn@58W$^1dS<999I!acw} z$UVd<@}cr!@>&KszoYYCgU(*X|4n%kFkw5}ReSzgJ0A?&P{jem}zn6cIf0Tccf0lodf0cief0zG||CIld|Caxe z|5Z4JD7-=yiy|nZVpVL4q{xb**cDaL6o=weT#8%qCs4P^PltoIjvRGN7ELB>R1C(XTa%F|GQfXCIDF-TT zO1sjbtX2+E4pt6P)+mQ6hbe27!<8eHBNbf%y+!28yOg_?dz5>X`;_~Y2b2euhm?nb ze;W8_fPWVF=YW46_!oeG5%`yYe;N2!fPWSEEx>ODejD(w0slJiZvg)$@NWVCHt_EN z|1R+F0slVm9{~R$@T|{$4E!g+e+v9(z<&<>7r=iB{8zw#4g7ZCzXASR;J*X@d*FWn z{zu?{0{&;Ip)&|08- z1FZuZ0$LBW0qAU?bAZkTIuGc4p!)#b7w7_@`vDCD-5+Qp(1k#ofGz^s40JKjB|w(~ zZ2@`!&}BfE16=`hCD2x&tAHK|v<+xG&<>!hfgS|(V4#NpT?6z`poaln3-oZHM*uw% 
zs16i>9tHGhpvM3`7U*$6j|X}J&=Y~41oUK}rvN<_=xIPt2YLq3Gl8B3^lYH#06iDz zc|gwxx(?_CKraM(5zvc)UIO$|pqBx?9O!zW8-Q*EdIiucfo=l28R%6&uLgPz&})HS z2lRTNHvqj6=uJRx26_w7TY=sN^md?k0KF6FT|n;!dJoWhf!+u7exMHkeGuqFKpzJB z2+&7?J_hu0picmO66jMvp9cC2&}V@@2lRQMF93ZJ=u1Ff2KoxnSAlK;x)ta)psxXa z9q1cC-vs&=(6@oU1N2>>?*V-u=m&8&q4GGc33Fr$tRyLvXESmVUvk8?K&?pZ! zq4F|1ev}+IK?hM3h@sY(o2B^y!*S zXdjl@E+HYiqFrumLinc8g}(vgu(XukwqOlf2v4=1w;?U$pCi42#8 zO=#bUwmoDM+Bcy{K5Rn!)##Bso6vq;*Y;*Mq5Vd*H#PO_Y(o1jXl}|_=X{aECbZv< zwx$+f)PR}9X1^<`{OoK(`@Lx1C7aOx0GgS`q0DST`@?8zYJwTqg!adh0?p1Qv_F{? zsDWijWE0w-K{Hc6v#<&6&!cSuC#@?sq5UOvNnqsV%qFzIioOXHy0mOU`!=*opwO9U zgY5U_2CGBtHx|aOjoIHo>jadU$vXOhzhU||ns$RtXnzkKb7m9TKTHWJ6PwWf37RJ4 zq?5R3ViVdwN4tc)ciMHqCbWNrzIm_-?cZc6jAfKvZDzlIIaE`}enDz^JDm9-366$K@F-S z&@#CSx@4XRVoHZRnmx+l8iiW*c)y0A__ z4XVM6p{Jk*)tP9UGip$sh32W>X7>haP_0JoR3t+RpDTKCGy>0Kzn_QwzR@ang_QmJ z@XFxqU}bE)SNG0KEIVpYtw*ucud6ad7CRwMhZS#WnEhBo z$cP$Lm!nuJ(kUfbNvJ`!75#dE8dTd*APw*>57eN#8Z}ZsW={z|+Fi{bc%(xOs)wLl zD#0v84ilS<8dMKM=boYl)gw?Phi;A`WJL|CfJ&)&tF#Z31~sT2gZ?@6@n~+lKn<$L zqgt9inI7+#1~sUjlp!-&71E&w)l*R^P0LHKWbA1ZP=o3j>7U2oCv}Y)RL@3*Zlea( z^UyuDdXoakh#FKcK>w~-bm>ro>c!~UE!3cT8M>y@ZW4p;8a1eHK%LZ%ny3>S`jSzD z>Xqo6I^Cpo&H**3UX_bp?x;caTGZoLG?~lNS&7w6;e@y>OJV!E2u&Bew0cbi;WNrFkf^4%!V3NA3~+foi&Y80%}lw z6#X;z8^hl;;ip9ns!yO!=4#2HV?qt8Pou*86nK#-CrJpv=ojKhH;5de6xXr!a)dJ< zVt)vCoWiq?W7MGf3d-~XYEa#ZLaCpFoAMa{ zI4CF7p!zxrrcT$H1fz{M6KYU>3)OlDHK@LeTB*y`?2*N)GxF`b2{ovGfMTgVAz##> z`Z4PKXQ)B-Gn7xQjTj3fPt>6LC90)%+bn8Hs6ll*Dx}sydW9}fgX(uEmX)KCgu}sc zDS}KXof#~h6%H|uaOq6;i`VN(FQ}als4Ln-B4KAm z4XVGRa>lyO6E&#*m2aH!~uMy2DPzR!v734s7=7){wJtGZ88@1pP&Y{-BG$*y*@K)P}>7VQE#VD8+%$77ZnMUjks6nl)2SKJo4Qew`uv`5l4Qf!^3)S+OQcXEbiyG9bdK#Y@ zHK^70G<`{^K`n%Wse4;fqsa?3sLk&3aT8&FSy6-9yxgLTS3(!4L2X~u%i5w7qssv` zsD)88^<*YX1~Z@rwS}mZb?Qslhnsp^Hq@ZjjIvp`NSS1_qXxC5D4cb@l}R|VmF^NX zs4eSR0md>WBWh4vnO|z+?W`-*pmt!lBhL*rsC8fgS!a-h+{Yu&h8olkM$xS6+w`KD zQG?o{D4BIqN{}?Q=|~$%K@Dn$qu_sy8q{@HK?71I=zD$)Xqh%-arj%>rkoNwP1b|9lI@}U4)7~ 
zLk((|qDBu+TEztZPcK4AG-GdHK;v^0=q0RTYEXLt_5KUgp!PB}}Mb_BE>ZsIrn#gW9(}4m1sFQ2PN@({#jmLc0dX zWk3yTKcjT7qXxC#Q1-t@4QhX4DgPmAQ2Pf9_^(id4ua}Et5!1)(1aRvSWr6KpwV@U zF-s?)1|3!u?`70rY&GwYQG17m_U5(`WzCCQRyHqMySBEbEEFzbSEkCs?C{92RMFC( z#xe>skvYZT;MT2oIDvi?N8&gl@7+>RCC0TZ_I|ylEEuECIeZR(0`|~R*A&#MrVUC?0 zyEt}r40nuhjC70w`UlWIf&K;bZ=nAG{TD18SO{2nuu#45Rjz_lxV^YpT%}{Y&gnj& zpRtYMAtNH3yb-IK+ZK;l!6Zf|dCU0K(>c-o}J zO{1F|7fxE#IAye1a7tA0i|z=H89RBxl*yCFCJ62k75u6jf>Xv$T0CXp=t<)e1WTiW z+q);&Jb6)5)8qufnNh)SyCK*#X3XMo6Pgw!3RXo0zwd_Nl(CKDnrj-fpDGU9R7S5~L%6^n!>GdPfp^O}P|IBZVR@g9<8&fDHwagpQ&J zQUyeiDgpv_#fn`&*t`B;yPHdaME!iee#-Ch_U?o#8@Q^Mnt z3Ys{TT@Ij8yy`Q}O0UnnvcSASuud0O=sW|(JIf1$)ItG{rw znQyCHu|hkAjf~xDzODKTcbFd$Eg!cuHs5JpV7|+|(7edJ*nGG79`n8C`^@*7mzWX>v)EN1B4AsgX1_k*227)J&RMNK-3mY9mcoOH-&cb&#fTX^NDlXlaU-rg&+( zMw+gbrt7qfJ!;-)e$4#1d6W4G^JeoF^ONSS=56Nf<{jpp=3VBe%)8Bd%zMpGoA;TY zG4D4YFh6TPXg*{DotIbsjoB*k*1N-G*+6Dr72CCZjh#2X__QWMbb1?nr2AT zY-yS&O}9(a0%=+#P4`OE1JblinjV&>mD03EnjVv;C!}eoH0_qA1JZQ3dZ&0oZ#yP+ zyk(ZZQ%uV$Oy=vla#FIjPO-2kIrWC|=>^F-z8ZVRijJ(MzGY8s!`{h-nb(F##l*%& zMMXr%Mnp%)Ma0I%hJ{zzro3Yf5BH^~M*Fn(SJ_jaF*0=p!_|NBdr&)Q-|E^eFSn?G zuL$)Ow9l!U{3}NCbs1mRlzhe4t5$xsYKQ#dlAB<&qQ4kPAlX)mpk;&N-0P#nASn>Hm2ki+)&t|XI^n`nttf^dat9s+M%~EIn7s4 z*kNGekmB6_d3pKOP7aG=6rmxv;r=3MW7@(F$!R%Rxn$%|zc{)4T-U!L+c%|p>UE9O z8~+>n$k_O>@aXv1`0%KR*zm~kn8@h(noklg(qUCk5{uc$R0uboH5IvrPOOHL#MsJT z=xVLPZlr(Jzh3C@VfkaG&HsthD=%fDFO?>1 z6xHax`{btQRj=a4M#3#wEbtej*&&tB3Ko!T@8p8CDai%CuEl9tc|CIVm#bZGEX|C> zTmN6q-J}77Yv{f$t&ODHj<;;_=lj2T?sgsahi7lsVS13tZ>_4=K&VlYb}aVtm!#?` zTw8k{oR?PEAwMfSD>R#~gsXA~Ug^*+TN}Y#^oI6s`_w8|YfFT2%=Z6>HttJJkBa!0 z+jw|#M0$L5wZV@i*2q*C&(i*4ApC9HxMQu}_Bt73m z`0$vT&*G@GaaSWxQGd_*P_5VMX(S!P6PmvmJiS+>&1jofZ5#cJl;Xx)QvS)5 z=xYY_8a%X5PI88?L**qbQU95S4*qxaD_+W_13k-NyNiNQ%lR}>KwH7qO5SJ@3! 
z3(im@P>jOk!+X0?cz>Q^jk_x ziY0?z;IO1x(kwnpx-=z7Q#WbqE=@gFSu!nImWdYbw|YubFKOy6O?|XqM{o}=$jV{F z(mgM=IES%NQdW+S8=stfzFp&@w}xdEW~F5Nx@9I8B&Qbn3Wg--X86YTrhZsOY0NtF zr5P9Pl<3Rma^*{_z8AmcaNUfz5;J1o2|RKl=l=6XQQ~7~j+h=57F)F} zw^!NlsFtp~Vjli!g;AEM_=@US zYFSZh5lSs(6k(M#jgqF(dJ)EyxT)c;dm3~}96M60*YK$DxS1oTN5;jp3yG+#dkk`_S@Pt;v0E`NBvpPnzvd$ z>+5IBFGg|&(o|R_x$ES>{qO9lNG>L_qNxU59&|-*xeKZjRM#>wsJ=86OVeaMxhY1Y z+M8OuaqZ~o;jwxHjS8b#g~vxmlT&{-gRDVJ{MXzg1-XMfq|40OG-;xJSt~azSB1;Xx5W%;A+tla!`8(sa|Rpy;3&vKS{#H%rqkdUA9BtDWYcgvD>> zW{sU59u^<31)YZnGe_~Sut@$@S)_!ZUbRN3cTgWALbpoOZB-*QbAQtWS!09-2Mw(? zxnV)Wi4e1s^QGwyJ-IveO1w_q8JjcI=y4*$BWW5@QQ9d{*)+xnrPZ3MFDTu}|>F8S0oD*~tY0@U{lcxLiG?(aUcKNQy;Bkw`OlLT(H?*i2Dkvf< zjt(-W;_MBYAGEO6lotgpHp>2>G%c-C_O4G4eD&dR&R!CE+o?Mw;@<9n(U(P%^ zW_ny~WIVZx3yaqJ115c|hICudQ?=%EchDXqpIjAJSIy^)PrMUfH>!ziSNUKk=-Hs7 zS{@Gu9SS-ebVQogO4B-NS}#o-Rs|gkdX79kFHMg~)1!J)8~?lKZL8;YHTcOm8<+{# z0uvq^RW&d#2fa~i#NP}$X9VVPX(H})l#D{T+6?NtaN0Ku^}vKjRQBcPgFexc`XK1T zppSw+mZr_p#E9TYY1+Cf=+mIj2+SAKv`w0}>q+hS?*^vzJ#EfJmk&&$BD7(kcHUH; zgFgiQT5DW>3;NxN%Pwhps%l&kp8n7@%}A7Sj=#lNRck%cvIbhOu-38Gm8Lz?v{#y* zmZp8Hto5x|8fiTvP5b?6{TBx%U5{s5bA3kL##hv>)opD=5o+w_tc|TrEEBCwrRiB| zI;a=nkkQTUy6XAYKch>EjnlfMhzM;|84(l9g}Nfn)>hVbTEaEmI#@%k?MXOHnmEgj z>Iomy6AnCbNZIwR9_NVYxM(UbRvY$ICK_Xn7cI-Iu~x32$EE2+nYE+!8fiKuO)pn_ z0A=lB+}%CTs$c&Di0r%!9z4ZFcI_D-6BQ8}6CKwhG%CDnY-rcmZaqSy%QZ)Y9E&)ZoCOVb&C-tI+~8$4}I z6#J}6)}f-GrO?{NI+EWZ^i9oA&P_`yNX{)xEy&6*8kv=w=9@Y=xhPYbUMdk+NE2Pw z*)!JB)-j}T!8+DDZoDOk@AdU>*58+#QIttaRbCsGoL%g*uI=qJX;yv((b=bakN$ZX7nf+IHQkyal4^aQFRA1g6))b)AH6K=4O;XV*}qb1 z&6cKD|4Q`owdnCg^R*hI*Tc$VP9s1krRnu*0ixXgBhRqT64p{HL%%mmt=#FJz%^7&U%Y=u63UER_krn+pY7hjAh=Irgx<2U1@qxn%*&^#=>)qCSL=$QHsHDDrR?ydeB2Ax4(`P01OTtDLB^PA)ig@ZV zDyy(Zc2oBhHEu7#Ee6>PwA}Nr{c{MWjTe#YTrmN5(`mm`hDdkBj%QC?qmHI^H)lxgaMdDk*Dl za@uh1kEnK0{>ZMjuII-lt!u1nt?Q(TtIj{9>B36u2J0i%N2TdYsWgy^S!*6)R8!Z~ zqO8eTMbn1p3vjyS<>Y7kv}aF=#W~tsi&2A#z6@=`g>bnq*=10^{;0}mw$)Qh$|}nC z8RzL{v1GM%i}gwCR_it^m%y*2=|^e$RVo2eQ7e96)Vj;c{qRcbQ`X&9E)!o%(>K!e 
z?Mmy@)_qp45Z_7D_tNx(mi3mGNo3hh5pl{B2|R6xJ-o9+{lkvPt;VNlh#w#7o_PYY5G~3exX2`l(bdlgi~% zF_F-97pj%&wDp3hXZ_Op)$oCd?RheoJSgb0W!!^JMMJ;pL>}{A{XK1k7ErG4Kkr#T(qPd5f#Vqhg0P&n?c*K2xV|z4})+ z7%`AVkXlzUta+;T_+ZSy#I8lWkWyUaD=fK;^ZN3UiN&d@K3|$I?M#rhuFdJua&&`l zTHz@ETYl6syQ9QZqLjJZC2C2Xl6u1iCidj9DO>VurFjKs8#QiHH>BxR&3HXA>qZ|( zP4AhVoKeWXwce^(mzGB1gRZoc1eUM_!r5N`3Jv3d>Ciq}VhMTKX{TGZYTag(k=0yw z9AW&`rzNCqQ`WjIX&WoA&$J7z+rC2>c^IzsB$Q0gOJaUKg9i*qwCm$R@eT6-}ro-U|LWYUb7m38D1R1L3>x0nYe_VZ2irRlvGb@9Tr zos$ak@`_3V#1$e)cttZ2Dk4Oz=)oerA!4*h5qV;gC=|tFwwNmxhzGL zluReyo#waF`i=EFkwl!efBDanAFRLgd$#l%KUsga?y>%A<;3_}DuGhr`uwd_>PV&T z8tc>IJ=48jI4O9r}xk{Cu>bbgOba&rKT)Kvw6jm4J_3v zvo#Xk&8ux8wx+hL%vaf(>kF+qNX1%W*H&IBcBwd};?mZNDjuyTuUJcDYoiYf72DtV z(4*=@6S{mcwpu)xH-?KEvW;`!cSPb&FEi2#|kv_P#U1#ep zTCTHov30d2*t*%e+j`h~+ImT)p;Q`4rLk0+NF_unO{H>`RGO``^|AG}^|SRC4Q$;+ z6GmG~bE&kDN=vC;Q^KX930-pi68AWo`fx_mc~kU9YJ8bDFAOT+;k++xxVEB1>*bP? zQz}=3F@)%>?NsBN;8uc zbdTy56&(?u5UM>L4UGzm4-4%YpO6sRvui@ngsAQjVbR@U`FhN%IWY2HnZu#ES(A!= zmA^>NcjvRYKCtoCl5LAeOHhwPQeIwmN^(J;!ty6j#)zr>z2uaHYHxX`Pv-`US(~t! 
zne9TRhwD!V!Xv}tX3jLu!7*ZYnO0xrHB|8n-nL|0HVe6JDYjHwn$2fRw`JHeZCSR7 zwi~3;7>=o+^E7yDYpH|v#w$0`4 z%65xXB1>)aq!Oi#L4&v*tH{6hGxmi>t<4up%4~Po?vx7MW5S(m$d>z7FXIlavG@FnS$ z>giom64tCs?uu$#uePydceQPeZLMvcZM|)SRIZguXQ^B#l`c~0y4v=rZKLfm+vB!P zQb~|XfmH63$}Xuqr4=>AScpc*`ehZhuc)Q=j6jOZZ;oo)Bq6ydHM1ut2Oo>5B^ABW zh^PMnY(@I~^hPba7|(G1k=S9|Ct9wx?X>N(J!RW%+hf~nds-@-Hr=JtLn=L`LRZHH zzt39RGnR?A12||qWIHUCzS=FM!fXS7`lK>ZDmNJ2TKV}qgnK_SuC=9Z`X^nA3u!<+ zQD(5(BUc-iP%gXHl=MjkUz*+yk_voW-nGt<{kd`S-?U%4t659A{JFESmv)DxwNY(E zN;~CHxyCrpI2|ftQ|*8WJ{AJ{Dv~!+a!fa#Q`5CXL~E17bTBjh({i@cwzJ%}+s;U( zf2r+7sSGG#*^8DP{r0A?YoV5iKftfrm?tf>y=Hq|Dg&i5sLb}J?VMBwOJ!(@yZWt9 zlC~_!zu$Yd548Q>7mrHi`jUt;+lSiUhLl8HyqAB6&ukaCPZ5vWzOenHg#IW|DoOlP z`yV@L|8tLEailMUkBtmtH9&^1;#fb}v^h)LkG9v@{THbWBcECl!&ljUv;8g}wR5XE zLMqAHYpGhJwv;cW)NXD0X3n(2QTkPEiq>b9=f|$v>reo8zCzn=r1rOZ+TTW%xGM)x z{*BFc>%|+}wT(wttfaTQ?2Q?!l8VRfwFlc9+8b#bjFHM%sf?4#c&SX_-OB%!7rMOY z!^_t~57k$lj7`nUP0z|G=H8XtgyH4uIfj?t4EZzF&fdYuRD116DU_f*l44gEP7VINhQgV9pSuFSzW z`vmPUG}J+*_GGE#R30YHo=M}Ph5VwGkM=RVBx0pK%RW*2XRcHxY0b-jh+I3Ltf*e| zg<1sjDmR&I=c5(XH<_+&l3%&WEIS{iSVK)y-_$zwt=F?w;o6peQs4f(EU+)CrCR)3 z+-EOkKw!V$zQq23{XzRu`!f4O_T~16?JJ~GD3v0q6ia2YRHjH}s#K;)z3m)hqD*E(_-pJpF`Df4y z6N@-Yx%=eDzG~hfRr`3r;Jm^tt??Q&FPA2(T{kLb0?O}qjhUv3Z3k%Amnt_@6>nUU zw;_2``e;+SJ_e!M5b)A1nFPyb#CquwwI04=b1hTG{a~Z2FIOB-dtaLfyeOl6GIHso zyCoO$M)MkXWM&{cIls`C)~g_|IKQF~+F{>S+1Q_w%B;%9zSsVA`3H0C`$awb0sFJ| zgZ4ue(<#;FQKV8LmAj-O%Rhi~FBMKJlW@wUB~-yKMj1{)s=;GW*9O>8$-T z`{(TPh3IF$VE@wo6|+i=6%(1eDkPU(nMcmmE))8kN`F4pnLwCs&?>n^SWHrxZ@J7! 
z_!~PPR&!O%SyYe3O8YlN)B<>eWd5F0s-{*1yrrR{;nIJE-0dH8jn+?vdG9$1BaQ7^)oZkS5)R;`l8CePiqa^VYMHW z%H1^I^0w=6mJAqt+QB@9_{QOu%DqNXUPrM0D@SW_-ZIf~wS_U`LsD5z zXQ>S$`LLHhpQH`r=mS%4(5BQ2jOD@l$cpB{Tl$b!eu)gkBZhh;~cL zdqz*9C&#_kpxo?fd<-L3`=nAfAA-{FYO0M@wLVF^E7P{@Lkb+bh^@<0^A|GOaSe9^ zju=O*BhC@;=qQzkrNXs>`-d{AtX%Erf;IJ2|T$Pd_)xe}~H$tm-|#!(Y&9V^tPj`d{yw&1sZ2P0Ma>&%A#bQ?U>xY5%JmdSj1fsaA5KKs_#pSJ&sWWL=IHC_CzUl)Su2%w)km~6_=>qQZOL*-ixwf_MuQvd zxSrNs-lk~1D;9wE9n&ag{fM+MTMtMNV>7Zsp6IpL*jSlqJE!}af#hB}L6gW5mv>PpBwpzQjGMbW}=HTS4 zf-*;ub`t;LHvXJVc1&?FqT4K$Eo5XwQl{3>Lc;xT_Z8>0x7TPRoK5~0XB#hW(_iHF zty7vdnLu9kPosx$hZRz-9dx$M{@wULqcPm#n9DCrQi5u6t}G6t3rML@Vx*gLtcAe52jpQrp^V{!`kxq1WbUwBDdFVVW_*(z=jK&gAl4E0WVT z<&bZ&c^zc^RtJvNcUR4Wd`3-VRSozeOV}qkV-QhapRM=ScADhII73GuUjxAN+ z*k=60AMu?I7G0Ik894~@b0uygwWr0Bm5zOmXB-6ixKvI`<#fd)k>jA_NcBk_$5F>I zsXQ;06WR^BHs@Axh0rFhnsu2rq3S%3>@~NatKhJsTRG#At#}C?x@DIlY$Cr+;9A7)W zaeV9e&hfoexM}6C@eQfGDHTfnmQ>!B$~#hdcfI3B$4``m+at$sj^CX^n-G6bDnC#% zZjGecP^#fljnJmVM`i1?qs9!3w$j50w|*7p62-?W3(~ZyN3FERTg)E~GajEB|4uAU z$zfnQs>+TQ8+-6|BFV+sMf!!$n6eBhe`&C`JD=&yEvnwdXy=J>o%9t{OjOc_v}5~U z*HSv`ywcggX%_XI7SYdXb=tV%Y0n9biC(QJ1NGY(SB5V)tGLj}m_DGGT4cQA&Nn`q zamia!`9`}Qant|(OHPN=$uV3`x6{LMEY61Fg0qqMW&}@)^-K6Lp2Jg!(b|BX{~A3& zNLu;Tv9^wMG*9~dovL=MZ=~{_RL)nq*gBgyLmcCrR|#4r%O5_}FODCS^yR0-YQ8oa zm%;WGv)5dRjA>5&1a`I*yVp2dIa@p1INLg}cD9qsM^gDzDxXW`3#t4=Di{7nzg^vj@gfOBBYOO?u;gS0DNO{ zw2p@28y-qim)~l2%4fTc&<|t9lGCa+sDO0iaHE`~jWKCOZB=_vP`wS8Imc*O(1x&o zWpm>Q%7@emN&f<>yjowp3V8k~Dx(od5lNXA$@C&PmP!Co@66O651H{JzpzEY3Tp zNL6V4msjiE+f+NMKAcYE$xd#D{@04d2s}yQ%2|Ee@(JvVU+?A1%kdQzOf#x(rdD;c zq#B@|9L8fwr*!hgB5R#zwDjRjQXs^>V42q^d|&m1>|=uUPB6-8tWR zhx1O(r@NdBrCLX-b*0)ssxIx^Qr%MZl(@%e0}CFoCu?2DXd{p1_XY0Sx)1HHP=A0` zev6>ZI53v*kE?5VFWCQHkEwK1+Pas@5%6gLdqaoxCl!6ZHnn_x3d>MV8gVI}7(sn(OKzSK>n+~r`y25vr35*Zb*ugBMye8)s zS+(2S;C!UK+f$uVHT%0gqdGQeo!Ar3&CV^(C!JfJ+oWodYLHZ|Qng9dE>%aZomlyR z`M=d$jSAInAIhINx2)*FwI@?XmHX?Uz45tEZ9QI<2l-CpinJ1 
zt5rMjsG`3wy+|9wkg#^mXW^9o&x&jKDB~R0Mh7E@WMyRP@A})u1?RWg(Y|zk<^0pFU$r;&-T6K)p0esIX7dU-=bhCy(Not>Gd+qfA$G5*Jg;4syOfHt zuZvT-b@|xWRflJzuDUK-bQ`I*s~GmW8n}Y0k33yg7tOG(RIe@xs|aJWE;qHVI^J}- zUBMUa*3d-isW{Idj_3t3qAASFE`N%qRY`e8 zxZ1edmd~;p-IgoV#WR&sS9_^Omb${E`bUq0T~S(*qg^pljh1R$Ws&1uoi57zwXV)m zWxH7NPIECDbF(hDuc(&yZmyme?bpjibBmX1NB<+ds)M^`U8Yvwe}HQQj|yA^U4vYM zUDvyYxDs7SuA#1BuHjPcB-LxBN-nOGY8R&|ivD0g?J3n|Qe7bfyUW0SOcVtUl!1e^nEv&(0~bGz{~upHP<5$;znSE@cpS9e zmG7G5DsUCLid@C6$*w7`sZ#AFRcfe@R5@4sNwvRJ2S|0GR0pjmiqnld3)gH{iA%cX z5I%LV|HguSZC;vNl7*%e8{GnOkTBw~ccS-g7Qr9A>4$+G9 zPYrWi_qy&U31gV!S|ZiNiid2jr7jjitaL4NJ>*(0)g-A7mFlpSt`*|Et4ylHwH|Wv ze_({;TF0ftwO*Nu&6 zU+dcH+UDBs+QI*Jnb%5nf>e{GnkLmu{r~wGX#D@)2&Y*MBb@x~;=)kv{cyKx&Fq{U3JL#yW%eP*JPPD?e0JWq9K{>w3{K&h;{Pb!w4R)3xD^ z>id`P>Rb#2*0|nqz3DpVddu~;>m8|PNHt5U*;37wYQ9tpDhEWa9XtTw)l7ZvpNUQ$ zqxG)g8z79^I@iapPqeKkO7#Zr%Z#V@<%1~K7y8YennNp9HdR?uP`>Ne#@l(dy#0fI zZ>LVu@9ngSV&mJPDwZ_(kMgUrU7@~R`4SENM$j@?zY#R?1VFzL{EOo${i^Q{q`|wd zkm{66$4=UUld5AUcYSw|F?M?2ZFV!%n!`j& z<)~W*xcTabpi?(j)=BmY@>Q-&*TaHxQdXRf?wZV})(LGe_>)UICBlj?=&MzPP zxkqZb8|9|gyF;pXR_1P;JNcsHRT&8Rqn7T@x@e;+qn?ThEO)Nx=gxOBa(s|!1NE*; zr=8rn+O(5kctvb!(Uay_lba{7a#W4oG?v3upD_i0wsXpYd*DYGTKIz^nRj$4dSJvwe z_fyrYtm=A4f2#Z32QJx&hmdhnVG*N$qa*GUJgsyebsuv-=RWR!UaF;1<eaqU!oPp*qWg9CdkhfVZ@Axd zpL4(Ee%t+y`(3GW{a+_l#u6K(`iN8?mFmW|?)Tm2-5+q7oap}8{fSf`lj`GA-Jv~x z;s4bGQhm0DwTqXoF#JC(SNxNOkpGsoi;=N>ioe?0MfbPv?^zM!=G5I(>i$8hPxxA;9)^2~5>gH1S?^4}T%i2TFS?b}Yb!%-K*7elaHmoPrZKa+o zrMg{fW*4tl8(iLaJywssBC{T@aXTy8wa4uV=5dMpSC5y>Np)A5r=f@I;!{%H%OqZv zg_NGAp5|=nF=>Z;k|UORT4;aWtsmTfq&6O|EM=ay+BD$I=e|@@If*n2AY6)KH`kI%en#W%~+!^xMeHEi`PlBh1vC$X$J2~1$z4X66 zQ@K%J&j5WRPk;TL{n|zxj+6-Auwl&SbWl~Efm)q|B$8|4{Y?TX_W z=gH!Z)-&ES!ISJs@uYgvJU&mlC&QB|)x%OfBGsc(Jto!Xqk!1dNW++S;@j7&qJQ&o`*dv zJf$A4P_Ik%4XM5<)pJsPOR8_Lu}t)=))o$V);U_~-S#^)G_Cvofm;c!iT&~VoqzZ0 zp<0(M`OnMEo*i6@JzG3adbWDDc{qW&TD>RL_od2x*axdUJ3YI!hrS*o2gZWshJl?xx;gGG`f)Rb;Q2DOUvnUqn!#k`>3V)B{vFn!I=#kaE|S**euo>i2ouiIhp#8+`MV_LZYz2n&O(WvKr 
zkG8r-{Yt7|YxlJN#W!J=dj2gA8;AT@KcxS~@-gst<@xw0*56c{x4(3};Q7|`J2P{h z?>Hxa@ciic$@8=47tgPr-=zAJep>!4)nBCgt5kXAcNr*TU;v9>yaC?JyqEJ|#jEls zpC#=r122<-m&-tt{&z(NCg^|ZrZw;XcNf2uN33G;i`QOdS&P?I^&fmHwA33c1NqFp zen;eO%wiUA6K{yOsSFI1fz17yEA9Zi&AqJ{+tvus26n9&m3R}py_kgXcJp@k_VD(Ufk86RDg$jY(7wvs+uO(6*V|79 zI%J?n2C{D#t;GbTU9|A8aAaX|N>*WEv9BOqCxftOd!i||XQ3g6W ze_s>(yu-Z1y(7FVD)f#LPkTpu$9Tuuzp{VD!}AN?ao+KwpLe{M57OBS`IZV}8EAHK zPVT_soD}Uda#qG&`r6Q{F9xMn`_@cW>oTvzT2p=1n*Jrr?7~aNzS?5wisbx9srur@ z$>pWWK$i@3TU;d~DB#?GA43V#ylsMDGpWY;TS?*PAB;y)uv!qLmB`m4T5mFuwAfDDY0HwVh1$PV?UAoh}18 z!y3!LCNeOj{3hGW%tNVnwhU}oYsJsui)+Rj6>qGau#T01jfk|izT<{;o{aO&K7a4| z5;--JOTgs3Y<(@TvFfgUjuDf2`Z76sX8kJ^Li+a~HXtOneR#;gydu7Wh3`b}lN+LC zit9>hHlJ@y3rR1?%L(~UWUym+M}JRqhj&q}wXxWHH??uE47^GPw$N*%rN1_oXtnWx z3~W|wZ7i#)Hk$wEYa`RBjneYk`0qtMvr5!gdpFb?^+&vq67|PqU~3t8wI20${-{5p zMSZgjY*TC0x7HN(w&hXJ`*)oKT7SY91SRrS#M$(X<>x_p^#mx1AW^!Z4f(S(j^(SJ?`cBnP_Cu)j*Sb6mS`LC9*a+Ocle%fXV zAI$7<@mG24AI5E89_usZvCjKX#M)oYRU-Sc_l;U3`=<9Ck$qbRM#;cfJ+g8B$iAmV z_I()`U29}NtPbyAhtirz>YHTT0OR%{jvR3i|ubR@S56U8ys-yfJp{+DsQy|{$iie!I;9T z_=@?8zv#=9{NFXNM>1F~k7Tg^;~#m&g&}F$F9qr!gbc|l49Vm(JL3|4Ieg*U`E@zi z7VOX_Z6mbDUcpWo*rU8YgFW1D1bc&ngB!{~M%mnv^{p6N1vd%4s`|qO$I;;C+T!`Z zUfPqZDHEzbxe9I_eDy_Jp9v1tR>}wV(YC(((P~?V2S;7Bb(OX8{%pqwcVe0|xMT1& zGO)i498hij`=76H|Kk<%o&IcWRQbComp+QF?Gd&9iN@;R5*6GtxYy7`?c-E@QIrr@ zz=NxBHKNfGd~H*M_wX^k!*4>E>wvE!GdsYVFf;p^8zThG&;p&1ihRt5#7(#b^KcvH z<4!EYa;(5gtigIvHRi|h1b7^2K98@2uyA|}>01UO3llK~b8s`}f`^@!+i?f(#{*aj zj$ZNE2gP6(?TP03L?N9g9K%MQZ7+a)+TO;ycpo3&BYcAI@gsi5ulQXE`(;oNh*+dx5%%GN5Dt#bhYN+H zH;A2M00v_Sl8}i>n2f0)Uk>u+xD5-i5Q}j?s2c}$<9GoHK@ zuFZH7&)@*4a~E~)qRv@VBV5npB#4RI0_w#*7B^ufR)e&7f-BrlfcE3wErjRA`pcb#GxZPp)1W6c%5g3Ir7>5Z+K^oGL ziHXQYF7i=;B230q+=v;Ng%Zrc&6tZ@aXaq70xZO0+=KhD1P@{vmSY9VunKFi4jb?& z9>XSV#*^5F9oU84*o%GGk7scRM{o?saRR4s8fWnmUcqa41LyEI-o^X)03YEKe1prE!Z3`$XpBQL(vX3P;CybFk3vkwG|a$k%)u?V z6`Y3+7l8h(;XR-aYWN@?!U}LMHC%)BcodvZ4L5`9K*Jq)3VXqM+VEL$jy607&d-LY za0V~oRlI?>@Gj2dBYcW4@Fl(h{YyjYq~WhZG@?ElT@LD^Q5{eZjm)498gadCMEo0Z 
z(l%-g`u;}5y%Bv*BVyht6vVqxB#3pRco64C*MS%}>H*^0s2_-Jqw7Ij8x6-OjKu_` zA{|-CMji@KjH#H8S&+CH^Kd)vz`b}Fr8t1&I0wq!h%z^#tc`vYqHz$sXbAG!m^?P_ z3hJ^kb=a8WHC~7ZuoMSz0>roR?|d}x3e*MVZ9+MlTm$mig#0zR5$o^-=mVRa#4C6W z-+}!@0?-B#h{8~e1LY5)Y$22>Nw;OKEwrlB}7y5-LwT-VIpXAO^Hua+D+5P z!LgfufPdgihE*P11?uanM2rE)yXqD!0A;;u8=l59cpdM9G@2=B01G-G4jnN9$)LWP zkw!DxaWmr8Y&VEkv%?_2&3?qsLNs@yDVm`R`hd33oH}k!-kVPW=`<&u=A_enEB0a^ zXt&KNNAs_RXu{^C_ zI%r8eTKbTIJ8>VDfLOFV2HH_ej^FYp{31jv^3aMrv?33!`h(-Q8ji`B4T*ZYw1;h=8Yb^`fmn}&1{)3&t7 zw(~)m+CG6TcpbEpwjbbkhVECOF0Kac`sx_uV;U&a)n#}D8*v)!fAyO}w7U%Ta3#n? zJMz$uJhY=vXvgu}WnvCEemm-?9mj9C3%fzRwtENf2@!fZ>Z1Y3LnwI&B@dyakP7NJ zl)4GM9e03ygl+?|34I$M<5MBpH$)52M%s7B01)5y1)v=5XM(u4UjyRNo_cNn7RW~j z1suPF1!0KCH5h?pkdF@3O9%GvK)rN0h!Z#^L|6cbUl?Twqg{rvU)Z%E|6%MGM*hQY z#Uk7d${)u5VTbW0e!?$8gf~T7v_lfcVmu@$dpKneUyDuHj92k4sLKfIAi@dSL_|0A z$3Wy_3aHD7m3S17fwD(X_K0_ch*Uv)iVQ*|I-xUWVlK#gBzcXbtdZ1zBzcY`&ynOg zswFxg9KA3YLog2uu^2nCAJ5`EzQ6@;7s+chWsYu!o}jLxuSXF$cJyqlz6w5JUy&zAqqd`n#Ni&u-V@WfXe8m2UpM{8v z0Cg94EjV}LICtXagS6s^b=>Ph#5V$Ej&BY6kN9zzfcrqZh%W{Ci+>N4wPPEQZb$Ok zF&%lB1nQyVR&2+2_+5x=0zi4Lxei@%GdORqxeJFt`LB6Fh)x!G5DeN$C(6`mJjg>Q z^3aJqbb1lz@U{@wI?xy)7=#fR1=`fLw8v}j#c5E!Yu^;2vl+y#GjZ!ox}7sI5szaB zcHtZRir=~2Z;x2SqYyJN3zYA=z1WA(K{>DcL5MDKpnP4rf&QTjG44V+yU<3uJcs9n z=vo&xkoT^%<*q|998Y2oo)#j(geyUQ6Z(NzBn-nncnHKIfjUT_y(Li3-Dun0I-xUW zgLd7G7<4<17xA(X-R)?ECZH_cDNA?C()}461Kakv9QDxvgE11LL7VEa8RW620^aXQ zUG*FfV%U=y_S}eVAdbBP!TY_epk8{70QJ(V43B_qd;KUx@5?}0dlQG=#GyBF=)D|k z!7+RPBt#$b)Q4mC83P|OK%Dx}Zu%S%qA$nk>j7=1FYouwMLrJW6sZ4xP0<$ZFclIv zfo=M+O+U8j&o=$pra#;C&jZ`_zY(;{{;%N;AqI3oA5bR)D8~TeF@Q1;R6$)03e;TF% zvLo`qF-P7A>TV=;H*%X0qpS!*7I&=-t(R*12rH}*=HF$|PrY%(az*vG-~$I)iS z)j>Us2JLEGI!dttw6AgR;WK<8#P~>bLTAjyU04LR8P7K3*=7Rqm_R*E=!V;IH|_=P zYQo3(RET8KNhY0S(n-Dzlr#ArypPZE4Et^-9OOBj^DUjSq*Gt%yYV#k;~);> zb-WMal}=pKiAVaE_)Un6%b}tU8XyQZ&}K6zZ$=D8Ap_aSLjk5>8pwafEX>17kdKVV zu^C%I{4$Q>I8NfU5ShVf4)UK#{bo`(nb)EVdZ8}{U@(S&<7X11%sWA>Gf6v>v@>^r 
zddfV47x4-xYvwt;1InEFA-=(P_z^$jS0Shaf0b-lI29!UW7-tjXY~qkj9I}Z+HgU+_i)ZjG4&xY}$0?k_OL!G; z;4Qq1^Y{p#g7Yn#c9BgPv%d#zIGb2e1d&5K$)TO()IoiiVFj_saU&SSB&R8eXHF}$ zMJU1$3EEOlJUZbzB%lX+qaOz1dL&^usP~+)n1EEIBMaHcLjj6071J>b5;tQWZpWQi zh`Vtg9>6j@j54goIy{2M@C2U3cI?6)?85=jmUE7Rwwyy-W*Lphc@eMRb)3UHcpo3) z6MT*f_!{5gNBn}{g~+`O3a&ssG(ZsSaKVd42thNnL>sh22SgwmakvJZ(G}g%3w<#F zgOP|~7>O|$j}-Wji5rlMNhrb;+=!Vd!A+Qp+i(Z&!eZQuC0L5(D8(wQ#RhD|CTzhr z?8I(7jr}-?BX|xc@B+@_WxR$r@iyMW2lyDD;UD-4-{J@SjNgRF3xElMsEaFMfelW0 z&=5^<6w>^1Vb?bqcIN2NJ9oDA_w^>#AHmv49vzH z+=5#%9}BPu_uzg!h-Fv};+c08r$K!3h(R7@&--48{2+Kix$+ax4+C%miokL6S71FJ z0ddVIp84m6m{bpT5SvMTK|7g5nI=)+lc?uO5h7bkBxq$c< zm=T6}&<+bmA_c^u;66MI+C{;BJcs8&z6psaybL`s5Q8CcJ2-aXGdPCh_y)h?cOiYxQE&y+Bb#uQ>Vg|;+h4(8)dtiu!7f>-e_-WOsjv71^C)WKBZF|`X4kcmks1hJS( z`#B`*0A%;Koz<3O|E-o!%5}(GCMK9K>RJ9;Sl4OurkH zXZms+25C$`jZZ+`O#fDh8TCQk&TxTz%(xC+K}=`R9%rP3GR?RH3$P8elNq#=8L#0z zoEKuI3flF|AoN2LDAUZDm$p&@Sh&-yAo{{~Y$4(*>!>MlLA-9QL16hU0hZc-44 z4v0fXi~(i8DFbsbA9vzO>;ZLo6LoOYm!M7D+!(FU27NFD)aA{yFb}tZvfoVEZ{Ce} zKzq9RGa+uVA{dQ8JG!Me`htCKS&Vyd29)`hbNE$=xzyR*D-eNea4ix+zUPj^-B^a@ zI0(*#xwPZC+irc^mn-o%r7#h|cJRKDYz- z;(n0U?dS2K5cAuBGS81fGA1G$)a!iO#r%yRfAjwV%6dmMNcRr%dPf1KVi*^`|G^FD`Jd9GD1LeExV<8p>gSaguZVO3wVG$pflyC7M&^Ik6#)~QEV%o^!m+`6)cXK}7-2~+Q?h!}^ z=hNN$a1_r8agPNakl%ZTgIL^?f)!W~VsQ_3a1ZV69_smC+V;J)?R)!TK4{nX5`%kR z!8>?Qi2Fj&23Lc!+(%jNqb&D5kC(u<_nYB{7h~W<253|F?*(~W5(M5~LR~Gn0mN_# zFwY zd{9s8kKqhn6k-GEY#^Ntq_bfHvOpW$KwdUHg+2HVzYFn50ImULdZZ_)(?>YwBXdFB zJwn|*Lft*u7_HC-xtId-_$X;TdI(2^*htwn(oQ#0&l`teG>FB<L!%O<0J# zLHV{nfMp=f?VCYtw^R4q4}kjJP8!=^1Z{CUb-4X)&>pvcgirA~DDMu+wIc%8fY|I9 zfWhFHJ4Rp>#$r4&L0j8Fxpt739pq&PdD%gkc2K4rlxfE%YytJMleW5(^mjTy>~<2n zoej|pEkQfl$@#IfJviRZJj}#g+=e?q>~>PlowUWBkK%Dq-kn=Pz3imiJ1OhV=Wqh2 za2jXv5?;Y?LhSOQD>5(-q_vAQb`y);U*Jo81KP=M+SBe|h1er-IaJg^eVAbdW!mFL zFdCyNsH;7#&=#QxLnLAlk4~Ts?nyuo5br(xFc8;+zG%;I5c566e9r`=A{|-C1~J-G zfMQI=bj*STb+d=E?YSM)(H`n(4|TMMI@&{f*+U%nJd84|#yUKL$M6K`_x5bZE>QSA zw5L6^r#**o6vuHA#COk&Ahvs6$2q)%_wgYf%aRV1pALG(;0zg%)UytI;0eh(atn 
z;#zb;H}pgw^v56!!BC9AXpBQL(vX3P$U!~|F&Wb^1G6y)x8PRH#{w+EJ-8na;vuZS zO02@Gj2dBYcW4@Fl*%_xK6F3b9Y% za;T_-`Y^)^2iyooV>Cr`v_e~iA`Fp;K^(e*c<%Ed7sO{DG1y1h_uY++*aphA?;Jh^ z_5MsCEa14$bV3jG0&#tYcs?Vs7Mnn9p7{W@lV>Q?e(HNa^}L@v?e7WdZ~yh6e)hBf z{;43&`-$)VM{pc3f_Aw7Tl^-(0pf5V0@r|cabO%WkOlI6U>TO<9eje%gm|_QT7qLg zI~M84#6moPrJ$akrGB2}c+dVK#6bmtAa)0d*+F7eUWW2AG8bdDXyDV)Yv_!++n@f_uNE({S!1~GUp8w+qhsMqKA z;1G`B6MPL~al9VvATP%|fbtygh*Xfq@m$OWb#r_X)`7Y`z6Ipt`0IER#PoUEUz{ zImpMUP!O9_!!ZFVxEsqr8#(n9o(1te^)V>NDf+Azi0ccDKs;U;fMFmXFU$bPe__9OqNe;4A+6{ri! ze}?_fbVep7p%6<@ij{Z{XYmq#5aKLlKTFxqHbyJ7!C;I8b$OOLID02(6K8kf01o0j zz5sRkq8H830+jtl%KqXA%mnS}#apofTd)=1;8&3Mm&of&l=UU*|0VML5_x`!Jiqi1 z)?gj>;xLZlHz8gQL>+`79@k(De8>QKeVH=9{2=y#x_bEtJ_W~q`8y$AaUtaYYvI1b zx-8f|fIpAr#@yya5kWyokx)bs6ct1q;6f2lAW)oX8n)hs+1_RIZP{Kn&8D{ZUT)33 zGRrh`o=@kTb6ubF{^5`3zVGjSKV0u0?+-dNlu_j4PS&`SHP7=qGFo#i2-eDV?GW_3 z)=X>FSbH0KTKhdRUFVtWJae6AuG7c5xy<8f>~7twcz4!&ch=X!v)0RceF1l3=JgNZ z_s9C5_$3H7G{RkMXpR0hOv0=i9>#Mw=yk(JcCtGNHrm6+9-PB07E*(GHm+bL$Ae&# z_hwTa(lP(09IoM3ZpWQ$GSjA?gJ82BHtS)t9yaGOjtShzUEIybe8YD^u*FSnagSRf z7>4<_jAaRO+ak9up1Wl;zXrjtvGgR78C<|Z9_IyKWG{y}90Xg6o*RAA1nb6Ae~ zw!X&+eVeBFws1 z4twRWR}TAH5{a7o&PPxC)ZF(28`u;C`{lIX-R#dmo&9cRznj_b-}moeR}lO*fNbpl zw|jVmW&F-jjs?L1pE=+&2Ylv$&m8cX13q(LCq8%Jk0AKnUH+cW7+z;Jc5=`h2jy|l z%m**Ut`1&{ybgN74tgj4h^7bUa3Pm-C7&XXKg{%}UH;jTD5hcNKj*W8RlFAjhw9RV zW=voTWjxLcyog;MIu-cS9hm2@0>*L&5AY!N{?{RV_HY(%Mg|9dX;Sc!iB{vZgBM$(lyuHbrZWIMkh-+zXn{y(F+ zn}>N6|NhSr?CDrC1IWa4j(N^8&pGx5KjH?D>*aVX-Iz)>GkF3t9e)WsJ+9{QKZ4+d z-JP(z6LxpvI&SAq*0L2np7gAfi6mjRlkW7SJ)eAskCDZx`ZPlxr*g<=44!$)Gf#Qu zsUL8cr#1z_>0bCnOZ(m%V%mWXC-e2;Ta(fh+qO!D8qBk@SHO|=gffmG^B_MDtVc= zcqa&J>!-GUYU`)=BJ^8(F~6{dtwC6)H~mTH7VhCb_VE`-g0OB&BI!gbSqxt4+guH`QDUH5rjME`YH@CI+Qiud`5PmyC?z17{uPWG^$-}#fn9OXE&tS6^> za;kS0O&N(i>J?Lp-0D5VbG(2a>gl20LH_;u(?M8Y-u2I9Ab!67XbKp|?fCinkFbm< z&|7^!Utd4CB)8J8pOpmv9}|a|1VFrw#At0UqR8WYW-H8oIrP zcG2)1-eomk@ipJ@9h-tMq62OqBAQs@=s{0%t3O5t+5pE{p3-lVH&xjxSg>`JiEk(#X 
z!YxJYVLxsu!tNs6Qp8c*QpBksY!u=S8r8*JHEKi?n$eLc%-+cCjh^IH^wnr3Zw295 zb!bjY&ZaF>G0RzVna2YD{j9#v`kXJ3abqDB9tKoa`VPm)2_%&YV%^+-| zw*kpW+#wV|ftzK6vQQeTS_gagVL!<@?)VtCx^hD|xkA z$)B7K!m~rppbks8gS)t!`}mau9K_F@eK-hPo2|9kTAQu4*;<>ewHmF}@IC9WwHmF} zXzgy=G{mj6(N`OLY*WGvW-$jlY2)*4+-#c<`51d?+Z_G3wTHGn=t*yq8NfgWV+U>R zplu#i+{^=bR$E!O{TVmkb`$&fi@!OB-*N2%>Jxz;+v%}gOU_2UcHYBwp4;vg?%@&S z-R^Z(^8tEpr{{KG@)K)VkDBe|*KQl9g0OuINfc3m4BB7BRmi^mb=-(r?bT{;zwIAH z&GwJ+1k3TRw^z6Qx2$CYZmzw%Xzwn3pEhj&2WD+=*7jy?e7 zGVPE;e`MN0rX6J3VFaToK+hefv4G3Cg2i0J^|+f3OSu>McX$YWcX%7~chFCyx{=dq%W= z`3mYs+D)W8h?G&Jj3Q+exf}NoDWgakMan4BJ@~JOVaHlDq%|>|Ln7zWmsGMC$}rqs z$5G@niAv^jDNC@|j`rH|7H-F0JKAeU`E-1YCwZEed5aJEj&=Aw-Ej*t?6{NP`IEyO zQeaZ>IssyOX>-$-9%hJL$Vq72cUnGW6Zvu+vwpN3BkWIKk;4 zjFL%|`-!SYQ(AB~ZRtQqx}jFoAciv%nM93c0#hiblIhH3HWzXQH*gPfiIPi{T%w-g zc~kx6j8!dD$sl9Ib6au+{u4=m}NYHU3Pw%*I3D0yvu4nV-0Sw^8xpc zyLb@4A7acHV;3=M#i$jdR*YIPKd^z#Y{h3+SQz0)$AHe zPkNJ#IlJ~}FxliXg3%OE#Cgo-V%$O3tI$)|8@QR;D#zPkR#!657w zgWBEH?xuD(wYv=^;uj$42Zp?l;UA zXV-B@I2wfUbx=D#3Ny!>Io{0iW{x*={6OR$FZX!4$ICjtgemATz7p?W{7mL>K6;M7 zlxul}mFOkj?D1yq-W)g8-Ltw+#WT9gvHQhb$<_GT?snJxcHDONd$^zf@-6n&<4hXS zns!8Djvig;N*sMi$L@M$F_aOEW(?ygjqHWR_5g`$>>h!UdQ);TD#1FAwk#Phh_Z_M4!$gqL`e z_pslD?}M;cUs5q!uYAnXs}%d_NXgy$^|hpl0uYTG(&zy2!A%?0T1>cJC@);4R)kjovcpt>@m~BA4Dj1z}QW zdXh*IeYg@?Cz(0P%t?AqGH;TMlk}dX_oUZY$!C1YH+W{!zt1#Bk~xw#u!$}F9)#yM zXDn{@+z;6qgvn+|mS3{{B->AN4#OFVT_t;ulIOD!IVRhc@6CtFdQ7&fWV=eXtK^%x z4ZS8m#WOt5i@buIlixrd$?qbImT+tV_!4&eH;7k`#vA>344Pur4c>I#I2;5GvyNGnIg{= zd8WwI-vkI#qKwN)X%f~-N_?7 z#*@gh-}87zKbiKEX}=%%nYC=vuHv~WSiQS_QcZ{`%5)nY8FG$ zN9ri@8N)bAIFEVAHT5#C;3}4I3%BD=Q}5Z7LO1QH^=h7Ga(=cbVoc)2`+k+)~>8 zm?iBImhlYkDeXmG=2hO~V?M=9Y2WfaKk*B0D{T+^IKUwea}@KX*QEt^l-?GMyeaGR^Fc3^RwKmrT86>LqhLddZYsW;qMFiW|5YePrq*Q%;$W@ia2Z{0Q%4rka^M z*u`G%rz(r_daM8ZemaoY7BA@gJe9Yn)zIVxdz$ypyx5mpilT5xek)+pda{|wa9bO z9?a$Me}sen;4l8>pCHU?!8zn$maKW)j-6#agBn>c@(L?3Q z*J>>@`_IcCT)L(ZQZ4#Huzs7*Z@au!W!Lnoq<;jlQotHV-AM=!&&$YD70 
z9X6VB^fzn~m*M>xwwUX%)BZwytHFEFeIpm)!|6Db4<)176Tv_G5$A`G}T>0m2M4jBN$SKzja@EY; z&+o`9_Yi6iKbsiR$flUfxsxYRXSlwH?+U^^+2+YMuP*w`bN6|&%AMP~jEO_-F8<9U^aRit)J1?q4wyN{D^mOw42H|W4;;l&6aPr{65%IzCGp3E> zUJyqQ^j)Cu0y!1PsX)&KdM+5tc&1`61@=*JFZc5x zPx1`U^CF+}6Px%IeHG}dKwkxiIKn?1=Ts2-pDu)B>Jx=N#>jAt-%n%Q+n7-lFpfg> zFs1@`K1POP>}iY}8Y9CoPw@rnjWPe21N?y+V?)lQE)9sFCH6PA1D)thPtK(;sd(4M zdJo3h<=AXSGZFR2sy%i#=d+MST*778>)6|PkVkk7IgNdqXIaIUe1$!Z{g#bvVH-Qy z!#?b9Ts<1mgy!gdoPNi3C7uL&lZ<;Gr}uG#8Nnn?S_VYV`a+tq4h8rAbhViZGPCx8!ysXB{V!S%z zzhE7nHQtR(s6&11X+mR~Vde>*F`*riL}6DG+{*;doY0?iGO)7=b~Yi0;mBda6zpii zHQ399r&)zLCb-E$H&S>OP4SKtdPfS|(jHkB=Hp!`oWLZCk!#^hyvv1dpwROR%~g06 zOK=B;Hz41_H~0=&70RekMukW59!#{~iRw*MZ(;}Bc`h=x1U!xtMLD zyPQ~nekRU9?TPoXf*)~LlVm!{?MyPyq>gkZhHfMx%SnAnMUIm)kl!TFom7RpnKTP` zGs(P@7Vso5@(!!{fRFi%FZqV=*^Hbg?O=Bh76mk-33@7Og}#c+T4dIubI?2qSJ6)Pus;YVpGi~NBKyg* zpDg>yvGk-j$;f81Y$m56`^mDNto~#LIfddn;*!-IYYron9o-2XmCT z*%CKfl7;=1jH8eurch2L)A0V3T!NWOxCFUqGN6BZHrQ}C` zWjnhtM~N9q4swViL0D>rQZtm+Cj$MK>c6xj-icB>FSYa1bFuSM?^>ySmkuM3iA<)1 zsm$hl?4)!dc2asRH*hnz@-)w32c<9Z3U9ND_xXtL`H3~GXA@i4!vX%l-ISVPiW#QZ z!IXxaMRQtVmMQJ%Mgn#*C5aUJV;5608G#*4DZ(66%rT{cD(2y)r_^vUmthxEuHp$k zV>c&)aH`y<#^L!>J%6g_PaTNNrpjz;E^1Ac*HrJz)H2Rv8Z(%U?54_Y>XqEdvwVY@ z%j)CVW$vb|823_Ej(aJaj%Soz%JtlYtjq3TDfgh?vR7Ea8_2v&=4J1pM%e~7vy~l~ ztIW-mo#12;mN%dgO=ymc%UcseJPF9SJQ;nK%b+}+L5yN76PSeg%gtYI{&G2&U%(zw;-D@h+E}t0JHl zjcLKz_+3=dk&*Syw)ZyenTr#+7fOuS$JYet?WCx3Z1x z?Bq}o`ky3+{&rwkRR?!l6@h%KnjzOJxmL-wO0HG$B;aF3ItZt? 
zB$6n)5Q`kAr(^fivlxoqPrnp>PQQiQxr=+ap9guC7cj$gw>n+F(@&sIbuIK-Etl%1 zs9CLMbu?;LyR~Y)SErzUbt(2$eK~ej{V>aTf~R?o<-Cl2Rohp!eN}&ro2dSgUod<1 zMz$b>>Yd25S`IVn(3W#B?~HLw=W6c5vuAkb4EH%>6`%4MU-A=nG-ExUKf`lp{Ki4t z#Ec^x4Z@jbo@qBT8_|T8w5A;$=!6Vr>S1Ot^f7Y~L&#wSqZxz#X1;@7X6j+4IcJ)4 zra5PBU^DJ>rrY$l3qyaiF!VPI!&w1mVBT5spVbU~%+kj!eazCwtVH^gNgis=Dx`=K zuECCH+3T!NkgLDv7tXfV*{x_pd+c?#nP!`5_O;xGyO=G@*?OL>zuB^%Et}cLITeKd zPF*=ygsOvYIoJe7yH_y!Uf1=PH(OeSG&vV;yA5Zck=9;?#zmMkH_uP;8lrQ+2@Av^Z&z19BbNHKa;rVCLlorV0 z{KeeF&D3* zWH*OVdx3o|I1@ei8*<@-=9qnfoh@(!3p(Q6UJy?Ly+~p*rIb-YB_Hz*`dRRO5H1{y zJuaL?F;ghVXBW<49t)`9VlHI~*CCsQx3H48c$d{dcwrOtcA=eLsJ{zSk#minYg(h% zn$E;<4)#-HKQ$Q)VhA}5X9VNXTaDgo^j4#{nrY~(<|6b|b2(RYEjQpMYL>E$r+AhZ zc!d?%Q;nW$KE$on{EWTSY~WY6vx_|(;W&1&$SxMup&oXz$h))142$wmcaeIFUgJ$x a2mkwjXV&_^|7@4@fB*Ua|36&x{{H|R#IfH1 literal 123847 zcmdSC2YeI9x5v9PEA2|Vt1e5jXr@C#?<6F_MFt1AST-#oiZKS97))$KLPC-~>Am+3 zr1#!?umAMkdnLWsH)o}lcdcT{cJ6!c-ut-$bIxzh^s+NwFJIT%($P6-(#ss-D93Xa z&dLb`$N;){#D?bfj+VByi$}CA-@Cb~vt!AK#`dPwE$qkjBRbpGc{y_A#n*epT{w}m zaX!w^6>y`uG2B>gJ8pY!95M;k?OX@9 zAGbfZi93h`?ojSX?kMg!?s)EG?iB6}?i}s{ZZmf&cO`c%cO!QzcPDo*_c-@7_YC(u z_X76{_bT@(_Zjy&_XYPQ_Z9aww~hOe`-%IV5JCx0EW}Dg;v#P1AqAw63?d`RC^DK% zCR4~%GL1|pGsq5PSF(Uqk}6V7YDg_@rR!)X-H#qX527kPj2=mkp~usc=&AG!dNw_e zUPw37OX%hFDtaxwf!<7Sqj%DK=>7B|`Y3&ZK285eU!X73*XWz{9r`}~h<-}HpkLE( z=@0a0`WyX|{=*aA!i&7bJ9szm;{$vlKad~7593GjWBBd)3H)S!8b6cYf!~Rr%a`)y zd<9?0*YI_G1HYKxo!^shB%lIq#YxwK=oA_J#JNUc#`}ha>NBGD2r}$_2=lPfTSNS*ixB2(@5BX2{ z&-t(TZ}{)|pZH(-Klr~boQ1at7Q02UxGY{vfhA-aU>R%~Y8hb}ZQ0H;-ZIHD)iT2} z+cL+pi>1U8vCOwDuvA-iv(#G_S(aLsS@yCtSyozFEUlI{OS@&gWq-?omV+%|IoxuT z#b)k!6eJQp**Vt1Z`AZnWHDx!rP?z21H?^-^vd~Es5@}*^)mX~9 zb+~nub*y!qb)t2Ob-Hzybw}&Y)?#a!b)I!sYn8Ru8nrI8F0t-mjaiplo2{#@`&idn z_qBFf_p=^gJ;}~5Z?oQM zy~ldL^#SXH)`zW6Sf8;zYkkA|ru8lB+tzoiA6Y-PeqsII`h)dH>rd97t$$npvHmOY zf=#dslHeB#gn$qf1_;{=<#wC_TBA!+8gaF?5pg1+t=9F**okT>>KTy?1$J7wI5+W+J2nZvV>ujs1K3PxfE!f7t()IEj}8$u239OY%wuQb-yg4VH#VBc##NcG7rhk~CGC zA%9HktqoS>YnoTi+qoTHqt 
zT%>GKE>*5ju2!y7Zd7hjZddM7?o}R89#$Sxo>ZPuo>N{_UQu3G-csIGK2SbZK2yF_ zwkh8!KPtZ{zbk(!|2n9{>aaOvhtuJ4_#Ht<*fGda8;BI*xH1 z?>NbEs^bjD*^cuZ7dkdOE^%D$xXN*@;|9mgj@uk}I_`1Y?|8`ZsN)I8(~kc+UU0nZ zc+K&q;~mHQj*lFlI=*mx?fBO5gX3q%Z;n45|2T=$;uM{d)8TYGea?Wh&^gdK#5v44 z(mBSty>o(dvU8eqrgI19PR_Z`QfIld!ddC8an?B-oQs{iJNI-pI#)PXIrnz1ajtWA zI5#*qIyX5FaUSYC!g;jwIOmDZQ=F$e&vKsYyuf*}bF1?*=atTDoYy;Va^C8^!+E#! zKIenZN1Ts4pK?CyeBSwz^Ht{?&bOWKIX`rM;{4qCmGc|t_s*Z3zdHYL{_Wyiyi0J| zU5d-)^12FKA=d!cVAoLB2-j%WcCPWRNv^4`8Lru`Ij&t?C9a5TzH5Q2+O?aj-nGcJ z)V0jDm#fLO($(T>b+x(LUF%&NUAMdLcHQH8!1bW(QP*Rxr(93Fo^w6#dfD}g>kZeN zu6JGUxju4z?E2jGg=?GZ8`lr6A6>t?eslfh`rA$1)GfG0x9nEjZnwu>;10OM?g8#0 z?jrXH_el3x_jc|H?uqWH?rH8>?%D30+&jBV+@*UGBTx_qgwM-{-#H{j~cT_p|Od+;6(ya)0Lj-2H|7XZJ7eU){fXEFP;z@Q5Ct z$L}fd4D}524EK!iO!7?jO!4gO*~K&0Q|zhoRC{VXyL_bDQUO&mEpSJ&$@G^E~c(!t(8?&K};&i7V$tGrQfgLilD9^Pf%eZB484sWM-y?29m zKkxqDjot&i2YNSo5Aq)DJ=%MW_gL?7-qXAncrWx`5q1vG)`27v8VE+q~a+fAId}qdwjz`edKtbNf8L z0$;$loo{>JINx~R1m8s8B;RD;6yH?eEMKi}H(#AE>Z|uP_!jyW`4;<@_?G(C`S$g- z`#OA`zV*HhzWsdr`!@Ox@u|KeeMk9D@tx{B&3C%*Y~NMBt9{q_uJv8#yWV$$??&HE zzWaRl`yTK;=zH4tjPDiSTfVn_@Ay9Oed3+z`_%WP?@elG3_7CwF`G@+)`zQD(`X~7(`=|ML@bBm^_LunQ`D^^O{@wg_{-}R< z{~rEj{zm^Q|7!ml|9bxh|9<{M{Hh=P$M{e4pX1-+zs!G?|2qFo{@eU_`S0^T?0?+< zwEsE(EB-hAANoJ?f9(I-zs>)r|1baF{(t=c7H|dL0$+i@pr9a75G)846c&UFiVB7n zj4jx%U`oN%f}IL>F4(1DK|y6feZd|Dtp#fe))urCtSi{RU}FIk99nR6!HET@7o1yg zUcvbVTMDiyxT)Z-f`jKvYZVn6$+!DAWa8Kag zzu) zBj^kUg27-YSQH!@92OiN91)xo+$p$oaF^iRU~#Y{SQ;z~MuO$RdBLh+bub#-J=hf7 zH`pHR2zCb72R8(d3?3CcI(SU**x+%&f!@(zm&jnuzz7_l+_*rmU@SEUI!QVpekSF8~`9l6sK`0OkhC-pjP&iZ+8X6iI z8XFoHnh=^CniiT7Di6&I%@0+Cb`32ERfZOZ7KIjvmV}mt_6)U#)`ZrE+Cu9>>q8qt z2ZYoRgpLj!A37m)O6b(kX`!=1=Y`G>T@bo3bW!Ny&~2gHLwAJk4BZvFJ9JOz-q3xa z`$G?f9t}MidM@-@==IPWp*KTsg+2;>9Qq`*E%bZnkI&7!jB8T zF5Fi5P2sm;Ygh=2VO!W9mcnva2|L2hurC}64-O9rj|h(mj}4CxPY6#5&kXMvE)JK3 z=ZEXU(QtjZA-p8KPq;O_CcHM>7G4+LH{2fX2p%w=1?+)J+zBhbd_>u6F;TOU$hTjao75*UnVffSV*WvHOzc-9nyS}yc zCC<)CoXjbli}Nn3j!xL8d1J>?_TR+bb>WDn*2a#GEu4dMs#N8-aBj|{TEOa2ca6FI 
z6=jh{ZSDJXtZQs)E^BLAzovO@XUsjKxODooDJ3&X$4@PrIeq-JnWg39XHT6xY5e5! z;9m>a+iT2%EtAr zosktSoo(&WPWID|^0xMdy2_3{G_&St)9U6m%}gNv*Shs<%bGhnTh=yqvQ2! zqqDSa&AQfRCRmGm`;JbQ&A3}hV@LA}&9S9(V_oyQwsw|(mtGW$TGp;;+pi<$4%Ia` zF`sx4`oCffcD)!Tanrd`mvEE0Dcn?Un(9A~pPp35!ZMqR}fb0u6USH?xS za&8_spR3?@Refqe4XI&upgLGBQirJ{RAyMoRdLl^4Oh$U#?^6AuAXa9$Ee$>+pFW$ z@#+M1qB=>PtWHtKzQiqN&3Xy9l-r%#0~_{oR#UsP|F$=`vYOq{ta56B>W{g1*nj-~ zSeoP4HMg%EzeX#v9VSj}*XnYY*2eW~n^s4gJMcuOu6gB7GgmfEYHnOUb4BCqNiT9S zZZEEpTfQ{jOV&27XsqG+EI`(s8w13!QKQgdCIKmvuDkk zGRdgds_7N>LT~nznJZ^6pEPr-NpD|GZ$R(#nrE$OYMN!z+o0(U>V;m@=}a#^dz{Lh#*Mo8Vs&i1N1Vx> z6?6NlqgCrW8<)2>m$k6rxNU7?`^K%@ncUf2(;n5)uAk`|=W^%8HS`}`$X#Uq!4__- z`3IMAmz#fZ6?e7y2iI}en}2WJC5-2l}9Suz5-=*Xc2L*~*sIW;T*cZ0KN<#>B=IYg*P$tSql&V-Fid zyZ%(hh8*+XCTIiFIyOL0H1_c4H07BwcX2vpHfim{PM6I}=D&668f9PBWM{|Rk#w?^ z<)UBa=6>74{lfjKM$`prwJz`n_c!O=!u`qprIxGn zws8M&|ElxV3U$|IOr~N5Tg9}jY-w(fx!vsVs94pwYHdsBdYt`-b``lP!7jJqbDQUK z_&@QH;%p*p!4V;mII$7gm^e|0L#tr1p29O4==vA`j-i#?%t-yI);p6BMli#wjT)0l0XbOzzeBM74KHnLZEak~ z7Rp6h!!2Sxm@T$CHx`w$)%dEmcGlfu?n1p+nT@o68<8=eWi700H*T!b7N5fnW_lV6 z8(Y`28OUHY$Y5pej&T)Zq3SBtY-MK zJ;*Y$Cy9~0NF!NJn#c;$OjeRrWHo6ady{=gD_KL`U572k9j1$p*3?*`I7A z2ap5FCUOutm>fb>!Ujch7&)9AL5?Ixk)z2mw<-1G$mh zL~bUxkXy-ZR8Kz<}Yk)O#g~qzsW!3U&>KJDdnk!TB$%q zYNK{4QJE^#L7mh^-PA+9)JOfafCgxghG-!T(*blK9YhDyA+(4NrNiiOI)aX*qv&Wl zhK{A%(e3FtI-X9T6X_&6nNFco=`=c>&Y&~tEIOO+KzF2b=uUKJx(l64i)jfhrDZfi z%jrBipH|Ra=>l3wt7tW?p|x~3T1TU_o;J{hbP-)lm(Znjce)2%M)#yKx)*Jv%V`r` zL7VAHx{9u*Ep%_SNo`hFs;ku1YKywJx{unbu2I*jZR$F8U$tHBP&?K2>IQW`b$@lE zdVqSMx=B4qJy<n+RZmk- zSIZR&s>gDPc>XqtM>ecEs>b2^1 z>hW%76>dopc>aFT+>h0YeIc>fP!+>b>fH>iy~i>VxV->ci?I>Z9so>f`DY z>XYhI>eK2o>a*(q)aTUa)fdzk)tA(l)mPM4)z{S5)i=~P)wk5Q)pyi))%Vo*)eqDU z)sNJV)lbw<)z8$=)i2a9)vwgA)otoG>bL55>i6mo>W}JA>d)#g>aXf=>hJ0w>YwUg z>fh==>c2oZAgn`BAUqHY5GxP?hzP_6#12FPA_Gx?IDj~TxPZ8Ucz}3;_<;C<6aWbT z2?7ZLDFhM*G62XxAcKGm1~LRl5s;xkh5;E4WCW0rKt=%>4P*?Eu|T#1vOSP-K*j@^ z0AwPNNkAq8nF3@gkZC}s1DOG2CXiV`W&>gC!ySRl0kRX2oq_BEWG;|mASFObfs_G> 
z04WDD56FBV6+m_cvH(aWkSZY6Kx%-nB@vsd*qjpuQV*m7$U-2CfGh^G1jte#Y@FT$ z$TA>%0*L|H3rHi7MMq?G#%%i5Pp|n0$(okJi8PPqDPGlsJT%@|Vq%sn#sg9LJSbAl#x=6I9 zp{_KN%*b)*Fgcl28cUWKt0h)P71h!D;>yZceet}c`-$kjeO7nOTBN>oeypahEK;Wj zdI~xglK2dtTz+iY6kSqfteDd?nAMc*YBW0w%|@nCn_paq)mIlOt*NSw#>y&^0i262 zgA*<_>u}4->ZhckqOvTuu(+%L~wUKzG}DrKPcjk-8{r!FsMPM#G8S4Ym3! zV{w$$RM$tU>$U$Dv*L_J7gdxe8{yWp5hbdyq@upGrYu>{m!a+G?g`C{R7dKH>m%4W z$}1`>s^_t!x}2^=r@7sol7&-IU0T^t7KzP|6tjwo#>(qzs#x=?sALT`-q=h9cnzve z=&n{=TU%LCT8!N~7Fo=4i^UTwuB~8=uyToBPS>OFlw^%eD%kt+7zq|2@7GA^Tw-Z5fz z4VY+cas7Nf*gMd3L^scx`uUMMt#so_mRA~j#og#JyqgCr!-W-C8dxpy3QAVzedrJ* zWzFka+cx5*vvWR3_`% znrJ;MHa2!eSqbVsFQHG^$&?1%?6oLD6Oeo604nG9A*6so2PF24VsQHnQH&8 zOKL-~HeKl+-=jw;jYqV8N!M)l6Iu;7S!K`KujpWI&e2G1u{I`}BK`vnN1K!@YUkHf zM{3v@m#NJEP7}_;EDk(}4(0~EsF+QK*?f5P8Aj$eH?CmkfHsy;QZDOsPjeN1{;wpVq;uSO; zn8A>(1?u$H$h)#hM(bEbGN} zWieKdpv6Fgg*LIXwCZbW z*nwBqtj?F9rJ+5>mjIPzCZh-%g}cku;{wQJHy`Z`?KZh|t16DNYL2l5ZiKDwaW>T> zU4SlzVq{H4DTP+U|z7GrMeAz3tXe!|W0b zmrWuK+7e1zf?=0p!Lats)Vlg#J&KIz5~+(M+e&#IYv_w=>K2$={37%imewO)!%NKe zOMAA*xip!rWoU0`>MUDq*hG_9G=h6GZE=@0-wVwRCDt`xXiYs9VS7})2Aa^;&;s-t zFxBkJZs|wkyGS-Gv+~z^8QaU%R_K~7XkOS=98p&7iB@WCe646^7>CSargqYXriLb{ zH9l5l73E7}h9w=}-Yw9E>SWcW>*njb1-huXPFu=$>j3+snV~#c@f+H9ttrw2(RL)M zZEV`7ag{d4CZaAZ|LCe@R7k zah<;F-~oNZMqf6MvN4Da&D!LrSIOaMS7fw{ERK{m)N6a`~N(x+0V|;l9(wO zt^Tw#q-1}-C`)4T+DNRX^!nR^4yF=_w}z@>b7Q>}?M$trD;Hfs>8e+t^(bSWu`S29 ze(@PT8k=9v7MsQO#k$Yc=rh{rqmSJB`memElC6D=yHox;bQ)%K>Iz?XH&*10Xm9Fs ztkw)CNilY7hi#wBE4s$`ThL{QF$vuzo|I|Pcsm-0(it1qBX^fB7-K5931Iz7>n(aZk!p5wU|7dJj9#V=y{L|jORU^e z`u<~RI+{eA@yS+EZDZ$ZJmafegv-6eWYfTwbLJW6Npu?7&8e${rkkXmL6>3OT;h#F zPscO|K8N;0y4lx7mZmj+5sl|`Gmb3QPF(QBu(W}laj_MtA+T7qp@f~KX;YP9LH0^I zrNr4yeoC*S(!g#pv7$`WHtU;ur+Eu4yH|l$=vm6=U3Bu1dhO;OJ36Yzb6B=5OO8Mv zpurS_0ZuY8?T~|=-r!Q1Et1&8QH;|%^DQr~sBBA*9huoSK9934L}{fS&^B}_%Ip#!8g=XMda#Z*lpChC zA2WuI<6U$KThLbN%DQ_ zM^Utq-6d!_pt+sxlPlTwD#ku3uv%c9j~%YpS1e^+56@iLSu6Wf4J*&`%9=%bD1V{$ zE-BjDq2j#ewax9?Ee&0{EK<@iPpb-?sc?N!8Ce)fmhZoLi7lQrgKac%p~n>2n8Xh9 
z@$f7WnT4X*oLnPIoDgfrpQV_|;(7X>!D2+mmE@_Wbum3_T2Dw8l@s-*<)&A*giS$g#?W@BrFul3Jn~mj zo#^&;c*btrRq1N}Jn|Q>dhIBb4GQzvc8RTHLF+$ zD`5x3_*qciis7Yx@R7%tG(ktnE+ESCo>`0*|>Jq`o>l4>=Y}glznQ9RYckG zEPjH*kA3V1dWjUH!sL|tF)JjxZ*jCLwji>E9h{Wu?q%qnTD@lX(iqNw4Q%jF9+~O> z^U!}>56PFZfrlj>UBc!|{Y$iESM(g;gJ(sxwp?dRcf-+}r3zhl=)qMxz|xNS^uf9N zDW9blby7R3IrE9KV>1{VMr)IYL6#^wr%pF%o!QvSx+j~mYHKR+>y&ofxFlA}zDOoJ z%EDaq*ov2RdDcnTqAGE0R+d<}#-glMbnP`POHglS4~5hvgNkh7W!}MB_CS%IsyD4? zESKaHWvhJ&W@jqp4b|q=%5oG*?e1yIxVAXT%Cr0Q(2NSH^O9L1I-lLeU~^nU@(A3r z8vSEgwiJKI`R%G_DgD8(C^ z-dXoW|IGbH_s20OQ8T5rwQOrz8evnOVG-GhI+?2_gH9bgc+w-;4;7YXEMBG%|9FI7 zsp6~eN?g8Fv#DD<7{CEB@!_8Rf!!z2&x7N~2FXr!0H%|ykz z6il724UN?#7_S^{sMQ47aX4EXCHDx&pxVwkhZk2f2FTW{dRC7|t<>de_Q;G{iz3Cz zaytpdQhNfs@V>6StqC7l;kA#1cVhRVWw0tbFB+*%PM@cu&Tc6g?52YqiVc!%t%z3) z*v`ywxX9WxyQdXpvpQ?XiQzcePR>C2)Y@3o*pBy7ifY?hTbkItDXQNtbx6Ptf8!2O#l`4{Ta2_h8)E;?q``#ijVQkR5Wc5;nwnEh?vWog7M+El|pmXC#&zP$$y6k}P2# zP%$#|L6_xbl&t8Tq@K?-vbXgiQe0G|k-QTn%X$|nt{_wMeh(_<)94M&r=kjP*0E7T zyW*CreLrf~_b%G%hRRCq>sEaYPT%;~G|@kVDMa%~A=z9LS4g!3IUGvMSpuvp)UkW8 zcuz_1507F3`LwoV0`W1SE>hpXhF9Y*ZaA*+-k?DDZT?Mcpk< zU7CGlVV_40g_BI-f0#l(BUpwM*lbo`gXNJ_egT#9>3$iMt0Rl>EKIMumr*wLbSCqF zV`$mzW^?@3W`llAY(Dg~>^`^Nw^O_I`4N`0_&AoX6Ox#m>d5>-9@7cSG}Iyn2TR` zXJZw+r@>b8$(Hjqil#2(GdJ;M%W0@yP|a?cvLK7+vHLxGpZOLA%kwAzL!V)%coBBp zNgK2ccf2h>^dU%XfvZ=Z9^}s`*sK21P^}5Fi6!}&#PS=eO)pt&xEWtqQCHu^ytLgqD!jHuP9@;1CoMzThUvzRYJY2E!rGieA=$4 zjz65KDKBRiUuw$PQG02f{_(-;K+V*XnJgv9j%OwOB> zm-0&48E-M$3U+;Cv-(gr>lP`KY^bt(zTMPS^fWC}L2rtpjj@d$~B3a@C^DCNUJ7XJTc90aGzu2j= z_MoM{rnE-?+-e=t>&OlJ#$+jJJJa|yrT2_sm_XJU#8mdl$P>p9?0!J}^RB5vN1|xf z^=*1l_QZ>Q%4f@tGIj|gF098n1|_pjN+wCe45H;8A1lPz50mR7>-H$PINPLT5G-n3 zyP}94|L6yRT4&LO*%gHNZbf@t5uMN87Z{f1)(MzOL!VPguAA7(pMB)V($Uud*2(>o z1UnJJ`-NqA{1anili`5MIt`P^wS%TDEBw~W_CXb8?5j;Fer06ib=Ps4btWcIrad0S z*YAz3ZL7MzpN%g{K3mh?2C~DD;>sF)@|C!$%PuWdumz>@dX{wuRGi;i#mZuK_&>j* z3O_O#u5(#;Le(AeRE>Z5Gk$5a&PAP_d)CoU4B3;i7&`&M+eyjSFM7`?MXfnGYq7zm zEsl~=Db-`EK*bq7$7ZTmwsB^Ud6HvmC2GvfQv*9F`|6Pt 
zsX>t`_(@i~Pg7exuc(}ToW+AI!>wFwQQEZ{_V1?a7uGuTpU{&(4vOp=I&;Qr%X%R+ zpzo53wVjNl#ei^ekOTWJ9A)ISjmz5_+gGG*>+H5W``Vgp<7|Z%-II=N&4>52@xgQQ zxtnzj3e1TzlwV6zQSsWw){X4G7@k|>9~qr5en7u*(Je^2y~@1x@WE&$yVR3- z*Tv?_#7~k(h}J_;xvH9542R+p{#gV8ss4*p8v=C6@IvlwI5>S$3lngOA{IKU^uZFQ5^2 zAsN3)=;d-HrV{P9RM-N^`QN3@~YAmEWb}vUS znVV2L+n~{NwHl?_qO-JC|I})|6~(LjtyC7W2TS@r1?wHCJrKE#;~74)mJ>Ok){`1~9a;^<7`!~q3i^X zQU3%2wydVX9SR}Lcp;Drx_F1eKw&uJ9SVbl z!NL%sNEj+G9@vFIE&_5fkj+500NHvOnJ?q6;b`o|L zb`j?Je`%Y`Okh0rXl6jlkVg%)9NVIQGYSRaIkQQpb8)yDjX&pE*v2oDI6snEgT~pD;y^rFPtEpD4ZmmESw^o zDx4;qE}S8pDV!yoEu15qE1V~sFI*s8C|o35ENm9G2wR0qgiD3Xgv*61ge!%sgsX*X zglmQCgzJSHgd2sMgqww1gj%sXcr|>78TJUIz^Z07CoX@^of44Kn#dMF(ej> zVR3*sP#h!<7KeyM;!tszI9wbdjuc0Uqs1}eSaCaXdvTmNUYsCK6eo$3#VO)cahfXNj}L9mE~QIpR*@&f+fOT(MXz5lh7~F(Q_W^ThdLg}AG@K&%w2#A>ldtQB_? z83ONeAXfmn63A6Rt_E@qkZXZl2jqGnHvqX2$W1_Q2679KTY=mL`K#PHv04)Vt1~dY+ z9Oyit^MO_X-4*Bppp`(YfK~&o0a^=mH=uPuqd@C{HUM1+bP>?SK$ier3Uqg%djMSq zbWfl$pnCyr1iBn(6VMevn}Mzbx(euOpe;c62D%T>R-kKut_9i#bRE!rfwlwf0NM$3 zJWH0eUOY+koB< z^bVkR0=*09-9YaFdN0uXfZm@Rw?*yoL(ddP?YWkyy==&eW)!t|3q7D2MeT_~9%x2U zdybF;no(@V#dw}*Mp1im&>J+PsJ$>?8OasRD6Yl$dW~ijwRZ%)Kr@Qk!+{>rjN*P6 zUK%rY!el_8iQ47r7L?~*P!v|#0WKT`>D@LMr*LzF` z!%S#KQM=h)Wb!hA@4BHGMeWk|s7%&r(Tt*YBRg#J!57c?=)s6)6t!#C>FrDfqFtUI zoK_EuLu1Y)&XUCYvA6q8wq`V=_)vWKG9owi=n$o$3q6WvBTYg*pczH&%JF!UOY-A% zb~K}?T`Vp%xu!ugirVeqK_;^dXhu=H?mNO{8vh!b$Sk|qz^?Bg|3%NSc9S=h#>0qa z6ko$~8E&%5ie?nwLqfNUtnJ1djCSowXK{ML4 zYmkO)uuJ}_z{Ny+w`unqhjszrQZ0>WMw@nt(V(4;*=EzOCT2l1+O#W%2Bu&dG^4E$ z%OO8BqirDiWJNRDhM-9{G^1@8TI7Oew2eedLwk(hC@_=hY}!>h0~arDl!9ioX*b{u z?KTND$%tmOY1i2d#mH{UXwNMTXhxfMAI$*9%YtUKX&1|8cC`uCfbvE&+O%V~e9(-x z9k3X4M>E=X>e=3irLxUMdqY!a*~)@uw3VW{p~Sl8ixf1YO}nFIXaRZ+7?)|bif-v= zM>Eqiq$2WrFZ!K{ML4yF(_D zo57m3kS>OmO}i!33pArmyA+f&n$f0R^D%|g^(@}}P|~Jd<1v*<7jV!(vaxN%GBK5V zmtA~}V_9X3Ja%^wpEa9$#3uC31I=hVBuiqZWDKvT^_lL_EQ!Ty!wj~t9f1y}5{N&s z?}lcyX*Xg_t)i<0x`NWf(C)jKNP<108Ex8ymORmnHtnKH?r27vb_d1O=Mqcv#4q_s 
zMKjvY!V=06&1gFpje9~f+Act=e9(-xi_ybWpgp1)ZClYNUo@jlyP{$0J(cQHCX0*MUF?0kSnb3^3H_$d`G^6cpG*5lY-XCa2+k2>;im>Pvn$h-Q zUSiqNjJ8iuEcG>7hR6~p#OctCw$D+iPiRKlS16JCHZvs{e4UhB=_PSF-O!A-Z%{4u zsbY4u%xFg2_o$ajGD)ka)4xn;M%zz$&PB7%biHXhypoLrLOL{~eK0DeX?f|D z^gXRPnxX0Qqw}VEMl;$+phBdR7jnd%nGS!M*9Nv z>lZYmy&9!b$6`GM_Ta^UP{@X6wC{#WnLBG5B@>#_UXT8n`;G2znDEo08SRTuCv&xA z&@rGH?MqQ%Y0CR)Lz(mon$f-t)5+XPS)|?4NwmoLISqSB#-Qnl0oUiBy-=^uXhwSz z%Jc)8(Y_LeQlHNo$}#zvDkn6fy#)nRr|V3D@mV_)n$g~hYW;&|w6~#F>T)%EWQpq3 zUX2-EzT4YTEVUilPDM*IFKpIRFUc1WIRM*D%NmfCHz zsC7d#+7CvB)H+D7&;y#$4k(tDJJLHeqy2CcPTkoXh%1?ddqgwZk3#Kyz-I9tf~&t+ zRy3piSX9nf*Lk8D?I+|LCl2~rls%vs?I)vhYS+o3bWLbR`)R0?6(-WHmJB7C9?fV! z6D6|(axzP@$FfmGP^NMN?dS9%()4IX`}ruD4@b(7`Lt+8`$edjPop;~>ZG0YXh!=M z)UHpNtut4-K|3cjqy17$AuEZbTg@e#YZvaUPNUqWM>E>5zy$JXZOH^Op&9L0qhwaZ zPNtZX;Gq;Wqy4&ki@IB!x^zY~qy0uqA)gT}LkjHV!;EIM--62dbiWMBW;CPyc9cy$ zoyn>7rJ@<_cl9y6k z37XOVBqsHrpc(DYpmeW#eP%SH{W%m(UB+i_;>nhi9?fWf5e2h??7EFjh7mghn$iAB zAA;1_hx+zj-%P!Zg1zc5Y0!-Jw@@vgDb-MhT6o>jjP`f?8lMr(X#b$E#n%nZX#W@m zQ}?!pMq@Zv(3W;t(Tw)bdd#jFewY=_X#X;|=#rJt1Desk4fV3NXmfNqpc(Dop=RpI zOqLSNfM&G+h)P+fKGQzj(A(m_hA=&v(f$j{X5AuXlFg20wEvF6S=U>cgtc98l3SMv z&1nCtZwctj7`Dhj=3^?F(f)6KMU!l2J)s#T+Uv-3Lo-TNOd#tFVk-M&k`o30Yc!+e!BqZJG^6D2pCq!N8Kod5 zk!uI-1)5O`V*fmz_h?3GI{Nnx%_z-6-~SxVDD8;J^dFj0+8Oow4b3PO zqg;QW8Kp8*>K8PlG!Lcv2hAw$idwx!GfGwH-Ul?JREq-rfo7DVsFa2sl>&K;&$8^K z(g=9Wj%Jh=qIkcc8KosCl?F4{Yc!*@2P&qa0rsGn3C$?QQ18D$GfK-*ISo3jhe*3Y zAJd^3rDl}wE1FSSjVgUXGfMlQLYm4*Lhq6*zMRmE(puE(6YZ}%no-&pCDZhj+(XQa zW|TTnyr0pG(taqKrqARaaW^!hbO0*$H=0p82vz%3S>4f$lG^7$)1Vop!%#I%M@$xI z&!D{wXh!Ksl3B@#KSVQ1Ct(8r6`D~x71jGzt?b%sl3!^+GfHQm zbhbgG=N4m>W;^jNG^2DjivKrgM(I4%&IQdVU5J*M(2NQ4oOH3YIgUlte{hL(6*uZq z=~C%3>2m1`iQ#r00{Sq}M}R&G^s!5&tEFqCYo+TXhTVA_=o3I+0Qw^E{~+>hyouu& zS<%A4=Q`Oi0e6i5u(Y+YgRu(#e+o^^2HDfg@eb)O22_+7e(1@~62lKYl?A9M-OqrE z(gV_iK%WNsToR}#Jt95M=!Vjx(qllM0s8C~=?U&%p#M`V`iZBgNBf-gqB$X=NG~&t zB7I)ND88N>Mp1f$(FCP8fig^(`Z5_zhL9pomfn%xmEL0*MfxhxSAa5r7_fuebQs0T 
z@@Qv!bK{x?Eu9l9%PZNhk7a)+tltXf%9F_;j1LZ;TV0nmKBoCE`$;0Il@<@4Tx}P+lZ2mY2v&<=y2y*RgqcDY0Dl-J7}ybLOxPHN8cyli!y=kUx|^l0TL|kw29`lRuZgkiV3_ zlE0R>$=}G|%HPS~%Rk6J%0J0J%fHCK%D>6K%YVp!%74j!%m2v#Dx5+Ts_=?Mu_}Th zDmKNgNQ$f|ibHWKF2${Q6tCh_{7QimP=ZQGDOAGB0A-*uNExgQQHqqI$}nZPGC~=t zj8aA`W0bMVcFOiZKLyIhh|ht30rX3tY=HP0=r*9=0R0x|cR;@f`U6nbm45>IGtggv z{tEOrpuYqC1L&VXSug$@=s!UJ1)c+*08fGEfwur}1zrGN1l|U`9e4?N8F&SF2k=hd zUBJ76_W-vs>4z~2J=t-#*~{O!Qs0sNi7-v#{Lz~2M> zy};iG{QbZ`0Q`f%KLq^4z&`@~qrg7~{Nuns0sND|KLz~Lz&`{0v%vok_~(Fs9{3l4 ze-ZeXfPWeISAc&N_}74c9r!nZe-rq(fPWkKcYuEv`1gQ+ANUV|{}A|(fd3fyPk{dv z_|Jg<9QZGQ{}TAGfd3l!ZNPs6{I|e=2mJTI{{Z}t!2bmN&%plz{I9_O2K?{9{{j4; z!2bpO-^n3c(N2kT#3?G;X>eAYqM{x6_JC7Vw3FRDaEgj{MwF|E?bI+&oT8$g2lf|EQPIu? zdxuk0v=hPJ;}jL`1TZh0qN1Jf^#-S?XvcT?;1m_@Y%UL+qM{wW1x<7}{4QT>q>?Dw zN!mb@QF@%Bq8*TBvNX~I746V#Kn6_%PEpa0u*PIC%!E@^v=gc#lUHV(qN1HNjml(Q zTNABk7dMdC()Dyk(GHKoCZ7bu&xBJ{v}2(3cBTT+&U6N+)$5K^RJ0=+zsc5&Q&hBr zmO_(-9-RTFsA#7wBTZ&K;1re3crY{Gl*Yq|Q&et1tKlZAtT;vGW^^z&=Po=JcBsVnY~?mI z9BoqW5vQo!nI@cUI7Q_ibTBvQo^XoF{pgcBPEmOX?eoDYDvzQ^CibD>_g5-UpkXdJ zMdj&ik_Mcj@;@}p4X3EQfVKvBSvs7e@-iA3uyj4+6qVP|#lQ;8hEr7DL}LTSEHh3~ zc?V7N#3?H8qf>8iipodbvXKp^sCoTBnYH|=aVMdfQW&Vo}^zC{BA2QUpz zQTYLF^1~@AKci1poTBm@nqI$8>(P2S5L%U6a zNHXFS9U{6Iijh5#?v7J*NN8h#*=4~gIvi-*N1UR=jUxHr6dgYF$Q`HX2=r`k#3?!o z(caM1v*Q#U1JT@2VqNn^3Qo~61Z@p1K(7JgGR-lpTl(2?ijI+J-Xl)YF$T>H<4|Ut zqGNkBH8jBtI7P>VZh>aUDLN*13sgr>m~o1ZX=r9B&n!4a$4s;}q1}4IDLQsQ7ZcVm zXPlyAC-gOe^U~rJ9dpsn1kN)664|fnjZ{b3FG)-+B^;$_ZKAIktm6;^-LxD{dx2AQ zRG?$dI7LTgN=TV-ijEpIHI+#hB+!6Ubkw1psocBldcY|<8qhZnoT6iKmc&?zCUyag z+L(${bnKobv3PBmaf*&T(ZN&#X>p2nG$j!o#3 zFHX^M2zr@1bS9jl<4`os6Q}4n0-f^0DLRfum)voRj^ogNIEgklwl}S2zq7n^HT%WJ z47*%c%zp83G?g#tI1wFkzJ z%E-b-%>Jxv_@iZRgJ((0we7acFw&-Mcop?> zXrx&BiH4Aozvy@a#Zoy}DTURIzvy@y{rbRPbi9WGX()Jk@E08)qDJZq|CHe4-PQQ& zB^`g!@d?VMV%<{Yuwc9M7agCYb6@$3j;~N9hi;A`WaTe9zCoo_=2zOxq~R|*zDNHY z`gpu-+3XriSH5~5{|VL7^vU$h!!-Ow$FCVmCaXd^{-Wa#R7%tG(ktnkU=x4Q@ptA|Uhx;53c98ufXqSn%wKf6 
zP$#vcnspLGUw8hZ(~Hij(@k3E9QcdQf?V`+=Px=#sMkmSqH_R>^i;j+Hp|Yzxg@WD zvd+L?bPh$4)b5_PjMMTLog+{obzU+nr1BS?qtUNl_>0c%P%3pS)?Tbnb#O z{lH&zmY`7T8+k)HCSOeD#9wqqP%w46&LkLbw3+ye&iSa;Km0}K0@O-fu4a!cQJoqP zz`$R0R-;&IPso?Q=-dr;{xklfvmWJBYa;;&$&0bED3%p2(v1;>myZ>>iT4--wTCPclHLXN+#hR z`HRjb)Xs-q7VjZi*jf3D&XuT~v99yvFFITDjWb!eJ@6Nut*D&Zb#f?O6Mxa!hB{e^ zA>C@pP?G8Si_UhG%u2z@ESZ_V=v?22NYnEdo%^F?KCmc5=F{>Qod=>~K8@a}n4Q1q zJQ%g>Q)cVTRc_GEiNELsOd%_7q+87;n`>_TMd#s|Kt8Q4nLsA~qVp(}%*xTp6mya; zl)_(h9-D7bcZ*Y(&d6VMo`5OjGlFGEfqe`z^B0{bqjEmoFN3m~zvw&-Wm8XQa%z34 z{6*)PeawGG{-X07RL*AvPv$=jf6;k9s`awd<-}ifUW6(9pYa!+TQIr*6aJ#}QcUVU z;V(L`KlBOA3(v>y{)0qeXYl}8Vmji#%`3!2Np3Gz^!3_LG=X0o(b?P(i z!wtPH8-LOHBFbjnB4v`z&R=xCg2GwXTbYD&<1ad2?^^=;GKMWO((@OcZ{=4s$#&Kg zf6@7FuOrWmzv%n`6UaJ)n94pGIbM`!mv7SY7o8uYXx8;@deO}MMdxQInRQY!NgCR8 zdj6vGOBDRC`HRkNn96_3Uvz%gKS^ZaFFJq3By#Pbz2Gl8f58NLP5w*6Uv&PCiv7)B zbpC~^edjMa|3#ht;V-%>|G-{( zu?L9ycr^_vr{jd3p zuGN^zf5=~S?Sl#YSNuiST2${_wX$ok>G+GTeNj5wpwV-SF-o(Yco%=s)rsQ$%wJ4^ z+*})6`%Fh{xzGFgyw82Un4Gg` zcIH2`v$He1Cv`T}*<5E!ovn2qs`GH2N9t^=v%Suvbsnp;qt4DckJovk&XdyAPMW$) zQ$J}MDorD#X^b?Dm!?!{nk-FIr72UIZj`2cX__fbv!!XSG|iW$o26;7G%b^+mC|&l zG~FXj_e;}yX?j?i9+#%w(zL&*fh-Xnwp-= zH$(a|lQRn1g@xtjC8bQ8n3j{2>Fb=4l%JI{CDE6go9@pV>PzbwmzEL{86T4nA0HXs zOy9DXwqft2+$kL+qGRLYqN5{Y;v!>W;v?hYx-gfA^6#upP_K2y&cnW|2I zUfEX^_VDFS%kyVf{6fMjejAbQo6+5$m6zks$nfQ~%`Bh%AtQN(6HsNzmwxMES(0Jd z`N_;$phd70UX{*-CHytY%yC;g(4bTD&7|8COhS(Cfvr=^kU3Mrp7Qoe@8 zfqysU$})V`NVeug^PIoA{^O%#;$tHd;v=J?Vj`m>BjUr$H<3W~M@A&Y#>YfdsQy=t zJY6@@-1jdQ_3H1Z4EwVN@TReYI)6(oRcKo887W&BF#g4?SJJdf>ZwA)9~%kV{+>2l zA?eSJq#YB@j|B3p-PMqul9b0`o*Slrp={fv?Cdb@fbwPK<%ad}P4{K^vwb;XgENxy z()>A@xnb#pr}(pcgA&_jm%LPNtHd7t%59LFIxQ@*AUDsK8P-2NIVUNnAWZKzX83ca z<%ae0=VztryKbxZI@+sYy?sfkzMR~!fr&%&v-h*8R_06c5`VU6xp8r4}8J7?q5t9&?5D^_27ZDW^8x@mKNgv5U5)q#i zUg045#mH1$MrM^2xspz-l7pmTq06@lvoO-X{@*WjL{fBYOj<(uquw|%Tvh1U$jS@t zi%(7|Uud&pWU9fxaCrHXr}|Q8vPMyj-n&m$n!iF7o3AwzZdm1);+A|7Zn;xJ3J=rd z@O$s1oYWafIliv>spdM&gbCZ;stz0|r;peVgq@(oH9t9}l#G|K_pV 
zb;O?@yK`I$QRICAyQIh7|4G}0w`9rv>_B`01nj4m#o{=7!L07_AIgD4r^zXLz zUGawYZrhY{XKQnPW1oC$_P;y$iSVVQMMwVIZ9F0=GA$vd!r;f;*vM3CCY%3aAOdaM z7?)LSdo7IALjV5rdsJL(LKMAMcx*&eTy%JJLPTukM{#s&QgVf(IMm2f*xz$JROq!L zjie)(!u*TD(|bkQjJA3Cwh?Ef6gAPD{4b_NUo+r_!NdDxCQbH*m7T&8{rNd5zOcYM z`WY|z>VclQgR$e7zp-O4KaezcHPVm!7Y33kN!fY%IX>E%e*AVfey!NhdKt+k{B5UT z>h&7IcT1c5mexO zJG$M2bJ8;zSM>0wuM2dVr0IqY<`i=(MM;yU-qO@ZFG}D4YEgoZp3GeJp;45C zxH+Q=qr>CMmnGYrua)ISbB;OJoF`5FrHRTJC{2Sln5Ua(7}YXZnuh!@sFtp~W7qt= z)+kGKLTPo(Gv88W5f+(mr3kl4Q=&8t(~B^?s3kSrbzki+iQ`9W^%@Z!5kF^iVN`r< zn~=z|N?vKcyUJ8oneQRh`=n{4G>y_z9c`q#_rZO!mrGKm;fyYfPl#$05*1sT*R|%2 zTB?;E1)I#9jc|{Zrg4FA>#gy?mn4wH;pTdiwKX< zl1qpzP3{fzyH(cLd*=6z9m`kDEwDs%U>`5Th^PMWf$ z$*(7uZ8WO=Df!zrk1dRd(;H}XIL#^|Au5KPmS*!O^Y2=^mG($~ScG|qg;}v2Y0A~p z&C}BjKDzU6S8KgKBE!S!86x5$%k>PF8kXy-D!Qe%rH)bb>C!Z#e9>orcCp{aafOlb zkyK1%Y<%hQWN}*RYROgBR4w%^*OSEt(o`T#v-IQ&^(^)q)9tPMPmM2(2v5*@t*G$0 zxH$wODmGT@nak2{YH3wvR$E)z7+IYoO*fU#>YV2qc9WH6HOdlSWpW9Yc4U<^SCKSH zJ-K;$Ry)h3ahb!7Lq94aiaLsp*4k!i9a*|rZm2TV-j+T_CKpK4!U~z(KmO*6V+#|a zDSjjumVp|#47Lo{Qmd@rwv4chB#)z|=@w~Pq^EYP(USHZzv-GcOZ(4Jg)v$d%fdL( zl3Hb&K1&*Da^TRHF45Cms;AlIhn|Bc+&+#P4fHIE9pqIV_tkkC2pQi)E|jAq&&M4@lE$X?jqa z)@-nBvur1ak4e*7XGQy_S8J z{nE5Snl?()CTZHd!E(TIkkULaO$w+Fv?5H_m zIir>3q~&GHDa&bTdPJJGNz-;|dUS*3tmPbKIWJ9A&JMjSJO8_7X>n)EvoR$Dl;}up zIH(=AWrcai@}X9kcP;N(-nV=pO;1P@D;Q5n)6*L)A6Y&&TI@5@v@6hJ|BC~Pt}kU+ zvwTLyB$S?GE#F#xqzIMwjF&AxQ6IlZ(;jKss~2IPaV+m{bmHRY^kQ-GS}ztEsf`FD zW8*l zM9o&4XjW{sSgq1Tn?GD^wOd)qc|n>^R#@V(dW|cyL)>^4nBvdyPiAQ+HmYl{gxKiF z(Ab#xo}tkZUE@N##&z!*8WZ0=CZT6^M6bB0m~xlI)&@pGN3KdJp<9oHI6AV>=;)~K z{1e$TG(Nm*cxbn7@!evgdvuSFPGAB+znZf)vNqw5a#urtzlw}d5qO-IL;ZPIk2NYs#~L(+8otTo0OD{3kStZ~-(iDnDmMH$$vzb|WY z-V{?X2y`mV3L{+Cik0f5F(QleIHnHQT30&;I_&SC(kK zwTrc@NU8FDz9+56h+$WLDiFQy)}C7QdPvjD>#STip86}%>#IetpERAWGN`3wDQB`Tk9CDH>A5{K(kE-z@Wj-3ndOP}6kng*k?A>H zsB3@a&R+hFK;xQdwi+*Y~E+LLp z)P%@{i14(ih`5C4)Rd^m;Ym4}vC$)pwUyDve`4E22BJIHI-kF^W-YQx>pW@tP?|oH zrjOTK7g!fsZeT9d82iyb(wX!b%m8x=uf5TYiask 
znl4Kdf2byq^*gPs*{`?WWxdo=yfq3k&K3Uo4pRzt}<*a#0ntqU`A2~C0 zF&haCZC6D`_FJE;ETQMEhpdOC=_hIWS(<(!q0V1dDAgJ3F;UZc-1^eUfr)Kde@hx< zsg{(?GGK0Ac=)-XYSm4O+P=f6Cev9~qtT3NQnIizx{3BBnf#f+oMBB^l(&kYo7Zj& zu2IMAty`~2)7BBual@Nr6i&ms<+yHpCP3X04!@|kK zNUb-aWO`l_vy)PM-NJ>rdeOBc7a0}EWN~CGevgS|!yf4zoBk}0!r~b1#dxMst>q$< zYQ(I#U8_o8pJ=W#FtMMnz?Z7`WYop!j-7_(`2BfBL86AR2(M@?LPeyA6Fo&gF;t8d z$-*yg6uBZ_%oR6_W#V4(fOt@>73;-Du~}>tkBMi*L2*=^7O#qT#RuXe@kvlnkQ!tO z@&+{wY7!J06dlwds83M;pn*YygN6r<51JI@3(5(~51JLUJm~hI%|W|@_68j^qFgP= z_^;?(^52sG1Tk*;)i`CIw7xukVB&!Myd*mE9yGu8)|1v#B850>zXgtxGuGGm8;AB{4Py@(0xcnNF`{K_1>V~);Fwg2K5f=t#u{*8Aqj>R7~0rL36b} zrO}@s{KE>e>DvFvMr4bPn*Vu9+D3_a1=_jkpKPg3QR`ctv3|sX__6g9t=Z@CD$`bj zT0F96!$zr8H-hz<^&duDe8tw!M2g>BM+~DrQ$>;}5Fz}VCqf3+OzcgKw-YfU-29m_ zjQWZ{@hEB+RoDJp7&GhE@5JS;b?&fUvi@NG(Rx|*w*D--3+A4~rBXvG*Gi?PRIV#( zB$YZv?xH4I-(7m+kS$0b3@X?B{fjt3`U+Ul(DHjT)@PMk|EXI-Y{nX`Qu}XQbt%6v zWiu9P6(;`ww&i+Ob@U;%&1DM_&9>OwHjm9~t81%gyWUpc)<7y&so11qmx@CwPN}%0 z;+BeMi>;xpk*%?D;hAyQoK^BE0uaujVP+l)^x(Pids&fWshVG?Vq79Rq-vd zyf7$-MQ>l~NbODytzR3Klw5X~6@!LO+J`EAb0QZMJ$gk%bPbJ)ii!)3j_lDbG(I9C zCbUQQxUR9?;(LZi$M^Wt1bRewkB*5<=oT8qs$(c4r|{6O3EjGd_UhWLSGVXMk>N4j zS=4G%J_knr%W^n8EB(fNU!PP)IO%EWK867e*jyjV_^QaZ*`p<>$6=V?pOKuD(@yDRM=J=Ah z^5OTyDgGG)lcuLnHik*guH5X$(pgrXv0-liqym3_9$!+Zg{9J(9-hZ?d|rW;WFq5- zJnc>_Bkj_3jonqKstPTM8+OtsXH`gHoR&-vUvmEB$y&VyvTE!s-IuF1SJLRkGOl*b zH`e&Ie^Rn9BXFFIwfRJ|t+sKt@wN%JiMC0$BwMmA#g;0S7E);`l~z(|EtNJ>36)A) zsf0--d@Es`tc8`pPP1j$GHqFePl+&ss>L-)Dg{!RC6)QwiN&miFOW;4l-hNm5j*X& zVPYb&Oy!mlE%w^R##>yWj4%dP#_x&w$(d|DrrZb3Ha^kAmzI>D!QW#h4o31JB`*!u zKF&Q=S$T}_b9}k{VBEjOMQ$KNrPVPeL$AeQIX>3BjZS2iZ8leywnC{yuCvXNN|aWd zrpEqD>b$C51d@<8u140`=1C=boo#_sVv1T08<3Qq6?lJH$7u~w8@Xs>iLpi=_1x2G z7=tuzpi{A`wCM=_$m5+p+#RQlRGAwI%mq~PUfsZZJ!ew$+PJ)|ZyOtUZ}`yuq@v%! 
zno=@;X1E@j5}K>ukq`*LBS!Xm z>RV*eY|36H`}!=+ptQhe%ifd9D5><3%2=uNrUmiUrN;e=94r+A*2Ax**|xOHVnm&< zU+^1E;zUrrO}3M^mu;tPr)_6!XQk3tDs-Piq>?C=VNw~PwH4cW+XYIr$@ZG<^)Y36 z7_R3|ul@c~8Nk6@dY^FFeOsAkWAtWw%l0M&-b}A9HN+?oj>M8Zf%_E~?7k54`$yP;c#1!}XCf#W9!M z@2SlU@cnwJxq%HY85@o?HXNqE6~y104{Bih+4hSVBbCuo8B-+wra_`U`nCOGC%wuh z$%^ekTG%RkOaG+3y4}Pdw79ZmlvP8;!?D<|XvzKM?Z<@D>86$pmYS8e!hY#_ZH?nU z(0JP#w*qMsNae@4KN}HhuPv2Hv^#pULgPjfZ730`t!Z&v^_;*GkKJl_ zF~4lL+3j|RosnL$R8pkEdCj-M?zVe^dfORhq)8=1ucxe{mZSRn()9E4=%MM8r|3V8 zF*B**~r?5A&H`jL8*xtn6)ZR=glch36D(O<0y1_gosHeS^HgDHe zZ*tSLktBaEZ$wf~y4K+_bn?y6PJ6jSlV<3%D|F%BitmPa58>{%Eg=K3>y^x512 zq=g_*)DHGe+UA@9{OjzT09aVp&Zi@Wl}1PFsB?{7br+Y5?LF*0rE;TGrk752*?Zgj z32U*vkDV?hM=H6+_Wt$(QpuA_K9i^Y#!^IPZ_ z&VkGgvyZ$Y-BEUqw;58I$qZ>}%{K0G%QF=|H^Dx!!t*osWIM~Z>+H0d!gY3^RA&EK zDbux5PPI>y3ZsOgvQlQ*Z@eNmIrdzsFhrV5ZfH=(LD0C%;=U1C?gC6#4G%p@>`S$W>R*nS)PSYlslU&ekG+E*$E?6)h= zj$+a)BQ?jDHG(Nc`o*y+TGHHVrq3OOq?X)yR#oRRmPyON5KY_HGO4VP%EEHzO#7Yo zyNq+DRF+ER=F(G~(b(?egk`_q{(yb8{XzR0J7bkwq_RjVw@PKPR2W|^x$=ZndVUJ@ z-W5+BfmbV@GXDJ4iYJP|wiV9`XM-v^Ayf#f{aFILQsH0aS*a}7!m*-A7{xrKb%T^_ z0i`iI@uT(^jmebKQ+b6;hl`Dqx~!U*YXnn6w# zsp!W%eQjW6urz)CxKtG%G49qZDVrn6aSa1{M>R)vhsmKhR7bF*hEzCC)=8yUD(j`P zK`I-ivT3v9TJsP`E!1|@ahRpDSs&qU(f{2qmFJ~$$e0c(x$YmzN|7--OYE*cnwg(# zoH@8c>6xXS<|vn4>+$p@Nnfgdj?j7}V-khWbLa;~16O}k>+%A{Ha?}T#b_54+7$s~ zWKL39#*opN?fi2Qh8V^Ba0o)9L=Tjh*WkkwNN_L)NZg2e5AFb zt@e>NL0m&`E9zhD2-AMrUev#;4@5h-p=y0lEk~>)u86Vqqf&W{U$y`7A?<%HN3l}u zo9xdi7{M*SlYO~?eRXlLRJy_Twe1og=elJ$S=5r)xxvxPaRaY$(eSubc5AQgEowPN zAGpoXW;6mR3~&r4w?VaRA8XqTzKK*)iNP>dH;VXq!A)w#j%0C)Z7+ z@}6UoBgv8MNYOT6y#AC_o|eipQrX43W&bNFS4ruH>(7&n=j4)>J~^MYE3WZImfVj$ zvScwNkeM4Dd3t6Xx!NA~kl&IhwH#A+M|xo6S&lhcRTOF!u#c)J+c+_OP-0-K`HqEU z4T4k9vqgOaZ{6y+jh@WWn95)0SR$1JWkp`@SgF0$hDu)NVB(288UmlX%dtv(i;M5) z*6F#UBdKGwmB6!CnZu?t{t0|wwPQ_L4mf%bm*rr+VRGQ}t)jIp|4BUuQaS55Uq!V9wz%l{i1|Us8;&;}Z#mv}yyJM+ z@t)&-#|MrNrSg(gPDtgXR9=?KDXE;6${DF}mN_StR~~Y3W_Nswe>gtZCbUEY$JbIh zuaDDT)&KoeD*w=aU{qX7Dqm=wd=q+}(%!nXpJ8DuBPl!AmwH2vKR=rxxONJ>db7k7 
z?eW{e{@iq}*&1uES8t`A2TIofOIEs!HRjT72WaPua_cCiZrEc)E;Nx5|OZOgw= z`oTa(1G68w`m7L(cV`^GabL=_j^9}i)E`3V&V!I#BV5o2B~Ibw8EtM?DOn=Z3z_K4 zTkovqtga10u6~Q<(!f?{orY!txESM%bVfO&2`od-L{__V$zxaUKFQK%M)XzK z{^?o1ZUkwX);NlU*(8+@%+-v9W1Xx|xJs+AVkFi(W1aC@MP3!7bIx|o_RbFEprf;s zv$L}cLHqMX?OF_CK9b7EToGNptZn3_yR%10?2DbG;jrpL!jj98eArz(Dh-F&g(>Nrgp9Zn1I>atl-|Csnxeb*4C{(rG(Wojzxp zbFy=alUWQp#c!nYtyCBee=n6wo6JL;89_by*YC{M2ZcXqog_0HY8|PXwO&%|BeZ#k zL0K6EeYgx{{ymQ&mwvUWH4GQ;>jPT-_B8FnPe1DEqg*|@-B8*$lx%PGE83VZkR}g3 zXOUjF48C$eU+JhY64kfVzNSIDr-xl@^T7ImEfA3c=Uj&Q&RNbv=WOR3Czs?uO69Ut z7}ftQm0vbGi=0wh33PJB`>Rxb(-r`w8l;s>yK?F|)0dL3_250Trl;rl^~PTE7a@&N zD7_a`RDm*97=*Hh5O}?72FFEefmWuhUjCecz8Qm)@;Qf;Op8abUGJpnzCKKQq$g!$ z6a>=gK6tn>0iZ>+~kA7AeCy9DR94;`gw^ z?qr>)tXXesV^`|?R?1jFq62dD%D_pPP$P{Na)zB=cPlmNy`!GsXl+Z z@n)su65jckbBFffYovNDA09PqidJ?>YUDt}9g*!mrnV zt}j)6CQ}X34h8Kn>zR||&$;@F&B9xyV6|_E7e9N^BB%c;_`au56&N*m!;ZRs?DU@vUD=T z`HS=Sin9~WKU^FUO{CgXyVBGabxS)#W3sPcQu*l(m*T2XF#*>#u4|>*T&gXM`%#U# zJma%>w5jmfIxa>l<)>j?HWv+}l~h{?c3!%gY$TA~r9uK8S3Sn&F0YF=5-QcU#af?f z+$_P31bWS0aRSgLa*dnAsKL6WTa@(P72fR__D>#nzxsKtz4~LZCs(QwyrR#MoKkGs?k!7k!q||}Cfw~;$npm+J+uydA&hP~`N_8==+351Q(p-~WQ(Wm%?Jm_GQtc_#UQ)ebqidQg z!QxZJqW{ zW&*FC&eJYTOOJ@sbA+~#RLPuOph}mzmNTq%EtBftb*>ds9r7o0bFMpGOtTcb?sDBN z)uB>NEOy=FVpuXvs>83GYSUM31A}MR8dtIQ!Fycm^sPr|KW@-}9H|BE>Xl$)BFweL z^|0}|hxCm{X`kDs|HuQ*<<^}`Hs9fT!r1(AeVZ}b=1=KAj;(C--L8Gc=6m&R#%Y`1 zqyIRpsO5+ej9|1|y33vMqT+aru)^G?>yYb6S^Qp*DtDR$LVL{hl6LSCtHgD#6H=X2 zI;-P4iW&~yXz0Pa0j`oxfy%XBTSX*G^u7tHB+itQl%%zmg#%H|9{RXum)0o&gd^qx0SR7 zcU`wOTkozX)x33X#;U9)8|$T&%+|Xbxf_?X4r4CJ-AtPck}9jk)nSLGa4&yppomF-n?&go64lbR@V(u!C)i`%2`fPW+ zJHg$~-QL~7&Cr2vn=!;(sTN69N_E~QXSTbGcAf3+Zsyczmg;=1iD`ZIgZ!(tu_~{0 z|I?S)e|~`-&lmjF>v4T(?;hh^vChrK=Ax@Ea|2nP==KTgM)zPco$O9=r;=ek z=ZmF!n^c!ww|4}q>(_1W+>M~<$V1?Gw{@WM!JSvqM%lD_1T-dvBBUxFaiQ*>Z9D3|$vs!QP`^XJP`_QO5B%E~>h5`5 zsJrL87q}O?3GAIx0Y?SIIscx3)7Ovaf+uYmTkMiFg z?w$P0lg4xrTcyg!c)p#VACW3go$0$+I>cT^3)t45+^D4fybZu7IpA|AAJ#R`#YPz?3DGAK 
z^)r~qB`$CBxIG?^*HhP1&vU(0pO@+}sh*PRd8xi3)%Qw|cTb3?X;sI7r%Ni%uBeWvzp5JL{%!Kki1AlJ- zqh?02PBPt}p)W}olc8-h`F_JbSs~gEL(;QSGPrLtH6$&^pZPb}UgbM)7txNBq5c4A zNdNvL286`5jR+a&r^1YzP(5+_tZGTOJ^cT@+L%IZ7|nNZg{UWbMpYU0(Vj6xeVkNJOZA){^;ZH> zpQuHBl2p%B8TFLPqRv!-9(DhJ>KG{LBNF*i!wmZUlH-8L%m1O{po$EXMxXnHSxE@A z`3%pERYpI@lS}mTrFucC3~A|sE(W4MQ;U9qRJlK;iqq=s%A(I~j2``e{i|fljbPHW zud>%}2n)ONt2_1E#M=^U|Nlg+165lZ*?AQryU??^%E;d4Swdu&N%c*szN1I>-9Thl zYLVsI=B+9tdsn&09^l^G10JT_n9L%w<|hBn*Yy6C*bd6i%g*OZ_5X)r8`xdD$dW$e zfeNu*9PGd5Zi~f*m4o_L6xz6^yu3ek)_(yJcB;3d|;ilp6@8=Y~vS3}p0_PwJ%}l5YIAtK5Z!XK#gdKhLwC zgW4QrA8nn(^PE&a50v>Z*BhQAo)1Fud{j1c`j7k+I5YGBf7a%zt*jk=>4?UkSf$aIn^RZOF zlj`>smhS$1VeX$V?6vu;101bh13CKI^D~#eo^L$gdcN~~@44jp!Skc%vgaqMayfEY zs{HV?RDY4`uTuR@s=rJ1kFB0xJii(bqk4rm$XktRvS1;DwYxR+xmX$8N(Q%+!R__w zvT_f38n@FKYoq$zCEP#p-+u~~YZYy6(73t7m!qw@mOj+sts};GE%rq+m`DF*aM0C{ zVtDIlk79UT+>^=%+@`QX23IS0(}maL_1cbl>+zIN@HH}6zw08nI)VB(9})D1@Wi&a zp|_C?R%CE6hjz)7inpmgr4p%tt5y3SnVd*0g-gtT!uSJVV>C>y;4&F{2mfnss__}r8&N8^Rb{YFGJ-zGg?(Inu z#?!mrUNX2&>B5A!k9Pp~`FZ<#`+57zV6zNn@BBV6sHbt9O_IS58SIq7E*b2W!5$gxmBDp4dsDorUZ1xq z|C>_h4jEie2463O8%kKT10Z;5!`dx_L zS>8hP5bqorTwez3_pk(q=uhkp;yVd@54id}3CkZi^SuiK2Tn5?+(?@R)eju~?BKms zI}8?kZ<+{#QK=8OZLl-m>$z z_ck9c2@!O=1}w)E8PJ?1@8@$_o%@8$0DJQ*CLos8w8@+V{OS?~EP zwjSkuReK~cI8NJo;n50Pzu|pL>$TpL!3pdB`NNpr_q`ufSm5-2?ERF_8_z9zKa;^7 z0)_cPE6kVPuVipX8QiI?FyDGFU6H+V&nN~$^t1Q3D>kb5K;pnFzG|s%^}43voKW+8 zeA|)`HQ+%bv_=ftf$u)5eGo6;9Nxgk_!8gY5`M(5_+5xPq*bRnyy%L77>|iaLJEAC zjC4%HEX>ADkXV3Quoz3R9GmbwPJ#WKN#D$kQsx`Lvt;Jsn1D%0Mk>h($cevuz$0;ASiWd9ageGW)7Vseld6;_R3LV(lQ-4$^i|M-JMAgErx~6~vmSc7%hr;2^G! 
z`$23S#MbdJ9s%)nJc_;8hy6H!6F7;NaT;&oZM*~G=J*Of;4*&3uR`!-f?%~-IBTFL zs2`^n^-v!nAm2{n;UpeT^6VtPPWH(iox;gJo$Sxa{#+bWF5>8l0&(DOG~r4>dvpYC z&qdpFah$jYVxZtBO~6YSefyK}Dw?aN(^jo6H>AO>#g#Z7+P z&sa=CGEzZ(d8Q!~eq@7s z^UT9a+ynCE*@o?S6g#jRL4DWd zxUAa*&CwErK-ufwj%V;HNUxqMMEwrvgf8fY9_WSM=nKkHpLo_E4C=Z*bzPrhqW&mQ z-}T2~0w`;J%37bY)~BrXIY#PFMFz4!yzA$Hc2J)-SbrvFf%4bC2}PKP1-KcDz;RT6 z36^06$bbDiaX0S4eRu#5Vl9fX0h>TP>pz4?Kz-JK3_I}zp29QOjlI~919%RHa0Ewj z3@_m%PT>sB;XGc&>v#ii;T^n(5AYE_!Dsj!U*cS7 zhG7IoVGPD$0wy6DsYt^VOhpE=kc}MVVFqSl4vIkgZ*Vhi#S$#X?YIl~;C?)ab=ZK- zcnBP;4IabecnZ6)7aXe%p2J}r1$|0`lb}Coa1IynI^M)P;23N05kAG|_zK^GW4pm+ z{DR+w2;tZZF@fVQ$-2x$uHA*3~^gOCUi{}B4B5Mm$F5yU;D zJBWEm9}w>lj@J-k9WormIb5_yRu& z(NLfonjjRNLD?Hp=7yBD;TYtg5VVhmfaWWR;4$uZ0KZR#O`5RNV#+0e? zXF@co2kN*qXj)A7sSJNl35A3h$ zNB9bqwV4YcXoL>v3DRhm20w1Z3fznPu?x?E`f5fR&1lEXh*$ILLA;u`0Qqe`2IEkG zd02pTcnGwG=G1X>^4|PakWO>bX-+yVoM?cCpxw5h94&@{&$n2L<#-sBrv>qAK^iTH zU5j^xXjuam*uZhoG7i*1%ZZqRsUUVOSAsfdNjzE}!BKpPAMukAt%yad)}S4=V*jni zVm!!0EAr5aJha*d_TTCmT);bc&$wi0O*?2^6KxO!^3j@@v?eC4iAihHX-ztFHd_8(ps*MoWu z?}i>oMJD_p58>n?oIHf@!68t`;nYp|Kk$VR5#%Gn1!5D?6@Aekb8s_gBM}?19mF@{ zEGS3BTOh8H!5|)y)N5oHkdMeTu>Z&#u@d*;0qnwaARm#`OC+Bs1R|;#!VrOE5WgtO z5JkI;;&V|Cg8WDExhV1<^(nr^_d-Nd{%AfQ-2y`~7UMAww_z!s#C{yWd!X#mls%>f zEU=*+x`Vomp$=kZfHo1c0o(8>UdF4SE@Mr&4s}4;V<~%VH%tcYDK-apVl~#_Eqn~} z9!Fl|C~F*rk0Z}<zya0jTn_78$%r#;6``_F~wKw2G$b%zeP36#0Rt)Tzt@GK7E zM)rqjF%Xoq`)J$?%GZ4Z z=pVWh9ap8!iyXzEi;aeRDy*^xXyOrC)W_0^9Z*gA`EK ze#D_4ap*@J`u!n9{~BPQ{l|hl^=F^`_u&YR3Ne5<4WQi&XbJW+fHpILHZy?t2fU2a zLJVwy2vGk6=ixRi#cOyE9|$psZ3eN;AhsFAHiOt^5Zeqo1-2V>QHa5`%fao@5$o^} zsFNX-V+ipWLYaq526Z(g2gGa0VZ0#3(CgrY8%dao4D1B)7)qHEsmsJ`P!mHznG?rj zEw%~%BLefXPT+YzClZH%C8 zj97+MxEEjHGT7h9-WUQhIg-4LB;O;S0BvwoFv#~P-XApv<4_FpJ8GK{qietd8>S%- z({U0PKzzr9f&GtZ2Z_a40^T3<4ydQG4bTFuKssYdXDsQAeG>aY8yrVo#+hM7KMcbN ztN>*icR#4paqM&4MIpvhcjKwM@zmY;T+GHzcpfi-JWe3333YKjDBA?u=>+O|!e%@M zVlm-6{0ibRF%s?35u`bhG$)eg#C@P$PCP2aBrh7HDM)Wp7P9d)D95Ddgh-++NoKJB 
zB-%{URAgW;Xje%u2$5V3H9`AI?tuXqggdbsYw$6?#UqjE7vsg!9dWt#e<5Ysw#!dXzxXKj0^C{#+0re*tELIQeNW{)Het|7}jpYr?9<2Ag2w?UozKg1^> z_WmzH?ERFyFX3gJ!7F$b7x5O}#RvEppWzF9 zjqmUSe!{Q#Lx>xzK|u}FL~U4LhYMa@j}SCQGqgk-gdq|!h(~*LLRa*_4d{yj7>q=W zz-WxaL?pw9DVT;V+=x8Pz%0x`5$5A&+=?Ywj@xk;?!ozVp%{))7>fx=LMkR>Dl(CcTues+W@9eqVIdab zHY~$R+=*4V538{j>#+%2@dzHpPCSWcum}5a1jIAvGkgc)lS>S8DSNIPk!TOfm79VL zQ17`*a2MEb?m-;GOCYX!#4}Gt8^nOv2gE-_rjRT-v z_wWyVA;b&^>LUcy(+uio2K$>Ku^5zf2C_iFyk-*nnZ#@6^B})7FX2Za3hcNZw6lT^=n0O=f+-;H1+<3((kURF0@5ki zf!)}P4?sBzz7k>}yEoYX>_p_C5OZ)p zXa}=5;RT!m`It>iW)qXy#AFWX%psjQq%)^KhGQgVpa}CoIp#cxr|}Vp!JKb|xXFfk zpk8n4hJF|TKW2hh+_VDsg1p@H1Sro<&*BqkX?!Qd+?t?n<~k7y>UM5C$j97CNCq*T zOM9Gq3nZAU1$8U2q#7 zt^s+FSo@}SOoGhZzqV&yf^S6J{DrW8x27lncoF{Kz!$CfpW|*0CAmP4B|1LdYyj}}rs)6>j=sJWU9_>(oc_8nL$m=4?x`_H;M4lIs z=SAfC)<$TJP;^Is48(ja!7@CF{WySk@fkiBVljDLOqmxqKsQiViw7VZ?0az`9>7Ly z#&Mj(c_D72o!mw{xh)cTAV#;%1#!LY7|81q%Cv-iEOCN7Eg1=7x`Z^BkmeH7TtYsU zT*8k+ENu(wZfOEIc9wGNEWHJ!wUk&dy&%Lg4=D4pCZPXVHU{HxJ7^cnR)hR4dmEH> zc~g+?a`L)78Pkvn>S6f~JdSVi3w{$~1?5@M9vx8xj++&?;W<$L6(@vPSql!hKs#AU znO2Skd00svR+5L6r|~-85aM590|ug|G1wei7o%R)|6j{FsSZpnP}k#$J31%6aGaLfjP%%6C_1&_CQojPIhH zchN@fdI2vAakq*(An$k6mhT>dL_CIF*dxR$fonj1SKR<&v1%w*;9d}mRn);M+S@AX z`JOP)w(n_&LeQ@7AqMvx#c7-s;$Aa6s0+$+FJ-xxvfR55N5HoC{vpJDHE=EZV>m{F zHg(^2kjMLj!29=8SND$vF}$A`-oF()K^z~b4&HyDHmH{ehJku{;6ZE#+dgm!zY9T? ziq*tnHE~!?99G|l^W-2o9 zJYE9zzxH}GLkr|#4(5Vw*0Rl7wpqtE>)2);+pL=gwp%wHw99qp@v0ET9ncfhNipRp zCLYC@mfC#924ueU) ztfCGqNJkEc@59Hy{vUoth)3$7DVl@#A1MU&^vG^JkHbQ2Bb{xevyF7N4aR8D2Dg!y zZ4YA`KEhY{Mu_dLL7BG4f;!#KKDSQ?b+?_m+fLm*S{p8SFaeW69v>yGN1wtoploa| z9;2Qg>xOYO3na33bVGT|3EI_@AL3&no+6#6Narcid1@gj=Tj^24nD;{gm^j*T|hjZCO=P; zpQp*s(;U;!kl$x0+cV?f12KK(X&k_FLhQ2RdNe>b3P64Dx(~FQT^sQ%UIKC5brPrW zCMd@)_P^^(P{+Hz$B#nnt_B4)P!rU_ZVPPajx3PR-K4pjxbAVIDO#X4+9Co`xB-JP z9Ms<)>SoV)Oobn`&pr8=g*liD31YdY7)L<6-t#Ih;w`+74?+I-P!D^)6Jl>2kdM7} zQ6C(qdm|Bpc(g|++ydgim;CSLnA^J<9bbON{pZB*Z=kNP8b? 
z?`wkQpbhNn3XbW0lyzTU3;<=`mk8R~KHBTPG^8T~lzSiL-j|1a@g!(7`-%I0Vz|FI zhF};UmP@DXTl2R_GF_!gIN8Nc9n zAr1z?gkW5Y>tKcrPIyocw6%kcK%E~XzXz%FgVgy!>ii(_I2Z@o`@xQ&T_5a@Ug(4V z7=)o1j!_tk2}lBEJUAJ&kAs=WMlPnK0JA|GIXDl*;@~3O24Zq>CGNy3(Ebln#|PJ9 zJvM=QK1e+ud=xuD+z&p3J=l+fID{8)3>+^9PvI=+j}E>D>gnLycn=@q6VN^meu;1J zJ$}T`_)Q3^Njz5_Dy~5-)PWTaxKS7N(GX409IX(Fa6};%3Fv^%=!Tx?jeZ!2AsB{{ z7=!VcgcPJ99U1T=2l<$ZLfiz21-J!^u@ozC2kyqbcmQiqjE&fWhp`2t3g2x)I@DqV22A{T#pbm zMl-ZT8-yVeF^ETdbV673zzyh&0T_%#jKFA&!$c&*hbfqbEZm4Z%)l(nK@sNTX55M; zSdQCq7w*CRco6Hb0h{p7(5Qlpj?M`Vh^bILl^M@*zaL8 zJg5uedYE_~9*#mR0I@ktdpS%yIZT-jQ{RWF=fmXb2=#b``a9AT)XxzcXD`sknb0=k%K3(9|!Oaeg^wK8i97`fK+6{59;YC z^>dW{9X*BDKv`cTb}tgM7m3-6%@79CcyTaBV=QK2J{Dp<9s$SAi=^@5dmvuNi2X6* zb<7I#d#nd~Aq5$rogKRkcYG)Zg5QLA$&H3+j6N8OVIX!d6@of=iFmy97jR zI6*8<(0)$PeonCe6FqSQ$ioTpaDqIXSOxZfq8JBp3@_m`&<;*q65^y24M09l5|fj} z_u&ET#|t19FTagX zKweH&2jw|c8*w0wQ{6BI)Xga$W`nvtbqmPHsYkH`#Pk&H@zi-xrc=M-cOgzU1MTEA z?c{V{Bw{$GgLZvdVh;|1GM)YcKY(NQ%=O^&XIfwYMqm`k!x{2$hCH0v0``CAahw4A zKSTYTVgF|p1cQ1#+Xihh5F;@f4EF!ZSQKIbZpL~%0`l<+_3{dz ze}#HEZ-xhT(I3R`JY_gfyFAb5&MyM_KhNjRlmGK)@dn-!;sWKrz~?Vm(E&Yi12T}0 z8CZiYcnB|nvR|O=7rwOiDEpAZFnB!`}LDT zT&#{-sErtKEL@}=UnIX5ry>LND;HOTa$o#EE!_uH*M<5&@XtrnGIwUCfC|V^Oc4j7 zfPf4`Z~!tC7q}-XqrLaEou%!)=WXv@ws($9O|x8?nQ7|(<@x>ZIj?)qJ%0N<&+~qs z59cnvSHElA?wU{8joNF>yf%_J5*Ur22W!iD5E-w1hP}vU?U5i@mrWshU1z3sYOK2% zJ*|`Fx*vmJy=SiX%=MnRULWh_zrKklc!8JrF9?2a!LfMO&l9L%1!n&FKEC89)&{`_ zd)?qJHbkMn4O21ehWqi{4SLafY|_IfJ#4byP4>H~n(MfYm3+cCd=~_pThgA6jKO@HtGE)m zZI;_+&)vM4EkW>W5`!2*9j9|9?)6vi{jblni~Sr3f-UCTGK_TQV$Ln~Jcs$Vyn&xh zTV=e}oLk+<)&fq&`?U31^uF~&zQp_VTQX@3=R7V)zrTHeEd1Yb1lu~$6}xH+uPjswwE~=1i!oM-(~RoD8^IDa#nCVX8GMLznkTc4D#{WKhEVcu3$Ca@_i6& zcT?N@p~vm#WaJvk5^d<#4?zj+tzT+C~Wyd$z%g*j3;Ilg$I0v)tl*3Lr z?3BZ~=G|{rT?S*&YOcj-&|t|8o@& z@DO`B#E~G_<1>4FW{=P8@tHk7v&U!l{DIHy*%t(Vxy!%GspM7O!%p^^W3N2+ntAU9 z*wx;vk=Nc0IP$*}8Nfi!;$kl4Q{?ftnfBS`zFx$$5Hs&PjaPV=_k-Y{Hgu*7)y!lz zkMJzdW0(7z(*pDCub_%sxtn{j_x=0v*?&jjHvTQ;VV>c+AUJS5olx&UEelwLUJmH{ 
zz~@15&OVBzbtwVb3}2IfuXCN8I2My&Os66lSrQB|M6mj{FZhJ)-83eL)x`kW32Ka0@F~ z&sMetVR#Bd7)lf8aRG1h37-aGvvzc$E5(#kiDx$R%x0e1><4~fQxG;EOeR@)Uh~Vj zlCN09x*$9zh9uNK<}5DeQod&c8-uV#6!9c7l{w7gNnYgTAZ*!;*0jZETKY^&pK19V zukl6@9@~=kbflV@%*Jz$^_*ip=eQP_^SBO7V=nV~fj5yyEB&<6Pb>YjItTr>GFz*) z{EAGEPbG^SZe$gAvYSI3=135>4pFc5XvQ#}GAfwJWc1OxmRabh^#T^Nlm<@YO!U(F zd@e#yt*_u}mU9C)V@Iu5qL9nQz_z8enO6I*5i)a_9czsWFXVFW@{_cw&rPj zE4Op>v-)l;-?n@BI|$pU(N2waYP3_Mof_>{^CN3m$A%zmuebKYNGFqQ)M&3pdo|jt z(O!-AM?brRUC5`sd^+f>!zc>z{0?I|9E2U+V#l_~r=xs2R%3S^-BQPe)ZrN&ef9*o zp3saI*#8Mrm_`jV*}`9#=>+>Y!9F^TK)p_Grqeh|vDZ!y^C*w=6h43AG1%FO$Dywi z-{LdA;48ij!p?)qA(wpQ)%jz-M_!%f<@dD1lTId%1bWk#$9RcXc#SuMu*HK>^6xR*l)K+F61h%MbF*z-0fEG<~|-o&2Dzp?MXhyP52$?(C;;e5y{9P z!an?da~M&=1S(N0Lam6|%tOtHI+k)K7o%>3n~1odhj14WPw+G^V^TC$UoA%5UKCT z%P@bWej?S4+{r&d7^PlRThxnEFG{^A^`c^kBatNfk&LXOWEG`el=m%4y(smf)QeIt zY6qnKMU17ANlalnGmv+*oTJa;a<1nl^c=0{XgxwLu5Y(T%!+u4P_qxBtqI0$1xjzQls`i^OXeq*}hy^9${D*BGeAParR=sQN= zF(pjEoy06<1$v74oFCYTx-tI-VQe#6a2)m;Yp=2P8QYC0dJ>EGK31LB*)*^U_Z0gi zFY+GV%~-pL{f6)PiA`+5tg&jwnl<*%AnawPUIWQyF8b^x(_R;IJ+ka2%U<^1%l>=m zzn3g~$+DOI_p<+9uOic4KL%l3fZpSVFaqyOoDAZoV0UqL7pM2Qc`T!m(@`_-T+ZhT zZb8ksCwPIEd5t&me#EUtj&Wb{E%J;rXPi0XcCar9=a)+(&|EC%BP>Q|QM4+)F|> z@<^D23=$S1g9Ptf!lm56N}flq31&<9gZ&)ja1bVj979WDP$yB1L^TrC@VnY!Vi98) zO9^Tw)-V%0PHez@iDz;S=Wz{cCq9gs6V04x=0r0mnmO?!+=Y9*XJpo<$2!541LVd#|(YU(C0_i@-rK;)13Y;il2Ou~+{42>#*;k5bI2{}4c_4-fd0?gU}JIv94E4y*`{r?NXj4;H(B<{c9ty1AQIjH49y zn^K8cQmT=C%0jNgz6W+9hZ&e*;O)G?o4ARAAMy#GVV;5dAE^I<`}h}m4m=WsgPLJ) zgXB8M-Uiv*pjhHbM9zcyF#vfFlJ_8a4@zeQe%1_<#h@C_;Az;|;6_fvGY6lI83$j;C0x!`T#G#X{(3n0H9lb@Y7c3S z8HNlaj}l}$M5aR~Ad4so+?GSe|@G43(F z4m(M|1ev9~?R53i<(BRy)9=7drr*Z{m?8ar^pMe#9>^-AH|k}0KQr85#t7VCMji!B zVh;9{VNV%LsK@>J-hnXV2J9$9%?vd&?&5Kt;dx%9#Z1u%2I#U*_K&4#E-b=|v`0s6Ao|({T?Y>}Q0!Biz)86}-xa ze9Wh)F+z5)iu){3bWXUG$S^mdMyuvqp&yTESJzM#m9qi`c zAk2;>krMQfZ4cS9$bJmZ%9cg;8@!F1$o>F%WPi&K$UJ);o}2AXviBhO>;oJM!W`M; zG^06f=t_5@v6CEko#U=^x;arP 
z(~)!DWn9g*T#q^OR$z|2J9v~Ad70OElXoyro_X?Qm-hodv6l60z|QhEC@I!6fEjm-&7+=6kpD&6RJi{0q4R`^mST zd~@YrhgtIP<~|?z-#^55oN%$9Ga{B`I*e+S;L{J+?Tc?w!!o`Tl2qa%J+ z6!atkvlN_yT@?%=jdU`}!hQ-$D8ozz)l9)VQBX@A4K#5E=W;$5VZMSJxQlyvfQPZG zf+u*I*Lf2&6_}~OOa&kEF{}BJ_58vne#5K<+u6y%ARN^JGmScluGrZqJM(=h;ixzg zNunPk$>&sx7>oHv+1aQ{>}-^GY1A}ou%A&+vWC4ucxrcKa_S7sca0 z*!Ad^w8M@^pNKq0%VV@WM#nM)^N$|Q7{)UZbB>r&OMj-DZHH+L(Q5|lj$b3bfTl5m|;<-g%@FQ|8+J?M~ z4h3OxKy%t)zGAhC%~ae2J1(~4;zZ*xu@!PEBpCB9~<1uC%(+@Sqq+`Z0Ipm?<7`q))P8Dj7nanigJmxCy zV|5UYHP=|r8k>w~jm;#RTnf}BlpyvQrO&RhJ=;UFA$ zEXUK94s_xqVu`0WeQ^)t^gd3n<7%13T$XY!7jQ9h8Fv%6ayxn$C!=wnuqg<~H^Yv` z$6z<(6G=j5;|DMZdm2BC4921Ec$tlt+4yRvQo~HJ~opW+R^=Z_#PF=I(2 zo?YUZC7H;wq>$0L_mT?SL5Z6$F<*%rD49(iOK=M%=W`LVF1Z4Emv|pa%vy3M_i#TC z@dz*QDtak-n-BPyPx+jU9PMvEhl0@mj36v+&T+KHJ(t=^X?GIPf2mt2O=kpdp|k*V zlol}+d6YKbW=qe;ZIs$esh&&yyehqt$N3}(%hV`~p+BBg=2^azC@d>R?qzZ>lY5!m z%c}AGvKpH3{Iat+mkUs%O#Wq_UnZBb<=lW=5o1~ccc?N zNunR_uUy9EsSG2JQH*8`<0(TX<)^a}?^L;6mha-fAgpLb8`^UMojI8Z5>cx{tqQd& z)T&UcLamB)-Q356Jj}Da$$NarCw#%ze8&%LL>`rU`G*7O zxvCj0I2Jpoibt zzbf}vbvK^1-^$?HpI4|=WW~q9I_c2e^x0s>IO;x$6s$bZ|cI00r|0=UgG|NP@ zOf<{H6S3!sCt=qUW^5>=d9&tHu5XK@drEE z9fXthIazO$M_}KR?RxU%s55yL>P%K=@&nl6Wc4OL%l|O<L!@|yY;?s4khK{(C5O>2RDPLtEL&gfxUFB0g( zDGVcnEJmW{v^vhkuBN@r2mHi3Hn52;Y-2mSkmEFUr>i$zz3J{}y8D@aBI-^z`*gEU zH~aKhGGXEhbFG&Ud%N88$5gZ(P!@C-yp0pQ%!SP(jK|in5)L~Ydp8cTs7vZ zF;`78Dag1cmjVhIO9>OGU?P*zLyfF!^igvb=C8ScOSyup(O=C!=%rQV+*K-rM@)kd^BM4`N z$aRK!XQVTZQp%~sOf$?h<840SS7bR;&olKm)81z855ifVGfV%o^gqk&8%Q0?s#?^uHrr(=Lw$THQwhV zR`WSu;pgh?_58wSwxa*pd-xl3%sv=|zBewMBab*_ z1!i0jh0iYNjoBC2*@6@XlS&pN$>&s-(!gn)!C9;g!i8gy!NN)=Vvh^!SVkkK8EEV>Id7d^>yyui!6!P~sY2Yi9d7j5KMeq#r_`3v{7NWP2ZySM|L zI0^e$Y#)oGaZihrk?rE43`f3;b1>Ut884o}B|MM%i@)Uu*03J8ym&KP`5iqh)Ua~@Z-oEz|d`%bfP$)h~M)7Zh1mw2C# zSj}hXWr=xxH(9vkXfJ!) 
zeeq5$%Oo3nURFRM#f(F5%O+5RewJO%D&(;2dp4n7eGA&r5qqt-*Lrp9)vH&pUOnF- z7S^j*Zx{8$7)cRkt#@1Xvzf<2>Np)U*PqWtT*{5y!foiI{t+JMDW2s;ychK!@(G`^ zk0U|Y&jqf!R!r{OvKGJoXr*7fE#I8!AkDrA>2#D z6S$WK{WLt!*X$0$#!kqfF^zOa;QeaML+*|8Zd9*Py~asQMjnleQM<8$Gtfh$du_Y` zchGn%tGJ7M(MO{`8uigAmqxp4bhC|awsAF|^A&EU@i+cPMvedQZxA-Mq!n#w&k5LR zQxq}8kw_op*JPF^vox8d$sA3ExSu8&HdSN4P4?SlzfB7;Pm_6?>bV3nH2FP%qklW5 U+5i1xM2r9X$N&HLVbcx&4~8WQRsaA1 diff --git a/README.md b/README.md index a4aaedab..0b2ea2b1 100644 --- a/README.md +++ b/README.md @@ -41,24 +41,24 @@ LFLiveKit $ pod install -#### Carthage(1.9.7version after) -1. Add `github "LaiFengiOS/LFLiveKit"` to your Cartfile. -2. Run `carthage update --platform ios` and add the framework to your project. -3. Import \. +#### Carthage + 1. Add `github "LaiFengiOS/LFLiveKit"` to your Cartfile. + 2. Run `carthage update --platform ios` and add the framework to your project. + 3. Import \. #### Manually -1. Download all the files in the `LFLiveKit` subdirectory. -2. Add the source files to your Xcode project. -3. Link with required frameworks: - * UIKit - * Foundation - * AVFoundation - * VideoToolbox - * AudioToolbox - * libz -5. Add `LMGPUImage and pili-librtmp`(static library) to your Xcode project. + 1. Download all the files in the `LFLiveKit` subdirectory. + 2. Add the source files to your Xcode project. + 3. Link with required frameworks: + * UIKit + * Foundation + * AVFoundation + * VideoToolbox + * AudioToolbox + * libz + 5. Add `LMGPUImage and pili-librtmp`(static library) to your Xcode project. 
From 2ada4667263be66ac5e9461881e5710beaef2543 Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 16:45:50 +0800 Subject: [PATCH 24/39] update version2.0 --- LFLiveKit.podspec | 2 +- .../UserInterfaceState.xcuserstate | Bin 18429 -> 18714 bytes LFLiveKit/Info.plist | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LFLiveKit.podspec b/LFLiveKit.podspec index e17a0a04..49a0e5c2 100644 --- a/LFLiveKit.podspec +++ b/LFLiveKit.podspec @@ -2,7 +2,7 @@ Pod::Spec.new do |s| s.name = "LFLiveKit" - s.version = "1.9.6" + s.version = "2.0" s.summary = "LaiFeng ios Live. LFLiveKit." s.homepage = "https://github.com/chenliming777" s.license = { :type => "MIT", :file => "LICENSE" } diff --git a/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index 1db0ebd35c5b5cb79d95a62828c4d5a404f9edcf..bd82a2d20592bff84cdd0b3c48894e56ff60749e 100644 GIT binary patch delta 8988 zcmaKQ34BvU_x7DFY11a%vovYarfJeNX_~ZYb_8S*q%5WETPr1<=#6pXP!B8=A38FohLKF;yGZJ z6e$HN?x;qO*TsMnvs-wD1DL_g5N6oycMk>Df{(z*;1jS7YzJR}9pDf+432=K;A`*= zI0n82=fRKQ0{9910j`4^;7@Q9`~_}-+u$j91{nyT7)oFiR6-R@f@-LN$uI?GLJQ1- zR_KFG;482xYzCXd_OJu&2s^=Eupcah#jpepf~Bxr2ZzBb_zoNo--A=(Y&ZwbhfCoK zxDu{`>)=MX6@Cu4akoH1;0!zq&%qzyFYqe-75)bQf)C*%_!vHcPvJ8ZjzlN|MItGZ zAvw~bRHQ@c$c%ib33>%Jg#pwIwM4I>HmEIXk9whgs1OyS5;O>vq9JG~sz8-!BpQXL zps8pYnvQ0mnP?W8jpm^F=p*zo`XqqXq4j73+K4ux&1ehy6n%zvpgrg?I)YB3@6ZMG z6S|0gMz_#ybO+r0}0( zPZp46q=u{{tH^4ymV87$CL72`vXgvCc9Gp=5BY{1Bj1waH3g_kj0il+ACRFqhF%ir_rnH6`MAbC;1QRU3$&^)7%?YMG(}5Yvl=tgU z(0)W!QU5Z3^NNb1(RF-XF_YiWJe;ZM^P;)HUscBpXDXTet{n>a!X{}LhLL5pOdV5C zvuG~WY|@L2C|L7|c}(+YKGkr&#pg+jnM0;%IiF;E+$1`U&ZP@!HLa(s=_hm}-MUE> z6-JVmf#qNYsHQ9(Pbbic^`I8ifmvWBeV4vR$IA#vk$EvD|6yQ}9It2sG4+vyzJ zbBrt3X*Vak#-3!tgEIr`z-F#6J|Sug_>}Lwl}@Hp_|8+g3Go@JJHc0sz8-uDc7fes z51meD(3x~rJ=hEO@v;Z#Y&wU_lCfNVg2r$hoc^D(--9!}>^VA*&gXF#a0?Q2hKt~r 
zpzP1!61WVm&_#4HT|$@EgR9_IUiKPYMwj!lD}oc?-jJ%$9dJLS;{))JZ+}c{Xf5Af z$JI%7=@3HU{{n+yFgygdlCBDY-IOL7qG19r3u9m`jDzv?1G>3;nbnI-gJKvlo*Z%WaiQ46nl1{--W&U|Sx$9sQhctJVC0K3o~^h>(C4t9gxVGsHhJxq_$l#@(r-b-I&+AwWlU+%a<#`REGh5ac*6X-7P zdxbORW&6NT?H+EX!kG~A0XP_zG5Q+#Cf!>DhtPdoZ>3H%oGGe-6?8vUp5l@B!4b@U z_!b<=v;m7?UpR&vkf=^LM42$UlO8w;$HH+;1Pnk5S#D9Hx#z)9EWinHV($(GZHJcl zM|CQy8dMA4W%B=B(IE9coKzSsZQHU{UirY5<;5e0_=i^Il@}LPm6i{!gOlMDCjUkI zoXE^*I1SDS>N}ktsev=;(V)HoE~xKZ`t|?Pw?AA$L%Qyw-@Mdy8Dyi6(1Xtd0anA> z7oc_YTbdGze7GuzyP6*VU$_I|Ck?oJsPV*0;PpK4(U-uR;1(vf25zR`)xb~bDbAqM zw%QK&GO-`PFW?Ti6MhMI!QF5V{EB{0&(O2<9Q}cwr$5pQAHaQ#hIfX8@DMx-Kjosu<$1cPZ^uRMI_ov77CUGd~Y62EFS?cme)If1y|DuXXTec!@ble`5jWK1&+j z`WjQV68;YVfY;#-_$R$aZ_+#TANq)D8bNNszk?vR;T?FF{!ag(*X!Uv@E#9xgZ@dI z)0Dz!J$GEI=BBAf_dpE7|1FIO5+EV{i{7HQX-aA{rs0#H-1XHFrDY}0$3bEg^M88A zqBwq>yYz3auSV`sprrpJp+*{%Oz+YA^Z}3E^sff&VkS8aGRY@U8WVx^$gpXhW+*Iw z81hPFL>YZw@OaCjs-il+%EaV%>rfCZHR_Xz?2LXDyp6Ju72ZYJD2Jc@WBP<%r%&m# zRq!9=Ku+Z1uMT>H1q=%S3sC5R2^vKkGRjJAz|x#o;#Jh5dxru`Rp}6aYV#c(4TEyk zAFu#6Xw(X|ZW!EOi&}B{siq#UGi5c5greZq#FIJdz?6N+i~-pwA9Y3rs7sJ9PF#77w`o&puRzBU_ltCiRu)NLiE7`5%+CWt8f&e4HiUl zGh<9#zZhdUDr=CIJgUic6lMPL8B4N=vSk+(HQg& z8jHrE0HQ32X8}L3lm#*t$XTFZLE>sO9!)?K83}qP7zHd)vOvXxB+6S)x@o;m2SP!Q z-r*|;7HPz|GBgBGGiXfaxXmZD{7Ia-0LS)gWth6TwiNMV7N1*t61u^^2FymT$2 zLG@@QT7_2gPixSJEHKbW7NoPl$bu{u3<;J-vS3tj$)wFID@!Y@iiUDk`sn0np12AI zm5=OD^j7J>qN?(W-h4%uqW*bB{rzQy(V5(Joq}7g9~J1!*S9JuM!sxiX z)_J9G`P-FNSw*fA7OEjo3%gvHy}i9+b5NWrtRWVxv*? 
z6gtZX6FQB)M`u`IWr2+a*>&g~`T?D1K@JPru^^Ansru!l$_k@n3jAgM;wpd13u6>U zCvinNvOwF)$`Ss`M$}8_8Xq+1GP;6(L08eQh$k953vyZDV1bhbE*7{~q2JLT=sK8% z{tPa@hXt)!@ERS*g0?&b^2-n(wCAW{Mf~nk(zeP!q_Rte-`|g~>{eP?%BN85SjFgTabgg$6%SPV0-VR} z8uX$HZi-vo8>iy57t_Le7PO&_gpZBb^m1xJ&4LL;C^~T#w!Q3?&4Slo_R7V4f>T`` zTI`HM?7=PhK*L^i1vkO3;HJ14ZjN8YEm+V#w2~cI(2)h5Sdh;G-nj}`&}9vng_aZXzAp9op?YI;VWY|;vtk@p}y4KxwNEn)#&_z zjnl8hZ~doi0FT6@SWw781crKpDw80DT0{$g(3tos9G?chIxl1DlP$slv;3aq|_k|;o z)Zt}#Ik(M`%_*EMx%-ZkK*JEVq1T%H(uUHie_3};=(*7>AK_0L$q27w!O-Vqgf}u} zb$AorjJL3$oCOsu81d5XTk-aPvkd+M?_j|&77VBMLT;f)n~3+|eg7e`A0J>rB@3#! z?>xzg_$dAln!?`(0p8*PIEhyVGB`FWnOheo3N&hY8uNL~8vH#zgU{k~_y>F*|A;T( zpI9)O1#h!p3=8ps6W z5Ah>TXwB2kpbQHpuweFS`~*Mc8=nz|0AAoe!7xAw+QN;nc2vz^gPKe{MT8`bi6G%b zL?TEe5fcfC;x=Ryh^MmPJr+!7!MiM&!o`@Jtw}8JFeHuz@AJDMkrEl-PmXFyVz0I( zd`4Y5pwwT{HI$39U{YvmlV4I$$X`ejQFF&lyg7)5J7!AJ#|54>d`>d^gKq)MB$CRs zp*)*Rd+uUHPYhgiq?FW>bS6Le6y5-pK}_6gvl$aJU%lFFYVsfRB^IU)!DtK92J!Zt z8MMi)AR?c{kes03Kje)>l?_l%;^J=EGI*`r+)bMe6EE>`9kca(MH8-lc2+EDN}BWj zNt&^Mt06q`&gFWf+gg*>q)o8;H5SaPA#GVOpQdzaUsO8uxxMu z7A$1JqB_!<2kydx#UUGQ;*O;kzd?EhZOgOAk{6i<=|=|ei3}+uZ;&F=pA?f4!gJ43 z7A#}Iau%#$K{X3%SWvr~3?zd{DH+Uv%NX7Z>sV0Fg0(Ez5c+MTY1|y+oOZI}@*$R@ zVZ+M&4e5X-m=ExqSfje5$XF(}p1H#O!dzv3rLAad`WkIh&s-w`LYeE7pW#XttfD-x zaWxSM-5bpLZcuaHRaR5wcg$XX4f&NMlgSij5_6aZYgq6B)zepl*(QJHu;+zsW|BGl zJ_AyKmiDB*Y6%C@Snwh3!_z;H9r(9nE+mUWj>%7bJqtc+^vZP&<1Y`6&l}_8mtM(7 z8%borC;#n)|6g}|>8pj&PHs1GHgFjEB#6(G(q^jUbAmC!qYmp>bMhFMi+^%wTgNsY zb&ySDD^J^GGuc8uC7-e2Qx@>ott|Mwo_tQWAqV+_1>0EgWhgs8KvR0;`3F>WEA@{o zj7|uq5yQ$#L&Z>(H*dpdalP_FjL@Fnc6)X%9XKe|+_+v}k%PhU_L6;MKRLjH?JVF2 z;5lq(J)hOoGClYgI-ZIQ+{4U?0dkU@YUEgQngzR_b1XT_Z)@Zn`GEzyS+Ms7y^;&$ z=YNwYxkN6rU=ItvqV^Yhr#2fy|DDc}-^d^T(djzj-%0kdU_WQhQpJ+nQHp`L}AP>mHhMm80W>3hopssi6>oo#~1xKFiDnPuh0xTdbILd-!FLV`z2_pVI ztRPY#X2I7i_=ehh@`i25ic_1FzVUBm1+jvJV6Rx_c}^>k3S=z!mIcSTvsp<2KDX;o z(9S>FU(&pyco3gQR~#243DjK+s*0-o6fj|og3&T2CYSLrP5B)226~7g7V}4_3arFQ 
z{Nbq%8~9wP8Gk@p#qZhg@+YH{@l-sW-)U#_=c2Xzx#%kXT=YZyFspy0URyxmQsH^Og)XNBj4=Y_ur19ydw!f;qjm^@4!W(aE%)-o(VtY27hm_KY_ zSXtQ6uwh{pVSz9Nv$#^Jcu zfw+RWK5_lx-iYfTR}wcQt~_pdTxHydxRG&l;y#Z%5O*`~emsbW@e%Rz_>}n6__TOK zyfHpI-X8CWcg1_+eevDn$Hh;J-yFXq{y_Zk_zUsB#$Sv7BmPGG&G`HA591%lKTTi~ zU_wNKJVBd~mEcP7B={0uNobbPHlbZY`-DKpg#3hpgrN!Z66z9;Bz%|fW5O>9w-X*E zJV|&a1yUr9luD%0(pYJ{R4Ua-jZ&+$iL|w}jr4VCp0tCsyR@gYx3sUcP+BB?M_MP{ zAU!L+B>i1_OZrqMl!eP8WMWyAOfE~5sbp$dvP>&8$?UQwve#t=vaYi3vYr81Z&`_K zfNYR#u&hirR5nt^%4WzG$QH?#$d<`g$UcyLDEmnEiEO=WqwH(hT{)Cz$aCaAd0TmB zc^7#%c@KFnd9mCtA1E)CzbPLge@h;aPnOS@uaMWs>*OottL5wE8|9njpUSt&x5-b* zA1g$PY=u|RMv<>5P;^xUx+{7r`YTEl0~CW4gB4|p5sH9fl471>sbaaJT2ZT5qgbo> zSg}sAL9tV@OR-0>SMeZGkQkOIN{mcwp4cI=Q)1`DE{T&9=O->oT#~pvu{yCfacAOz z#KVb46TeBkocNEDQ9>nF5~WZXu9PY>lueXPmCcnclzGY?%Dw?*sj@;jRXInwR=HdG zjqisw=9i zs^3(Jzl*;U9VoJ-k{#3-lG0Y{keL(`k?xl`ndX}`jq;- z`hxnR`jYyJMy&B_25N?BMrtN%rfQ~ZW@=^!Gz&G0HA^+iHPxCmnzfn@nw^?`ngg0c znj@MMn(s8HHD@&EG?z73HFq=*HIFqt&HK`j?x1|Pl zr0!4sI`u^Aoz%ZmpXz`P>4;9EOVy?647v^G~71)ZMbiEWO$k$lOCTgO;@C=(lzPYbX~e3JtN(m z-YUIEdP(}+^wsI7)32r9HwuhlMzzsov>2_%9HYbNGB!81Hnuh989NvQy^U`e`x{G) zrN%dnLyg0XqmAzwry8dlXBy`k=NlIpml*4f8;o0x+l@PoyNw5pM~vSXj~h=K|H%+# zWMnuqT4ubK@p?x4j7}K^8Qn69GX`XoW|U!$k28*DJThraW>c0a z+mvf^nLMUEQ%_SLQ=zGUz~na#Gz~Y6Gfg&4GtD&3G0ihAG}W0_nbw#-GOaUhG;KC* zF@0$|X!_oC$#m0n*L2VH(Dcj<&BQD;i_JQ-&1^S2%^q_Tb5nDkxudzWxvRN{xtDpM zd9Znid6>DD^HK9L^9l1Q^BMCG=D-E>&*m%Ud*%n`$L435 zFcW7AGewz^nNgWBndZ!9nO!oAGsk4k%v_tfEAw3D<;*LYS2J&B{*`$<^HJtg3$P$d zq$S1@Z;@FP7QMw}u~=*ths9;_TAEmzSz1_HSvp&~TY6dgTKZf3mO+-mmXQ`}nP7R( zGTAcCGB03RXjx)eZmF@zk}oSvRf3+SWSQT4}AazGWR{Wv%0_6Rp#&Gpw_$%dFMbTI)*d2i6a* zo2=WcJFL5`Us?BAPguXVp0)m9y=c8`y=wi<`p6c*wiuh#CbuQpG&Zd*&1SIW+Fr4> zvvsm{wso=fwDqwS+KOydwi??fw%xY9wga}qwy$m9+D_U|+s@j~+iu!!+y1uQvpuvu z$!4-qwjet!Ta=xUZOQg!7i9O#uF77Jy*7Ji_Oa|M*-vv6IjS6WPD+k0r%6uhoc1|A z13B;HOv;&(Gd*Wk4wv&m&i0(cIX~t+$a$Re%nt3u9%lF0Ti9FM+uGaNd)SNZrS>v= zxxK>vj{RMGy?u@Sp#8Z0g8icXvi%qPpSd_UJXe#Oo?DRHBX>gX(%cofHM#Y95WoBI=*u3cN}sYbsTdXcbs-y 
zcKq(R;rPpO$MMkd#K}03lQ?b8w$38wVCN9$FlU7`;GE%{?VRge;9Trn=B#$sIX61D zIJY{tJ9j#FJNG*GI}bWfIe&IOc163)uGd}NT*F*rUGKRjxu&?Lxn{T)xE2LmOI<5m zHLg0>R@ZLVUe^KFA=f$AkFJZZORg)f8?J}0XKvs|?l5)dX)&)wAhs=JlD zjXU4n#ogWA(>=gl>MnDayGOgnyJxy*yXU&+yQ|&j+?U-~-PhcIc%UcBBljphYEOzs z=gIU~JvknS$L;ZY0xdm#JOe$$J>xv@d!~4%duDk!&r;8FPmQO}v(mHLv(IzMOT22Y z$!qaiy*XZo*X3>QZS8IA&GUBf_V&Kv?e8t|mU_#)<=)}mx4jd*v%K@X3%rZG%e^(; zdhaUldhh4n{oX^~quyiQ6W;H)8}FOzTjpEm+w9vGdLdyTY`6Y?~DHjTmIK| delta 9160 zcmZ`-2Ygf2_kVXKZPKJGt4W%)Nt&c>_Gpt7L1eG8kzGq0pin4uFooO)lu1NP$d( zjKwLL;N;>~dBPH=lqq8>7FQpMYzI5QJ76a`2o8b6;6rc(90kX~aqtPa2rhw7!42>Y zxCy=m-+^1;HuxSq0KbEW;13uJMNkZtPz6(<8fu^xrov2^1+$?I24E0|U>4R1=&AXTkY!0bBxKhRa|rTn*oZZ^3QwZMYpC zg~#A=c!IkJxN z<34x@F2cn)jECYKilJQvTyi|~th8Lr2x@M^pPzlAsA9r%5G7@x#v z@OgX*e}+HD*YG#^7QTyr!VmCYL`Y&t0!btqq9v(BM|{Lja!4)-kRS<>JkpFjMVgbA zq%~ zK;9zT$lK%ta)2Bphsa@aiku@C$t7}?d`)hVAIN?38~IB>1QLN%5HC;)RDu*iy1*bX zayL*5Aml7~s`wN=L$9&`umEzK@J;Z})>lb20BKyBFoPQ|)N`$bMsAkS5IMt$7%?-H z8D7f_qp4Ki_&$OexsvM6FkP5@W)w4eaDLA&Rh5N9O2aM7%L^yg^KC^;_r~rq%-DgC zyL*N!>zOf3In%v&eoy|TAVD^Hvdfm{nqo_v90>4FftTqZ3NHG39ut);t@XVBSn5nW1`)0MQIuBUI& zZCjJ#V+hwfIk8zCs0WL{YRb}SbSj-r(~T|nc5U(XpJg`&PR_{*cx`DOpW8pB-{gQl z*TlXIXZE_Zagkup8_Fd%?SO4y~bc z>AY27A9$Y^{eaG=3wY6mTzC0$%L#DiUuDmNk9pbibTM7R%W|AdVX|BXUqofEfX~3^ z;3|EAzDQr9FRub$f@{3&b@~c@m6u({>z&G7QE1T}@KXcCpTRGD|9$!zUBUOi&ZQ|$ zR`4f8|E3ILNE(#Y(z*s^rOH%e98Bb8p#(}{JWQag=xVx#u3ZI_U~&VE3$S z1NMZy=zH`KJx0^dGHrPuZpXA|I>3IEp>n#5JCM>+IJn_?57$ebD}EwT#J!uMQ~3u7$(szFJs9-{)4UO+7|2g|%=r-A^^=c+CglSmqEchZRhF@Cxh)#}_0d zgeQzC99`V2yl`|yQF+Oj%6=uIi^CJT6;=+X2PqRnchL{d!ijJaBZiaV^Kc5M(Ae@1 zG&nwtKsq45XNRT_>LAN>|9eBD)O0wbAVJZgb(_v*!&;XWRgDUduIyY^R9IP3Ho6{G z!n+PPhy1hI`;%_%3{po~0ktbM!pDKtG`u>7{k>eMZl_#sPQ`9)gD% zJ^hr%@h)l5Zx`Vu_$mE@eo3#@!z=JJ<}Ce+1q|oX zjcswA8MzvM4R642;7#}~y-vTQcjyoFSE_4LavR=@D)}DXfp_WG^alN=9{vD-}T6DBWoEcMbTu%(>UtLj9d>qzzw|2WRMF&up8`(dZJ#mkQUG) znn6d>v2-up%>u*%5?!vx#DE&8g9W%C!L!6{Yo~tQuhFJ~F3p)H)$DfbZ{BIHYsokZ@@e$`xo`6y@t^ 
zC8|Yrs2;6CtI-;?mIYcCq_RNA0zC`TSdh*F0}G5SFueg5q4fWun3I9(GbPf|NV8l`^#4$J)i?A5Sp?TaoZ%;`_7Cgm*&Maug z8-t7S<+i~IOyMe=h?8(K<~^}F3!Y&?8y2+Z4TF_f#RuU{tj3!D9g6vcRWh_BT;97O z)iq4@X%@6BNRV@z;^pE5*bvoNkJGp}e0o%e)48?2^vEMM*oaM7!-7^UXw84syQr)< zT%I6k>di#+qP=Y!7O7>w&TTvM2J2K(nZPtjI2$E8{I|qe=1?@LgPz#Ww5LN@(1|jQ zNCG%mkigw@%19j!G2OX4P6em;6O5bTr#P$MhH-Pg)#}fV?Q1U%7nYWm6)h2D#y3z8 zeunoD%!BR5-3+Eha68K zXnNny;h~j%O2Xs&^(YxOyy2;T=LSdb!cUyHSu{p@N^a=kgQ99!G$0;%GJX~hjE>e9 z_rv}10G=0~WkFvS^kYH)RlGmUWBT$LllO*pTy1DZ1P{YJgVkc5wFcIrd3a3<>_pgKp7U4 zv0(Cf{0`p9ckaTw@g81aD}E17#{00Hd&=82GJyq?STN>MxHUxEe*8gWw6S133wYZ; z3bjU;`Y;-5NAOX63?FAfISVRSP|1R-{|dFok=5k+|BRfb*2q6Yrm6YU#xUU*u^k^~ zPXg)R!sH+G!(Z_;d=K0CD^HaZqc)fnjU+w~)gQ)Y7SCJ&5;7cfyOk{-5wbNNJg9X*~ zL`hVH&$lyKu#N?9&~#3em8Rq)r)Rj7XY6qC;{;lekj6=}l-x>NLIKf}Og>eTG?GpX z#7ImegP4hhSXnTO1-#SGVL=TG=CXi~#Q7{(z=DN*swCOOM(q6ENnHHZ6AfR!gjmc1 zehbM0js!V~Ax zM^zK)z#ErzBs@#Kz=Btv7&o7E=PgFM5}vAFWWh^y%vsWtwq(J})Yqe=xJTv0?!%fa z){hMQr)(=Shzw@It1Q1A_ccislA%0Lks+jr6cawxFJr-S7QDuS6|2ZFGMto<5iEF} z1$8W_XTd72)}iR#ucB&5Nkv6fxT06(#4+IkL#s+lyM!w$3WtRooQ;g-osIC(v65;o zF^5PM8AryG35=LbWDb%^WHQs9Od(VF(;_>WN~ScvadFP&*|m>|pRil#R*wEkyZ>=HA$Z~!QzKFa|Ri@eMK^FO|e1@AOm`G;*Zy<6XIZHMU@r(+m;uO3Xd-NN`IZImJ?Xkl?nJwcJiJ)5$%drx!=S{yEGGrFO&aSU&o$pi9`|D++?5}xlq zctn8$h8bBe00JmLEI7b|!z?)ZI8+4!LG0hjO&}78S-=;*hoY+-t?wUG6Zfl2rWPa! 
zlK&wu6YxCuAq$Rh8SXT-KrKl9N4GAj?ijC*D{w1;i*v`Pa~DkU=us^fb-VnYox&5t z#VyN=hVzwJ`DuY!VCmhnvam8t0TaWdFa{=@@iQT&1z#>-=TCSa@MUKlUr8ooIbSep z`8qHi2XFy@Ry&!m7$W>Z?Q~p?XW=<~8h;fp$1CtkT*n{PuEXp3W7^N~cLegsukE5_ z5+P&AL^7EV^9UcyHDn%tu)2spSY68d-+lgYw3DE(V1Qtd;5oq%L9t-0V4|Q}FiS8; zFjufoa76I2;ELdy;2Xg`!Jk4R6bfU7Vqua{CR7MjLbWhYI8Yc7&KAxQ&J`{dE*I7c z*9$iYHwrfkcL{e3_X-aQ&j}-!h1Z2Y2!9j)6{CpJ$M|EK#dL`26Eh%YP)tEgam>(| z;V~m(#>Y&InH)1EW>?JNm?JUAVot=u*!WmgtU6X3tBXyG&5X^CwZ}SR-Lc--!LjpW z_rzY0{UP?}*k5BG#6FDuQMa^BnkRZe zv_e!b+9Y~gv_rI0^sZ>1Xus%y=%DDN=$h!3=#J=~=tnUYOT{X&No*Ed#aUvTxI|no zj)-T7XNqTw=ZhDL7mGRZtKwSmcJV&(5%Ep&-MEA}bDTHM7w3=5jcXm(C9Z2+_qd*M zz2gSQJr_45t~hRJTx5707q>s|omNl#19O3z6zNH0o%kp3k7MS5TQK>ASnXFL-R<8izoJ|D9{PFmc@gF6K5|jxk37Uk|gkcGl3F8tbBuq*;kZ?NTY{I#O3yEoo z_C#l*J2B!-d_Hkb;@rgfi3<}yNxYtTBk^Y9cS)H^IZ1(}P*Ss`>ZBz}OOswqdO7K4 z(l1H(lO7~JOm3dsDY;j2pX9#D{gVeKhm(gTmn4r&9+g~{T$8*x`Mu;D$#;`~OMW0j zvIJR*Oe0H`>1FA%ESXK_khx?YnNRkVtfQ=#tWY*0A}f`RmW`2>%O=aF$RaXUHeFUN zTOeB|TP@on+b!EGdr$Vh>;u_x*-6<)vNN)eW#?u0qQg<|!5^7Ack}mMLCSysoHK)GJmi-csyP>{FajoL79JxTLtO_)Kv_ zaZ~Y~;G~m%50@w=~TLvUZr2zO*vj!t=yp8u6$2Z^*V z=BwUTy{kH=`dD>d^@-||>ayy(>W1p3>O0kK)g9IS6qq7TQKjfp(o>8n87a0DM~W-O zlj2KhnbInyO-j3z=TqjS%uSh}vM}X9%ITD|Dd$oysN>WrYOPwQPFEY%kqmW9bw_m< zbyszF^-%Rh^>lT$dX{>&x<)-u{fc^ndY^i~`hfb7`jq-p^;PvP_0JlqMy_#aT5Gy% zdTIJ-`e_De25Ba0rfI4*vov!wD>N%Lb((t3YRy{BHqCa;JDP)<4>d)lxvzPk`CaozMDv$6PAkYJ%sQn#h?EzCd57AE6(mpQLB?)AiN*S^5R~MfxTBrTQ23uj|+7H|lp7v<98QV(=T9 z8Cn^-MGS)s1%^UHks)jtW+*X?GfXf{GCXgXYM_Q`h8c!83?~iujRs@3(Q9mGY;J64 z%s2KlRvO0{BgSdQ8OE8$g~nCJHO6(u^~V1g-!g78ZZU2%?lA5$?lB%Q9y6XWeqOt;Lm%(2Y1tg!5`9JE}v+_3y%O|$x}Ev%ibU9CN=y{&z%{jJli3$076 zFIZo;F0;O7t+Q^lZny5V?yFm4Nf7losvI%T)5u4PevFU8- zHj~X^bJ?DrpvOjHaWp862 zXfLss+pFvo?33+N?X&DP_WAaO_9gbE_Sfud?OW_e?HBAeM^aon~jI)8=$I1J0Jtd}mi@H)juLfpdtn*g4EO z!ddF9a!ztiaZ=|r=R)UF=Znslov%4pI_sUQog1C+I1fggN1ex=C!J@V=baawpE_?l ze{l(2T33#%yQ`P0kE@?+psUI?)m80U?Aqqq=i2W&==#ug%yrdu&GohGrt6mLj_bY~ zxUpO47P%$vc(>NAcN^RpZjamV4!HB&?cJT+`R?xS!R|tLv3r<%tb3}Px~IFV-7DO+ 
z?nu3RjeDJYvwNF+hkK`czx$T^o=4*`c{+N!da69rJk_3Ao*K`5&qB}3p4U7pJ@uZ| zo-Ljoo}HfEo_(JEo`aslo{v0NJYRag@_g<2&U4#y*YkttcW;b0-kao=dsSY&*Wk_Y zTD@7`j^4pu*1N!4>s{qt>s{~N;N2AQZuM^W?)2{Qp7Ngde(b&Az2v>@{oMPd_bcxW z?@b@}CHpMCJYPrO5Z^f865m?i`@W;TW4;r<^S%qdi@q;?*L~mkzV-d+yYKtm_otun zOZ-WGxnJc^_3QlxzsYa$XZdaZpuf4lrN6bmgTJ%CtG~N{uz#3;gnyJjGR9xwf8HPQ zPxDv%XZvgXEBx#H8~i){`~3&~ANr5_f6Ni(=ySX|ZFBnNjLw;sGb3ky&Z3+pIm>e1 z&e@Z5KIc}>&pE&3+|NzP9h6&;J3O}}cVup5ZdLC1+*fng=5EW~k-IB*Z|=U_4{{IX zp3Oa29r(k}td$3QiUvOY> zaByUBbZ~62GB`dsF*q|gD>yq?6RZus72F)$7Tgis72Fd%5CFBundlePackageType FMWK CFBundleShortVersionString - 1.9.6 + 2.0 CFBundleSignature ???? CFBundleVersion From 9bb160132d510ef81319ad1372b351f8e68bdc9b Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 17:08:52 +0800 Subject: [PATCH 25/39] update read --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 0b2ea2b1..f99281ff 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,6 @@ LFLiveKit * VideoToolbox * AudioToolbox * libz - 5. Add `LMGPUImage and pili-librtmp`(static library) to your Xcode project. 
From 605b6bc8e86cd3167dfb8afe822483961d4fa866 Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 17:12:42 +0800 Subject: [PATCH 26/39] =?UTF-8?q?update=20travis=C2=A0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .travis.yml | 4 ++-- .../UserInterfaceState.xcuserstate | Bin 18714 -> 18823 bytes 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 1dd25288..fdd2dc20 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,7 @@ language: objective-c osx_image: xcode7 -xcode_workspace: LFLiveKit.xcworkspace +xcode_project: LFLiveKit.xcodeproj xcode_scheme: LFLiveKit script: -- xctool -workspace LFLiveKit.xcworkspace -scheme 'LFLiveKit' -configuration Release -sdk iphonesimulator -arch i386 build \ No newline at end of file +- xctool -project LFLiveKit.xcodeproj -scheme 'LFLiveKit' -configuration Release -sdk iphonesimulator -arch i386 build \ No newline at end of file diff --git a/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKit.xcodeproj/project.xcworkspace/xcuserdata/admin.xcuserdatad/UserInterfaceState.xcuserstate index bd82a2d20592bff84cdd0b3c48894e56ff60749e..14912fab873395a763b0c7f6e7aee26106f8732b 100644 GIT binary patch delta 7081 zcmZ8l2Vhgh|9{tam!w%u+w7!CN1A5u*#x2NN?9#?)lvc!N?S@Y|Cg2u{+sl@8{fP8-1k1;aapzi%%H(MDVfSjo3Ai} z3-eor*d;_MQBI87Jna^UzD-Uf=aTct`Q!qUCKr;6$i?Im@_lkMxrMAJx02h)?c@$} zC%KFKl-y08CC`!P$qVE~@=Nj(`4#y!d71p4yhh$4ZOb-C)gPlz@D%#><9b9A~+D1!E#suN5V=t z2}U6WF`Nu%!gt_YxDb8-m%|ls4O|P?!H?h;xC8Ek`{4oj1w00i!}IVv_yfEJe};GA zJ@^3r0sn^2Q5@nR0g@mo(jYA|A`>zr3u=j4q1Gtc2DL@)Pq(LL7UMQRFAfzZD=>zgASt4 z(OGm3okthYMf4@Qg07+;(NE|O`W5|wp0eUtTo#YTXNg%7mXxK5V`=GIWOE>?&w_u( zfmAw`okvlLZwjJ#`-W~cr~RtM|31Q5hIAQ0i6mu zj;<;iSQ^QxtSlN|$Fvm_T^qYAh>`vN=`M&=)e#j$CDFBervj!@C=ySY;IO1RCR$P_ z1OiMc;0sKg*f?@DF{ZHI!1){Shu0Ey#45^8c_~f3jcX)hEl-H2R36op($K|%Z&B_J z?S*Ugt%YCn=!jBJw^F9lDN23)Eah4NPS9?3PjI|`mbx`)?)7_oL4VNUaRdhid_{qX 
z-{TMZ11?9<6?8fVyPf`^XHc=*)xD^4gwNBXq|4CqvPkd7m(Sv&xtgW&;j82B#Qhw1 zH|}2CFVs)eed;0g2la%(wp+s->+l*vNRcu1FsdUv5yLkS6UZ)PS2CY0AiKqNiz^^| zlD()*4A>aNW5B~efI$LvfZB&aA_mFKmYVCNG*0NuCkIjlCBcAOC~CPtZ=lPQ;_1E# zYCbunv7KKi^2gdQ=~Q&*%vSW6L9c*(|U%-C0R|@khNqTxr$tk zK`I7m7^Gv6fq@PKMlE^_vM?~LC)W~May_|${E+;JDQzS-VPK^A7??0%u+75Yi&*VE z2KiK4-`v)1x)lw~D;gNdEG-m?m}xsw%JS&os0dRKk>UDkDXOA}f7H zub?TzIND-Rfn@r>PGywbLmr8t+)I8&?j!e;2grlu=j0*sFa~A}EErfZuwh`wz=44i z0~ZGF^u_Ln1Gs zNMAyuwXWH^KBYzD%STsrtBgdNboUMU9iguwzs0~?LteqaN6+>-+mhGG8?ojeF!0xq zKVlG|(zPBX(eq zF=+Pc!2X7DF+MyYpOVkW=j01WK!Cv;7&OPA1qL}7w8Wql2CdgZ5<-X|i}`Sf2^h4& zpe+U+G3eFs?M)x@*BhW1Du|?2#0}y{;wEv6YD2Z9+EMLS5kEl{R1ZPOs{hn2)bj?8H1h}bi$w;&GGo# z!{)KonLT!4v;kYe*37szdSyjp4l1OFu+_FKDaTs)M&|*RFyta8IV2ZD zjP$`$42m!qi~)hcP%5ogUSx1pkCMpP-uWd%hBlP?*T*uRPBMQCH3&!=m$r$*rvIE~>YoQlDa znwZHl##tyzY*k)1s=PGPwxqNwQW;ydY4&&FY(|{;fU?%WIT(~sX$_7O&Lf7`!ufCk z2E#BIL1~)g7s2<4q*_1#Ky{=#)xxE483w~KD5X5HZPKGdk|){4MUtLY5H8p$;H^s31PQ6(JKf$1~30r*(+g7*@gHafad4+8! z+#Ops=^t==;9d->Fc?k08CKIgr3 z+8W~?N<}(CUx(6AI?BLcHUW#q)3~FB~w;w8GG=}=40T`^rpt_bg zkBTV9$=6Vx{E|WWRpYx3d1(}A2paZp*;Z&cVjNB#1{*0)lTtwG#ZUTV-e#%*J1D>1|MPYFyfsI*sctV!q)I!5SMqr>P3`X4%qzQBNSXgX%hJ`9*K2Vz|(n1ANe&~>ok$;j-b zx0wB1&?R)43CZZISoh~lYwbUhd#SYU#Y`NiY+CAjbo~|hA22xlicB}rtwzU83oM!_ z`WZ2qP181i!QkjC$p`4+t4+lq{o~BhWAxWQ4(D$Sjx{-)XTIms47AMfd6|!8p0}J zm9olM6|9M@X{`Aynze|vgmr>-k@Y?6C)Rz|@2qERE<2H(%$BfaYz;e=ozB*=_3U=+ zq3l`gh3rM_CG2YUI`+rx?d)Cb-R!;WBkcdNzhIwZUt>pavmdaZa}bBeN#~e3Ih^*K z?wo;~A)FG5FkTg}i`T~+;!W|EcyGKv zJ{TX0Zx-J?epvj{_@nU;;-7N?m*ldz94?ou;bw6yTpQQHb#a?>TXWlT+jBc|J9E2o z3!>a2ZWZ@EZVh(>cN2FH_W<{E?qTjR?g{QG?iub`?q%+M?w{Nz+-KYuJP}XH)A1ZU z7th1<@dCWjyh*%Sym`C@yoJ1_y!Uy_c`JEqcpvi)@=owB@*eS?^3{A7zd64JKZoCn z--X|s-J<%1jT}g zV2GefFkUc0Fi|jBFjX*JFjKHtuwHOd@TK4f!EM1E!Ck=v!9&3#!DGRnLLih1wZe3v zPN*09g+XCR*i6`5*k0IO*hkn;I6zn|j0lGaON14|vBKHH#lkw_s#M`>;acHV;Ys0X z;aTB%;YHyk;n%`%gx?9T3a<%&5dKJqn=7Ky1bIScLRNw?!JJT$Ffk#TfD@)9e39^F 
z!dD5G6TTHmMH!+@QI^Ok8YQAclSNZS(?w@Q--)h@u8Dp~v?K--Ly652nIzAt_tekgt_el8&-aS|wDNfIP#iC*HC zge7lCT1Z+-T1z@gI!n4r3MAbn(Vmh@$x_K$$!W<~l53KmB!5U=NP(1;A}L!ck|s&T zQmIrfRZ26YR;fptBYjhvC+#HdBF&ffmG+kwN{gh$(ukCju9a?+ekZ*t{YCmn7AND$ z1hNEKqAXdamT6?EvUHhFrkB}e0a=c$ldPw#x2&(MzpPL;G%6b=E0vYWDrA+i39@Oj z1+w>L%VjHNHL^O{N3xBwPh?wUTV>m2XJvoNS#q1)FK;35DDNTfCGR8eCm$dmA}^5- zmyeK_%SX!pBcCFlBVQ)3ldqPqm9Lk7DBmjIF5fBtRK7?4nfz-7p%5y3isp(uMR!F{ zMQ=r4MSn$fuwtlUn4(lsrl?SiS4>gNRxDLiD{2+16l)Y8D>f-ME9w>76bBWD6h{_S`c`#C^|zW- zv(y|lSFKg2sWa3%wO(ydd)0n*P~BFYtL~ugr0$~5SNBy9P`{-fs2-~xub!ZOTRlll zspqKYs^_a$sB6@9>NVJ93h>VxXj>hIOp)i=~P)wk7m)OXdtsDDiXDP&3%rLa@t zQ+O$Yl!TPTl;o6@6hlf*O0Sd=DU(x{rPQbFOL?LJ8jdDOqt)m&28~H$(YQ5UO+b^a z32VwV?`rBaA8B@Ic4>BN_GBT3Z{{uF|g2uG4PT)@yfZk7!S7PixO=&ucGhztvvRey_c*{aO2q z_DO17Dokaia#9mg6H}8@C8@I1)YPn0M`|E_dHTxqb?MvF_og38Kbw9f{rmLm={M4E zrr%D#lm0mUuk@$sFEZjXPzEQ1n_2&T^R>64riRoh+fRNobe>%nU1B4 z*YR`$om6MmS#=JbTj$dSbz$8bx)!=ty0*IZy6(E3y572ex?0^P-DX|AZoBSDCMT1d z$hJ4+)Bmo2tp7{@ME@*{pXJCJnN^*2FzZUzUj~UmV@NaT z40?mx(AJP^$TM^{G;TC*Hf}XWcNjl4o;02|o-9@eM}Qg^GzR^)|);uZ8B{!Z8PmKT{Yb`-8cPadSv?3^tb7S znQu-uOU(+i+MHq5n~i3R*=Fuzt~9@Erp=4ZOU>__*O_;iKQ-?)?>B#LK4SjDe8T*V z`AXD$&3war%Y4Ut&wStf!2I08wPad?mhP5OmZ)W^WsPN%WwWK;vdyx?vfpyha@caz za?EnVa?Ntba?kRsZ;WEHtXwAI)) z*!J5F+rF{guqW8X_U86n`xN_Z`&|11`$GGN_MP?v_Cxmn*^k*z+P}1aZU5GO)qdT6 z!+y{H*b(PYI=(!I)^$(ILnjcb)_jcdK@BiCWqDc42UH8*sN z+(~YUTkcl59d5VV=MK8V?&j|H?hfuw?k?{B?zh~n- z?v3tG+`HU+-22=I+{fI%yZ`cJcr2cLPfyRpsOMeJ9M4?O0?#7P5>K^fwP&5@L(j*a zPd)oQ2Rxs9j(U!HPI^vzzViI&x#PL(`Ni|l^T_k3=Wj3R6?tV|r8mW!>do?+yjHKn z>+-z48M-zMJ|zH`3wzKgyqzN@}#zB|5qzF&R6`JVU*Kj~-rIev*> z=}+;e`ZN7mev{wgxBFep&+Iw=w*Fjyp1-TVo4=>Ow|}s|++XP*?H}i#;GgQB;eXdZ z$3M@%z`w@-iGQ2_fd82Pr2kCRf6o6TkQB%YgaVxcMS(GacLTEnivsTjmIdkpdjp38 zR|3BWo&}xovMgPVeTg8PC8gNK7hgU5rXg5LzM1g`~u z2>uwn9sD_XFB@jNvwLQb$)1_LDEqza_p?`I*JRgaZ_7TIeK`AQ_VMiKsq8b^m$PqW z|CaqI`_Jqr+0R2D#1AEel0uS@JfsSxgft;T$Qf!8>JaJ`>K}S5R2&)_8XhVORfNWb zri7-4-U-bP%?-^Dtq83QRflRryFy1oUxZGCPKVBgE`+XyZiH@y?u71z?uVX+UW7py 
zhS}lxa8g(iR)@8N!|7pN*ctYP1L5p&^Ki>>n{c~upKx)wB0M@gHatE&DU8EY!_&hH x!^^`P!kfaI!}Z~v;oafS!u!MLn-R^Dn#r1_H2mm6#KkqfS-Qq|)3cfO{{X%vTDJfI delta 7115 zcmZ8k2Vhgh|9{tam!w%u+w7!Co2E%Knr83Hsw~SWEtIlKM<~TYp)DXX@(xh;eC*l5 zu%aSMQ4~Q1S&D$5Ao4>%oQMd@hW|?osQ--j-re_m@4n-6pYPoa8mw9j7D`DKOHKDk zHNdTj;N+rCA@(q01Tm5rz3r7BL3B1bhn!2!Bj=L~$c5x0axuA#e1m+Ge2d&lzD;f; zx05@_o#Za^9r9iBL-J$t2zit|MSewIApb{RB)=zbkbjal$y?-KJFcaFK8~R}Y_JX}(KI{VvU?J=qh5cX=><QFrzi^iexXd0T1W}UX0!#pf!;*h&~~&JeT4R* z{pe%#1v-wtL?_Tm^bPtB{eZ5bAJKL62fBf7qdVvUD~`oxC9p)SM3#c3?5n4*kvSlF zhXvk=11Yqgok5Rar_o)Q{~WfC=5Z8*xCD==B&r&T;gp6-JxRpcFA~)al=dXigXl?& zB5H>8Ea)+|zO<}5(xJAtbV4)JS5D-&iN_GNgPw^CBK6J07^05I@7uG0Y3$H(U>v`? ziD)J^P}!7+((Z6_O=PU+KJkFcqw*;&T`4$^IxQGETeqXD@GOt6QX1$kN*$f1H0)TQ z+zh}`+NUl8$961G=Yo*mT~Qt>4|vOb&WN+jS>f|IJT9-t<#3f(4D$yf<$k+sm?seF zTUtBP>n^P7U0P8TySS}xI!UuqUcE8ym$+Z!ev7*n_dE47b)C9N{Y~9x?D|C05fxWA z5<-fMdCo+E>`7F=PD}(&GN0^27LbK;1#yLB5!s*0ph_`dV-Sx44+8=95w#zK1Pl@> z?J1%&6TdD*SE3s^m?9_%2AmR6M|x<6gI=f?(KQMDc(SZbz@-y1I&&)98hm=F!9u^4 z5KsS}!KPnMRM3SPx%6KdO5T_@1yPB}$-E|$?r4y5#l?znBxgD6du zHj#J7fFIoU`UTF4dTN^%vsnp{J!CD)Pb$p#EG7-%s_#UKp>9R}$b=rPE^ zfYELuwB!bIBe{v(%sjP_uVY}O_!wkjV8S39gORb;cnn^m(n>_q4s~@^b@ipA=z%6t zil|%e^993e#`i28S5;YBUsF4f=_o8M%PTF5RMQ`1E=eBDbUa^C#fUo8MoL9Qi71&a z@mKpvv{U|^-(>Fr;U6S2=z)nnxG|EGE)rfQ3+j%sLAwLN+5Y4R*#XeQ5)Uz6Wp zV8_6LfwP%BM}AA5$H0Zb3mD{4X>>G@9xV|i7DTEe<@J$@XFivRQu>$IR77lyt1nd5 z+nxz*-RkPbM(Up0bcy_#Fl-_(lUK+e$gAX!0uFqV{v5}1ysRu$GpB6LwOjb;V& z6;EfrL*65j8p*pDgc`~F7=-D@psIv?3;`2=NMMlD2;(q#j!G-e>zr3LF2cC+WGrMs zK4I7h*^mR{Ar~^Ms2v9FG3bCnM+`c}BtquTd|M^Ctye~+9jyxaqEG?VgrNy4W8&wT z-lk`o?WnZAv9Q&ah{O{o+O4Vf$=1|~PEWv^>i4=PPkw4ZI?Q-xJTzd?m2y87nV{t# z2V(P6X-`Gj(7}w-wD}C37ZL14HB$m;;}K?O=P@0d|C) zFzC^mot_x`4})5D!`zy1uTS}VHenyxp#*zFxMWj9QDPZ9|lFS$Nm_+7`s=~ z*F7n;%DW>9i;1d+C$tR*{p-1esBVNqF(`h*X0V*WKdgWe3yNL^iNWu&lv!stkG z-Pp3KR;0HsUpR*$CO8*^2&Fnl9E1ztLbwPnCU}r04!|XFDbW=!hs&9b#4fl3E^lkm z_kC-8!ziM)Qo+@5O^HaB+pn-N@=|?nP5Ic7kx}(|HRYxCRW+lU;aa$k$p1%yUavCH 
zX@T6ufwX9w1x>9*F~6#WMMbl^KWW_AHWYW7>&Ug%3VTt4eRIxrxey}~UW@_oYxoU33(vuC;d%HSya4})!2}Fm#$X}_49`!-fWZ@!BnkuE0>6is z7`kpEo8VRWBm61G7E>`;9OG(Z>yNShFa}3rv@xK%bVAM8`WQDb4E~IfdoZi7q<|Kf zr|U~Zq8N|oM#{!kRz_O6`YZSs{2Sh8@^**G(LH#dE^_AS=2HX)(=b?c20nz37~x|? zAiyZxK`<_g5V?y!;OwQIk7EW+KaJRkL-0^M;vyd6BLNbk1iCD%Krjb`S237}!7CWd zra#W|bw)`HZcs7?Gnj&gq)5h$laozIIiOnwlm4nEfdY}JYz zl!7#L2U{vcS|Xo$(9+XwNn(^vbfp;HnEM1>$bgLWR+SVrp-e`Nd5UivZAKRQip>g< zmFd1>v$X$@i^xWFMUdP@bS0U{E{KI;VeE~}_5-%7^?VA|svcPN)BH zF);|l@cOh3pUf9RVcP1>By*VQtZoB6+0998hdMBbMC~!48xg~=OXyu@M`zR-b&Yj* z!C+}4>W0BGDy^_bY1ODFKt?@KK9SUndZJ#4;i=^qtiWJpGwQ>HuMmS(tqM?{;aT}9cVX0 zzGx@fh2BB$V(<J z_!xsvDEBkF)7wv^+V);;TQBqz`t?79enZzVIDo-HdXht(g#JXgVz9r7!J$U7DQ4P5 zdrB1DMfck3_fyXvqQ@~?Z&BSFSxl&oKCzWWGPbfH3t?~!gX7O^WpP-%e^1NevjiA? zj=>j{yZ9M9rnj#e`fp=dNi0chR1)!I|H+cFWEgyj!3o;rOj(>Gj;63QeGBSK>mw8( zID~@G5f;Kj1c`RcX73t#55_?Oa~f7a6-;5y!Fp(fCT6oy3G10c`wDZ&odxH>c}&4v z#2j&(m?Q2c=7{?`d=tJ6w=>7vtMDd6qdCkPAAw$E78zzvqpzSD%(_{OmN4hh6;ZSb ztw9G_1k1!KU=3xJvm&hFtP!kg)&v%2&0{TKEn?BEL#z|5^Q<3Ozq0;d-DR`b0(JsB zk)6y|vej%2JC&_tcVZ7?&tNZPFJjZ|HS7)S7WO{&VfHch7wj+DXW8f2=h;87Z?W%l zASaO{=V&-aP7WuRlh26`;goYCoJvkLXB1~NrK6KN(JSD(Sosp@q!70DFR9`RWMz!Sg=uWSa4GCKfx8jRl!e!Yl7>7 z8-kmHTY`teB%w;E5vB@tLYL4Z^a%sPkT6%6FYG7mFB~8&5ta(eg%M%3uueErxLCMe z*dS~aHVfYt9u^)Iel9#NJRv+KJWbDOCm`<$AJD7YDWYi!=?NJL#)RsG2?-MuCMQG_ z4kVmR_$uK{!Z)HMkyex@N*85_YDAMnQ$&<#s_2;Ltms?OccT9#W+r+PeTjj@P~!B& zMTvCc(!}M77Za}~UQfJ{cr(e7^juQ=q>f3sN%N9cC9O$Xm(-ASBk55xksOx{lRGCD zBo9g+k~}n;T$WssJTkc^c}#L$^4R3@$%~WUOa3hRdh#7H5XXsmV!1d?oG#7~8^tEE zQ|uOd#eQ*692OUer-NXQh{^qw-nuW%Bj%MtQS*qkOacZTWWjPWe0X-SYS4r{xb6T!m8+Qgl`1 zD+&~S6-A0-MVX>P5gn!&t{9=HR*Y3d6*Cn}6>Ajh6b*_dMT=sK;!VX?#WuxW#XiNy zicb}Hl`JJk$yM@|9h5zly_J2Gh00mVWy+Px)yj3s24$0SukthH5#=%E7s|`Zzg2`P zP6bs+#a6|uq$;y2N7YW%LDfl>r|Pd7tg2Ggs^+K`M^#%?`&D14PN+_)&Zxdoom2g- zdaRC9Lp4jSQm3f3>QuE(tyeqLF11J9QT@ESi@LiyPu){pq#mFiq#mNKQ;$`TSHGlw zSv^TTOFc(DPrXXLR=r-`q~4(3q~59Ct3IkeufCwZsJ^7WqQ0vBN&SoZH}ymH;}noW 
zGF2jlof4nIOA(|bq$pDKDLE;HDOD*`QkJD`P1%!jJLRD!PQ%s6G#X8+MyJthESfBh zUE|caHIH|_NM)zSr}9z-sR^m_RBdWzs$I8Kw_MktdrP-V_kr%P?u_mm-8tQP z-38r6-6h?1-3{F>-EG}H-NSSuJucmto|ir@y&=6NeOvnb=^v&aO#eLnWP0>w`d{gf z^gvJQkzS}z*JtRBdb2)DZ`Zr@9=%T=)aU5i>GSkG^}Y24`W5<(`px8- zP;VG-c-1h=FxRlau*k3^YFK7iVOV8oF??KFm*O{Gv%3jng*JNn#xQSrf8MvMbjwLXwwAKtEM@o zd8P%XC8lMjm8R9E4W@0T-KIUJy{7%9L#CsqFH9#)r%ZpFxn{H3XU;WuF?TojF!weW znERQ_&BM%9=4$gO^B8lTd8~P^dA0dX^D*;%i`HVbWLumTkHv2ZTJkK#mO+*hOPM8N zskDr-Oo>`%S>{?6SQcBBT2@$^Et@PYmNzV0E!!A z+G2gjdf0l*dfa-_dfNJp^;_!&>-W|x*4x(TUF!qu}ET-N2RD_K{wu4ny`^=H=otVcFrBW-+JqD^d**%UT|&0@3J95%1b zZwuLSZ0&8GY|q>J*otfeY=doOwuo)GZG>&Sjj~O%y=t3fn`>KYTVY#mTW4#G+L~?e z+CH%zvYoVjYrA0k-gY^gpPioV&(6y($*#?wk-a*5ZT6<@mh3IrJF~yYKAnBtj_lp+ zBkXnddiyy0OLlCZYM*YOXP<9hXkTk@us7K^+F!H3Zr@>l-~OR}pZyd20sBe&*Y>ma zZ|xWDm+e>WKiTg)pd-;Cb;uo2r9WeB@HN)Gm!H&82tcxH`LfxQbnqTr*v>UGt)@g)Z9lnrn~ii0eDo zUDpHGV|SbzxjF8jyOX=KyPNw3cYk-eyUJbdu5s78C%Ip7Z*aG`54lgcFSswdFS~zm z|L%dFc#qbT=_&B^_e}Gw@vQeWdNz1AdA54Cdvc-X7jMFXf%) zebqb5JJ%bX?|sMniT9xQu=kkvxc7wjjQ6tl7wp24AypyKk3ow{MScuW!HaQ{O?~A>V1=_r3>yk>Bd? 
z?(gRx?Vs#_)j!ie+dtPo-@n|y(!a*P-rwkNj{0}|_xnHff95~zKj;6>f6;%*f5m^z zf6xCo00Lxy6W|2|frNlQ5D0_=?E)PG&j-2&@&ko|qCjzASfDCU9jFOR2uuwu2rLRL z2`mdV1kMF62d)Nw4*VL73nm2RK~+!_ObhCRSwVZy74!xJ!B8+aI4D>d921OA3C;-4 z4$ccM4AQ|h!F9pLU~_O|aC7iL@KOkcG$BjK7P5z2A#cbZ>JaK2>K4ii^$ZOR4Goor zDneDE>QGH+Oz7p%w9vxP($MnI%Fwz{V`xKYQ|Rr`d!d7&!=YoL%wEh lFNY_GsqobBlJMH_*6_~o`>jU~A}+4&&eFEspI*Zs{2wcIP7weA From 9381d6c79b2617013d700649ab545db1bd93cdc1 Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 17:23:14 +0800 Subject: [PATCH 27/39] modify read --- README.md | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index f99281ff..ae01a65f 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,9 @@ LFLiveKit - [x] Swift Support - [ ] ~~FLV package and send~~ +## Requirements + - iOS 7.0+ + - Xcode 7.3 ## Installation @@ -58,16 +61,9 @@ LFLiveKit * VideoToolbox * AudioToolbox * libz - - - -## Architecture: - - capture: LFAudioCapture and LFVideoCapture - encode: LFHardwareAudioEncoder and LFHardwareVideoEncoder - publish: LFStreamRtmpSocket -## Usage: +## Usage example +``` #### Objective-C ``` - (LFLiveSession*)session { @@ -127,7 +123,12 @@ func stopLive() -> Void { func liveSession(session: LFLiveSession?, debugInfo: LFLiveDebug?) func liveSession(session: LFLiveSession?, errorCode: LFLiveSocketErrorCode) func liveSession(session: LFLiveSession?, liveStateDidChange state: LFLiveState) -``` + + +## Release History + * 2.0.0 + * CHANGE: modify bugs,support ios7 live. + ## License **LFLiveKit is released under the MIT license. 
See LICENSE for details.** From 3f3007531f9d0b999f0c5714e0c902da80bf77ec Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 17:25:09 +0800 Subject: [PATCH 28/39] modify read --- README.md | 102 +++++++++++++++++++++++++++--------------------------- 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/README.md b/README.md index ae01a65f..c6ed52ab 100644 --- a/README.md +++ b/README.md @@ -66,63 +66,63 @@ LFLiveKit ``` #### Objective-C ``` -- (LFLiveSession*)session { - if (!_session) { - _session = [[LFLiveSession alloc] initWithAudioConfiguration:[LFLiveAudioConfiguration defaultConfiguration] videoConfiguration:[LFLiveVideoConfiguration defaultConfiguration]]; - _session.preView = self; - _session.delegate = self; + - (LFLiveSession*)session { + if (!_session) { + _session = [[LFLiveSession alloc] initWithAudioConfiguration:[LFLiveAudioConfiguration defaultConfiguration] videoConfiguration:[LFLiveVideoConfiguration defaultConfiguration]]; + _session.preView = self; + _session.delegate = self; + } + return _session; } - return _session; -} - -- (void)startLive { - LFLiveStreamInfo *streamInfo = [LFLiveStreamInfo new]; - streamInfo.url = @"your server rtmp url"; - [self.session startLive:streamInfo]; -} - -- (void)stopLive { - [self.session stopLive]; -} - -//MARK: - CallBack: -- (void)liveSession:(nullable LFLiveSession *)session liveStateDidChange: (LFLiveState)state; -- (void)liveSession:(nullable LFLiveSession *)session debugInfo:(nullable LFLiveDebug*)debugInfo; -- (void)liveSession:(nullable LFLiveSession*)session errorCode:(LFLiveSocketErrorCode)errorCode; + + - (void)startLive { + LFLiveStreamInfo *streamInfo = [LFLiveStreamInfo new]; + streamInfo.url = @"your server rtmp url"; + [self.session startLive:streamInfo]; + } + + - (void)stopLive { + [self.session stopLive]; + } + + //MARK: - CallBack: + - (void)liveSession:(nullable LFLiveSession *)session liveStateDidChange: (LFLiveState)state; + - (void)liveSession:(nullable 
LFLiveSession *)session debugInfo:(nullable LFLiveDebug*)debugInfo; + - (void)liveSession:(nullable LFLiveSession*)session errorCode:(LFLiveSocketErrorCode)errorCode; ``` #### Swift ``` -// import LFLiveKit in [ProjectName]-Bridging-Header.h -import - -//MARK: - Getters and Setters -lazy var session: LFLiveSession = { - let audioConfiguration = LFLiveAudioConfiguration.defaultConfiguration() - let videoConfiguration = LFLiveVideoConfiguration.defaultConfigurationForQuality(LFLiveVideoQuality.Low3, landscape: false) - let session = LFLiveSession(audioConfiguration: audioConfiguration, videoConfiguration: videoConfiguration) - - session?.delegate = self - session?.preView = self.view - return session! -}() - -//MARK: - Event -func startLive() -> Void { - let stream = LFLiveStreamInfo() - stream.url = "your server rtmp url"; - session.startLive(stream) -} - -func stopLive() -> Void { - session.stopLive() -} - -//MARK: - Callback -func liveSession(session: LFLiveSession?, debugInfo: LFLiveDebug?) -func liveSession(session: LFLiveSession?, errorCode: LFLiveSocketErrorCode) -func liveSession(session: LFLiveSession?, liveStateDidChange state: LFLiveState) + // import LFLiveKit in [ProjectName]-Bridging-Header.h + import + + //MARK: - Getters and Setters + lazy var session: LFLiveSession = { + let audioConfiguration = LFLiveAudioConfiguration.defaultConfiguration() + let videoConfiguration = LFLiveVideoConfiguration.defaultConfigurationForQuality(LFLiveVideoQuality.Low3, landscape: false) + let session = LFLiveSession(audioConfiguration: audioConfiguration, videoConfiguration: videoConfiguration) + + session?.delegate = self + session?.preView = self.view + return session! 
+ }() + + //MARK: - Event + func startLive() -> Void { + let stream = LFLiveStreamInfo() + stream.url = "your server rtmp url"; + session.startLive(stream) + } + + func stopLive() -> Void { + session.stopLive() + } + + //MARK: - Callback + func liveSession(session: LFLiveSession?, debugInfo: LFLiveDebug?) + func liveSession(session: LFLiveSession?, errorCode: LFLiveSocketErrorCode) + func liveSession(session: LFLiveSession?, liveStateDidChange state: LFLiveState) ## Release History From 1e7296c5661fd2f2b01dad2d44e3f6c666b4776a Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 17:26:00 +0800 Subject: [PATCH 29/39] update --- README.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index c6ed52ab..7caa2b44 100644 --- a/README.md +++ b/README.md @@ -63,9 +63,9 @@ LFLiveKit * libz ## Usage example -``` + #### Objective-C -``` + - (LFLiveSession*)session { if (!_session) { _session = [[LFLiveSession alloc] initWithAudioConfiguration:[LFLiveAudioConfiguration defaultConfiguration] videoConfiguration:[LFLiveVideoConfiguration defaultConfiguration]]; @@ -89,11 +89,9 @@ LFLiveKit - (void)liveSession:(nullable LFLiveSession *)session liveStateDidChange: (LFLiveState)state; - (void)liveSession:(nullable LFLiveSession *)session debugInfo:(nullable LFLiveDebug*)debugInfo; - (void)liveSession:(nullable LFLiveSession*)session errorCode:(LFLiveSocketErrorCode)errorCode; - -``` + #### Swift -``` // import LFLiveKit in [ProjectName]-Bridging-Header.h import From 9566b8ceb980e95af412228c4cc6ee4b004edb0a Mon Sep 17 00:00:00 2001 From: chenliming Date: Mon, 1 Aug 2016 19:46:18 +0800 Subject: [PATCH 30/39] update readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7caa2b44..75a9f330 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ LFLiveKit [![Build Status](https://travis-ci.org/LaiFengiOS/LFLiveKit.svg)](https://travis-ci.org/LaiFengiOS/LFLiveKit)  
[![License MIT](https://img.shields.io/badge/license-MIT-green.svg?style=flat)](https://raw.githubusercontent.com/chenliming777/LFLiveKit/master/LICENSE)  [![CocoaPods](http://img.shields.io/cocoapods/v/LFLiveKit.svg?style=flat)](http://cocoapods.org/?q=LFLiveKit)  -[![Support](https://img.shields.io/badge/support-ios8%2B-orange.svg)](https://www.apple.com/nl/ios/)  +[![Support](https://img.shields.io/badge/ios-7-orange.svg)](https://www.apple.com/nl/ios/)  ![platform](https://img.shields.io/badge/platform-ios-ff69b4.svg)  From 542d8b88d55573b9ba418587eb2ab2e9a02821ea Mon Sep 17 00:00:00 2001 From: chenliming Date: Tue, 2 Aug 2016 10:17:39 +0800 Subject: [PATCH 31/39] update read --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 75a9f330..5f0421cc 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ LFLiveKit # To integrate LFLiveKit into your Xcode project using CocoaPods, specify it in your Podfile: source 'https://github.com/CocoaPods/Specs.git' - platform :ios, '8.0' + platform :ios, '7.0' pod 'LFLiveKit' # Then, run the following command: From f379e59a01644ca595912c72fe1100ff52daacf8 Mon Sep 17 00:00:00 2001 From: chenliming Date: Tue, 2 Aug 2016 15:55:45 +0800 Subject: [PATCH 32/39] add a icon --- LFLiveKitDemo/icon.PNG | Bin 0 -> 15453 bytes README.md | 2 ++ 2 files changed, 2 insertions(+) create mode 100644 LFLiveKitDemo/icon.PNG diff --git a/LFLiveKitDemo/icon.PNG b/LFLiveKitDemo/icon.PNG new file mode 100644 index 0000000000000000000000000000000000000000..2fe52d759419a6e413ea3c81190a7a3a104f6961 GIT binary patch literal 15453 zcmb_@2UJr}w{Jj-fQkwThyoTAK?uE95mD*Vq=S&q5`whQgCd|5!9op95Rfj0-YtL% z2?A0=fKY@`LkSRC;2r+|@4N5aweEM{UGIHaSvfgpX7AavXU{%+etYJHk%7)h*7K|& z5a{H+ySE>JK*s_=AUgHqOu!S)Qb!DM;q}#i=xc(2`XU{@As|g>#3P8{J-DL_lhhzdY;)y?#w??Zh(B_{-2-0`T6cmUiBpay|d)C0U6ojf4Ef{!3BZl0=_R~lO{ z3%WV0Ubc|cm(cgpg1EZf4f2MV1R0n*1$j6rI$u^-6I2OM0vNy{zK((caG0l$Qh@5^ 
zzxXNv|BoJnFAM%v#n(gi^4~%|)Hf2;LU=<2WyPh%oFpVA1?A+#C1qsgSb47UoRyv7>PuRBc;U=-Y#HC zMMcFU9#T?bKn*dUKu=%C05MOWEC1wh8{*^S?dIj{hVT?T;^_DY;peM*8IbhvF2KG1 zgRQ5}Khy*$3>@I-1(p<-IO@_rE9&e2_f_HW|ETTb`vCI4<^3-i`9SB{$UQquB0M9|87r^!Gmu7r6Nk(}#Eh_U;WBb}>fnEeNF0bMLmMX~4+R z7+Z*qUnX_UK3xvpuN$Nr)S6OYWF?H$*4AEx40Y<9?$!1mfcS0FnXl2)tFem+e;3dF zSn^3N#@|-Eov*}ZFp`n+ord*|=Ds~y`0JW`BUSg_Kzrm13-w%Z-5JtD($d+~#UzW7 zA8BdQhthA-5C?~pJ+sZt&1DJ9nWIS#5{6tfGiY0Mx^Q%pvvJa^WrX+dg23ftJjed3 z!p;E9aS%vwg6`;Y9B>i9ho1lFH$0sA=<;_35FPu`jYivFWT#h;ZvIi>-%a~B^Z&PY z{~KC*eyt;+|LuJLue$heWjd1RztQ9WlvX(E$qBl?1!Z^tfC&66EL3$F+PqArKDm-v zAqkb`=w>qXZYe;UnMvCnHAu(a`?$MwS{oDE>eIq6t6Vo)|3Q>Cz;r^EPmT!NPJ`xw z<@h$C7|qQS*MaIrf@Yzip>Q1a)d|KW*AmNqYPWxA=hF9r3!@VtQkvfr{IgcM5>z(6 z%{WdHsGq35%%GCGc|n2@R)~Muh<$kx*Asez2R68>i7CpKEP*?BA6Xkczqf(3KiX45 zdE`Nk@`VZ99Ja0~2tAJ*s%TewGFe zMs-)x1Eo^8^CwPRk%{1Kk~!?IgEkiur7k(dF?-&ijk)kSFHrBKDLxlC5s( z&=4~ok+wn!x58-SmQSchOf!Ro8?v(a7w6^_)(+2Hxij4M1b-O6|NYe!|LxVu45D~= z%pET5`k+hAI=FjF^(@(43!1LqG6LJ@f<~N|sv~$}2BWe5hl^`_IEN_2_nR0@wmhFg z`q$%vuk8XRBM(+6vsldJ*J$ogb(|t8+<#>pu8bsC{djYo z@a+Xhe^f<0pZrKm=w2xXBRxrCeZ+B1?;{@LLP)%%bt|%6A1#N^QVkA4Apd-SB^S0I zfR=-wcVySNLzh6jFax88PJXe)24K=^6+e?J#_)}LZc&KQ#<2=5Fd9yKv`sIfBd3^c zpyuvB(xdPabh)Uyy83XhH&5qEr2xyyszIIZ7Uq6FUMl`as-#mN(?={?KAUrPVPU~w zzqPEa4App1*?$9t%~0%zV$+l45@qG(3A*6s-zeLj;Yue%rDP?~-98%Frta6m;-aD# z=+>o)q!RKNluw;6WG{+}&Rhp~_EdDeb1ZDUq~9gSHT)8GGR!ve3zXEbneteL4tRkU z=vCf(b4~Izop6Ba;XdU$R87{uztE7H`A#)pH3vS{P^~_3Bko2BLMbeRzV2fuhh%PxPsa3w&2aUUg_rC&5&jyb2SO zJ-77UArAI;Xp@$jdPDu2_LTIYZO_B?E-)1}^a&@8yBqeh{!0dVh>$W+VnJO&q(X8) zB7E?(KYKqzp!({^KRPB^P9rG-WP^Q0$G7->X6&`l%iDgY7oeo(3nrkuw{(wfMmo?L zI?ZseR^9AJ*DC``I0laY4R;If-SkstL3A;*XDJZND=Q^wgnSN(X1oSg#FJ1HAHf_R zZqZv4ilyh0OA9Q)Z@1o9YH{mW<-Gn;e>dQHr%$vcLb;O*l*ldeVg09Q#m1qvbckc@ ztZOjhnptfgAvXV(ST4PYrppv3xxgTEZES;Fvz=I#dd%t>o z17SJ${p;nm%&o1_Yp@UHg{+zBMg7azOl9i($~T%3Opy)x@g=yGuB!7QfZzN1xg1f5 za4o0c&u6y%krf3qVgFzoiA#~vV^1aAh*+*wHQUiprf?O;LM~DBZCqbK%8h`VR#o5S 
zL1MNIRfl_o@vWMs2+uWied8FRN-@V_?w_8OXp!%2}oX2cU(Ph_(JwbQX150RQ2 z$u#3|WKIc|$x!s*;8d|Z+NTz9w*82bv>!F<=>yGK(oF z(bU)1-x~Gl!E1Bdny!w!E)Gp7#~WJ_il z!I3gp@Ajy??M2-Xzpmk!WLDI|74ElWB3oP#3=nZs!8f?U7Lja}Qelaiz0JmiOJhhh z%!sfs_#KlIY`6gbP4xU(n2YIge{x!;s(L@&2@p(pIAGEM+diG5DC?wb=9FMQjsWCqNwdrSS?L?#KGS4(s{ zhaQki_bbc8O!GN~{5_pT@(4hi`D+OQijhbELPs6c|d8 z;V6OB2Eo=kJJ~`;#PwDhbp%4+a(m{7r6R-BZ9>-CPcR%0faq}D;0o{Kr6nIHha}aGYwgpyO~Z%@%?s?b1tA1xxcdH z<{C=}&x7Y#1@@tfOomOK4=RZMs;R2V;M}fyYtl*H=r?5FsJqcY5BMohB|E)V{mr|i zOvD`-s=aiqCcWLB5A@`UVY9v=`Y5>ZM=9?pzG&G0bssZGydZ!+1Kl+Eo)Hqyv5Wh& z!;mxAv_-FE;jy>1%u9Uv#rB%EYLf#O6z^bbYl~7a_Ii<>bV8bMM*fszvXMLCBOUk+ zz88kjae5C%K>OH2+-q}aLl4zx8~y%+mM)bc93Z-isS8$VVdxQ)+{VKzIDeO4FY_(w zhw7C!d(bHNt)BxpWteW=-in{vcbQNk?L3tqgGVZ5-}&Iplq118RaMoioUr^}e&NY6 zLx#dz>&aKa)-^XPE?r+`PCh-L^%b_rSZIUY4y=81PcZgm2y4yxVC=$tu%yTbdclcz zsY(|4M5vLLFV%iPH9C8;Nm>m(%c1ic@*z}b%%OJA9z#q?wr}9NH}J*ri%x(i-FH6h z@J;)c{o83_Vq&J3&V(jfXc%>YLMMizX}6eF61Hx@BFG;J+;7hR=;Tj;iMTE7hSj23 zE~2V4f1Sv((3FKexd;U8Z$FiTuwP1|b@n7rWTMq3{NPBw`V!;a7hXf~nuP~|;%7bW zgEHmk{VWt$21PS-kSVFv#CLM%6t2-a(31ku~&&$E2=Ep)N363{Yh6H*P8B>BVkii5{( z)s}Z%Fw4mG3&#>o#_O##p^%s}pPC$IxD;8Y*iqYBsQLp~aac$H&1C4;rY)9vS3blK z4;ULtDe4q2;-)sZL`lH%~J5PIBKT3qYNm_o?HqEA~U>4uq3R;|g07 ztQax-o82w&mLEIMHdT5qffTu&&0Q7;%NU6j!!^v6q-3EyCK9WHCV}_aJIYz@_#u<{ zl&~!znuu-*RmJriD3=`)-3?FwS?lQAYRH=Ne-!&}j%dgDOU%0FS(vBme*U|6Pq^8D zzG|~b?G03`jwHNOZtF`;XTf321upE4=8A;suwf9PIOdp5)XM%ePme4}aZ;?=WG&9r zyvzzRHa5mcRA=M>KP3Ff{Z;b@SYAnEvmECyHuV6%Q?y@GQ*$u!4uXnQa54Ps-jk(D`~1fc zwQGXvU*3KyI%0@Tzwumn;?I@mt~TdxKKvyhQw=s$+i^auj~!pv5p&#xHk=c;;VJ%p z|E8<}_eJjbTK=Am`TkJ03fDmc>5`E_Hstu`v|Bf4vOHpZw6z&`#`N^y!}eOWUkVF6 zh~bZu-yWD;L|I)07E@;Dv9OEy*I~-Cw=GQ8KFp8LMD_SaiFvgm4m&4mZIydq>wxE2 zdW3n$`uy1Fo_p{}>3kR_l->F;&f&B&`SJvv!p_i_HTG@Cxnr#@oOkSk=JO8-zfx-7 z8fxOulKpb5^sF+hlC1oN8Q%BrV7sJQ%t=|nNC$q7) zlDD+m-Auc0J55F^^$xog=uM~32m0E5@Atkd#=QA5b}052$6lQ*f3Zxu_%JS0xfVtA zZQ>f;7;9{i&sq@RAtV^8TUWUnn z3vmxxOfdWEjr|o^TT(HcE5TYEq|yLShR}b%ZDwz8j}EhtI#3=;FAHeK_ksrQ7LLt5>#z>Q(P`Xn 
zUj4#$J-O!WYmjNGe@yAy*fS(%0{J@>bIkCz2XSDid`5o0wj|BjJh&L&0>XqwbVRZ_ zT6HMyEq<<1T~+9j2MOnFE)G$nYXl<=Wxmd1YuamsLE-l`+eFdf$d~xG1_uIpKgD%< zkG$8tOKiB&{U~VPKJ8@Mt1xA*i221CMP?9V?o0|RTysR* zwZ6o)$mw@_=P@Aky!7&q1Ev?jRs-*<81?y)o8!`o-)ejcw?@6dpCQFJQjK=u(cqs; zGrt5b_bd1hFRe0V`2hHY-4*Xhm1^F*gP$}faf>>n>T7^_TENm<>VOq9o6R)5nu?Dy zOwTOJPuEuCMs$gJe%I9@oqE~uy_8n(P-|_A#vhzO3&0xl+|Lug`f((4R&)jP8*=?qiVtCTRb2eMef zDBR=@6DacF*X#HCPfss^+S(NZW}tnYSSDEpf_bFAxxJ^qSD+)+xQoxCMSA-tn!mrz znB2E{< z{H)Gjbh__E9ls>v{b>pdZ9Kr!i=}n{ZqdQosa)Tdx|?=@%1o4>9XPZ&>CI1I{9Il_ zFAg_kCe$n^khe7B0+oA?5G7!e3!6a;4}O&@-Hxyj8w!~@k5O-xwE?kBxyIrfN?wAy zU@T#()k>)$`cpI0;w=6&r%ZAvdFS1x#i>o=cUpao@*uB+tH;x7AQ1ur#k^wJ-Q69@ zSh>O*BUMGc4)rMi_zUALGAMO$zt*AaV`Lf;HDrLXB}x%?^Z_X5(&2{0@KR|$js{+3 zkL$sn7-a7;QSK?7^v0YarJuauG1%`uxYi_nhy58_)k)OMGZJdLo)G|&gx#-CBsF5? zfyh>svgo1srw-Yu|2sO99Mx+28?@hsN~>0Jt~oTmZoLKdZ}lO|p+n&7*@%X5*j^?h ze>WY}rPWn#9|(m2p#gj2+3g;rm09-F@I=#13FI}{_Y=r-dZe#K>&}R1{3|iC-owO# znfbWG9DI{N!FWOK7;N4RI@ZW4|91MY17ER1d3%Il(XqF82Q))HGQB=giFvIR3qNWn zYCnrW_yb0l@@pY#x;#-M(ix3Echp{zIDq<>`BsHLJzyy=#v0g8*?+ux|4s&6?(nn2 zB#te@r^PaL4NX1v<>FDrgm4SR8$B#qL<#gBy!Nc~?+;~PzYrTkSH2i=Y*^a+3D~!E zA1IZb0qSbBX));Uf57J!c1PjjacTqhQ2!286gx7jXsA*8t&)z!(7S-aW$2rV*+2kv z4u9RI7?5;dl_-b$3?r#RLshkdwoXc+1JNA6)ZmI~Pn195VPnL4(kw(*m4R=?=L=~@ z)_kCQ0-{T_;sK56Lu5;K(lj4gny}}Dfg=?X;1H<1Rf9$kvI>nret>Rou^6x%>k$-1FH{~J+=A($0DXXmKg2I7gk_N%ZGB5OO^2U9AW#w zl4ecGPJ*y2BtMLo?8xIRY9&5vl|?^hQ{4oL0SxM{)v3C9A82EXQ=qE+sYq$pQvB#j zdVH|F(_gTwUHjY!v}WBl%_8FVvy)z;e#OShe-VoMj5T&Aw|*AOdMafGkvnvOPK!GV z=X#RB-8IClfE`BNE~;cjJ-&3gay{6_1R;SAxmVX|TRXh;+q#STP(9cVqH6c7zY+%< zNP3adWB*7lx3Y=fgLH~i8TPxhw*8P4Pn6_q79L$m@D9t?a6UE;>JWTty7?H(^{jDS z{g;{nE1i`m#yDnArVpJvgZ=+-s`z#eYyr4*h*QdX{v zJ_(P$x#6D*hq&o!<0)DgvT7V)X_15LIv76z-HT-RLJ0%m8^?H`eh3}5f8<{!R&Sar6VP)bi8 z$Lemw*ov9#1)U7N$(K`npr4;>grJS&8)aTPnpaP+-cN*v9xYGDa)L0@*)k=WovGRb ziM%-USHo^|>yL>@uQW?UBL({4+tV7cRv7}h>78HG#JOGGnqRP=)ld#K7>!Gv4kXNs zZh_QL_YTzge2VZ(9%=Pj_ogp23A+XVq?eV>rX@UUd_D0jR}tCR1?@M^aYxlCG#2gI 
z9w=eVaA>PNA9Bn0cPZV2wvd{wE1$DcK%lqYbspOv@uQ0@NC3r;ekvm~^gwU7An9fL z3mjj^UId&_U+{5nuo?^RtlYnKeOUEPNTaH{*Z2tlUuOWlEg%016|J~;kqiyZcuY^n zt~gjPPLorjmIv`TnceSblAc&YkJ5MJmKzeLkAsBAylQuSpwU^B z!<{&57NoFfMg+HzErB<}+ms5#D1P!YNOjXP%zQA^x`n5uKIokbHy!9ZqpSomKxf@c zo+OTWT;Y@fIHs&Wd&ksb@}N&C_xYCPWDWCO><_jaiC1uZqb?cp4FkMUUp-%50di_z zY>Sy@r4d(;Oi4v~q?_k$$ve#0!rL1OD1Q1Z`pl=9KrU_DxM=g?*Yms_Z&7t!I>&tM zlm#ayuZHSm+TV)iiT2JPJU5@spt$e!gc4OVa}F~`t=E;N6Xe}>eK-2y$$_%lf`KvCt;>6&csxMZu1)z1&)dy4 z)r>H7@q$C`fKMB25e`*e`C+{GRD}LApd+9UgL^DU;H~rAwT;gRy-O;NDk@Gbei>LL zv=>^O<|;?&f@0YLADimL01_@c8pI!bsfNv;Ve3M196(NT{e7XBMjPMTeHXl(i28h3 zRJwjW4mPq${`4!^-a7;Yn&BYNj%pwC0W`#TjOS?#dw?2g;-U?ip|L0ZVt|T#PbC2A zV|Qcl6K)qcKpOML$0+g0(}05PZd`bACQj3mx#<|)x<&U1JE|uJYcJu^K-}r6Tv|c> z=EFh{-{4oZFR+20rOFw`&ElCkU%MFa88%;3?`Epz^%f5#jjq{Gt5mxyFo9lM`%r!y z3d}OZx>;N!h7rzQe_tBA7q0pRM&8V)q|*2!o|=}&diyF+d-v-s7qt#PUrj3aBU`{u zU+^iiq)q)~3wC#C*%3W(`XD&EJpiQ0T1f*%wFO}|*aYV=6myDDOISqe0ri<95W?jK zU3D@l4^obR53PlqIxv`cR{z67SperL!6);wk~z_?A?@?PYY?`9Hgi7gHR}hQd zARO-2lH`V$AD#_^_2a2Id-+Dvn@&lO%hPWCI&zBVs+ne^2X`(|IO!@p5dJ-97{~R> ziNQjxRKTpB({+Cm)U}E#nn|@vW&!OyI1Ivwsik-n6m!y#A8>Pk`_H!8JQIrS)VJ>m z{is`)?dLg%C&y7U&|TCf%mdaVYkNVLU~lw2k+twccnoV1NTX%`SoQh4o$@JzQs3qU zVh)(ev(ab4e0$|fh}s`4e%ACiQ%bCt#3Kd*{UR0<66Ph4qYVjQ%_Vn#7vxAv{;62j zpptSKw6FdGq&nZVzxK7f#L$BQ{LtCI?C4q3hy%%;PAVVqkL`#5+U+?(!vgP>l~fo09Zb(-&QG2?c+^N))b znI%a#m)w515&G8Cdhc_QH##FN1;7bIz2A2{wYG98}vXiew z$bQ2`HXlO#-``kUce8K~Lt8!g)Jf7WBI)lo>mbZ_->vAW%Ih+Dse-8$7zKmJ)_jGR&QwZTm=L$SD z+Xn;{e|#(pCp&x~bUmcmd%Y*x?hixL^`~PxhK2a_+TEu4X6isFF`)6#sq337Dc*as zHui8d$5=7^gK=fj^7L7)JN;U{84!HyXlr*1zmE9VyO(h6=#Rs;TS0eL^CT2eaIU;Q*QI6Ta}!y z9rBiSCVu@LD}Vbcfp}JhZ1*MfKA1@H`nXuTul8fPrKYLlp6Zf8{m@bdSA7wg3A8g| zk?~Em=ByBFMwqtIZyA-+%+^(PW&?~K$2sn;$2abZ7|Ct#ubJ+B2#pV_>wn$nJZYe- zuS4n72cpA1ojm%W$$6W`CxzP#wb=!&<;gB`#8(0J$u2?p1zqidi$L`Ir8LIuH|Vk+ zDexpkG1{u#Jol>QO5?{#&6C~JY_`v#-GhZbdfKwMLs|pRZK|qQL@grUfHGP1!RGQPjZ0e38&?caa2DN4SV9V z&-{gmOAZER{gUN77xKPqeL=Bn2XjFg(Vq=-`^EWG#6b+I&crA)xT{3$3J`bS9|^yd 
zw5Cw4=TqA=Q`VEJAB0?Aychtei_s=aahWyC36$tTYaR=UDf=L#$(fEoR_}u8E>H`+ zYV+>QuGQvAhg#7&UJ#akkflg%O$swV``r`DnYxL#Pca^87e>4b}mF^%@&Kwr-+s2!d2Gof_hmyZ!^ z;MCW!G^odGwZIv3RgUMMBD%ToSNwv49DMu{cq#w2P_b8uia`z`ks? z5cYHr1-Z1X#y?QlkYhgUGmWSjhQ7sEZt_-``t}xU|H4`!g|#GleU~0^)hteS)2N*% zF0^C(UD8jc`lH2L*H^>0xvYCB`HLp)%7w){kF%-SiOnm&1y$9qx^Id9B)y17xjbq! zK|de&VB<@zs`6W;{rH;MQCT?F8X0-;FlVv&mM{>#+@DOdSQPJ%%Wm&K3~re?7=2SR zlV%uY^QyEM%?YIocX$o7qUhD0de5xRsDCrSF_^d`dpBWWDN|?CL{m;pp~&|3JE*Db z!H@H)!IK7)`UJn_RieRXuhjsTCh+7Y<1=7QDa4v2ac9 zOxl{1r4@dw8;CVVA6n_x>0~AGte%2n4cm0N$ue3g<{lz__DIq<<53sp?{ zHyn*$k$C+%yqiXIWhqZKhH{V%nk?+y{GGXzWS`vhI>jHOOX{5uX$WwSO@sx`)=?!r zL8r}r#FTAszV2eiUftPjVQZ}V!V6~JI)2TpCC-a@oSUj=07n0WkO%T3u`t)X;D}G;G?(=Y z(vXSx(0&jxzD00<*>`yjWs_eZUj8VbsAqkxW)!V93h^noY51AW=b8KI8nEba7P2mw zkDf^b8J*w91@;G;6?>(D(~V=;B}C%R7Es4tgx5;1`L7n6p6uoxeS^LwYzNU-SHdl- ztz?O^OkCx({QP6YJdzN0SIcv^*DVd&+0&0m!LE0IOu(ARXOkE~&&=Wgrxf4RDUMsJ z2^yIiUnxkLeatiXu+E%?66K{BoQ)9lmWfMz-dm8;d4|FHaVS51svcos>&Wur)*l0 z{2y>#VM%HIytU~d!=sP4IeK#@3P36Mg~WPB{n2>IX~_rLoPqvrjt0dbix&gZjWuay^M-`M4%9VhIaxfow_Zn!R~>B@7(zP+Migckj>?}f#{tu8?*_q_e@X) z?BbqlodUJC$UPl@&gO^q=j0HT8e!L+e9J`y_L{wu9PJuuKskma8QwQx0fSIqk1{ry zXA5Qj)yRvYa)l??K4lAf#O~=tl=F>7k=A@)l7g;>WCe+K9_NY`O4n#Hn^}bn0*Tb= z?CxZopW01YEai~DgIj}zq3LS7xoNq&wbDwlDA`|XIW)TOU9Na~{$^fGVR0$o^gw^K zH;-TcV@BGNOQ=b0_O8ZL3LB+^L8l@2V#`#rBEqS#Dagqc$8OQj8|?Yzr9~Z7VA}Pv zvt8khcp&5D8|Fz}4Z@hEq|N^Oie1;VB|zA04fi`#>@c3lNSN%&7&0T zt3BV_tYC9LjD3;fRv^LMyb_h|w+=X8-^=GOfz2S;>TF>00S<8&{0UeG;+dLY_AfiT z(_36>s(N9IDcwQrQe;rBcDwo1vfb;Q<>fHJHL2FLFE&3Q9?XAUS^#??Q>#!Rl`w`O_(g6EUgrz$Jq3EZD=oT{2GU$n=}OvwRB4X?XcqWGI}wad}A@ ziwniWJ+s8P?2XTy<8o4Ldg4TvQ|E04gH#wYPVUN+Jk>p1uG;y}CH3*GWVR~ol#Gh% zKCt8voY4MRi38Cgc&1HbikTu?alq=9A|RtY{TRT%A+7B0g{%7^wargQZh|xh-t!+jqCXj+t*d0|P;7P^YT)5&S1VybOZFNu*&-}wE<3$1Zxq=Ffzj9d|02sWPhr}{WO|N6yM`hB6I zij>D9+Ny9myZGwQhze*funP~1v-FgkU5*2X_f1>Cw55IKp=MjE7nf?hH8C0y)3`+w z*S>nVF^M~Q5m(YgvMm0YzQ->^sChPY8JHq2$6XCuvAv;jCiVOTI$q94yftmG&nu%9 
zBmF!sp!}~D!!gj=2S6YdXtHa>FPhT4vcH{47}{{C>NLM($#sRJV_!dM$41x8(q~8& z@P>5kfqkti7%&V=O5E}_e`v2iu=GDzlW25zMDZP z%yPeYMh#UzKl!bT(`#QO!kk{j<7s&G6hjlR1>qvT`(aY<*TsH>^ar)ZJ)=w96~?>2 zd+JDwbQJ5=3a2tL3p%-Kk$r1a@HKXckO<$5V1RK$%PyiUem6_ki#KOy>dk1gUM1+_R z9G)Z-Po=$@W;f^`9S@AnlH&`c>!j0=8Q3%J-|&c-7|L>(N{VF54Z{QwR$dPFsd1t3 zB%iQ}-wA6fl=r}^Y4e~j@aCE(KfwFfMA7FOk6m#9+e}BDpGZ>pt#}ZqB!6qt;1ZZS zzK>3D=nnXPsc8{=`C24+Yk!#3;KJ>xE*nd-;Dk`mS5UH@`YekY{HVM~1NVZn#e zqA4Izdykmq}wVa1C)5OV)#Bxlp(Zje9Gq({uz=BY2Ta%)G>sO<%HT%C6RZ$cg^b6 z4UI7V?vWD~V$yP>X8mlk5?ChW*hE$^cTY*F;^Op~os4ZN6ufiOa&Xc4&;)8y?rbT3 zJm6Ix@0%AWmzqUFoVJirK>F_d^QCSOwPMPN*#4Mh0Z+MqDA{fQCHjf)#?ZQpfar2( zPU~)VkjDnqq}_a9&AIcz@zTr(`=JABH=DpaXYVmegce!lrB}M_DfP!?;St+>2lDBJ{4{pA*V7Ey2IrM`nWp8_op{Tw9cy2ySuPr$7 zS#t3d^2g{au{dpf>pL5`ob$D55GePN=sf-Sx8s=>1%lNR{1~|ID8xIA^VCK@rD=n? z>J?iC`_N|5v25skgLU^vwGxSoq_y{EJ~*o=hm|x6(MghJFWX{Y3bH%p`TS1T3sc9c z!yOE6$1-M%W%nyTT|#*Dtyaf431!+4kYk_7>WN&J=?;98y$0!^1F6_mCYfSPBbP$C z-##6fqjbxSrq?<5=Y6eGx9#Dtz^N3Ge+2VrBQ&Dj(OWJ+-M zv&PY=CetKYzj27Eh==<%MDN!h=$gDok`dgHg?Rr#A83iVwDQK;X=Ay`&=$(MEK_6Yl3jLu618qBo6ydGjLX_MadG)L_d)J0friceJNQO{ zReN&E$@ut=OGGmzCzQ@M?Ld^IZWNRxixyZrtRFeMEfy`-;h}?C{RPWPP9dXuOMNhu z3}v<0(XG^XQb#AA+)hs1jk^5BwAZWC7D^z?EaE!@-Y%7tb*Pd&XH&+ara|HBg_L=v z!PM#sUo|iFHz6!8#h~&Di78)F|A-3f2hiQZ%y5hQ8$;cyhqEEH?_v-{C(oM)5+6Pv zrTA}TOYuoXtN~j}hU&it8mhky;xkja+D!*+t4ho(F5ir^?35zKKPt1NijzDlCIj#Tc`J(?s`=1Vh0b5s&nZG+q8w;Z1SFCMqy A Date: Tue, 2 Aug 2016 16:01:12 +0800 Subject: [PATCH 33/39] update .. 
--- LFLiveKitDemo/{icon.PNG => LFLiveKitDemo/Icon.png} | Bin README.md | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename LFLiveKitDemo/{icon.PNG => LFLiveKitDemo/Icon.png} (100%) diff --git a/LFLiveKitDemo/icon.PNG b/LFLiveKitDemo/LFLiveKitDemo/Icon.png similarity index 100% rename from LFLiveKitDemo/icon.PNG rename to LFLiveKitDemo/LFLiveKitDemo/Icon.png diff --git a/README.md b/README.md index 9b15f290..5bde5cce 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ LFLiveKit ============== -![icon~](https://github.com/LaiFengiOS/LFLiveKit/tree/master/LFLiveKitDemo/icon.png) +![icon~](https://github.com/LaiFengiOS/LFLiveKit/tree/master/LFLiveKitDemo/LFLiveKitDemo/icon.png) [![Build Status](https://travis-ci.org/LaiFengiOS/LFLiveKit.svg)](https://travis-ci.org/LaiFengiOS/LFLiveKit)  From 6cf68fb37098b3f455e8477d0fcca353608b4e99 Mon Sep 17 00:00:00 2001 From: chenliming Date: Tue, 2 Aug 2016 16:02:30 +0800 Subject: [PATCH 34/39] update --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5bde5cce..ae273d03 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ LFLiveKit ============== -![icon~](https://github.com/LaiFengiOS/LFLiveKit/tree/master/LFLiveKitDemo/LFLiveKitDemo/icon.png) +![icon~](https://github.com/LaiFengiOS/LFLiveKit/tree/master/LFLiveKitDemo/LFLiveKitDemo/Icon.png) [![Build Status](https://travis-ci.org/LaiFengiOS/LFLiveKit.svg)](https://travis-ci.org/LaiFengiOS/LFLiveKit)  From 7a901b8b362c124c6ae0263eb141b49f6ab62850 Mon Sep 17 00:00:00 2001 From: chenliming Date: Tue, 2 Aug 2016 16:06:07 +0800 Subject: [PATCH 35/39] update.. 
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ae273d03..eb44d8ce 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ LFLiveKit ============== -![icon~](https://github.com/LaiFengiOS/LFLiveKit/tree/master/LFLiveKitDemo/LFLiveKitDemo/Icon.png) +![icon~](https://raw.github.com/LaiFengiOS/LFLiveKit/master/LFLiveKitDemo/LFLiveKitDemo/Icon.png) [![Build Status](https://travis-ci.org/LaiFengiOS/LFLiveKit.svg)](https://travis-ci.org/LaiFengiOS/LFLiveKit)  From 24e6c4576b3a38e55c858e7ab6b5f55f6862bbd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=90=A8=E5=90=9B?= Date: Wed, 3 Aug 2016 10:41:55 +0800 Subject: [PATCH 36/39] change the AVAudioSession Category before start, or it will fail --- LFLiveKit/capture/LFAudioCapture.m | 1 + 1 file changed, 1 insertion(+) diff --git a/LFLiveKit/capture/LFAudioCapture.m b/LFLiveKit/capture/LFAudioCapture.m index e8485329..df32b488 100755 --- a/LFLiveKit/capture/LFAudioCapture.m +++ b/LFLiveKit/capture/LFAudioCapture.m @@ -124,6 +124,7 @@ - (void)setRunning:(BOOL)running { dispatch_async(self.taskQueue, ^{ self.isRunning = YES; NSLog(@"MicrophoneSource: startRunning"); + [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:nil]; AudioOutputUnitStart(self.componetInstance); }); } else { From 408e6e17f5bff1e51d6676dc8a4df9fbda1a61f1 Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 3 Aug 2016 13:23:46 +0800 Subject: [PATCH 37/39] Update LFLiveKit.podspec MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix issue #36 关于use_frameworks!的问题 --- LFLiveKit.podspec | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/LFLiveKit.podspec b/LFLiveKit.podspec index 49a0e5c2..553b6a30 100644 --- a/LFLiveKit.podspec +++ b/LFLiveKit.podspec @@ -11,7 +11,8 @@ Pod::Spec.new do |s| s.ios.deployment_target = "7.0" s.source = { :git => "https://github.com/LaiFengiOS/LFLiveKit.git", :tag => "#{s.version}" } 
s.source_files = "LFLiveKit/**/*.{h,m,mm,cpp,c}" - s.public_header_files = "LFLiveKit/**/*.h" + #s.public_header_files = "LFLiveKit/**/*.h" + s.public_header_files = ['LFLiveKit/*.h', 'LFLiveKit/objects/*.h', 'LFLiveKit/configuration/*.h'] s.frameworks = "VideoToolbox", "AudioToolbox","AVFoundation","Foundation","UIKit" s.libraries = "c++", "z" From 458759db08dea84745723b85407cfa0bc936566e Mon Sep 17 00:00:00 2001 From: toss156 <308276366@qq.com> Date: Tue, 26 Jul 2016 11:16:43 +0800 Subject: [PATCH 38/39] rename h264 encode lib to avoid duplicate symbol error --- LFLiveKit.xcodeproj/project.pbxproj | 14 +++++++++++++- .../UserInterfaceState.xcuserstate | Bin 11298 -> 11298 bytes 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/LFLiveKit.xcodeproj/project.pbxproj b/LFLiveKit.xcodeproj/project.pbxproj index 71df2c50..a8043724 100644 --- a/LFLiveKit.xcodeproj/project.pbxproj +++ b/LFLiveKit.xcodeproj/project.pbxproj @@ -1511,6 +1511,7 @@ 843724AD1D4F260A002B398B /* GPUImageSolidColorGenerator.h in Headers */, 8437244D1D4F260A002B398B /* GPUImageMaskFilter.h in Headers */, 843723E91D4F260A002B398B /* GPUImageDivideBlendFilter.h in Headers */, + B2CD14701D45F18B008082E8 /* LFMP4Atom.h in Headers */, 84001FE31D0016380026C63F /* LFLiveSession.h in Headers */, 843724411D4F260A002B398B /* GPUImageLookupFilter.h in Headers */, 843724E91D4F260A002B398B /* GPUImageFramework.h in Headers */, @@ -1643,6 +1644,10 @@ 843724691D4F260A002B398B /* GPUImageOpeningFilter.h in Headers */, 843723C11D4F260A002B398B /* GPUImageColorBlendFilter.h in Headers */, 8437249F1D4F260A002B398B /* GPUImageSketchFilter.h in Headers */, + 84001FE51D0016380026C63F /* LFAudioFrame.h in Headers */, + 84001FED1D0016380026C63F /* LFVideoFrame.h in Headers */, + B2CD14741D45F18B008082E8 /* LFVideoEncoder.h in Headers */, + B2CD14731D45F18B008082E8 /* LFNALUnit.h in Headers */, 84001FD81D0016380026C63F /* LFHardwareVideoEncoder.h in Headers */, 843724A91D4F260A002B398B /* 
GPUImageSoftLightBlendFilter.h in Headers */, 843723A91D4F260A002B398B /* GPUImageAverageColor.h in Headers */, @@ -1664,7 +1669,6 @@ 843723A11D4F260A002B398B /* GPUImageAdaptiveThresholdFilter.h in Headers */, 843723CF1D4F260A002B398B /* GPUImageColorPackingFilter.h in Headers */, 843723B91D4F260A002B398B /* GPUImageCGAColorspaceFilter.h in Headers */, - 843724051D4F260A002B398B /* GPUImageGaussianBlurFilter.h in Headers */, 843724B31D4F260A002B398B /* GPUImageStillCamera.h in Headers */, 843724D51D4F260A002B398B /* GPUImageTwoPassTextureSamplingFilter.h in Headers */, @@ -1676,6 +1680,7 @@ 843724551D4F260A002B398B /* GPUImageMosaicFilter.h in Headers */, 843723BD1D4F260A002B398B /* GPUImageChromaKeyFilter.h in Headers */, 843724171D4F260A002B398B /* GPUImageHighlightShadowFilter.h in Headers */, + B2CD146D1D45F18B008082E8 /* LFAVEncoder.h in Headers */, 84001F8E1D0015D10026C63F /* LFLiveKit.h in Headers */, 843724671D4F260A002B398B /* GPUImageOpacityFilter.h in Headers */, 8437247F1D4F260A002B398B /* GPUImagePosterizeFilter.h in Headers */, @@ -1795,6 +1800,7 @@ 843724201D4F260A002B398B /* GPUImageHistogramFilter.m in Sources */, 8437246E1D4F260A002B398B /* GPUImageOverlayBlendFilter.m in Sources */, 843723A81D4F260A002B398B /* GPUImageAmatorkaFilter.m in Sources */, + B2CD14711D45F18B008082E8 /* LFMP4Atom.m in Sources */, 84001FE61D0016380026C63F /* LFAudioFrame.m in Sources */, 843724C41D4F260A002B398B /* GPUImageThresholdedNonMaximumSuppressionFilter.m in Sources */, 843724F91D4F260A002B398B /* error.c in Sources */, @@ -1806,7 +1812,10 @@ 84001FD41D0016380026C63F /* LFVideoCapture.m in Sources */, 843723B81D4F260A002B398B /* GPUImageCannyEdgeDetectionFilter.m in Sources */, 84001FE81D0016380026C63F /* LFFrame.m in Sources */, +<<<<<<< HEAD B2CD14721D45F18B008082E8 /* LFNALUnit.cpp in Sources */, +======= +>>>>>>> rename h264 encode lib to avoid duplicate symbol error 843724CE1D4F260A002B398B /* GPUImageTransformFilter.m in Sources */, 843723E81D4F260A002B398B 
/* GPUImageDissolveBlendFilter.m in Sources */, 843723D81D4F260A002B398B /* GPUImageCropFilter.m in Sources */, @@ -1815,6 +1824,7 @@ 843723F21D4F260A002B398B /* GPUImageExposureFilter.m in Sources */, 8437244E1D4F260A002B398B /* GPUImageMaskFilter.m in Sources */, 843724841D4F260A002B398B /* GPUImageRawDataInput.m in Sources */, + B2CD14721D45F18B008082E8 /* LFNALUnit.cpp in Sources */, B289F1DC1D3DE77F00D9C7A5 /* LFStreamingBuffer.m in Sources */, 843723F01D4F260A002B398B /* GPUImageExclusionBlendFilter.m in Sources */, 843724F31D4F260A002B398B /* GPUImageView.m in Sources */, @@ -1868,6 +1878,7 @@ 843723D61D4F260A002B398B /* GPUImageContrastFilter.m in Sources */, 843724BC1D4F260A002B398B /* GPUImageTextureInput.m in Sources */, 843724521D4F260A002B398B /* GPUImageMissEtikateFilter.m in Sources */, + B2CD14751D45F18B008082E8 /* LFVideoEncoder.m in Sources */, B2CD146F1D45F18B008082E8 /* LICENSE.markdown in Sources */, B289F1DE1D3DE77F00D9C7A5 /* LFStreamRtmpSocket.m in Sources */, 843724CA1D4F260A002B398B /* GPUImageToneCurveFilter.m in Sources */, @@ -1982,6 +1993,7 @@ 843724221D4F260A002B398B /* GPUImageHistogramGenerator.m in Sources */, 843724621D4F260A002B398B /* GPUImageNobleCornerDetectionFilter.m in Sources */, 843724341D4F260A002B398B /* GPUImageLanczosResamplingFilter.m in Sources */, + B2CD146E1D45F18B008082E8 /* LFAVEncoder.mm in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate b/LFLiveKitDemo/LFLiveKitDemo.xcworkspace/xcuserdata/a1.xcuserdatad/UserInterfaceState.xcuserstate index 390dd66634d84f1e48fc828ea348ba7f4d324239..e20b6620997f0d7dd823d85f46116c75238f48ac 100644 GIT binary patch delta 31 mcmZ1!u_$80TV9qzv(OEjKl1YP0a>hPCx7G>-z+V|E&u?{R|`V` delta 31 mcmZ1!u_$80TV58o?dR<_f8^!o1G4I_O#a9#zFAs^T>t>z01S5k From 77122b8630fa045402bd8158374f6eabb500c3f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A4=A7=E6=9C=B4?= Date: Wed, 19 Oct 2016 
14:13:46 +0800 Subject: [PATCH 39/39] =?UTF-8?q?=E5=85=B3=E9=94=AE=E5=B8=A7bug=20?= =?UTF-8?q?=E4=BF=AE=E6=94=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- LFLiveKit/coder/LFHardwareVideoEncoder.m | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LFLiveKit/coder/LFHardwareVideoEncoder.m b/LFLiveKit/coder/LFHardwareVideoEncoder.m index ac6c08a1..444e0af2 100755 --- a/LFLiveKit/coder/LFHardwareVideoEncoder.m +++ b/LFLiveKit/coder/LFHardwareVideoEncoder.m @@ -61,7 +61,7 @@ - (void)initCompressionSession { _currentVideoBitRate = _configuration.videoBitRate; VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_MaxKeyFrameInterval, (__bridge CFTypeRef)@(_configuration.videoMaxKeyframeInterval)); - VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, (__bridge CFTypeRef)@(_configuration.videoMaxKeyframeInterval)); + VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, (__bridge CFTypeRef)@(_configuration.videoMaxKeyframeInterval/_configuration.videoFrameRate)); VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_ExpectedFrameRate, (__bridge CFTypeRef)@(_configuration.videoFrameRate)); VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_AverageBitRate, (__bridge CFTypeRef)@(_configuration.videoBitRate)); NSArray *limit = @[@(_configuration.videoBitRate * 1.5/8), @(1)];