diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml
index fcc29b84d1..0db44ee0ab 100644
--- a/.github/workflows/codecov.yml
+++ b/.github/workflows/codecov.yml
@@ -13,6 +13,7 @@ jobs:
     runs-on: ubuntu-20.04
     env:
       AWS_KVS_LOG_LEVEL: 2
+      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
     permissions:
       id-token: write
       contents: read
diff --git a/README.md b/README.md
index 1067bbda88..586c8c0602 100644
--- a/README.md
+++ b/README.md
@@ -396,21 +396,32 @@ createLwsIotCredentialProvider(
 freeIotCredentialProvider(&pSampleConfiguration->pCredentialProvider);
 ```
-## Use of TWCC
-In order to listen in on TWCC reports, the application must set up a callback using the `peerConnectionOnSenderBandwidthEstimation` API. In our samples, it is set up like this:
+## TWCC support
+
+Transport Wide Congestion Control (TWCC) is a mechanism in WebRTC designed to enhance the performance and reliability of real-time communication over the internet. TWCC addresses the challenges of network congestion by providing detailed feedback on the transport of packets across the network, enabling adaptive bitrate control and optimization of media streams in real time. This feedback mechanism is crucial for maintaining high-quality audio and video communication, as it allows senders to adjust their transmission strategies based on comprehensive information about packet losses, delays, and jitter experienced across the entire transport path.
+
+The importance of TWCC in WebRTC lies in its ability to ensure efficient use of available network bandwidth while minimizing the negative impacts of network congestion. By monitoring the delivery of packets across the network, TWCC helps identify bottlenecks and adjust the media transmission rates accordingly. This dynamic approach to congestion control is essential for preventing degradation in call quality, such as pixelation, stuttering, or drops in audio and video streams, especially in environments with fluctuating network conditions.
+
+To learn more about TWCC, see the [TWCC spec](https://datatracker.ietf.org/doc/html/draft-holmer-rmcat-transport-wide-cc-extensions-01).
+
+### Enabling TWCC support
+
+TWCC is enabled by default in the SDK samples via the `pSampleConfiguration->enableTwcc` flag. To disable it, set this flag to `FALSE`:
 ```c
-CHK_STATUS(peerConnectionOnSenderBandwidthEstimation(pSampleStreamingSession->pPeerConnection, (UINT64) pSampleStreamingSession,
-                                                     sampleSenderBandwidthEstimationHandler));
+pSampleConfiguration->enableTwcc = FALSE;
 ```
-Note that TWCC is disabled by default in the SDK samples. In order to enable it, set the `disableSenderSideBandwidthEstimation` flag to FALSE. For example,
-
+If you are not using the samples directly, two things need to be done to set up TWCC:
+1. Set `disableSenderSideBandwidthEstimation` to `FALSE`:
 ```c
-RtcConfiguration configuration;
 configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = FALSE;
 ```
-
+2. Set the callback that will contain the business logic to modify the bitrate based on packet loss information. The callback can be set using `peerConnectionOnSenderBandwidthEstimation()`:
+```c
+CHK_STATUS(peerConnectionOnSenderBandwidthEstimation(pSampleStreamingSession->pPeerConnection, (UINT64) pSampleStreamingSession,
+                                                     sampleSenderBandwidthEstimationHandler));
+```
 
 ## Use Pre-generated Certificates
 The certificate generating function ([createCertificateAndKey](https://awslabs.github.io/amazon-kinesis-video-streams-webrtc-sdk-c/Dtls__openssl_8c.html#a451c48525b0c0a8919a880d6834c1f7f)) in createDtlsSession() can take between 5 - 15 seconds in low performance embedded devices, it is called for every peer connection creation when KVS WebRTC receives an offer. To avoid this extra start-up latency, certificate can be pre-generated and passed in when offer comes.
@@ -510,6 +521,32 @@ To disable threadpool, run `cmake .. -DENABLE_KVS_THREADPOOL=OFF`
 
 ### Thread stack sizes
 The default thread stack size for the KVS WebRTC SDK is 64 kb. Notable stack sizes that may need to be changed for your specific application will be the ConnectionListener Receiver thread and the media sender threads. Please modify the stack sizes for these media dependent threads to be suitable for the media your application is processing.
 
+### Set up TWCC
+TWCC is a mechanism in WebRTC designed to enhance the performance and reliability of real-time communication over the internet. TWCC addresses the challenges of network congestion by providing detailed feedback on the transport of packets across the network, enabling adaptive bitrate control and optimization of media streams in real time. This feedback mechanism is crucial for maintaining high-quality audio and video communication, as it allows senders to adjust their transmission strategies based on comprehensive information about packet losses, delays, and jitter experienced across the entire transport path.
+
+The importance of TWCC in WebRTC lies in its ability to ensure efficient use of available network bandwidth while minimizing the negative impacts of network congestion. By monitoring the delivery of packets across the network, TWCC helps identify bottlenecks and adjust the media transmission rates accordingly.
+
+This dynamic approach to congestion control is essential for preventing degradation in call quality, such as pixelation, stuttering, or drops in audio and video streams, especially in environments with fluctuating network conditions. To learn more about TWCC, you can refer to the [RFC draft](https://datatracker.ietf.org/doc/html/draft-holmer-rmcat-transport-wide-cc-extensions-01).
+
+To enable TWCC in the SDK, two things need to be set up:
+
+1. Set `disableSenderSideBandwidthEstimation` to `FALSE`. In our samples, the value is set using the `enableTwcc` flag in `pSampleConfiguration`:
+
+```c
+pSampleConfiguration->enableTwcc = TRUE;  // to enable TWCC
+pSampleConfiguration->enableTwcc = FALSE; // to disable TWCC
+configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = !pSampleConfiguration->enableTwcc;
+```
+
+2. Set the callback that will contain the business logic to modify the bitrate based on packet loss information. The callback can be set using `peerConnectionOnSenderBandwidthEstimation()`:
+
+```c
+CHK_STATUS(peerConnectionOnSenderBandwidthEstimation(pSampleStreamingSession->pPeerConnection, (UINT64) pSampleStreamingSession,
+                                                     sampleSenderBandwidthEstimationHandler));
+```
+
+By default, our SDK enables the TWCC listener.
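+
+As a rough illustration, a minimal handler might look like the sketch below. This is not the SDK's sample implementation; `PMyAppContext`, `targetBitrate`, `MY_MIN_BITRATE`, and `MY_MAX_BITRATE` are hypothetical placeholders for application-owned state and limits.
+
+```c
+// Illustrative sketch only: nudge a single application-owned target bitrate
+// using the packet counts reported for the last TWCC feedback interval.
+VOID mySenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, UINT32 rxBytes, UINT32 txPacketsCnt, UINT32 rxPacketsCnt,
+                                        UINT64 duration)
+{
+    UNUSED_PARAM(txBytes);
+    UNUSED_PARAM(rxBytes);
+    UNUSED_PARAM(duration);
+    PMyAppContext pCtx = (PMyAppContext) customData; // hypothetical context passed at registration
+    DOUBLE percentLost = (txPacketsCnt > 0) ? ((DOUBLE) (txPacketsCnt - rxPacketsCnt) * 100.0 / txPacketsCnt) : 0.0;
+
+    if (percentLost <= 5.0) {
+        // little to no loss: ramp up gently, capped at an application-defined maximum
+        pCtx->targetBitrate = (UINT64) MIN(pCtx->targetBitrate * 1.05, MY_MAX_BITRATE);
+    } else {
+        // heavy loss: back off proportionally, floored at an application-defined minimum
+        pCtx->targetBitrate = (UINT64) MAX(pCtx->targetBitrate * (1.0 - percentLost / 100.0), MY_MIN_BITRATE);
+    }
+    // The media path is expected to pick up pCtx->targetBitrate and reconfigure the encoder.
+}
+```
+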
+The SDK has a sample implementation to integrate TWCC into the GStreamer pipeline via the `sampleSenderBandwidthEstimationHandler` callback; refer to that callback for implementation details.
+
+
 ### Setting ICE related timeouts
 There are some default timeout values set for different steps in ICE in the [KvsRtcConfiguration](https://awslabs.github.io/amazon-kinesis-video-streams-webrtc-sdk-c/structKvsRtcConfiguration.html). These are configurable in the application. While the defaults are generous, there could be applications that might need more flexibility to improve chances of connection establishment because of poor network.
diff --git a/samples/Common.c b/samples/Common.c
index 64e9739180..0991cc03f0 100644
--- a/samples/Common.c
+++ b/samples/Common.c
@@ -414,13 +414,13 @@ STATUS initializePeerConnection(PSampleConfiguration pSampleConfiguration, PRtcP
     configuration.kvsRtcConfiguration.iceSetInterfaceFilterFunc = NULL;
 
-    // disable TWCC
-    configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = TRUE;
+    configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = !(pSampleConfiguration->enableTwcc);
+    DLOGI("TWCC is: %s", configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation ? "Disabled" : "Enabled");
 
     // Set the ICE mode explicitly
     configuration.iceTransportPolicy = ICE_TRANSPORT_POLICY_ALL;
 
     configuration.kvsRtcConfiguration.enableIceStats = pSampleConfiguration->enableIceStats;
-    configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = TRUE;
 
     // Set the STUN server
     PCHAR pKinesisVideoStunUrlPostFix = KINESIS_VIDEO_STUN_URL_POSTFIX;
     // If region is in CN, add CN region uri postfix
@@ -554,10 +554,12 @@ STATUS createSampleStreamingSession(PSampleConfiguration pSampleConfiguration, P
     ATOMIC_STORE_BOOL(&pSampleStreamingSession->terminateFlag, FALSE);
     ATOMIC_STORE_BOOL(&pSampleStreamingSession->candidateGatheringDone, FALSE);
 
-    pSampleStreamingSession->peerConnectionMetrics.peerConnectionStats.peerConnectionStartTime = GETTIME() / HUNDREDS_OF_NANOS_IN_A_MILLISECOND;
-    // Flag to enable SDK to calculate selected ice server, local, remote and candidate pair stats.
-    pSampleConfiguration->enableIceStats = FALSE;
+
+    if (pSampleConfiguration->enableTwcc) {
+        pSampleStreamingSession->twccMetadata.updateLock = MUTEX_CREATE(TRUE);
+    }
+
     CHK_STATUS(initializePeerConnection(pSampleConfiguration, &pSampleStreamingSession->pPeerConnection));
     CHK_STATUS(peerConnectionOnIceCandidate(pSampleStreamingSession->pPeerConnection, (UINT64) pSampleStreamingSession, onIceCandidateHandler));
     CHK_STATUS(
@@ -604,8 +606,10 @@ STATUS createSampleStreamingSession(PSampleConfiguration pSampleConfiguration, P
     CHK_STATUS(transceiverOnBandwidthEstimation(pSampleStreamingSession->pAudioRtcRtpTransceiver, (UINT64) pSampleStreamingSession,
                                                 sampleBandwidthEstimationHandler));
     // twcc bandwidth estimation
-    CHK_STATUS(peerConnectionOnSenderBandwidthEstimation(pSampleStreamingSession->pPeerConnection, (UINT64) pSampleStreamingSession,
-                                                         sampleSenderBandwidthEstimationHandler));
+    if (pSampleConfiguration->enableTwcc) {
+        CHK_STATUS(peerConnectionOnSenderBandwidthEstimation(pSampleStreamingSession->pPeerConnection, (UINT64) pSampleStreamingSession,
+                                                             sampleSenderBandwidthEstimationHandler));
+    }
     pSampleStreamingSession->startUpLatency = 0;
 
 CleanUp:
@@ -656,6 +660,12 @@ STATUS freeSampleStreamingSession(PSampleStreamingSession* ppSampleStreamingSess
         }
         MUTEX_UNLOCK(pSampleConfiguration->sampleConfigurationObjLock);
 
+    if (pSampleConfiguration->enableTwcc) {
+        if (IS_VALID_MUTEX_VALUE(pSampleStreamingSession->twccMetadata.updateLock)) {
+            MUTEX_FREE(pSampleStreamingSession->twccMetadata.updateLock);
+        }
+    }
+
     CHK_LOG_ERR(closePeerConnection(pSampleStreamingSession->pPeerConnection));
     CHK_LOG_ERR(freePeerConnection(&pSampleStreamingSession->pPeerConnection));
     SAFE_MEMFREE(pSampleStreamingSession);
@@ -706,27 +716,61 @@ VOID sampleBandwidthEstimationHandler(UINT64 customData, DOUBLE maximumBitrate)
     DLOGV("received bitrate suggestion: %f", maximumBitrate);
 }
 
+// Sample callback for TWCC. The average packet loss is tracked with an exponential moving average (EMA). If the average packet loss is <= 5%,
+// the current bitrate is increased by 5%. If it is more than 5%, the current bitrate is reduced by the average percent lost. A bitrate update
+// is allowed at most once a second, and the bitrate is only increased/decreased up to the configured limits.
 VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, UINT32 rxBytes, UINT32 txPacketsCnt, UINT32 rxPacketsCnt,
                                             UINT64 duration)
 {
-    UNUSED_PARAM(customData);
     UNUSED_PARAM(duration);
-    UNUSED_PARAM(rxBytes);
-    UNUSED_PARAM(txBytes);
+    UINT64 videoBitrate, audioBitrate;
+    UINT64 currentTimeMs, timeDiff;
     UINT32 lostPacketsCnt = txPacketsCnt - rxPacketsCnt;
-    UINT32 percentLost = lostPacketsCnt * 100 / txPacketsCnt;
-    UINT32 bitrate = 1024;
-    if (percentLost < 2) {
-        // increase encoder bitrate by 2 percent
-        bitrate *= 1.02f;
-    } else if (percentLost > 5) {
-        // decrease encoder bitrate by packet loss percent
-        bitrate *= (1.0f - percentLost / 100.0f);
-    }
-    // otherwise keep bitrate the same
-
-    DLOGV("received sender bitrate estimation: suggested bitrate %u sent: %u bytes %u packets received: %u bytes %u packets in %lu msec", bitrate,
-          txBytes, txPacketsCnt, rxBytes, rxPacketsCnt, duration / 10000ULL);
+    // Guard against division by zero and compute the loss percentage in floating point so it is not truncated
+    DOUBLE percentLost = (txPacketsCnt > 0) ? ((DOUBLE) lostPacketsCnt * 100.0 / (DOUBLE) txPacketsCnt) : 0.0;
+    PSampleStreamingSession pSampleStreamingSession = (PSampleStreamingSession) customData;
+
+    if (pSampleStreamingSession == NULL) {
+        DLOGW("Invalid streaming session (NULL object)");
+        return;
+    }
+
+    // Update the exponential moving average of the packet loss
+    pSampleStreamingSession->twccMetadata.averagePacketLoss =
+        EMA_ACCUMULATOR_GET_NEXT(pSampleStreamingSession->twccMetadata.averagePacketLoss, percentLost);
+
+    currentTimeMs = GETTIME() / HUNDREDS_OF_NANOS_IN_A_MILLISECOND;
+    timeDiff = currentTimeMs - pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs;
+    if (timeDiff < TWCC_BITRATE_ADJUSTMENT_INTERVAL_MS) {
+        // Too soon for another adjustment
+        return;
+    }
+
+    MUTEX_LOCK(pSampleStreamingSession->twccMetadata.updateLock);
+    videoBitrate = pSampleStreamingSession->twccMetadata.currentVideoBitrate;
+    audioBitrate = pSampleStreamingSession->twccMetadata.currentAudioBitrate;
+
+    if (pSampleStreamingSession->twccMetadata.averagePacketLoss <= 5) {
+        // increase encoder bitrates by 5 percent, capped at the maximum values
+        videoBitrate = (UINT64) MIN(videoBitrate * 1.05, MAX_VIDEO_BITRATE_KBPS);
+        audioBitrate = (UINT64) MIN(audioBitrate * 1.05, MAX_AUDIO_BITRATE_BPS);
+    } else {
+        // decrease encoder bitrates by the average packet loss percent, floored at the minimum values
+        videoBitrate = (UINT64) MAX(videoBitrate * (1.0 - pSampleStreamingSession->twccMetadata.averagePacketLoss / 100.0), MIN_VIDEO_BITRATE_KBPS);
+        audioBitrate = (UINT64) MAX(audioBitrate * (1.0 - pSampleStreamingSession->twccMetadata.averagePacketLoss / 100.0), MIN_AUDIO_BITRATE_BPS);
+    }
+
+    // Publish the new bitrates; the media thread applies them and resets them to 0
+    pSampleStreamingSession->twccMetadata.newVideoBitrate = videoBitrate;
+    pSampleStreamingSession->twccMetadata.newAudioBitrate = audioBitrate;
+    MUTEX_UNLOCK(pSampleStreamingSession->twccMetadata.updateLock);
+
+    pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs = currentTimeMs;
+
+    DLOGI("Adjustment made: average packet loss = %.2f%%, timediff: %llu ms", pSampleStreamingSession->twccMetadata.averagePacketLoss, timeDiff);
+    DLOGI("Suggested video bitrate: %llu kbps, suggested audio bitrate: %llu bps, sent: %u bytes %u packets, received: %u bytes %u packets in %llu ms",
+          videoBitrate, audioBitrate, txBytes, txPacketsCnt, rxBytes, rxPacketsCnt, duration / HUNDREDS_OF_NANOS_IN_A_MILLISECOND);
 }
 
 STATUS handleRemoteCandidate(PSampleStreamingSession pSampleStreamingSession, PSignalingMessage pSignalingMessage)
@@ -915,6 +959,12 @@ STATUS createSampleConfiguration(PCHAR channelName, SIGNALING_CHANNEL_ROLE_TYPE
     pSampleConfiguration->pregenerateCertTimerId = MAX_UINT32;
     pSampleConfiguration->signalingClientMetrics.version = SIGNALING_CLIENT_METRICS_CURRENT_VERSION;
 
+    // Flag to enable SDK to calculate selected ice server, local, remote and candidate pair stats.
+    pSampleConfiguration->enableIceStats = FALSE;
+
+    // Flag to enable/disable TWCC
+    pSampleConfiguration->enableTwcc = TRUE;
+
     ATOMIC_STORE_BOOL(&pSampleConfiguration->interrupted, FALSE);
     ATOMIC_STORE_BOOL(&pSampleConfiguration->mediaThreadStarted, FALSE);
     ATOMIC_STORE_BOOL(&pSampleConfiguration->appTerminateFlag, FALSE);
diff --git a/samples/Samples.h b/samples/Samples.h
index 2ab3d41f20..9e3dfa5bd9 100644
--- a/samples/Samples.h
+++ b/samples/Samples.h
@@ -83,6 +83,12 @@ extern "C" {
 #define MAX_SIGNALING_CLIENT_METRICS_MESSAGE_SIZE 736 // strlen(SIGNALING_CLIENT_METRICS_JSON_TEMPLATE) + 20 * 10
 #define MAX_ICE_AGENT_METRICS_MESSAGE_SIZE        113 // strlen(ICE_AGENT_METRICS_JSON_TEMPLATE) + 20 * 2
 
+#define TWCC_BITRATE_ADJUSTMENT_INTERVAL_MS 1000    // Unit milliseconds. Minimum interval between TWCC bitrate adjustments.
+#define MIN_VIDEO_BITRATE_KBPS              512     // Unit kilobits/sec. Value could change based on codec.
+#define MAX_VIDEO_BITRATE_KBPS              2048000 // Unit kilobits/sec. Value could change based on codec.
+#define MIN_AUDIO_BITRATE_BPS               4000    // Unit bits/sec. Value could change based on codec.
+#define MAX_AUDIO_BITRATE_BPS               650000  // Unit bits/sec. Value could change based on codec.
+
 typedef enum {
     SAMPLE_STREAMING_VIDEO_ONLY,
     SAMPLE_STREAMING_AUDIO_VIDEO,
@@ -160,6 +166,7 @@ typedef struct {
     PCHAR rtspUri;
     UINT32 logLevel;
     BOOL enableIceStats;
+    BOOL enableTwcc;
 } SampleConfiguration, *PSampleConfiguration;
 
 typedef struct {
@@ -179,6 +186,16 @@ typedef struct {
 
 typedef VOID (*StreamSessionShutdownCallback)(UINT64, PSampleStreamingSession);
 
+typedef struct {
+    MUTEX updateLock;
+    UINT64 lastAdjustmentTimeMs; // timestamp (in milliseconds) of the last TWCC-driven bitrate adjustment
+    UINT64 currentVideoBitrate;
+    UINT64 currentAudioBitrate;
+    UINT64 newVideoBitrate;
+    UINT64 newAudioBitrate;
+    DOUBLE averagePacketLoss;
+} TwccMetadata, *PTwccMetadata;
+
 struct __SampleStreamingSession {
     volatile ATOMIC_BOOL terminateFlag;
     volatile ATOMIC_BOOL candidateGatheringDone;
@@ -198,6 +215,7 @@ struct __SampleStreamingSession {
     UINT64 startUpLatency;
     RtcMetricsHistory rtcMetricsHistory;
     BOOL remoteCanTrickleIce;
+    TwccMetadata twccMetadata;
 
     // this is called when the SampleStreamingSession is being freed
     StreamSessionShutdownCallback shutdownCallback;
diff --git a/samples/kvsWebRTCClientMasterGstSample.c b/samples/kvsWebRTCClientMasterGstSample.c
index 4cbe8e2b9f..ff5ffccb35 100644
--- a/samples/kvsWebRTCClientMasterGstSample.c
+++ b/samples/kvsWebRTCClientMasterGstSample.c
@@ -3,9 +3,10 @@
 #include <gst/app/gstappsink.h>
 
 extern PSampleConfiguration gSampleConfiguration;
-
 // #define VERBOSE
+GstElement* senderPipeline = NULL;
+
 GstFlowReturn on_new_sample(GstElement* sink, gpointer data, UINT64 trackid)
 {
     GstBuffer* buffer;
@@ -22,6 +23,7 @@
     PSampleStreamingSession pSampleStreamingSession = NULL;
     PRtcRtpTransceiver pRtcRtpTransceiver = NULL;
     UINT32 i;
+    guint bitrate;
 
     CHK_ERR(pSampleConfiguration != NULL, STATUS_NULL_ARG, "NULL sample configuration");
@@ -64,12 +66,40 @@
         frame.index = (UINT32) ATOMIC_INCREMENT(&pSampleStreamingSession->frameIndex);
 
         if (trackid == DEFAULT_AUDIO_TRACK_ID) {
+            if (pSampleStreamingSession->pSampleConfiguration->enableTwcc && senderPipeline != NULL) {
+                GstElement* encoder = gst_bin_get_by_name(GST_BIN(senderPipeline), "sampleAudioEncoder");
+                if (encoder != NULL) {
+                    g_object_get(G_OBJECT(encoder), "bitrate", &bitrate, NULL);
+                    MUTEX_LOCK(pSampleStreamingSession->twccMetadata.updateLock);
+                    pSampleStreamingSession->twccMetadata.currentAudioBitrate = (UINT64) bitrate;
+                    if (pSampleStreamingSession->twccMetadata.newAudioBitrate != 0) {
+                        bitrate = (guint) (pSampleStreamingSession->twccMetadata.newAudioBitrate);
+                        pSampleStreamingSession->twccMetadata.newAudioBitrate = 0;
+                        g_object_set(G_OBJECT(encoder), "bitrate", bitrate, NULL);
+                    }
+                    MUTEX_UNLOCK(pSampleStreamingSession->twccMetadata.updateLock);
+                    gst_object_unref(encoder); // drop the reference returned by gst_bin_get_by_name()
+                }
+            }
             pRtcRtpTransceiver = pSampleStreamingSession->pAudioRtcRtpTransceiver;
             frame.presentationTs = pSampleStreamingSession->audioTimestamp;
             frame.decodingTs = frame.presentationTs;
             pSampleStreamingSession->audioTimestamp += SAMPLE_AUDIO_FRAME_DURATION; // assume audio frame size is 20ms, which is default in opusenc
         } else {
+            if (pSampleStreamingSession->pSampleConfiguration->enableTwcc && senderPipeline != NULL) {
+                GstElement* encoder = gst_bin_get_by_name(GST_BIN(senderPipeline), "sampleVideoEncoder");
+                if (encoder != NULL) {
+                    g_object_get(G_OBJECT(encoder), "bitrate", &bitrate, NULL);
+                    MUTEX_LOCK(pSampleStreamingSession->twccMetadata.updateLock);
+                    pSampleStreamingSession->twccMetadata.currentVideoBitrate = (UINT64) bitrate;
+                    if (pSampleStreamingSession->twccMetadata.newVideoBitrate != 0) {
+                        bitrate = (guint) (pSampleStreamingSession->twccMetadata.newVideoBitrate);
+                        pSampleStreamingSession->twccMetadata.newVideoBitrate = 0;
+                        g_object_set(G_OBJECT(encoder), "bitrate", bitrate, NULL);
+                    }
+                    MUTEX_UNLOCK(pSampleStreamingSession->twccMetadata.updateLock);
+                    gst_object_unref(encoder); // drop the reference returned by gst_bin_get_by_name()
+                }
+            }
             pRtcRtpTransceiver = pSampleStreamingSession->pVideoRtcRtpTransceiver;
             frame.presentationTs = pSampleStreamingSession->videoTimestamp;
             frame.decodingTs = frame.presentationTs;
@@ -120,7 +150,7 @@ GstFlowReturn on_new_sample_audio(GstElement* sink, gpointer data)
 PVOID sendGstreamerAudioVideo(PVOID args)
 {
     STATUS retStatus = STATUS_SUCCESS;
-    GstElement *appsinkVideo = NULL, *appsinkAudio = NULL, *pipeline = NULL;
+    GstElement *appsinkVideo = NULL, *appsinkAudio = NULL;
     GstBus* bus;
     GstMessage* msg;
     GError* error = NULL;
@@ -168,38 +198,42 @@ PVOID sendGstreamerAudioVideo(PVOID args)
         case SAMPLE_STREAMING_VIDEO_ONLY:
             switch (pSampleConfiguration->srcType) {
                 case TEST_SOURCE: {
-                    pipeline =
-                        gst_parse_launch("videotestsrc is-live=TRUE ! queue ! videoconvert ! videoscale ! video/x-raw,width=1280,height=720 ! "
-                                         "videorate ! video/x-raw,framerate=25/1 ! "
-                                         "x264enc bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! "
-                                         "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE emit-signals=TRUE "
-                                         "name=appsink-video",
-                                         &error);
+                    senderPipeline = gst_parse_launch(
+                        "videotestsrc pattern=ball is-live=TRUE ! "
+                        "queue ! videoconvert ! videoscale ! video/x-raw,width=1280,height=720 ! "
+                        "clockoverlay halignment=right valignment=top time-format=\"%Y-%m-%d %H:%M:%S\" ! "
+                        "videorate ! video/x-raw,framerate=25/1 ! "
+                        "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! "
+                        "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! "
+                        "appsink sync=TRUE emit-signals=TRUE name=appsink-video",
+                        &error);
                     break;
                 }
                 case DEVICE_SOURCE: {
-                    pipeline = gst_parse_launch("autovideosrc ! queue ! videoconvert ! videoscale ! video/x-raw,width=1280,height=720 ! "
-                                                "videorate ! video/x-raw,framerate=25/1 ! "
-                                                "x264enc bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! "
-                                                "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE "
-                                                "emit-signals=TRUE name=appsink-video",
-                                                &error);
+                    senderPipeline = gst_parse_launch(
+                        "autovideosrc ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! "
+                        "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! "
+                        "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! "
+                        "appsink sync=TRUE emit-signals=TRUE name=appsink-video",
+                        &error);
                     break;
                 }
                 case RTSP_SOURCE: {
-                    UINT16 stringOutcome = snprintf(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT,
-                                                    "uridecodebin uri=%s ! "
-                                                    "videoconvert ! "
-                                                    "x264enc bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! "
-                                                    "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! queue ! "
-                                                    "appsink sync=TRUE emit-signals=TRUE name=appsink-video ",
-                                                    pSampleConfiguration->rtspUri);
+                    UINT16 stringOutcome =
+                        SNPRINTF(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT,
+                                 "uridecodebin uri=%s ! "
+                                 "videoconvert ! "
+                                 "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! "
+                                 "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! queue ! "
+                                 "appsink sync=TRUE emit-signals=TRUE name=appsink-video ",
+                                 pSampleConfiguration->rtspUri);
 
                     if (stringOutcome > RTSP_PIPELINE_MAX_CHAR_COUNT) {
                         DLOGE("[KVS GStreamer Master] ERROR: rtsp uri entered exceeds maximum allowed length set by RTSP_PIPELINE_MAX_CHAR_COUNT");
                         goto CleanUp;
                     }
-                    pipeline = gst_parse_launch(rtspPipeLineBuffer, &error);
+                    senderPipeline = gst_parse_launch(rtspPipeLineBuffer, &error);
 
                     break;
                 }
@@ -209,44 +243,46 @@ PVOID sendGstreamerAudioVideo(PVOID args)
         case SAMPLE_STREAMING_AUDIO_VIDEO:
             switch (pSampleConfiguration->srcType) {
                 case TEST_SOURCE: {
-                    pipeline =
-                        gst_parse_launch("videotestsrc is-live=TRUE ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! "
-                                         "x264enc bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! "
-                                         "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE "
-                                         "emit-signals=TRUE name=appsink-video audiotestsrc is-live=TRUE ! "
-                                         "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc ! "
-                                         "audio/x-opus,rate=48000,channels=2 ! appsink sync=TRUE emit-signals=TRUE name=appsink-audio",
-                                         &error);
+                    senderPipeline = gst_parse_launch(
+                        "videotestsrc pattern=ball is-live=TRUE ! "
+                        "queue ! videorate ! videoscale ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! "
+                        "clockoverlay halignment=right valignment=top time-format=\"%Y-%m-%d %H:%M:%S\" ! "
+                        "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! "
+                        "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! "
+                        "appsink sync=TRUE emit-signals=TRUE name=appsink-video audiotestsrc wave=ticks is-live=TRUE ! "
+                        "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc name=sampleAudioEncoder ! "
+                        "audio/x-opus,rate=48000,channels=2 ! appsink sync=TRUE emit-signals=TRUE name=appsink-audio",
+                        &error);
                     break;
                 }
                 case DEVICE_SOURCE: {
-                    pipeline =
-                        gst_parse_launch("autovideosrc ! queue ! videoconvert ! videoscale ! video/x-raw,width=1280,height=720 ! "
-                                         "videorate ! video/x-raw,framerate=25/1 ! "
-                                         "x264enc bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! "
-                                         "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE emit-signals=TRUE "
-                                         "name=appsink-video autoaudiosrc ! "
-                                         "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc ! "
-                                         "audio/x-opus,rate=48000,channels=2 ! appsink sync=TRUE emit-signals=TRUE name=appsink-audio",
-                                         &error);
+                    senderPipeline = gst_parse_launch(
+                        "autovideosrc ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! "
+                        "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! "
+                        "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE emit-signals=TRUE "
+                        "name=appsink-video autoaudiosrc ! "
+                        "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc name=sampleAudioEncoder ! "
+                        "audio/x-opus,rate=48000,channels=2 ! appsink sync=TRUE emit-signals=TRUE name=appsink-audio",
+                        &error);
                     break;
                 }
                 case RTSP_SOURCE: {
-                    UINT16 stringOutcome = snprintf(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT,
-                                                    "uridecodebin uri=%s name=src ! videoconvert ! "
-                                                    "x264enc bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! "
-                                                    "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! queue ! "
-                                                    "appsink sync=TRUE emit-signals=TRUE name=appsink-video "
-                                                    "src. ! audioconvert ! "
-                                                    "audioresample ! opusenc ! audio/x-opus,rate=48000,channels=2 ! queue ! "
-                                                    "appsink sync=TRUE emit-signals=TRUE name=appsink-audio",
-                                                    pSampleConfiguration->rtspUri);
+                    UINT16 stringOutcome =
+                        SNPRINTF(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT,
+                                 "uridecodebin uri=%s name=src ! videoconvert ! "
+                                 "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! "
+                                 "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! queue ! "
+                                 "appsink sync=TRUE emit-signals=TRUE name=appsink-video "
+                                 "src. ! audioconvert ! "
+                                 "audioresample ! opusenc name=sampleAudioEncoder ! audio/x-opus,rate=48000,channels=2 ! queue ! "
+                                 "appsink sync=TRUE emit-signals=TRUE name=appsink-audio",
+                                 pSampleConfiguration->rtspUri);
 
                     if (stringOutcome > RTSP_PIPELINE_MAX_CHAR_COUNT) {
                         DLOGE("[KVS GStreamer Master] ERROR: rtsp uri entered exceeds maximum allowed length set by RTSP_PIPELINE_MAX_CHAR_COUNT");
                         goto CleanUp;
                     }
-                    pipeline = gst_parse_launch(rtspPipeLineBuffer, &error);
+                    senderPipeline = gst_parse_launch(rtspPipeLineBuffer, &error);
 
                     break;
                 }
@@ -254,10 +290,10 @@ PVOID sendGstreamerAudioVideo(PVOID args)
             break;
     }
 
-    CHK_ERR(pipeline != NULL, STATUS_NULL_ARG, "[KVS Gstreamer Master] Pipeline is NULL");
+    CHK_ERR(senderPipeline != NULL, STATUS_NULL_ARG, "[KVS Gstreamer Master] Pipeline is NULL");
 
-    appsinkVideo = gst_bin_get_by_name(GST_BIN(pipeline), "appsink-video");
-    appsinkAudio = gst_bin_get_by_name(GST_BIN(pipeline), "appsink-audio");
+    appsinkVideo = gst_bin_get_by_name(GST_BIN(senderPipeline), "appsink-video");
+    appsinkAudio = gst_bin_get_by_name(GST_BIN(senderPipeline), "appsink-audio");
 
     if (!(appsinkVideo != NULL || appsinkAudio != NULL)) {
         DLOGE("[KVS GStreamer Master] sendGstreamerAudioVideo(): cant find appsink, operation returned status code: 0x%08x", STATUS_INTERNAL_ERROR);
@@ -270,10 +306,10 @@ PVOID sendGstreamerAudioVideo(PVOID args)
     if (appsinkAudio != NULL) {
         g_signal_connect(appsinkAudio, "new-sample", G_CALLBACK(on_new_sample_audio), (gpointer) pSampleConfiguration);
     }
-    gst_element_set_state(pipeline, GST_STATE_PLAYING);
+    gst_element_set_state(senderPipeline, GST_STATE_PLAYING);
 
     /* block until error or EOS */
-    bus = gst_element_get_bus(pipeline);
+    bus = gst_element_get_bus(senderPipeline);
     msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
 
     /* Free resources */
@@ -283,9 +319,9 @@ PVOID sendGstreamerAudioVideo(PVOID args)
     if (bus != NULL) {
         gst_object_unref(bus);
     }
-    gst_element_set_state(pipeline, GST_STATE_NULL);
-    if (pipeline != NULL) {
-        gst_object_unref(pipeline);
+    if (senderPipeline != NULL) {
+        gst_element_set_state(senderPipeline, GST_STATE_NULL);
+        gst_object_unref(senderPipeline);
     }
     if (appsinkAudio != NULL) {
         gst_object_unref(appsinkAudio);
diff --git a/src/include/com/amazonaws/kinesis/video/webrtcclient/Include.h b/src/include/com/amazonaws/kinesis/video/webrtcclient/Include.h
index 39ebdc6b22..11b3d36656 100644
--- a/src/include/com/amazonaws/kinesis/video/webrtcclient/Include.h
+++ b/src/include/com/amazonaws/kinesis/video/webrtcclient/Include.h
@@ -1221,8 +1221,7 @@ typedef struct {
                                                //!< would like to whitelist/blacklist specific network interfaces
 
     BOOL disableSenderSideBandwidthEstimation; //!< Disable TWCC feedback based sender bandwidth estimation, enabled by default.
-                                               //!< You want to set this to TRUE if you are on a very stable connection and want to save 1.2MB of
-                                               //!< memory
+                                               //!< You want to set this to TRUE if you are on a very stable connection.
 
     BOOL enableIceStats;                       //!< Enable ICE stats to be calculated
 } KvsRtcConfiguration, *PKvsRtcConfiguration;