Skip to content

Commit

Permalink
Create FrameProcessorEffect as camera middleman
Browse files Browse the repository at this point in the history
  • Loading branch information
mrousavy committed Feb 28, 2024
1 parent daedf60 commit 0636409
Show file tree
Hide file tree
Showing 6 changed files with 209 additions and 36 deletions.
2 changes: 1 addition & 1 deletion package/android/src/main/cpp/OpenGLRenderer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ void OpenGLRenderer::renderHardwareBufferToSurface(AHardwareBuffer* hardwareBuff
// 6. Cleanup
eglDestroyImageKHR(_context->display, eglImage);
#else
throw std::runtime_error("HardwareBuffer rendering is only supported is minSdk is set to API 26 or higher!");
throw std::runtime_error("HardwareBuffer rendering is only supported is minSdk is set to API 26 or higher!");
#endif
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ abstract class CameraError(
*
* Example: "The microphone permission was denied!"
*/
message: String,
override val message: String,
/**
* A throwable that caused this error.
*/
Expand Down Expand Up @@ -231,8 +231,14 @@ class DoNotDisturbBugError(cause: Throwable?) :
"system",
"do-not-disturb-bug",
"The Camera Device could not be opened because of a bug in Android 9 (API 28) when do-not-disturb mode is enabled! " +
"Either update your Android version, or disable do-not-disturb.",
"Either update your Android version, or disable do-not-disturb.",
cause
)
/**
 * Thrown when a video recording is started while a Frame Processor is running on a device
 * where [android.media.ImageWriter] is unavailable (API 22 or lower), so intercepted
 * Frames cannot be forwarded to the recording surface.
 *
 * Surfaced to JS as the error code `system/recording-while-frame-processing-unavailable`.
 */
class RecordingWhileFrameProcessingUnavailable :
  CameraError(
    "system",
    "recording-while-frame-processing-unavailable",
    "Video Recordings are not possible with a Frame Processor running, because the device is running on API 22 or lower and ImageWriters are not available."
  )

/**
 * Fallback error for any failure that does not map to a more specific [CameraError].
 * Uses the cause's message when available.
 */
// BUGFIX: corrected spelling of "occurred" in the user-facing fallback message.
class UnknownCameraError(cause: Throwable?) : CameraError("unknown", "unknown", cause?.message ?: "An unknown camera error occurred.", cause)
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,8 @@ import android.Manifest
import android.annotation.SuppressLint
import android.content.Context
import android.content.pm.PackageManager
import android.graphics.Point
import android.hardware.camera2.CameraCharacteristics
import android.hardware.camera2.CameraManager
import android.media.MediaActionSound
import android.util.Log
import android.util.Range
import android.util.Size
Expand All @@ -22,12 +20,10 @@ import androidx.camera.core.FocusMeteringAction
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.ImageCapture
import androidx.camera.core.MeteringPoint
import androidx.camera.core.MeteringPointFactory
import androidx.camera.core.MirrorMode
import androidx.camera.core.Preview
import androidx.camera.core.PreviewCapabilities
import androidx.camera.core.SurfaceOrientedMeteringPointFactory
import androidx.camera.core.TorchState
import androidx.camera.core.UseCaseGroup
import androidx.camera.core.resolutionselector.ResolutionSelector
import androidx.camera.extensions.ExtensionMode
import androidx.camera.lifecycle.ProcessCameraProvider
Expand All @@ -48,7 +44,6 @@ import com.mrousavy.camera.extensions.await
import com.mrousavy.camera.extensions.byId
import com.mrousavy.camera.extensions.forSize
import com.mrousavy.camera.extensions.getCameraError
import com.mrousavy.camera.extensions.id
import com.mrousavy.camera.extensions.takePicture
import com.mrousavy.camera.extensions.toCameraError
import com.mrousavy.camera.extensions.withExtension
Expand All @@ -62,11 +57,6 @@ import com.mrousavy.camera.types.Video
import com.mrousavy.camera.types.VideoStabilizationMode
import com.mrousavy.camera.utils.FileUtils
import com.mrousavy.camera.utils.runOnUiThread
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.asExecutor
import kotlinx.coroutines.cancel
import kotlinx.coroutines.launch
import java.io.Closeable
import kotlin.math.roundToInt
import kotlinx.coroutines.sync.Mutex
Expand All @@ -89,6 +79,7 @@ class CameraSession(private val context: Context, private val cameraManager: Cam
private var photoOutput: ImageCapture? = null
private var videoOutput: VideoCapture<Recorder>? = null
private var codeScannerOutput: ImageAnalysis? = null
private var frameProcessorEffect: FrameProcessorEffect? = null

// Camera State
private val mutex = Mutex()
Expand Down Expand Up @@ -264,7 +255,6 @@ class CameraSession(private val context: Context, private val cameraManager: Cam
// video.setTargetVideoEncodingBitRate()
}.build()


val video = VideoCapture.Builder(recorder).also { video ->
// Configure Video Output
video.setMirrorMode(MirrorMode.MIRROR_MODE_ON_FRONT_ONLY)
Expand All @@ -288,6 +278,14 @@ class CameraSession(private val context: Context, private val cameraManager: Cam
videoOutput = null
}

// 3.5 Frame Processor (middleman)
if (videoConfig != null && videoConfig.config.enableFrameProcessor) {
// The FrameProcessorEffect is a middle-man between the Camera stream and the output surfaces.
frameProcessorEffect = FrameProcessorEffect(videoConfig.config.pixelFormat, videoConfig.config.enableGpuBuffers, callback)
} else {
frameProcessorEffect = null
}

// 4. Code Scanner
val codeScannerConfig = configuration.codeScanner as? CameraConfiguration.Output.Enabled<CameraConfiguration.CodeScanner>
if (codeScannerConfig != null) {
Expand All @@ -302,6 +300,7 @@ class CameraSession(private val context: Context, private val cameraManager: Cam
Log.i(TAG, "Successfully created new Outputs for Camera #${configuration.cameraId}!")
}

@Suppress("LiftReturnOrAssignment")
private suspend fun configureCamera(provider: ProcessCameraProvider, configuration: CameraConfiguration) {
Log.i(TAG, "Binding Camera #${configuration.cameraId}...")
checkCameraPermission()
Expand All @@ -327,8 +326,19 @@ class CameraSession(private val context: Context, private val cameraManager: Cam
// Unbind previous Camera
provider.unbindAll()

// Bind it all together (must be on UI Thread)
camera = provider.bindToLifecycle(this, cameraSelector, *useCases.toTypedArray())
val frameProcessorEffect = frameProcessorEffect
if (frameProcessorEffect != null) {
val useCaseGroup = UseCaseGroup.Builder()
useCases.forEach { useCase -> useCaseGroup.addUseCase(useCase) }
useCaseGroup.addEffect(frameProcessorEffect)

// Bind it all together (must be on UI Thread)
camera = provider.bindToLifecycle(this, cameraSelector, useCaseGroup.build())
} else {
// Bind it all together (must be on UI Thread)
camera = provider.bindToLifecycle(this, cameraSelector, *useCases.toTypedArray())
}

var lastState = CameraState.Type.OPENING
camera!!.cameraInfo.cameraState.observeForever { state ->
Log.i(TAG, "Camera State: ${state.type} (has error: ${state.error != null})")
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,169 @@
package com.mrousavy.camera.core

import android.annotation.SuppressLint
import android.hardware.HardwareBuffer
import android.media.ImageReader
import android.media.ImageWriter
import android.os.Build
import android.util.Log
import androidx.annotation.RequiresApi
import androidx.camera.core.CameraEffect
import androidx.camera.core.SurfaceOutput
import androidx.camera.core.SurfaceProcessor
import androidx.camera.core.SurfaceRequest
import com.mrousavy.camera.frameprocessor.Frame
import com.mrousavy.camera.types.Orientation
import com.mrousavy.camera.types.PixelFormat

/**
 * A [CameraEffect] that acts as a middle-man between the Camera stream and the output
 * surfaces (e.g. the video recorder), intercepting every camera image so it can be
 * delivered as a [Frame] to [CameraSession.Callback.onFrame].
 *
 * @param format The [PixelFormat] Frames should be streamed in.
 * @param enableGpuBuffers Whether to allocate the intermediate [ImageReader] with
 *   GPU-optimized HardwareBuffer usage flags (only effective on API 29+).
 * @param callback Receives each intercepted Frame, as well as any errors.
 * @param targets The CameraX targets this effect applies to (defaults to video capture).
 */
@SuppressLint("RestrictedApi")
class FrameProcessorEffect(
  format: PixelFormat = PixelFormat.NATIVE,
  enableGpuBuffers: Boolean = false,
  callback: CameraSession.Callback,
  targets: Int = VIDEO_CAPTURE,
) : CameraEffect(
  targets,
  TRANSFORMATION_CAMERA_AND_SURFACE_ROTATION,
  CameraQueues.videoQueue.executor,
  FrameProcessorSurfaceProcessor(format, enableGpuBuffers, callback),
  { error -> callback.onError(error) }
) {
  // CameraEffect holds the SurfaceProcessor

  class FrameProcessorSurfaceProcessor(
    private val format: PixelFormat,
    private val enableGpuBuffers: Boolean,
    private val callback: CameraSession.Callback
  ) : SurfaceProcessor {
    companion object {
      private const val TAG = "FrameProcessor"
      private const val MAX_IMAGES = 3
    }
    private var imageReader: ImageReader? = null
    private var imageWriter: ImageWriter? = null
    private val queue = CameraQueues.videoQueue

    override fun onInputSurface(request: SurfaceRequest) {
      val requestedSize = request.resolution
      val requestedFormat = request.deferrableSurface.prescribedStreamFormat
      Log.i(TAG, "Requested new input surface: $requestedSize in format #$requestedFormat")

      // Re-use the current ImageReader if it already matches the requested configuration.
      val currentImageReader = imageReader
      if (currentImageReader != null &&
        currentImageReader.width == requestedSize.width &&
        currentImageReader.height == requestedSize.height &&
        currentImageReader.imageFormat == requestedFormat
      ) {
        Log.i(TAG, "Current ImageReader matches those requirements, attempting to re-use it...")
        request.provideSurface(currentImageReader.surface, queue.executor) { _ ->
          // The Camera no longer uses this Surface - release the ImageReader.
          Log.i(TAG, "Input Surface no longer in use, closing ImageReader...")
          currentImageReader.close()
          if (imageReader === currentImageReader) imageReader = null
        }
        // BUGFIX: The original fell through here and created a second ImageReader,
        // calling provideSurface() twice on the same SurfaceRequest. A SurfaceRequest
        // may only be fulfilled once - return after the re-use path.
        return
      }

      val newImageReader = if (enableGpuBuffers && Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) {
        // Use GPU buffer flags for ImageReader for faster forwarding
        val flags = getRecommendedHardwareBufferFlags(requestedSize.width, requestedSize.height)
        Log.i(TAG, "Creating ImageReader with new GPU-Buffers API... (Usage Flags: $flags)")
        ImageReader.newInstance(requestedSize.width, requestedSize.height, requestedFormat, MAX_IMAGES, flags)
      } else {
        // Use default CPU flags for ImageReader
        Log.i(TAG, "Creating ImageReader with default CPU usage flag...")
        ImageReader.newInstance(requestedSize.width, requestedSize.height, requestedFormat, MAX_IMAGES)
      }

      newImageReader.setOnImageAvailableListener({ reader ->
        try {
          // Only the most recent image matters for the Frame Processor - drop older ones.
          val image = reader.acquireLatestImage() ?: return@setOnImageAvailableListener

          val orientation = Orientation.PORTRAIT // TODO: orientation
          val isMirrored = false // TODO: isMirrored
          val frame = Frame(image, image.timestamp, orientation, isMirrored)

          // Ref-count the Frame around the callback so it stays valid for the duration
          // of the user's Frame Processor, then gets released.
          frame.incrementRefCount()
          try {
            callback.onFrame(frame)
          } finally {
            frame.decrementRefCount()
          }
        } catch (e: Throwable) {
          Log.e(TAG, "Failed to process image! ${e.message}", e)
          callback.onError(e)
        }
      }, CameraQueues.videoQueue.handler)

      request.provideSurface(newImageReader.surface, queue.executor) { _ ->
        // The Camera no longer uses this Surface - release the ImageReader.
        // (Replaces the previous placeholder TODO log, which leaked the reader.)
        Log.i(TAG, "Input Surface no longer in use, closing ImageReader...")
        newImageReader.close()
        if (imageReader === newImageReader) imageReader = null
      }
      this.imageReader = newImageReader
    }

    override fun onOutputSurface(surfaceOutput: SurfaceOutput) {
      Log.i(TAG, "Received new output surface: ${surfaceOutput.size} in format #${surfaceOutput.format}")

      val surface = surfaceOutput.getSurface(queue.executor) { _ ->
        // The output no longer needs this Surface - release the ImageWriter and
        // signal CameraX that we are done with the output.
        // (Replaces the previous placeholder TODO log, which leaked the writer.)
        Log.i(TAG, "Output Surface no longer in use, closing ImageWriter...")
        imageWriter?.close()
        imageWriter = null
        surfaceOutput.close()
      }

      if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) {
        // Use custom target format, ImageWriter might be able to convert between the formats.
        Log.i(TAG, "Creating ImageWriter with target format $format...")
        imageWriter = ImageWriter.newInstance(surface, MAX_IMAGES, format.toImageFormat())
      } else if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
        // Use default format, ImageWriter might not be able to convert between the formats and crash....
        Log.i(TAG, "Creating ImageWriter with default format (${surfaceOutput.format})...")
        imageWriter = ImageWriter.newInstance(surface, MAX_IMAGES)
      } else {
        // ImageWriters require API 23 - recording while frame processing is impossible here.
        val error = RecordingWhileFrameProcessingUnavailable()
        Log.e(TAG, error.message)
        callback.onError(error)
      }
      // NOTE(review): images acquired in onInputSurface are never queued into this
      // ImageWriter, so the output surface currently receives no frames - presumably
      // still work-in-progress; confirm forwarding is implemented before relying on
      // recordings with a Frame Processor active.
    }

    /**
     * Get the recommended HardwareBuffer flags for creating ImageReader instances with.
     *
     * Tries to use [HardwareBuffer.USAGE_GPU_SAMPLED_IMAGE] if possible, [HardwareBuffer.USAGE_CPU_READ_OFTEN]
     * or a combination of both flags if CPU access is needed, and `0` otherwise.
     */
    @RequiresApi(Build.VERSION_CODES.Q)
    private fun getRecommendedHardwareBufferFlags(width: Int, height: Int): Long {
      val cpuFlag = HardwareBuffer.USAGE_CPU_READ_OFTEN
      val gpuFlag = HardwareBuffer.USAGE_GPU_SAMPLED_IMAGE
      val bothFlags = gpuFlag or cpuFlag

      return if (format == PixelFormat.NATIVE) {
        // NATIVE format never needs CPU access, so GPU-optimized buffers are preferred.
        if (supportsHardwareBufferFlags(width, height, gpuFlag)) {
          Log.i(TAG, "GPU HardwareBuffers are supported!")
          gpuFlag
        } else {
          // no flags are supported - fall back to default
          0L
        }
      } else {
        // YUV/RGB formats need CPU access on the Frame, ideally combined with GPU sampling.
        if (supportsHardwareBufferFlags(width, height, bothFlags)) {
          Log.i(TAG, "GPU + CPU HardwareBuffers are supported!")
          bothFlags
        } else if (supportsHardwareBufferFlags(width, height, cpuFlag)) {
          Log.i(TAG, "CPU HardwareBuffers are supported!")
          cpuFlag
        } else {
          // no flags are supported - fall back to default
          0L
        }
      }
    }

    /**
     * Returns whether a [HardwareBuffer] of the given size and usage [flags] can be
     * allocated in this effect's pixel format. Any allocation error is treated as "unsupported".
     */
    @RequiresApi(Build.VERSION_CODES.Q)
    private fun supportsHardwareBufferFlags(width: Int, height: Int, flags: Long): Boolean =
      try {
        HardwareBuffer.isSupported(width, height, format.toHardwareBufferFormat(), 1, flags)
      } catch (_: Throwable) {
        false
      }
  }
}
Original file line number Diff line number Diff line change
@@ -1,34 +1,19 @@
package com.mrousavy.camera.core

import android.annotation.SuppressLint
import android.graphics.ImageFormat
import android.graphics.SurfaceTexture
import android.hardware.HardwareBuffer
import android.media.Image
import android.media.ImageReader
import android.media.ImageWriter
import android.os.Build
import android.util.Log
import android.util.Range
import android.util.Size
import android.view.Surface
import androidx.annotation.Keep
import androidx.annotation.OptIn
import androidx.annotation.RequiresApi
import androidx.camera.core.CameraInfo
import androidx.camera.core.ExperimentalGetImage
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.ImageAnalysis.Analyzer
import androidx.camera.core.ImageProxy
import androidx.camera.core.SurfaceRequest
import androidx.camera.core.impl.ConstantObservable
import androidx.camera.core.impl.Observable
import androidx.camera.core.processing.OpenGlRenderer
import androidx.camera.video.MediaSpec
import androidx.camera.video.Quality
import androidx.camera.video.QualitySelector
import androidx.camera.video.VideoCapabilities
import androidx.camera.video.VideoOutput
import com.facebook.jni.HybridData
import com.facebook.proguard.annotations.DoNotStrip
import com.mrousavy.camera.frameprocessor.Frame
Expand All @@ -50,12 +35,13 @@ class VideoPipeline(
private val enableFrameProcessor: Boolean = false,
private val enableGpuBuffers: Boolean = false,
private val callback: CameraSession.Callback
) : Analyzer, Closeable {
) : Analyzer,
Closeable {
companion object {
private const val MAX_IMAGES = 3
private const val TAG = "VideoPipeline"
}
data class OpenGLState(val surfaceTexture: SurfaceTexture, val surface: Surface, val size: Size): Closeable {
data class OpenGLState(val surfaceTexture: SurfaceTexture, val surface: Surface, val size: Size) : Closeable {
override fun close() {
surface.release()
surfaceTexture.release()
Expand Down Expand Up @@ -84,7 +70,8 @@ class VideoPipeline(

// TODO: get is mirrored
val isMirrored = false
val frame = Frame(image, imageProxy.imageInfo.timestamp, Orientation.fromRotationDegrees(imageProxy.imageInfo.rotationDegrees), isMirrored)
val frame =
Frame(image, imageProxy.imageInfo.timestamp, Orientation.fromRotationDegrees(imageProxy.imageInfo.rotationDegrees), isMirrored)
frame.incrementRefCount()
try {
// 1. Call Frame Processor
Expand All @@ -101,7 +88,7 @@ class VideoPipeline(
Log.e(TAG, "VideoPipeline threw an error! ${e.message}", e)
callback.onError(e)
} finally {
frame.decrementRefCount()
frame.decrementRefCount()
}
}

Expand Down
1 change: 1 addition & 0 deletions package/src/CameraError.ts
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ export type SystemError =
| 'system/camera-is-restricted'
| 'system/no-camera-manager'
| 'system/frame-processors-unavailable'
| 'system/recording-while-frame-processing-unavailable'
| 'system/view-not-found'
| 'system/max-cameras-in-use'
| 'system/do-not-disturb-bug'
Expand Down

0 comments on commit 0636409

Please sign in to comment.