diff --git a/app/src/main/java/ai/cyberlabs/yoonit/camerademo/MainActivity.kt b/app/src/main/java/ai/cyberlabs/yoonit/camerademo/MainActivity.kt
index 8b98af6..a787797 100644
--- a/app/src/main/java/ai/cyberlabs/yoonit/camerademo/MainActivity.kt
+++ b/app/src/main/java/ai/cyberlabs/yoonit/camerademo/MainActivity.kt
@@ -321,7 +321,10 @@ class MainActivity : AppCompatActivity() {
         count: Int,
         total: Int,
         imagePath: String,
-        inferences: ArrayList<Pair<String, FloatArray>>
+        inferences: ArrayList<Pair<String, FloatArray>>,
+        darkness: Double,
+        lightness: Double,
+        sharpness: Double
     ) {
 
         Log.d(TAG, "onImageCaptured . . . . . . . . . . . . . . . . . . . . . . . . .")
@@ -346,6 +349,13 @@ class MainActivity : AppCompatActivity() {
             maskProbabilityTextView.text = probability.toString()
         }
 
+        darknessTextView.text = if (darkness > 0.7) "Too Dark" else "Normal"
+        darknessProbabilityTextView.text = darkness.toString()
+        lightnessTextView.text = if (lightness > 0.65) "Too Light" else "Normal"
+        lightnessProbabilityTextView.text = lightness.toString()
+        sharpnessTextView.text = if (sharpness < 0.1591) "Blurred" else "Normal"
+        sharpnessProbabilityTextView.text = sharpness.toString()
+
         Log.d(TAG, " . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .")
 
         info_textview.text = "$count/$total"
diff --git a/app/src/main/res/layout/activity_main.xml b/app/src/main/res/layout/activity_main.xml
index 9f7c786..774a067 100644
--- a/app/src/main/res/layout/activity_main.xml
+++ b/app/src/main/res/layout/activity_main.xml
@@ -348,7 +348,7 @@
+        android:orientation="horizontal"
+        android:layout_marginBottom="16dp">
\ No newline at end of file
diff --git a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/CoordinatesController.kt b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/CoordinatesController.kt
index 37096b4..05276ea 100644
--- a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/CoordinatesController.kt
+++ b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/CoordinatesController.kt
@@ -19,6 +19,7 @@ import ai.cyberlabs.yoonit.facefy.model.FaceDetected
 import android.graphics.PointF
 import android.graphics.Rect
 import android.graphics.RectF
+import androidx.camera.core.CameraSelector
 import com.google.mlkit.vision.common.InputImage
 
 /**
@@ -76,7 +77,11 @@ class CoordinatesController(
             ((this.graphicView.height.toFloat() * imageAspectRatio) - this.graphicView.width.toFloat()) / 2
         }
 
-        val x = this.scale(detectionBox.centerX().toFloat(), scaleFactor) - postScaleWidthOffset
+        var x = this.scale(detectionBox.centerX().toFloat(), scaleFactor) - postScaleWidthOffset
+        if (CaptureOptions.cameraLens == CameraSelector.LENS_FACING_BACK) {
+            x = this.graphicView.width - x
+        }
+
         val y = this.scale(detectionBox.centerY().toFloat(), scaleFactor) - postScaleHeightOffset
 
         val left = x - this.scale(detectionBox.width() / 2.0f, scaleFactor)
@@ -186,7 +191,10 @@ class CoordinatesController(
         val faceContours = mutableListOf<PointF>()
 
         contours.forEach { point ->
-            val x = this.scale(point.x, scaleFactor) - postScaleWidthOffset
+            var x = this.scale(point.x, scaleFactor) - postScaleWidthOffset
+            if (CaptureOptions.cameraLens == CameraSelector.LENS_FACING_BACK) {
+                x = this.graphicView.width - x
+            }
             val y = this.scale(point.y, scaleFactor) - postScaleHeightOffset
             faceContours.add(PointF(x, y))
         }
diff --git a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/face/FaceAnalyzer.kt b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/face/FaceAnalyzer.kt
index 3579341..73ce72f 100644
--- a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/face/FaceAnalyzer.kt
+++ b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/face/FaceAnalyzer.kt
@@ -14,6 +14,7 @@ package ai.cyberlabs.yoonit.camera.analyzers.face
 import ai.cyberlabs.yoonit.camera.CameraGraphicView
 import ai.cyberlabs.yoonit.camera.analyzers.CoordinatesController
 import ai.cyberlabs.yoonit.camera.controllers.ComputerVisionController
+import ai.cyberlabs.yoonit.camera.controllers.ImageQualityController
 import ai.cyberlabs.yoonit.camera.interfaces.CameraCallback
 import ai.cyberlabs.yoonit.camera.interfaces.CameraEventListener
 import ai.cyberlabs.yoonit.camera.models.CaptureOptions
@@ -25,6 +26,7 @@ import android.graphics.Bitmap
 import android.graphics.Rect
 import android.graphics.RectF
 import android.media.Image
+import androidx.camera.core.CameraSelector
 import androidx.camera.core.ImageAnalysis
 import androidx.camera.core.ImageProxy
 import java.io.File
@@ -55,10 +57,10 @@ class FaceAnalyzer(
 
         val mediaImage = imageProxy.image ?: return
 
-        val bitmap = mediaImage
+        var bitmap = mediaImage
             .toRGBBitmap(context)
             .rotate(imageProxy.imageInfo.rotationDegrees.toFloat())
-            .mirror(imageProxy.imageInfo.rotationDegrees.toFloat())
+            .mirror()
 
         this.facefy.detect(
             bitmap,
@@ -107,16 +109,16 @@
 
                 // Emit face analysis.
                 this.cameraEventListener.onFaceDetected(
-                    detectionBox.left.pxToDPI(this.context),
-                    detectionBox.top.pxToDPI(this.context),
-                    detectionBox.width().pxToDPI(this.context),
-                    detectionBox.height().pxToDPI(this.context),
-                    faceDetected.leftEyeOpenProbability,
-                    faceDetected.rightEyeOpenProbability,
-                    faceDetected.smilingProbability,
-                    faceDetected.headEulerAngleX,
-                    faceDetected.headEulerAngleY,
-                    faceDetected.headEulerAngleZ
+                        detectionBox.left.pxToDPI(this.context),
+                        detectionBox.top.pxToDPI(this.context),
+                        detectionBox.width().pxToDPI(this.context),
+                        detectionBox.height().pxToDPI(this.context),
+                        faceDetected.leftEyeOpenProbability,
+                        faceDetected.rightEyeOpenProbability,
+                        faceDetected.smilingProbability,
+                        faceDetected.headEulerAngleX,
+                        faceDetected.headEulerAngleY,
+                        faceDetected.headEulerAngleZ
                 )
 
                 // Continue only if current time stamp is within the interval.
@@ -140,8 +142,11 @@
                     if (CaptureOptions.saveImageCaptured) this.handleSaveImage(faceBitmap)
                     else ""
 
+                val imageQuality: Triple<Double, Double, Double> =
+                    ImageQualityController.processImage(faceBitmap, true)
+
                 // Handle to emit image path and the inferences.
-                this.handleEmitImageCaptured(imagePath, inferences)
+                this.handleEmitImageCaptured(imagePath, inferences, imageQuality)
             }
         },
         { errorMessage ->
@@ -210,7 +215,7 @@
 
         val faceBitmap: Bitmap = colorEncodedBitmap
             .rotate(cameraRotation)
-            .mirror(cameraRotation)
+            .mirror()
             .crop(boundingBox)
 
         return Bitmap.createScaledBitmap(
@@ -229,7 +234,8 @@
      */
     private fun handleEmitImageCaptured(
         imagePath: String,
-        inferences: ArrayList<Pair<String, FloatArray>>
+        inferences: ArrayList<Pair<String, FloatArray>>,
+        imageQuality: Triple<Double, Double, Double>
     ) {
 
         if (imagePath == "") return
@@ -242,7 +248,10 @@
                 this.numberOfImages,
                 CaptureOptions.numberOfImages,
                 imagePath,
-                inferences
+                inferences,
+                imageQuality.first,
+                imageQuality.second,
+                imageQuality.third
             )
             return
         }
@@ -259,7 +268,10 @@
             this.numberOfImages,
             CaptureOptions.numberOfImages,
             imagePath,
-            inferences
+            inferences,
+            imageQuality.first,
+            imageQuality.second,
+            imageQuality.third
         )
 
     }
diff --git a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/frame/FrameAnalyzer.kt b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/frame/FrameAnalyzer.kt
index 4aa1513..a2ebb15 100644
--- a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/frame/FrameAnalyzer.kt
+++ b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/frame/FrameAnalyzer.kt
@@ -13,6 +13,7 @@ package ai.cyberlabs.yoonit.camera.analyzers.frame
 
 import ai.cyberlabs.yoonit.camera.CameraGraphicView
 import ai.cyberlabs.yoonit.camera.controllers.ComputerVisionController
+import ai.cyberlabs.yoonit.camera.controllers.ImageQualityController
 import ai.cyberlabs.yoonit.camera.interfaces.CameraCallback
 import ai.cyberlabs.yoonit.camera.interfaces.CameraEventListener
 import ai.cyberlabs.yoonit.camera.models.CaptureOptions
@@ -80,9 +81,12 @@
                 )
             }
 
+            val imageQuality: Triple<Double, Double, Double> =
+                ImageQualityController.processImage(frameBitmap, false)
+
             // Handle to emit image path and the inference.
             Handler(Looper.getMainLooper()).post {
-                this.handleEmitImageCaptured(imagePath, inferences)
+                this.handleEmitImageCaptured(imagePath, inferences, imageQuality)
             }
         }
     }
@@ -131,7 +135,8 @@
      */
     private fun handleEmitImageCaptured(
         imagePath: String,
-        inferences: ArrayList<Pair<String, FloatArray>>
+        inferences: ArrayList<Pair<String, FloatArray>>,
+        imageQuality: Triple<Double, Double, Double>
     ) {
 
         // process face number of images.
@@ -143,7 +148,10 @@
                 this.numberOfImages,
                 CaptureOptions.numberOfImages,
                 imagePath,
-                inferences
+                inferences,
+                imageQuality.first,
+                imageQuality.second,
+                imageQuality.third
             )
             return
         }
@@ -160,7 +168,10 @@
             this.numberOfImages,
             CaptureOptions.numberOfImages,
             imagePath,
-            inferences
+            inferences,
+            imageQuality.first,
+            imageQuality.second,
+            imageQuality.third
         )
 
     }
@@ -182,7 +193,7 @@
 
         mediaBitmap
             .rotate(rotationDegrees)
-            .mirror(rotationDegrees)
+            .mirror()
             .compress(
                 Bitmap.CompressFormat.JPEG,
                 100,
diff --git a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/qrcode/QRCodeAnalyzer.kt b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/qrcode/QRCodeAnalyzer.kt
index eb01c68..7ae4f33 100644
--- a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/qrcode/QRCodeAnalyzer.kt
+++ b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/analyzers/qrcode/QRCodeAnalyzer.kt
@@ -14,7 +14,9 @@ package ai.cyberlabs.yoonit.camera.analyzers.qrcode
 import ai.cyberlabs.yoonit.camera.CameraGraphicView
 import ai.cyberlabs.yoonit.camera.analyzers.CoordinatesController
 import ai.cyberlabs.yoonit.camera.interfaces.CameraEventListener
+import ai.cyberlabs.yoonit.camera.utils.toRGBBitmap
 import android.annotation.SuppressLint
+import android.content.Context
 import android.graphics.Rect
 import android.graphics.RectF
 import androidx.camera.core.ImageAnalysis
@@ -29,6 +31,7 @@ import com.google.mlkit.vision.common.InputImage
  * Custom camera image analyzer based on barcode detection bounded on [CameraController].
  */
 class QRCodeAnalyzer(
+    private val context: Context,
     private val cameraEventListener: CameraEventListener?,
     private val graphicView: CameraGraphicView
 ) : ImageAnalysis.Analyzer {
@@ -78,9 +81,10 @@
                 onComplete()
                 return
             }
 
+            val imageBitmap = imageProxy.image?.toRGBBitmap(context)
-            val image: InputImage = InputImage.fromMediaImage(
-                imageProxy.image,
+            val image: InputImage = InputImage.fromBitmap(
+                imageBitmap,
                 imageProxy.imageInfo.rotationDegrees
             )
 
diff --git a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/controllers/CameraController.kt b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/controllers/CameraController.kt
index 4dc98e7..88f901b 100644
--- a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/controllers/CameraController.kt
+++ b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/controllers/CameraController.kt
@@ -140,6 +140,7 @@
             CaptureType.QRCODE ->
                 this.imageAnalyzerController.start(
                     QRCodeAnalyzer(
+                        context,
                         this.cameraEventListener,
                         this.graphicView
                     )
@@ -181,9 +182,7 @@
      * Set to enable/disable the device torch. Available only to camera lens "back".
      */
     fun setTorch(enable: Boolean) {
-        this.camera?.let {
-            it.cameraControl.enableTorch(enable)
-        }
+        this.camera?.cameraControl?.enableTorch(enable)
     }
 
     private fun buildCameraPreview() {
diff --git a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/controllers/ImageQualityController.kt b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/controllers/ImageQualityController.kt
new file mode 100644
index 0000000..2e478b0
--- /dev/null
+++ b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/controllers/ImageQualityController.kt
@@ -0,0 +1,144 @@
+package ai.cyberlabs.yoonit.camera.controllers
+
+import android.graphics.Bitmap
+import android.graphics.Color
+import kotlin.math.pow
+import kotlin.math.sqrt
+
+class ImageQualityController {
+    companion object {
+        // ellipsoidal mask parameters
+        private const val fracEllipseCenterX: Double = 0.50
+        private const val fracEllipseCenterY: Double = 0.50
+        private const val fracEllipseRadiusX: Double = 0.35
+        private const val fracEllipseRadiusY: Double = 0.50
+
+        // kernel for the convolution (3x3 laplacian of gaussian)
+        private val kernel: IntArray = intArrayOf(
+            1, 1, 1,
+            1, -8, 1,
+            1, 1, 1
+        )
+
+        fun processImage(scaledFaceBitmap: Bitmap, withMask: Boolean) : Triple<Double, Double, Double> {
+
+            val pixels = convertToGrayscale(scaledFaceBitmap)
+
+            var mask: Ellipse? = null
+
+            if (withMask) {
+                mask = Ellipse(
+                    (scaledFaceBitmap.width * fracEllipseCenterX).toInt(),
+                    (scaledFaceBitmap.height * fracEllipseCenterY).toInt(),
+                    (scaledFaceBitmap.width * fracEllipseRadiusX).toInt(),
+                    (scaledFaceBitmap.height * fracEllipseRadiusY).toInt()
+                )
+            }
+
+            val histPair = calcHistogramMetrics(scaledFaceBitmap, pixels, mask)
+            val dark = histPair.first
+            val light = histPair.second
+
+            val sharpness = calcConvolutionMetrics(scaledFaceBitmap, pixels)
+
+            return Triple(dark, light, sharpness)
+        }
+
+        private fun convertToGrayscale(bitmap: Bitmap) : IntArray {
+
+            // create flat array with grayscale image
+            val pixelsRGB = IntArray(bitmap.width * bitmap.height)
+            bitmap.getPixels(pixelsRGB, 0, bitmap.width, 0, 0, bitmap.width, bitmap.height)
+            return pixelsRGB.map { pixel ->
+                (0.299 * Color.red(pixel) + 0.587 * Color.green(pixel) + 0.114 * Color.blue(pixel))
+                    .toInt().coerceIn(0, 255)
+            }.toIntArray()
+        }
+
+        private fun calcHistogramMetrics(bitmap: Bitmap, pixels: IntArray, mask: Ellipse?) : Pair<Double, Double> {
+
+            // calculate histogram of pixels inside bit mask
+            val hist = IntArray(256) {0}
+            for (y in 0 until bitmap.height) {
+                for (x in 0 until bitmap.width) {
+                    if ((mask != null && mask.contains(x, y))
+                        || mask == null) {
+                        val pixel = pixels[y * bitmap.width + x]
+                        hist[pixel] += 1
+                    }
+                }
+            }
+
+            // calculate percentage of bright and dark pixels based on histogram "tails"
+            // one measure of image quality (or image balance) is to quantify how many pixels
+            // lie in the tails of the histogram, indicating the image is unbalanced
+            val darkTail = hist.slice(IntRange(0, 63)).sum().toDouble()
+            val dark = darkTail / hist.sum().toDouble()
+            val lightTail = hist.slice(IntRange(192, 255)).sum().toDouble()
+            val light = lightTail / hist.sum().toDouble()
+
+            return Pair(dark, light)
+        }
+
+        private fun calcConvolutionMetrics(bitmap: Bitmap, pixels: IntArray) : Double {
+
+            // determine edges (high frequency signals) via convolution with 3x3 LoG kernel
+            // conv is the resulting flattened image, the same size as the original
+            val conv = IntArray(bitmap.width * bitmap.height) {0}
+
+            // we iterate on every pixel of the image...
+            for (y in 0 until bitmap.height) {
+                for (x in 0 until bitmap.width) {
+
+                    // ...and on every coefficient of the 3x3 kernel...
+                    var convPixel = 0
+                    for (j in -1 until 2) {
+                        for (i in -1 until 2) {
+
+                            // ...and we compute the dot product (the sum of an element-wise multiplication)
+                            // of the kernel (sliding window) with the current region of the image it is
+                            // passing through, and store the result on the corresponding pixel of the convoluted image
+
+                            // if the image pixel required is "outside" the image, the border pixels will be
+                            // replicated. otherwise, the sum of indices will point to a valid pixel
+                            val pixelY = (y + j).coerceIn(0, bitmap.height - 1)
+                            val pixelX = (x + i).coerceIn(0, bitmap.width - 1)
+                            val pixelIndex = pixelY * bitmap.width + pixelX
+                            val kernelIndex = (j + 1) * 3 + (i + 1)
+
+                            // then, one of the products is computed and accumulated
+                            convPixel += (pixels[pixelIndex] * kernel[kernelIndex])
+                        }
+                    }
+
+                    // finally, the sum of the products is stored as a pixel
+                    conv[y * bitmap.width + x] = convPixel.coerceIn(0, 255)
+                }
+            }
+
+            // compute the standard deviation of the pixels. it results in a measure of the amount
+            // of high frequency signals on the image
+            val mean = conv.average()
+            val accVar = conv.fold(0.0, { acc, pixel -> acc + (pixel - mean).pow(2) })
+
+            return sqrt(accVar / conv.size) / 128
+        }
+    }
+
+
+
+    private class Ellipse(val centerX: Int, val centerY: Int, val radiusX: Int, val radiusY: Int) {
+        fun contains(x0: Int, y0: Int) : Boolean {
+            // the ellipse equation is
+            //
+            //    (x - cx) ^ 2     (y - cy) ^ 2
+            //    ------------  +  ------------  =  1
+            //       rx ^ 2           ry ^ 2
+            //
+            // if an (x0, y0) point inserted in the equation gives < 1,
+            // then the point (x0, y0) is inside the ellipse
+            return (((x0 - centerX).toDouble() / radiusX).pow(2) +
+                    ((y0 - centerY).toDouble() / radiusY).pow(2)) < 1.0
+        }
+    }
+}
\ No newline at end of file
diff --git a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/interfaces/CameraEventListener.kt b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/interfaces/CameraEventListener.kt
index 19a0995..02a0f96 100644
--- a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/interfaces/CameraEventListener.kt
+++ b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/interfaces/CameraEventListener.kt
@@ -18,7 +18,10 @@
         count: Int,
         total: Int,
         imagePath: String,
-        inferences: ArrayList<Pair<String, FloatArray>>
+        inferences: ArrayList<Pair<String, FloatArray>>,
+        darkness: Double,
+        lightness: Double,
+        sharpness: Double
     )
 
     fun onFaceDetected(
diff --git a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/utils/Extensions.kt b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/utils/Extensions.kt
index 3d0de65..0916805 100644
--- a/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/utils/Extensions.kt
+++ b/yoonit-camera/src/main/java/ai/cyberlabs/yoonit/camera/utils/Extensions.kt
@@ -43,11 +43,9 @@ fun Bitmap.rotate(rotationDegrees: Float): Bitmap {
     )
 }
 
-fun Bitmap.mirror(rotationDegrees: Float): Bitmap {
+fun Bitmap.mirror(): Bitmap {
     val matrix = Matrix()
-    if (rotationDegrees == 270f) {
-        matrix.preScale(-1.0f, 1.0f)
-    }
+    matrix.preScale(-1.0f, 1.0f)
 
     return Bitmap.createBitmap(
         this,
@@ -130,3 +128,11 @@ fun Rect.scaledBy(percent: Float): Rect {
         (bottom - deltaY).toInt()
     )
 }
+
+fun Rect.coerce(width: Int, height: Int) {
+    // confines Rect to the bitmap's dimensions
+    this.left = this.left.coerceIn(0, width)
+    this.top = this.top.coerceIn(0, height)
+    this.right = this.right.coerceIn(0, width)
+    this.bottom = this.bottom.coerceIn(0, height)
+}
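
Note on consuming the new callback values (not part of the patch): onImageCaptured now reports darkness, lightness and sharpness alongside the inferences, and the demo's MainActivity above labels them with the cut-offs 0.7, 0.65 and 0.1591. The sketch below is a minimal, self-contained Kotlin illustration of that mapping; the function name describeImageQuality is hypothetical, and the thresholds are simply the demo's values, not constants defined by the library.

// Hypothetical helper: maps the three metrics emitted by onImageCaptured to the
// same labels the demo's MainActivity shows. Thresholds copied from the demo; tune as needed.
fun describeImageQuality(darkness: Double, lightness: Double, sharpness: Double): List<String> {
    val labels = mutableListOf<String>()
    if (darkness > 0.7) labels.add("Too Dark")
    if (lightness > 0.65) labels.add("Too Light")
    if (sharpness < 0.1591) labels.add("Blurred")
    if (labels.isEmpty()) labels.add("Normal")
    return labels
}

fun main() {
    // A dark but sharp capture is flagged only as "Too Dark".
    println(describeImageQuality(darkness = 0.75, lightness = 0.10, sharpness = 0.30))
}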
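
The new ImageQualityController combines two simple measures: darkness and lightness are the fractions of grayscale pixels falling in the bottom (0..63) and top (192..255) bins of a 256-bin histogram, and sharpness is the standard deviation of the response to the patch's 3x3 Laplacian kernel, divided by 128. The sketch below reproduces the same arithmetic on a plain row-major IntArray of 0..255 values so it can be run off-device; it is an illustration only and omits the elliptical face mask the patch applies in the face-capture path.

import kotlin.math.pow
import kotlin.math.sqrt

// Illustrative re-implementation of the patch's metrics on a plain grayscale buffer.
// Not the library code: no Bitmap, no elliptical mask.
fun qualityMetrics(gray: IntArray, width: Int, height: Int): Triple<Double, Double, Double> {
    // Histogram tails: fraction of pixels in the darkest and brightest 64 bins.
    val hist = IntArray(256)
    gray.forEach { hist[it.coerceIn(0, 255)]++ }
    val total = gray.size.toDouble()
    val dark = hist.slice(0..63).sum() / total
    val light = hist.slice(192..255).sum() / total

    // 3x3 Laplacian convolution with replicated borders, clamped to 0..255.
    val kernel = intArrayOf(1, 1, 1, 1, -8, 1, 1, 1, 1)
    val conv = IntArray(gray.size)
    for (y in 0 until height) for (x in 0 until width) {
        var acc = 0
        for (j in -1..1) for (i in -1..1) {
            val py = (y + j).coerceIn(0, height - 1)
            val px = (x + i).coerceIn(0, width - 1)
            acc += gray[py * width + px] * kernel[(j + 1) * 3 + (i + 1)]
        }
        conv[y * width + x] = acc.coerceIn(0, 255)
    }

    // Sharpness: standard deviation of the edge response, normalized by 128 as in the patch.
    val mean = conv.average()
    val variance = conv.sumOf { (it - mean).pow(2) } / conv.size
    return Triple(dark, light, sqrt(variance) / 128)
}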
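
One behavioural detail worth calling out: Bitmap.mirror() is now applied unconditionally, and CoordinatesController reflects the x coordinate across the view width whenever the back lens is selected, presumably to keep the overlay aligned with the mirrored frames. The reflection is just x' = viewWidth - x; a tiny illustrative sketch (the function name is ours, not the library's):

// Illustrative only: reflect an overlay x coordinate across the preview width,
// as CoordinatesController does for CameraSelector.LENS_FACING_BACK.
fun mirrorX(x: Float, viewWidth: Int): Float = viewWidth - x

fun main() {
    // A box centre 100 px from the left of a 1080 px-wide preview maps to 980 px.
    println(mirrorX(100f, 1080))
}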