android - Error getting native address of native library: task_vision_jni_gms

Tags: android kotlin tensorflow tensorflow-lite

I am trying to integrate a TensorFlow Lite model into my Android application. I have a simple model that distinguishes cats from dogs. I downloaded the required dataset from Kaggle and trained the model with the Teachable Machine website, then exported it as TensorFlow Lite with the quantized option. Below is the Android code I use to run detection with the model.

class ObjectDetectorHelper(
    var threshold: Float = 0.5f,
    var numThreads: Int = 2,
    var maxResults: Int = 1,
    var currentDelegate: Int = 0,
    var currentModel: Int = 0,
    val context: Context,
    val objectDetectorListener: DetectorListener
) {

    private val TAG = "ObjectDetectionHelper"

    // For this example this needs to be a var so it can be reset on changes. If the ObjectDetector
    // will not change, a lazy val would be preferable.
    private var objectDetector: ObjectDetector? = null
    private var gpuSupported = false

    init {

        TfLiteGpu.isGpuDelegateAvailable(context).onSuccessTask { gpuAvailable: Boolean ->
            val optionsBuilder =
                TfLiteInitializationOptions.builder()
            // Remember GPU availability so setupObjectDetector() can honor DELEGATE_GPU
            gpuSupported = gpuAvailable
            if (gpuAvailable) {
                optionsBuilder.setEnableGpuDelegateSupport(true)
            }
            TfLiteVision.initialize(context, optionsBuilder.build())
        }.addOnSuccessListener {
            objectDetectorListener.onInitialized()
        }.addOnFailureListener {
            objectDetectorListener.onError("TfLiteVision failed to initialize: "
                    + it.message)
        }
    }

    fun clearObjectDetector() {
        objectDetector = null
    }

    // Initialize the object detector using current settings on the
    // thread that is using it. CPU and NNAPI delegates can be used with detectors
    // that are created on the main thread and used on a background thread, but
    // the GPU delegate needs to be used on the thread that initialized the detector
    private fun setupObjectDetector() {
        if (!TfLiteVision.isInitialized()) {
            Log.e(TAG, "setupObjectDetector: TfLiteVision is not initialized yet")
            return
        }

        // Create the base options for the detector using the specified max results and score threshold
        val optionsBuilder =
            ObjectDetector.ObjectDetectorOptions.builder()
                .setScoreThreshold(threshold)
                .setMaxResults(maxResults)

        // Set general detection options, including number of used threads
        val baseOptionsBuilder = BaseOptions.builder().setNumThreads(numThreads)

        // Use the specified hardware for running the model. Default to CPU
        when (currentDelegate) {
            DELEGATE_CPU -> {
                // Default
            }
            DELEGATE_GPU -> {
                if (gpuSupported) {
                    baseOptionsBuilder.useGpu()
                } else {
                    objectDetectorListener.onError("GPU is not supported on this device")
                }
            }
            DELEGATE_NNAPI -> {
                baseOptionsBuilder.useNnapi()
            }
        }

        optionsBuilder.setBaseOptions(baseOptionsBuilder.build())

        // Every option resolves to the single Teachable Machine export bundled with the app
        val modelName =
            when (currentModel) {
                MODEL_MOBILENETV1 -> "model.tflite"
                MODEL_EFFICIENTDETV0 -> "model.tflite"
                MODEL_EFFICIENTDETV1 -> "model.tflite"
                MODEL_EFFICIENTDETV2 -> "model.tflite"
                else -> "model.tflite"
            }

        try {
            objectDetector =
                ObjectDetector.createFromFileAndOptions(context, modelName, optionsBuilder.build())
        } catch (e: Exception) {
            objectDetectorListener.onError(
                "Object detector failed to initialize. See error logs for details"
            )
            Log.e(TAG, "TFLite failed to load model with error: " + e.message)
        }
    }

    fun detect(image: Bitmap, imageRotation: Int) {
        Log.i("resultssss","9")
        if (!TfLiteVision.isInitialized()) {
            Log.e(TAG, "detect: TfLiteVision is not initialized yet")
            return
        }
        Log.i("resultssss","10")
        if (objectDetector == null) {
            setupObjectDetector()
        }
        Log.i("resultssss","11")
        // Inference time is the difference between the system time at the start and finish of the
        // process
        var inferenceTime = SystemClock.uptimeMillis()
        Log.i("resultssss","12")
        // Create preprocessor for the image.
        // See https://www.tensorflow.org/lite/inference_with_metadata/lite_support#imageprocessor_architecture
        val imageProcessor = ImageProcessor.Builder().add(Rot90Op(-imageRotation / 90)).build()
        Log.i("resultssss","13")
        // Preprocess the image and convert it into a TensorImage for detection.
        val tensorImage = imageProcessor.process(TensorImage.fromBitmap(image))
        Log.i("resultssss","14")
        val results = objectDetector?.detect(tensorImage)
        Log.i("resultssss","15")
        inferenceTime = SystemClock.uptimeMillis() - inferenceTime
        Log.i("resultssss","16")
        objectDetectorListener.onResults(
            results,
            inferenceTime,
            tensorImage.height,
            tensorImage.width)
    }

    interface DetectorListener {
        fun onInitialized()
        fun onError(error: String)
        fun onResults(
            results: MutableList<Detection>?,
            inferenceTime: Long,
            imageHeight: Int,
            imageWidth: Int
        )
    }

    companion object {
        const val DELEGATE_CPU = 0
        const val DELEGATE_GPU = 1
        const val DELEGATE_NNAPI = 2
        const val MODEL_MOBILENETV1 = 0
        const val MODEL_EFFICIENTDETV0 = 1
        const val MODEL_EFFICIENTDETV1 = 2
        const val MODEL_EFFICIENTDETV2 = 3
    }
}


class MainActivity : AppCompatActivity(), ObjectDetectorHelper.DetectorListener {


    private lateinit var cameraExecutor: ExecutorService
    private var mCameraProvider: ProcessCameraProvider? = null

    private lateinit var viewFinder: PreviewView

    private lateinit var objectDetectorHelper: ObjectDetectorHelper
    private lateinit var bitmapBuffer: Bitmap

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)
        viewFinder = findViewById(R.id.viewFinder)


        objectDetectorHelper = ObjectDetectorHelper(
            context = this,
            objectDetectorListener = this
        )


    }

    private fun setUpCamera() {
        if (allPermissionsGranted()) {
            startCamera()
        }
    }


    private fun detectObjects(image: ImageProxy) {
        Log.i("resultssss", "5")
        // Copy out RGB bits to the shared bitmap buffer
        image.use { bitmapBuffer.copyPixelsFromBuffer(image.planes[0].buffer) }
        Log.i("resultssss", "6")
        val imageRotation = image.imageInfo.rotationDegrees
        Log.i("resultssss", "7")
        // Pass Bitmap and rotation to the object detector helper for processing and detection
        objectDetectorHelper.detect(bitmapBuffer, imageRotation)
        Log.i("resultssss", "8")
    }

    private fun startCamera() {
        val cameraProviderFuture = ProcessCameraProvider.getInstance(this)

        cameraProviderFuture.addListener({
            // Used to bind the lifecycle of cameras to the lifecycle owner
            val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
            mCameraProvider = cameraProvider
            // Preview
            val surfacePreview = Preview.Builder()
                .setTargetAspectRatio(AspectRatio.RATIO_4_3)
                .setTargetRotation(viewFinder.display.rotation)
                .build()
                .also {
                    it.setSurfaceProvider(viewFinder.surfaceProvider)
                }

            val imageAnalyzer =
                ImageAnalysis.Builder()
                    .setTargetAspectRatio(AspectRatio.RATIO_4_3)
                    .setTargetRotation(viewFinder.display.rotation)
                    .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
                    .setOutputImageFormat(OUTPUT_IMAGE_FORMAT_RGBA_8888)
                    .build()
                    // The analyzer can then be assigned to the instance
                    .also {
                        Log.i("resultssss", "1")
                        it.setAnalyzer(cameraExecutor) { image ->
                            Log.i("resultssss", "2")
                            if (!::bitmapBuffer.isInitialized) {
                                Log.i("resultssss", "3")
                                // The image rotation and RGB image buffer are initialized only once
                                // the analyzer has started running
                                bitmapBuffer = Bitmap.createBitmap(
                                    image.width,
                                    image.height,
                                    Bitmap.Config.ARGB_8888
                                )
                            }
                            Log.i("resultssss", "4")
                            detectObjects(image)
                        }
                    }

            // Select back camera as a default
            val cameraSelector = CameraSelector.DEFAULT_BACK_CAMERA

            try {
                // Unbind use cases before rebinding
                cameraProvider.unbindAll()

                // Bind use cases to camera
                cameraProvider.bindToLifecycle(
                    this, cameraSelector, surfacePreview, imageAnalyzer
                )

            } catch (exc: Exception) {
                Toast.makeText(this, exc.message, Toast.LENGTH_LONG).show()
            }

        }, ContextCompat.getMainExecutor(this))
    }

    private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
        ContextCompat.checkSelfPermission(
            baseContext, it
        ) == PackageManager.PERMISSION_GRANTED
    }

    override fun onDestroy() {
        super.onDestroy()
        objectDetectorHelper.clearObjectDetector()
        // cameraExecutor is created lazily in onInitialized(), so guard the lateinit
        if (::cameraExecutor.isInitialized) cameraExecutor.shutdown()
    }

    companion object {
        private const val REQUEST_CODE_PERMISSIONS = 10
        private val REQUIRED_PERMISSIONS =
            mutableListOf(
                android.Manifest.permission.CAMERA
            ).toTypedArray()
    }

    override fun onRequestPermissionsResult(
        requestCode: Int, permissions: Array<String>, grantResults: IntArray
    ) {
        super.onRequestPermissionsResult(requestCode, permissions, grantResults)
        if (requestCode == REQUEST_CODE_PERMISSIONS) {
            if (allPermissionsGranted()) {
                setUpCamera()
            } else {
                Toast.makeText(
                    this,
                    "Permissions not granted by the user.",
                    Toast.LENGTH_SHORT
                ).show()
                finish()
            }
        }
    }

    override fun onInitialized() {
        if (allPermissionsGranted()) {
            setUpCamera()
        } else {
            ActivityCompat.requestPermissions(
                this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS
            )
        }
        cameraExecutor = Executors.newSingleThreadExecutor()
    }

    override fun onError(error: String) {
        runOnUiThread { Toast.makeText(this, error, Toast.LENGTH_SHORT).show() }

    }

    override fun onResults(
        results: MutableList<Detection>?,
        inferenceTime: Long,
        imageHeight: Int,
        imageWidth: Int
    ) {
        runOnUiThread {
            // firstOrNull() avoids an IndexOutOfBoundsException when nothing is detected
            val top = results?.firstOrNull()
            Log.i("resultssss", "${top?.categories} ${top?.boundingBox}")
        }

    }

}

The full error log is as follows:

Error getting native address of native library: task_vision_jni_gms
    java.lang.IllegalArgumentException: Error occurred when initializing ObjectDetector: Mobile SSD models are expected to have exactly 4 outputs, found 1
        at org.tensorflow.lite.task.gms.vision.detector.ObjectDetector.initJniWithModelFdAndOptions(Native Method)
        at org.tensorflow.lite.task.gms.vision.detector.ObjectDetector.zzb(Unknown Source:0)
        at org.tensorflow.lite.task.gms.vision.detector.zzb.createHandle(org.tensorflow:tensorflow-lite-task-vision-play-services@@0.4.2:4)
        at org.tensorflow.lite.task.core.TaskJniUtils$1.createHandle(TaskJniUtils.java:70)
        at org.tensorflow.lite.task.core.TaskJniUtils.createHandleFromLibrary(TaskJniUtils.java:91)
        at org.tensorflow.lite.task.core.TaskJniUtils.createHandleFromFdAndOptions(TaskJniUtils.java:66)
        at org.tensorflow.lite.task.gms.vision.detector.ObjectDetector.createFromFileAndOptions(org.tensorflow:tensorflow-lite-task-vision-play-services@@0.4.2:2)
        at com.affinidi.tfdemoone.ObjectDetectorHelper.setupObjectDetector(ObjectDetectorHelper.kt:104)
        at com.affinidi.tfdemoone.ObjectDetectorHelper.detect(ObjectDetectorHelper.kt:121)
        at com.affinidi.tfdemoone.MainActivity.detectObjects(MainActivity.kt:89)
        at com.affinidi.tfdemoone.MainActivity.startCamera$lambda$4$lambda$3$lambda$2(MainActivity.kt:132)
        at com.affinidi.tfdemoone.MainActivity.$r8$lambda$cwS3iJ069sufgGf-nT7H81EEGtQ(Unknown Source:0)
        at com.affinidi.tfdemoone.MainActivity$$ExternalSyntheticLambda3.analyze(Unknown Source:2)
        at androidx.camera.core.ImageAnalysis.lambda$setAnalyzer$2(ImageAnalysis.java:481)
        at androidx.camera.core.ImageAnalysis$$ExternalSyntheticLambda2.analyze(Unknown Source:2)
        at androidx.camera.core.ImageAnalysisAbstractAnalyzer.lambda$analyzeImage$0$androidx-camera-core-ImageAnalysisAbstractAnalyzer(ImageAnalysisAbstractAnalyzer.java:286)
        at androidx.camera.core.ImageAnalysisAbstractAnalyzer$$ExternalSyntheticLambda1.run(Unknown Source:14)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1167)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:641)
        at java.lang.Thread.run(Thread.java:920)

Best Answer

Inspecting model.tflite shows that the model you trained is a classification model, but you are using the ObjectDetector API.

To debug, load the model in Python and inspect it:

import numpy as np
import tensorflow as tf
from PIL import Image

# Load the TFLite model and allocate tensors.
TFLITE_FILE_PATH = "model.tflite"  # path to the exported Teachable Machine model
interpreter = tf.lite.Interpreter(model_path=TFLITE_FILE_PATH)
interpreter.allocate_tensors()

# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Test the model on a cat image: resize to the model's input shape
# (e.g. [1, 224, 224, 3]) and feed it as uint8, since the model is quantized.
image = Image.open("/home/vijay/Downloads/cat.jpg")
input_shape = input_details[0]['shape']
img = np.array(image.resize((input_shape[2], input_shape[1])), dtype=np.uint8)
interpreter.set_tensor(input_details[0]['index'], img[None, ...])
interpreter.invoke()

# Get the output.
output_data = interpreter.get_tensor(output_details[0]['index'])
# output_data [255, 0] --> idx 0 --> cat
# Checking the above for a dog photo:
# output_data [0, 255] --> idx 1 --> dog
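
The mismatch is also visible in the tensor counts. As a quick check (reusing the interpreter and output_details from the snippet above), a Mobile SSD detection model exposes four output tensors (locations, classes, scores, and number of detections), while a classification model like this one exposes only one:

# The Task Library's ObjectDetector expects exactly 4 output tensors;
# this classification model reports just 1, matching the exception.
print(len(output_details))  # -> 1
for detail in output_details:
    print(detail['name'], detail['shape'], detail['dtype'])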

As the error points out, a classification model gives you only one output. So take a look at the Android sample on how to handle a classification problem: https://github.com/tensorflow/examples/tree/master/lite/examples/image_classification/android
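
In practice that means swapping the ObjectDetector API for the Task Library's ImageClassifier. Below is a minimal sketch, assuming the standard tensorflow-lite-task-vision dependency (rather than the Play Services variant) and reusing the question's model.tflite asset name and thresholds; treat it as a starting point, not a drop-in replacement:

import android.content.Context
import android.util.Log
import org.tensorflow.lite.support.image.TensorImage
import org.tensorflow.lite.task.core.BaseOptions
import org.tensorflow.lite.task.vision.classifier.ImageClassifier

// A minimal sketch: classify the frame instead of running detection.
// "model.tflite", the threshold, and maxResults mirror the question's setup.
fun classifyFrame(context: Context, tensorImage: TensorImage) {
    val options = ImageClassifier.ImageClassifierOptions.builder()
        .setBaseOptions(BaseOptions.builder().setNumThreads(2).build())
        .setScoreThreshold(0.5f)
        .setMaxResults(1)
        .build()

    val classifier = ImageClassifier.createFromFileAndOptions(
        context, "model.tflite", options
    )

    // classify() returns one Classifications entry per head, each holding
    // the matching categories (label + score), e.g. "cat" or "dog".
    val results = classifier.classify(tensorImage)
    val top = results.firstOrNull()?.categories?.firstOrNull()
    Log.i("resultssss", "label=${top?.label} score=${top?.score}")
}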

Regarding android - Error getting native address of native library: task_vision_jni_gms, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/74841605/
