diff --git a/android/build.gradle b/android/build.gradle index 692fc79a..93653845 100644 --- a/android/build.gradle +++ b/android/build.gradle @@ -31,7 +31,7 @@ android { compileSdkVersion 31 defaultConfig { - minSdkVersion 21 + minSdkVersion 23 testInstrumentationRunner 'androidx.test.runner.AndroidJUnitRunner' consumerProguardFiles 'proguard-rules.pro' } @@ -51,9 +51,13 @@ android { } dependencies { - implementation 'io.github.webrtc-sdk:android:114.5735.02' + implementation 'io.github.webrtc-sdk:android:114.5735.02' implementation 'com.twilio:audioswitch:1.1.8' - implementation 'androidx.annotation:annotation:1.1.0' + implementation 'androidx.annotation:annotation:1.6.0' implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version" + + // ML Kit + implementation 'com.google.mlkit:segmentation-selfie:16.0.0-beta4' + implementation 'androidx.camera:camera-core:1.2.3' // implementation files('libwebrtc.aar') } diff --git a/android/local.properties b/android/local.properties index 46a8ab13..b18a9d80 100644 --- a/android/local.properties +++ b/android/local.properties @@ -4,5 +4,5 @@ # Location of the SDK. This is only used by Gradle. # For customization when using a Version Control System, please read the # header note. -#Sat May 20 23:50:57 ICT 2023 -sdk.dir=/home/lambiengcode/Android/Sdk +#Tue Jul 18 10:35:26 ICT 2023 +sdk.dir=/Users/lambiengcode/Library/Android/sdk diff --git a/android/proguard-rules.pro b/android/proguard-rules.pro index 6ce98961..54f34ec6 100644 --- a/android/proguard-rules.pro +++ b/android/proguard-rules.pro @@ -1,3 +1,3 @@ # Flutter WebRTC -keep class com.cloudwebrtc.webrtc.** { *; } --keep class org.webrtc.** { *; } +-keep class org.webrtc.** { *; } \ No newline at end of file diff --git a/android/src/main/java/com/cloudwebrtc/webrtc/FlutterRTCVirtualBackground.kt b/android/src/main/java/com/cloudwebrtc/webrtc/FlutterRTCVirtualBackground.kt new file mode 100644 index 00000000..f4be51bf --- /dev/null +++ b/android/src/main/java/com/cloudwebrtc/webrtc/FlutterRTCVirtualBackground.kt @@ -0,0 +1,456 @@ +package com.cloudwebrtc.webrtc + +import android.graphics.Bitmap +import android.graphics.BitmapFactory +import android.graphics.Canvas +import android.graphics.Color +import android.graphics.ImageFormat +import android.graphics.Matrix +import android.graphics.Rect +import android.graphics.YuvImage +import android.opengl.GLES20 +import android.opengl.GLUtils +import android.util.Log +import com.google.android.gms.tasks.Task +import com.google.mlkit.vision.common.InputImage +import com.google.mlkit.vision.segmentation.Segmentation +import com.google.mlkit.vision.segmentation.SegmentationMask +import com.google.mlkit.vision.segmentation.selfie.SelfieSegmenterOptions +import org.webrtc.EglBase +import org.webrtc.SurfaceTextureHelper +import org.webrtc.TextureBufferImpl +import org.webrtc.VideoFrame +import org.webrtc.VideoProcessor +import org.webrtc.VideoSink +import org.webrtc.VideoSource +import org.webrtc.YuvConverter +import org.webrtc.YuvHelper +import java.io.ByteArrayOutputStream +import java.nio.ByteBuffer +import java.util.Arrays +import kotlin.math.max + +class FlutterRTCVirtualBackground { + val TAG = FlutterWebRTCPlugin.TAG + + private var videoSource: VideoSource? = null + private var textureHelper: SurfaceTextureHelper? = null + private var backgroundBitmap: Bitmap? 
= null + private var expectConfidence = 0.7 + private val segmentOptions = SelfieSegmenterOptions.Builder() + .setDetectorMode(SelfieSegmenterOptions.STREAM_MODE) + .enableRawSizeMask() + .setStreamModeSmoothingRatio(1.0f) + .build() + private val segmenter = Segmentation.getClient(segmentOptions) + + // MARK: Public functions + + /** + * Initialize the VirtualBackgroundManager with the given VideoSource. + * + * @param videoSource The VideoSource to be used for video capturing. + */ + fun initialize(videoSource: VideoSource) { + this.videoSource = videoSource + setVirtualBackground() + } + + /** + * Dispose of the VirtualBackgroundManager, clearing its references and configurations. + */ + fun dispose() { + this.videoSource = null + this.expectConfidence = 0.7 + setBackgroundIsNull() + } + + fun setBackgroundIsNull() { + this.backgroundBitmap = null + } + + /** + * Configure the virtual background by setting the background bitmap and the desired confidence level. + * + * @param bgBitmap The background bitmap to be used for virtual background replacement. + * @param confidence The confidence level (0 to 1) for selecting the foreground in the segmentation mask. + */ + fun configurationVirtualBackground(bgBitmap: Bitmap, confidence: Double) { + backgroundBitmap = bgBitmap + expectConfidence = confidence + } + + /** + * Set up the virtual background processing by attaching a VideoProcessor to the VideoSource. + * The VideoProcessor will handle capturing video frames, performing segmentation, and replacing the background. + */ + private fun setVirtualBackground() { + // Create an instance of EglBase + val eglBase = EglBase.create() + textureHelper = SurfaceTextureHelper.create("SurfaceTextureThread", eglBase.eglBaseContext) + + // Attach a VideoProcessor to the VideoSource to process captured video frames + videoSource!!.setVideoProcessor(object : VideoProcessor { + private var sink: VideoSink? = null + + override fun onCapturerStarted(success: Boolean) { + // Handle video capture start event + } + + override fun onCapturerStopped() { + // Handle video capture stop event + } + + override fun onFrameCaptured(frame: VideoFrame) { + if (sink != null) { + if (backgroundBitmap == null) { + // If no background is set, pass the original frame to the sink + sink!!.onFrame(frame) + } else { + // Otherwise, perform segmentation on the captured frame and replace the background + val inputFrameBitmap: Bitmap? = videoFrameToBitmap(frame) + if (inputFrameBitmap != null) { + runSegmentationInBackground(inputFrameBitmap, frame, sink!!) + } else { + Log.d(TAG, "Convert video frame to bitmap failure") + } + } + } + } + + override fun setSink(sink: VideoSink?) { + // Store the VideoSink to send the processed frame back to WebRTC + // The sink will be used after segmentation processing + this.sink = sink + } + }) + } + + /** + * Perform segmentation on the input bitmap in the background thread. + * After segmentation, the background is replaced with the configured virtual background. + * + * @param inputFrameBitmap The input frame bitmap to be segmented. + * @param frame The original VideoFrame metadata for the input bitmap. + * @param sink The VideoSink to send the processed frame back to WebRTC. + */ + private fun runSegmentationInBackground( + inputFrameBitmap: Bitmap, + frame: VideoFrame, + sink: VideoSink + ) { + Thread { + // Perform segmentation in the background thread + processSegmentation(inputFrameBitmap, frame, sink) + }.start() + } + + /** + * Convert a VideoFrame to a Bitmap for further processing. 
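+     * The frame is first converted to I420, repacked as NV21, compressed to an
+     * in-memory JPEG via YuvImage, and finally decoded back into a Bitmap. This
+     * path is lossy and allocation-heavy, and it runs for every captured frame
+     * while a virtual background is active.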
+     *
+     * @param videoFrame The input VideoFrame to be converted.
+     * @return The corresponding Bitmap representation of the VideoFrame.
+     */
+    private fun videoFrameToBitmap(videoFrame: VideoFrame): Bitmap? {
+        // Retain the VideoFrame so its buffer is not released back to WebRTC while we convert it
+        videoFrame.retain()
+
+        // Convert the VideoFrame to I420 format
+        val buffer = videoFrame.buffer
+        val i420Buffer = buffer.toI420()
+        val y = i420Buffer!!.dataY
+        val u = i420Buffer.dataU
+        val v = i420Buffer.dataV
+        val width = i420Buffer.width
+        val height = i420Buffer.height
+        val strides = intArrayOf(
+            i420Buffer.strideY,
+            i420Buffer.strideU,
+            i420Buffer.strideV
+        )
+        // Repack I420 as NV21, the format YuvImage requires: passing the V and U
+        // planes swapped into the NV12 helper yields an NV21 layout.
+        val chromaWidth = (width + 1) / 2
+        val chromaHeight = (height + 1) / 2
+        val minSize = width * height + chromaWidth * chromaHeight * 2
+        val yuvBuffer = ByteBuffer.allocateDirect(minSize)
+        YuvHelper.I420ToNV12(
+            y,
+            strides[0],
+            v,
+            strides[2],
+            u,
+            strides[1],
+            yuvBuffer,
+            width,
+            height
+        )
+        // Trim the backing array to the bytes actually written, skipping the array offset
+        val cleanedArray =
+            Arrays.copyOfRange(yuvBuffer.array(), yuvBuffer.arrayOffset(), yuvBuffer.arrayOffset() + minSize)
+        val yuvImage = YuvImage(
+            cleanedArray,
+            ImageFormat.NV21,
+            width,
+            height,
+            null
+        )
+        i420Buffer.release()
+        videoFrame.release()
+
+        // Convert YuvImage to byte array
+        val outputStream = ByteArrayOutputStream()
+        yuvImage.compressToJpeg(
+            Rect(0, 0, yuvImage.width, yuvImage.height),
+            100,
+            outputStream
+        )
+        val jpegData = outputStream.toByteArray()
+
+        // Convert byte array to Bitmap
+        return BitmapFactory.decodeByteArray(jpegData, 0, jpegData.size)
+    }
+
+    /**
+     * Process the segmentation of the input bitmap using the AI segmenter.
+     * The resulting segmented bitmap is then combined with the provided background bitmap,
+     * and the final output frame is sent to the video sink.
+     *
+     * @param bitmap The input bitmap to be segmented.
+     * @param original The original video frame for metadata reference (rotation, timestamp, etc.).
+     * @param sink The VideoSink to receive the processed video frame.
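+     *
+     * Note: the composited bitmap is uploaded into an OpenGL texture on the
+     * SurfaceTextureHelper thread and converted back to an I420 buffer with
+     * YuvConverter, so the processed image re-enters the WebRTC pipeline as an
+     * ordinary VideoFrame.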
+     */
+    private fun processSegmentation(bitmap: Bitmap, original: VideoFrame, sink: VideoSink) {
+        // Create an InputImage from the input bitmap
+        val inputImage = InputImage.fromBitmap(bitmap, 0)
+
+        // Perform segmentation using the AI segmenter
+        val result = segmenter.process(inputImage)
+        result.addOnCompleteListener { task: Task<SegmentationMask> ->
+            if (task.isSuccessful) {
+                // Segmentation process successful
+                val segmentationMask = task.result
+                val mask = segmentationMask.buffer
+                val maskWidth = segmentationMask.width
+                val maskHeight = segmentationMask.height
+                mask.rewind()
+
+                // Convert the buffer to an array of colors
+                val colors = maskColorsFromByteBuffer(
+                    mask,
+                    maskWidth,
+                    maskHeight,
+                    bitmap,
+                    bitmap.width,
+                    bitmap.height
+                )
+
+                // Create a segmented bitmap from the array of colors
+                val segmentedBitmap =
+                    createBitmapFromColors(colors, bitmap.width, bitmap.height)
+
+                if (backgroundBitmap == null) {
+                    return@addOnCompleteListener
+                }
+
+                // Draw the segmented bitmap on top of the background
+                val outputBitmap =
+                    drawSegmentedBackground(segmentedBitmap, backgroundBitmap)
+
+                // Create a new VideoFrame from the processed bitmap
+                val yuvConverter = YuvConverter()
+                if (textureHelper != null && textureHelper!!.handler != null) {
+                    textureHelper!!.handler.post {
+                        val textures = IntArray(1)
+                        GLES20.glGenTextures(1, textures, 0)
+                        GLES20.glBindTexture(
+                            GLES20.GL_TEXTURE_2D,
+                            textures[0]
+                        )
+                        GLES20.glTexParameteri(
+                            GLES20.GL_TEXTURE_2D,
+                            GLES20.GL_TEXTURE_MIN_FILTER,
+                            GLES20.GL_NEAREST
+                        )
+                        GLES20.glTexParameteri(
+                            GLES20.GL_TEXTURE_2D,
+                            GLES20.GL_TEXTURE_MAG_FILTER,
+                            GLES20.GL_NEAREST
+                        )
+                        GLUtils.texImage2D(
+                            GLES20.GL_TEXTURE_2D,
+                            0,
+                            outputBitmap,
+                            0
+                        )
+                        val buffer = TextureBufferImpl(
+                            outputBitmap!!.width,
+                            outputBitmap.height,
+                            VideoFrame.TextureBuffer.Type.RGB,
+                            textures[0],
+                            Matrix(),
+                            textureHelper!!.handler,
+                            yuvConverter,
+                            null
+                        )
+                        val i420Buf = yuvConverter.convert(buffer)
+                        if (i420Buf != null) {
+                            val outputVideoFrame = VideoFrame(
+                                i420Buf,
+                                original.rotation,
+                                original.timestampNs
+                            )
+                            sink.onFrame(outputVideoFrame)
+                        }
+                    }
+                }
+            } else {
+                // Handle segmentation error
+                val error = task.exception
+                // Log error information
+                Log.d(TAG, "Segmentation error: " + error.toString())
+            }
+        }
+    }
+
+    /**
+     * Convert the mask buffer to an array of colors representing the segmented regions.
+     *
+     * @param mask The mask buffer obtained from the AI segmenter.
+     * @param maskWidth The width of the mask.
+     * @param maskHeight The height of the mask.
+     * @param originalBitmap The original input bitmap used for color extraction.
+     * @param scaledWidth The width of the scaled bitmap.
+     * @param scaledHeight The height of the scaled bitmap.
+     * @return An array of colors representing the segmented regions.
+     */
+    private fun maskColorsFromByteBuffer(
+        mask: ByteBuffer,
+        maskWidth: Int,
+        maskHeight: Int,
+        originalBitmap: Bitmap,
+        scaledWidth: Int,
+        scaledHeight: Int
+    ): IntArray {
+        val colors = IntArray(scaledWidth * scaledHeight)
+        var count = 0
+        val scaleX = scaledWidth.toFloat() / maskWidth
+        val scaleY = scaledHeight.toFloat() / maskHeight
+        for (y in 0 until scaledHeight) {
+            for (x in 0 until scaledWidth) {
+                val maskX: Int = (x / scaleX).toInt()
+                val maskY: Int = (y / scaleY).toInt()
+                if (maskX in 0 until maskWidth && maskY in 0 until maskHeight) {
+                    val position = (maskY * maskWidth + maskX) * 4
+                    mask.position(position)
+
+                    // Get the confidence of the (x,y) pixel in the mask being in the foreground.
+ val foregroundConfidence = mask.float + val pixelColor = originalBitmap.getPixel(x, y) + + // Extract the color channels from the original pixel + val alpha = Color.alpha(pixelColor) + val red = Color.red(pixelColor) + val green = Color.green(pixelColor) + val blue = Color.blue(pixelColor) + + // Calculate the new alpha and color for the foreground and background + var newAlpha: Int + var newRed: Int + var newGreen: Int + var newBlue: Int + if (foregroundConfidence >= expectConfidence) { + // Foreground uses color from the original bitmap + newAlpha = alpha + newRed = red + newGreen = green + newBlue = blue + } else { + // Background is black with alpha 0 + newAlpha = 0 + newRed = 0 + newGreen = 0 + newBlue = 0 + } + + // Create a new color with the adjusted alpha and RGB channels + val newColor = Color.argb(newAlpha, newRed, newGreen, newBlue) + colors[count] = newColor + } else { + // Pixels outside the original mask size are considered background (black with alpha 0) + colors[count] = Color.argb(0, 0, 0, 0) + } + count++ + } + } + return colors + } + + /** + * Draws the segmentedBitmap on top of the backgroundBitmap with the background resized and centered + * to fit the dimensions of the segmentedBitmap. The output is a new bitmap containing the combined + * result. + * + * @param segmentedBitmap The bitmap representing the segmented foreground with transparency. + * @param backgroundBitmap The bitmap representing the background image to be used as the base. + * @return The resulting bitmap with the segmented foreground overlaid on the background. + * Returns null if either of the input bitmaps is null. + */ + private fun drawSegmentedBackground( + segmentedBitmap: Bitmap?, + backgroundBitmap: Bitmap? + ): Bitmap? { + if (segmentedBitmap == null || backgroundBitmap == null) { + // Handle invalid bitmaps + return null + } + + val segmentedWidth = segmentedBitmap.width + val segmentedHeight = segmentedBitmap.height + + // Create a new bitmap with dimensions matching the segmentedBitmap + val outputBitmap = + Bitmap.createBitmap(segmentedWidth, segmentedHeight, Bitmap.Config.ARGB_8888) + + // Create a canvas to draw on the outputBitmap + val canvas = Canvas(outputBitmap) + + // Calculate the scale factor for the backgroundBitmap to be larger or equal to the segmentedBitmap + val scaleX = segmentedWidth.toFloat() / backgroundBitmap.width + val scaleY = segmentedHeight.toFloat() / backgroundBitmap.height + val scale = max(scaleX, scaleY) + + // Calculate the new dimensions of the backgroundBitmap after scaling + val newBackgroundWidth = (backgroundBitmap.width * scale).toInt() + val newBackgroundHeight = (backgroundBitmap.height * scale).toInt() + + // Calculate the offset to center the backgroundBitmap in the outputBitmap + val offsetX = (segmentedWidth - newBackgroundWidth) / 2 + val offsetY = (segmentedHeight - newBackgroundHeight) / 2 + + // Create a transformation matrix to scale and center the backgroundBitmap + val matrix = Matrix() + matrix.postScale(scale, scale) + matrix.postTranslate(offsetX.toFloat(), offsetY.toFloat()) + + // Draw the backgroundBitmap on the canvas with the specified scale and centering + canvas.drawBitmap(backgroundBitmap, matrix, null) + + // Draw the segmentedBitmap on the canvas + canvas.drawBitmap(segmentedBitmap, 0f, 0f, null) + + return outputBitmap + } + + /** + * Creates a bitmap from an array of colors with the specified width and height. + * + * @param colors The array of colors representing the pixel values of the bitmap. 
+     * @param width The width of the bitmap.
+     * @param height The height of the bitmap.
+     * @return The resulting bitmap created from the array of colors.
+     */
+    private fun createBitmapFromColors(colors: IntArray, width: Int, height: Int): Bitmap {
+        return Bitmap.createBitmap(colors, width, height, Bitmap.Config.ARGB_8888)
+    }
+}
\ No newline at end of file
diff --git a/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java b/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java
index 050f8afd..003b7c57 100755
--- a/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java
+++ b/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java
@@ -112,6 +112,8 @@ class GetUserMediaImpl {
   private final SparseArray<MediaRecorderImpl> mediaRecorders = new SparseArray<>();
   private AudioDeviceInfo preferredInput = null;
 
+  private FlutterRTCVirtualBackground flutterRTCVirtualBackground = null;
+
   public void screenRequestPermissions(ResultReceiver resultReceiver) {
     final Activity activity = stateProvider.getActivity();
     if (activity == null) {
@@ -206,9 +208,10 @@ public void onResume() {
     }
   }
 
-  GetUserMediaImpl(StateProvider stateProvider, Context applicationContext) {
+  GetUserMediaImpl(StateProvider stateProvider, Context applicationContext, FlutterRTCVirtualBackground flutterRTCVirtualBackground) {
    this.stateProvider = stateProvider;
    this.applicationContext = applicationContext;
+    this.flutterRTCVirtualBackground = flutterRTCVirtualBackground;
  }
 
  static private void resultError(String method, String error, Result result) {
@@ -509,7 +512,7 @@ protected void onReceiveResult(int requestCode, Bundle resultData) {
         @Override
         public void onStop() {
           super.onStop();
-          // After Huawei P30 and Android 10 version test, the onstop method is called, which will not affect the next process, 
+          // Tested on a Huawei P30 running Android 10: onStop is called here without affecting the next capture session,
           // and there is no need to call the resulterror method
           //resultError("MediaProjection.Callback()", "User revoked permission to capture the screen.", result);
         }
@@ -739,6 +742,9 @@ private ConstraintsMap getUserVideo(ConstraintsMap constraints, MediaStream medi
     PeerConnectionFactory pcFactory = stateProvider.getPeerConnectionFactory();
     VideoSource videoSource = pcFactory.createVideoSource(false);
+
+    flutterRTCVirtualBackground.initialize(videoSource);
+
     String threadName = Thread.currentThread().getName() + "_texture_camera_thread";
     SurfaceTextureHelper surfaceTextureHelper = SurfaceTextureHelper.create(threadName, EglUtils.getRootEglBaseContext());
@@ -804,6 +810,9 @@ private ConstraintsMap getUserVideo(ConstraintsMap constraints, MediaStream medi
   void removeVideoCapturerSync(String id) {
     synchronized (mVideoCapturers) {
+      // Dispose Virtual Background
+      flutterRTCVirtualBackground.dispose();
+
       VideoCapturerInfo info = mVideoCapturers.get(id);
       if (info != null) {
         try {
diff --git a/android/src/main/java/com/cloudwebrtc/webrtc/MethodCallHandlerImpl.java b/android/src/main/java/com/cloudwebrtc/webrtc/MethodCallHandlerImpl.java
index e4270a40..e07774b5 100644
--- a/android/src/main/java/com/cloudwebrtc/webrtc/MethodCallHandlerImpl.java
+++ b/android/src/main/java/com/cloudwebrtc/webrtc/MethodCallHandlerImpl.java
@@ -5,11 +5,12 @@
 import android.app.Activity;
 import android.content.Context;
 import android.content.pm.PackageManager;
+import android.graphics.Bitmap;
+import android.graphics.BitmapFactory;
 import android.graphics.SurfaceTexture;
 import android.hardware.Camera;
 import android.hardware.Camera.CameraInfo;
 import android.media.AudioDeviceInfo;
-import android.media.AudioManager;
 import android.os.Build;
 import android.util.Log;
 import android.util.LongSparseArray;
@@ -34,8 +35,6 @@
 import org.webrtc.AudioTrack;
 import org.webrtc.CryptoOptions;
-import org.webrtc.DefaultVideoEncoderFactory;
-import org.webrtc.DefaultVideoDecoderFactory;
 import org.webrtc.DtmfSender;
 import org.webrtc.EglBase;
 import org.webrtc.IceCandidate;
@@ -70,7 +69,6 @@
 import org.webrtc.audio.JavaAudioDeviceModule;
 
 import java.io.File;
-import java.io.UnsupportedEncodingException;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
@@ -110,6 +108,8 @@ public class MethodCallHandlerImpl implements MethodCallHandler, StateProvider {
    */
   private GetUserMediaImpl getUserMediaImpl;
 
+  private FlutterRTCVirtualBackground flutterRTCVirtualBackground;
+
   private AudioDeviceModule audioDeviceModule;
 
   private FlutterRTCFrameCryptor frameCryptor;
@@ -158,7 +158,8 @@ private void initialize(int networkIgnoreMask) {
     // Initialize EGL contexts required for HW acceleration.
     EglBase.Context eglContext = EglUtils.getRootEglBaseContext();
 
-    getUserMediaImpl = new GetUserMediaImpl(this, context);
+    flutterRTCVirtualBackground = new FlutterRTCVirtualBackground();
+    getUserMediaImpl = new GetUserMediaImpl(this, context, flutterRTCVirtualBackground);
 
     frameCryptor = new FlutterRTCFrameCryptor(this);
     /*
@@ -260,6 +261,28 @@ public void onMethodCall(MethodCall call, @NonNull Result notSafeResult) {
         getUserMedia(constraintsMap, result);
         break;
       }
+      case "enableVirtualBackground": {
+        byte[] image = call.argument("imageBytes");
+        double confidence = call.argument("confidence");
+        Bitmap bgImage = null;
+        if (image != null) {
+          bgImage = BitmapFactory.decodeByteArray(image, 0, image.length);
+        }
+        if (bgImage == null) {
+          // Guard: the Kotlin side expects a non-null Bitmap, so reject missing
+          // or undecodable image bytes instead of crashing at the boundary.
+          resultError("enableVirtualBackground", "Failed to decode background image", result);
+          break;
+        }
+        flutterRTCVirtualBackground.configurationVirtualBackground(bgImage, confidence);
+        result.success(true);
+        break;
+      }
+      case "disableVirtualBackground": {
+        flutterRTCVirtualBackground.setBackgroundIsNull();
+        result.success(true);
+        break;
+      }
       case "createLocalMediaStream":
         createLocalMediaStream(result);
         break;
diff --git a/example/android/app/build.gradle b/example/android/app/build.gradle
index 459b756e..5c8a320d 100644
--- a/example/android/app/build.gradle
+++ b/example/android/app/build.gradle
@@ -39,7 +39,7 @@ android {
         applicationId "com.cloudwebrtc.flutterflutterexample.flutter_webrtc_example"
         // You can update the following values to match your application needs.
         // For more information, see: https://docs.flutter.dev/deployment/android#reviewing-the-gradle-build-configuration.
-        minSdkVersion 21
+        minSdkVersion 23
         targetSdkVersion 31
         versionCode flutterVersionCode.toInteger()
         versionName flutterVersionName
diff --git a/example/android/build.gradle b/example/android/build.gradle
index f7eb7f63..ce647a43 100644
--- a/example/android/build.gradle
+++ b/example/android/build.gradle
@@ -6,7 +6,7 @@ buildscript {
     }
 
     dependencies {
-        classpath 'com.android.tools.build:gradle:7.3.0'
+        classpath 'com.android.tools.build:gradle:7.4.2'
         classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
     }
 }
diff --git a/example/ios/Podfile b/example/ios/Podfile
index ec43b513..06630d1a 100644
--- a/example/ios/Podfile
+++ b/example/ios/Podfile
@@ -1,5 +1,5 @@
 # Uncomment this line to define a global platform for your project
-# platform :ios, '11.0'
+platform :ios, '12.0'
 
 # CocoaPods analytics sends network stats synchronously affecting flutter build latency.
 ENV['COCOAPODS_DISABLE_STATS'] = 'true'
diff --git a/ios/flutter_webrtc.podspec b/ios/flutter_webrtc.podspec
index 61771f74..f9d935e7 100644
--- a/ios/flutter_webrtc.podspec
+++ b/ios/flutter_webrtc.podspec
@@ -16,6 +16,6 @@ A new flutter plugin project.
   s.public_header_files = 'Classes/**/*.h'
   s.dependency 'Flutter'
   s.dependency 'WebRTC-lbc', '116.5845.02'
-  s.ios.deployment_target = '10.0'
+  s.ios.deployment_target = '11.0'
   s.static_framework = true
 end
diff --git a/lib/src/helper.dart b/lib/src/helper.dart
index 84149b2e..352d9476 100644
--- a/lib/src/helper.dart
+++ b/lib/src/helper.dart
@@ -134,4 +134,19 @@ class Helper {
       AppleNativeAudioManagement.setAppleAudioConfiguration(
           AppleNativeAudioManagement.getAppleAudioConfigurationForMode(mode,
               preferSpeakerOutput: preferSpeakerOutput));
+
+  // Virtual Background
+  static Future<void> enableVirtualBackground({
+    required Uint8List backgroundImage,
+    double thresholdConfidence = 0.7,
+  }) async {
+    await WebRTC.invokeMethod("enableVirtualBackground", {
+      "imageBytes": backgroundImage,
+      "confidence": thresholdConfidence,
+    });
+  }
+
+  static Future<void> disableVirtualBackground() async {
+    await WebRTC.invokeMethod("disableVirtualBackground");
+  }
 }
diff --git a/pubspec.yaml b/pubspec.yaml
index bb0e2a28..b30595c4 100644
--- a/pubspec.yaml
+++ b/pubspec.yaml
@@ -1,7 +1,8 @@
 name: flutter_webrtc
 description: Flutter WebRTC plugin for iOS/Android/Desktop/Web, based on GoogleWebRTC.
-version: 0.9.36+2
+version: 0.9.36+4
 homepage: https://github.com/cloudwebrtc/flutter-webrtc
+publish_to: none
 environment:
   sdk: '>=2.12.0 <4.0.0'
   flutter: '>=1.22.0'
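
For reference, a minimal Dart sketch of driving the new API from application code. Only Helper.enableVirtualBackground and Helper.disableVirtualBackground come from this change; the asset path and the toggle wrapper are illustrative assumptions.

import 'package:flutter/services.dart' show rootBundle;
import 'package:flutter_webrtc/flutter_webrtc.dart';

Future<void> setVirtualBackground({required bool enabled}) async {
  if (enabled) {
    // Load the background image as raw bytes; a bundled asset is assumed here.
    final data = await rootBundle.load('assets/background.jpg');
    final bytes =
        data.buffer.asUint8List(data.offsetInBytes, data.lengthInBytes);

    // Pixels whose foreground confidence is >= 0.7 keep the camera image;
    // everything below the threshold is replaced by the supplied background.
    await Helper.enableVirtualBackground(
      backgroundImage: bytes,
      thresholdConfidence: 0.7,
    );
  } else {
    await Helper.disableVirtualBackground();
  }
}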