sentis-iris-landmark / RunIris.cs
using UnityEngine;
using Unity.Sentis;
using UnityEngine.Video;
using UnityEngine.UI;
using System.Collections.Generic;
/*
* Iris Inference
* ==============
*
* Basic inference script for the iris landmark model.
*
* Put this script on the Main Camera.
* Put iris_landmark.sentis in the Assets/StreamingAssets folder.
* Create a RawImage in the scene and assign it to previewUI.
* Put a video in the Assets/StreamingAssets folder and put its name in videoName,
* or assign a test image to inputImage.
* Set inputType to the appropriate input source.
*/
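// Note: iris_landmark.sentis is assumed to be the MediaPipe iris landmark model converted
// to Sentis format: a 64x64 RGB input producing eye/brow contour and iris landmark outputs.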
public class RunIris : MonoBehaviour
{
// Drag a reference to the preview RawImage here:
public RawImage previewUI = null;
public enum InputType { Image, Video, Webcam };
public string videoName = "chatting.mp4";
// Input image for neural network
public Texture2D inputImage;
public InputType inputType = InputType.Video;
Vector2Int resolution = new Vector2Int(640, 640);
WebCamTexture webcam;
VideoPlayer video;
const BackendType backend = BackendType.GPUCompute;
RenderTexture targetTexture;
Texture2D canvasTexture;
const int markerWidth = 5;
Color32[] markerPixels;
IWorker worker;
// Width and height of the model input image (the model expects 64x64)
const int size = 64;
Ops ops;
ITensorAllocator allocator;
Model model;
// Webcam device name ("" selects the default device):
const string deviceName = "";
bool closing = false;
void Start()
{
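// A caching allocator reuses tensor memory across frames instead of reallocating it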
allocator = new TensorCachingAllocator();
//(Note: if using a webcam on mobile, request camera permissions here first)
SetupTextures();
SetupInput();
SetupModel();
SetupEngine();
SetupMarkers();
}
void SetupModel()
{
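// Load the compiled .sentis model from StreamingAssets at runtime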
model = ModelLoader.Load(Application.streamingAssetsPath + "/iris_landmark.sentis");
}
public void SetupEngine()
{
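// Create the inference worker and an Ops object (for tensor math) on the same backend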
worker = WorkerFactory.CreateWorker(backend, model);
ops = WorkerFactory.CreateOps(backend, allocator);
}
void SetupTextures()
{
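// targetTexture receives each input frame on the GPU; canvasTexture is a CPU-side copy used for drawing markers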
targetTexture = new RenderTexture(resolution.x, resolution.y, 0);
canvasTexture = new Texture2D(targetTexture.width, targetTexture.height);
previewUI.texture = targetTexture;
}
void SetupMarkers()
{
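// Build a markerWidth x markerWidth white square with a black center pixel to stamp at each landmark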
markerPixels = new Color32[markerWidth * markerWidth];
for (int n = 0; n < markerWidth * markerWidth; n++)
{
markerPixels[n] = Color.white;
}
int center = markerWidth / 2;
markerPixels[center * markerWidth + center] = Color.black;
}
void SetupInput()
{
switch (inputType)
{
case InputType.Webcam:
{
webcam = new WebCamTexture(deviceName, resolution.x, resolution.y);
webcam.requestedFPS = 30;
webcam.Play();
break;
}
case InputType.Video:
{
video = gameObject.AddComponent<VideoPlayer>();
video.renderMode = VideoRenderMode.APIOnly;
video.source = VideoSource.Url;
video.url = Application.streamingAssetsPath + "/" + videoName;
video.isLooping = true;
video.Play();
break;
}
default:
{
Graphics.Blit(inputImage, targetTexture);
break;
}
}
}
void Update()
{
if (inputType == InputType.Webcam)
{
// Format the webcam input
if (!webcam.didUpdateThisFrame) return;
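// Fit the webcam frame into the target texture: scale x to preserve the aspect ratio
// and flip vertically if the platform mirrors the feed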
var aspect1 = (float)webcam.width / webcam.height;
var aspect2 = (float)resolution.x / resolution.y;
var gap = aspect2 / aspect1;
var vflip = webcam.videoVerticallyMirrored;
var scale = new Vector2(gap, vflip ? -1 : 1);
var offset = new Vector2((1 - gap) / 2, vflip ? 1 : 0);
Graphics.Blit(webcam, targetTexture, scale, offset);
}
if (inputType == InputType.Video)
{
var aspect1 = (float)video.width / video.height;
var aspect2 = (float)resolution.x / resolution.y;
var gap = aspect2 / aspect1;
var vflip = false;
var scale = new Vector2(gap, vflip ? -1 : 1);
var offset = new Vector2((1 - gap) / 2, vflip ? 1 : 0);
Graphics.Blit(video.texture, targetTexture, scale, offset);
}
if (inputType == InputType.Image)
{
Graphics.Blit(inputImage, targetTexture);
}
if (Input.GetKeyDown(KeyCode.Escape))
{
closing = true;
Application.Quit();
}
if (Input.GetKeyDown(KeyCode.P))
{
previewUI.enabled = !previewUI.enabled;
}
}
void LateUpdate()
{
if (!closing)
{
RunInference(targetTexture);
}
}
void RunInference(Texture source)
{
var transform = new TextureTransform();
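// Resize the frame to 64x64 RGB and lay the tensor out as (batch, channels, height, width)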
transform.SetDimensions(size, size, 3);
transform.SetTensorLayout(0, 1, 2, 3);
using var image0 = TextureConverter.ToTensor(source, transform);
// Pre-process: Mad computes image0 * 2 - 1, remapping pixel values from (0..1) to (-1..1)
using var image = ops.Mad(image0, 2f, -1f);
worker.Execute(image);
using var eyeLandmarks = worker.PeekOutput("output_eyes_contours_and_brows") as TensorFloat;
using var irisLandmarks = worker.PeekOutput("output_iris") as TensorFloat;
float scaleX = targetTexture.width * 1f / size;
float scaleY = targetTexture.height * 1f / size;
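// Download the landmark tensors from the GPU so their values can be indexed on the CPU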
eyeLandmarks.MakeReadable();
irisLandmarks.MakeReadable();
//Draw the markers
RenderTexture.active = targetTexture;
canvasTexture.ReadPixels(new Rect(0, 0, targetTexture.width, targetTexture.height), 0, 0);
DrawLandmarks(irisLandmarks, scaleX, scaleY);
DrawLandmarks(eyeLandmarks, scaleX, scaleY);
canvasTexture.Apply();
Graphics.Blit(canvasTexture, targetTexture);
RenderTexture.active = null;
}
void DrawLandmarks(TensorFloat landmarks, float scaleX, float scaleY)
{
int numLandmarks = landmarks.shape[1] / 3; // 3 values (x,y,z) per landmark: 71 eye/brow contour points or 5 iris points
for (int n = 0; n < numLandmarks; n++)
{
int px = (int)(landmarks[ 0, n * 3 + 0] * scaleX) - (markerWidth - 1) / 2;
int py = (int)(landmarks[ 0, n * 3 + 1] * scaleY) - (markerWidth - 1) / 2;
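// the z value is the model's depth estimate; it is not used when stamping 2D markers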
int pz = (int)(landmarks[ 0, n * 3 + 2] * scaleX);
int destX = Mathf.Clamp(px, 0, targetTexture.width - 1 - markerWidth);
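// flip y: the model's origin is top-left while SetPixels32 uses a bottom-left origin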
int destY = Mathf.Clamp(targetTexture.height - 1 - py, 0, targetTexture.height - 1 - markerWidth);
canvasTexture.SetPixels32(destX, destY, markerWidth, markerWidth, markerPixels);
}
}
void CleanUp()
{
closing = true;
ops?.Dispose();
allocator?.Dispose();
if (webcam) Destroy(webcam);
if (video) Destroy(video);
RenderTexture.active = null;
targetTexture.Release();
worker?.Dispose();
worker = null;
}
void OnDestroy()
{
CleanUp();
}
}