본문 바로가기

GameDevelopmentDiary/UnrealDiary

Test Selfie segmentation on unreal editor

모바일 사용(Google IP Web Cam)

 

1.0 버전

  실행 순서: IP Webcam 실행 - 제작한 MediaPipe 웹 실행 - NDI 실행 및 웹 페이지 스크린 캡처 - 언리얼 실행 - url 입력

  방식: 원본 이미지와, 웹 페이지의 변환 이미지를 언리얼에서 합성.

  결과: 굉장히 느리고 실행 과정이 복잡.

 

JS code

더보기
// main.js

// HTMLElements setting: the <video> input element, the output <canvas>,
// its 2D drawing context, and the start/close toggle button.
const inputElement = document.getElementsByClassName('input_video')[0];
const outputElement = document.getElementsByClassName('output_canvas')[0];
const canvasCtx = outputElement.getContext('2d');
const selfieButtonElement = document.getElementById("selfieButton");

// selfieSegmentation setting: resolve the MediaPipe model/wasm files
// from the jsDelivr CDN instead of bundling them locally.
const selfieSegmentation = new SelfieSegmentation({locateFile: (file) => {
    return `https://cdn.jsdelivr.net/npm/@mediapipe/selfie_segmentation/${file}`;
}});

// NOTE(review): modelSelection 1 presumably picks MediaPipe's landscape
// model — confirm against the Selfie Segmentation docs.
selfieSegmentation.setOptions({
    modelSelection: 1,
});

// Composite each segmentation result onto the output canvas. The order of
// the compositing operations below is significant: mask first, then a
// solid fill clipped to the mask, then the camera frame behind both.
selfieSegmentation.onResults((results)=>{
    outputElement.style.display = "block";

    canvasCtx.save();
    canvasCtx.clearRect(0, 0, outputElement.width, outputElement.height);
    canvasCtx.drawImage(results.segmentationMask, 0, 0,
                        outputElement.width, outputElement.height);

    // Only overwrite existing pixels: fills the masked (person) area
    // with solid chroma green.
    canvasCtx.globalCompositeOperation = 'source-in';
    canvasCtx.fillStyle = '#00FF00';
    canvasCtx.fillRect(0, 0, outputElement.width, outputElement.height);

    // Only overwrite missing pixels: the camera frame becomes the
    // background behind the green silhouette.
    canvasCtx.globalCompositeOperation = 'destination-atop';
    canvasCtx.drawImage(
        results.image, 0, 0, outputElement.width, outputElement.height);
    canvasCtx.restore();

    inputElement.style.display = "none";

    // While the button reads "close" the loop is live: schedule the next
    // frame send. The 1 ms timeout yields control back to the browser
    // between frames instead of recursing synchronously.
    if(selfieButtonElement.textContent == "close"){
        setTimeout(() => {
            sendFunc(inputElement);
        }, 1);
    } else {
        // Stopped: show the raw video again and hide the canvas.
        inputElement.style.display = "block";
        outputElement.style.display = "none";
    }
});

// Forward one frame (an HTML video/image element) to the MediaPipe
// pipeline; resolves once the frame has been submitted.
async function sendFunc(frame) {
    await selfieSegmentation.send({image: frame});
}

// Toggle the button label between "start" and "close", then kick off a
// segmentation pass for the current video frame.
selfieButtonElement.onclick = () => {
    if (selfieButtonElement.textContent == "start") {
        selfieButtonElement.textContent = "close";
    } else {
        selfieButtonElement.textContent = "start";
    }
    sendFunc(inputElement);
}

// CORS setting: request the remote IP Webcam stream with an anonymous
// crossOrigin attribute so drawing it does not taint the canvas.
inputElement.setAttribute("crossOrigin", "");

 

1.1 버전

  실행 순서: IP Webcam 실행 - 제작한 MediaPipe 웹 실행 - NDI 실행 및 웹 페이지 스크린 캡처 - 언리얼 실행

  방식: 합성 부분을 MediaPipe 부분에 추가. 원본 이미지를 받아올 필요 없어짐.

  결과: 실행순서 변화가 거의 없고, 여전히 NDI를 거친 후 매우 느림.

 

1.2 버전

  실행 순서: IP Webcam 실행 - 언리얼 실행 - url 입력

  방식: MediaPipe JS 사용을 Python 사용으로 변환.

  결과: 사용 난이도 많이 쉬워짐. 속도는 빨라졌지만, Tick마다 실행 하더라도 Selfie Segmentation 거치면서 딜레이 있음.

           파이썬 플러그인은 에디터용이기에 모바일 빌드에 적합치 않다. 때문에 사용자의 진입장벽 높아질 수 있음.

 

UnrealPython 플러그인 튜토리얼: 

https://www.youtube.com/watch?v=K-NOsXHanLE 

https://www.youtube.com/playlist?list=PLBLmKCAjA25Br8cOVzUroqi_Nwipg-IdP 

 

Python code

더보기
# main.py

# stdlib
import urllib
import urllib.error    # URLError is caught in can_url()
import urllib.request  # `import urllib` alone does not expose urllib.request

# third-party
import cv2
import mediapipe as mp
import numpy as np

# Unreal Editor embedded Python API (editor-only)
import unreal


# Default/fallback camera URL used until a real IP Webcam URL is supplied.
target_url = "https://www.google.com"


def can_url(url):
    """Probe *url* for reachability and publish the result to Unreal.

    On success the URL is stored on ``selfie.target_url`` (read later by
    ``selfie()``) and ``unreal.CppLib.set_url(True)`` is called.  On a
    connection failure the target falls back to a known-good URL and
    ``set_url(False)`` is called.  Always returns ``None``.
    """
    print('start can_url')
    try:
        # Keep the try body minimal, and close the probe response instead
        # of leaking the socket (the original left it open and unused).
        with urllib.request.urlopen(url):
            pass
    except urllib.error.URLError:
        print("Check your URL")
        selfie.target_url = "https://www.google.com"
        unreal.CppLib.set_url(False)
        return None
    print("connected " + url)
    selfie.target_url = url
    unreal.CppLib.set_url(True)


def selfie():
    """Fetch one frame from the IP Webcam, run MediaPipe selfie
    segmentation, blur the mask edges, and re-import the composite into
    Unreal as the ``Test`` texture asset under ``/Game/Textures``.
    """
    # --- output locations ---------------------------------------------------
    project_path = unreal.Paths.project_dir()
    texture_path = project_path + 'Content/Textures/'

    # Warn (but continue) when the expected locations are missing.
    # The original tested `unreal.DirectoryPath(path)`, which constructs a
    # struct and is always truthy, so the warnings could never fire; it
    # also printed the wrong path in both messages.
    if not os.path.isdir(texture_path):
        print('you should create: {}'.format(texture_path))
    if not os.path.isfile(texture_path + 'Test.uasset'):
        print('you should create: {}'.format(texture_path + 'Test.uasset'))

    # Fall back to the module-level default when can_url() has not run yet,
    # so we do not crash on a missing `selfie.target_url` attribute.
    url = getattr(selfie, 'target_url', target_url)

    # --- selfie segmentation ------------------------------------------------
    mp_selfie_segmentation = mp.solutions.selfie_segmentation
    with mp_selfie_segmentation.SelfieSegmentation(
            model_selection=1) as selfie_segmentation:
        # Grab a single JPEG frame from the camera URL; close the HTTP
        # response promptly instead of leaking it.
        with urllib.request.urlopen(url) as res:
            img_np = np.array(bytearray(res.read()), dtype=np.uint8)
        img_origin = cv2.imdecode(img_np, -1)

        # Flip horizontally for a selfie-view display and convert BGR -> RGB
        # (MediaPipe expects RGB input).
        image_cv = cv2.cvtColor(cv2.flip(img_origin, 1), cv2.COLOR_BGR2RGB)

        # Mark the frame read-only so it can be processed by reference.
        image_cv.flags.writeable = False
        result = selfie_segmentation.process(image_cv)
        image_cv.flags.writeable = True

        image_cv = cv2.cvtColor(image_cv, cv2.COLOR_RGB2BGR)

        # Keep person pixels where mask confidence > 0.9, white elsewhere.
        binary_mask = result.segmentation_mask > 0.9
        binary_mask3 = np.dstack((binary_mask, binary_mask, binary_mask))
        img_selfie = np.where(binary_mask3, image_cv, 255)
        blurred_img = cv2.GaussianBlur(img_selfie, (21, 21), 0)
        blurred_mask = np.zeros(img_selfie.shape, np.uint8)

        # Draw thick contours around the bright (background) region; these
        # strips mark where the blurred image replaces the sharp one, which
        # softens the hard mask edge.
        gray = cv2.cvtColor(img_selfie, cv2.COLOR_BGR2GRAY)
        thresh = cv2.threshold(gray, 168, 255, cv2.THRESH_BINARY)[1]
        contours, _ = cv2.findContours(
            thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(blurred_mask, contours, -1, (255, 255, 255), 20)

        # Composite and write the result to disk.
        img_output = np.where(
            blurred_mask == np.array([255, 255, 255]), blurred_img, img_selfie)
        cv2.imwrite(texture_path + 'selfie_sample.jpg', img_output)

        # Re-import the jpg as /Game/Textures/Test inside the editor.
        texture_task = build_import_task(
            texture_path + 'selfie_sample.jpg', '/Game/Textures')
        execute_import_tasks([texture_task])



def build_import_task(filename, destination_path):
    """Build an AssetImportTask that imports *filename* as the asset
    named 'Test' under *destination_path*, replacing any existing asset
    without prompting and without saving the package.
    """
    task = unreal.AssetImportTask()
    # Table-driven property setup keeps the import configuration in one place.
    properties = {
        'automated': True,
        'destination_name': 'Test',
        'destination_path': destination_path,
        'filename': filename,
        'replace_existing': True,
        'save': False,
    }
    for prop, value in properties.items():
        task.set_editor_property(prop, value)
    return task


def execute_import_tasks(tasks):
    """Run the given list of AssetImportTask objects through the editor's
    AssetTools and log completion."""
    asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
    asset_tools.import_asset_tasks(tasks)
    print('imported Task(s)')

 

 

언리얼에서 파이썬 스크립트 실행 및 스크립트에서 실행될 함수.

더보기
// CppLib.cpp

#include "CppLib.h"
#include "Kismet/GameplayStatics.h"
#include "../Plugins/Experimental/PythonScriptPlugin/Source/PythonScriptPlugin/Private/PythonScriptPlugin.h"


// Executes a Python command/script string through the editor's
// PythonScriptPlugin (editor-only functionality).
void UCppLib::ExecutePythonScript(FString PythonScript) {
	FPythonScriptPlugin::Get()->ExecPythonCommand(*PythonScript);
}


// Unreal-side callback invoked from the Python script: pushes the URL
// reachability flag onto every tagged actor in the current editor world.
// NOTE(review): `TargetClass` is used both as a class argument and as a
// C++ type below — this reads like a placeholder for a concrete actor
// class; confirm it compiles as written.
void UCppLib::SetProperty_CanURL(bool canValue)
{
	UE_LOG(LogTemp, Warning, TEXT("connection is %s"), *FString(canValue ? "True" : "False"));
	TArray<AActor*> actors;
	// Only meaningful inside the editor with an active level viewport.
	if (GEditor != nullptr && GCurrentLevelEditingViewportClient != nullptr) {
		FWorldContext* world = GEngine->GetWorldContextFromGameViewport(GEngine->GameViewport);
		// Gather all actors of the target class carrying the "TargetName" tag.
		UGameplayStatics::GetAllActorsOfClassWithTag(world->World(), TargetClass, "TargetName", actors);

		for (AActor* actor : actors)
		{
			// NOTE(review): the Cast result is dereferenced without a null
			// check — a failed cast would crash here; confirm intended.
			TargetClass* target = Cast<TargetClass>(actor);
			target->bCanURL = canValue;
		}
	}
}

 

 

활용 방안.

 - 모바일의 장점을 살리려면 그냥 구글 meet 또는 스냅챗처럼 만드는게 나아보인다.

 - 그냥 배경보다 나은 점은 사람 앞에 파티클 날릴 수 있음