I'm working on a project where I want to try on a 3D glasses model in real time. The problem is I'm unable to set the model in its right position, and it does not follow face movements. I have successfully detected the face using Mediapipe, and it works fine.
Here's the code I implemented for this function:
import * as THREE from 'https://unpkg.com/[email protected]/build/three.module.js';
import { OrbitControls } from 'https://unpkg.com/[email protected]/examples/jsm/controls/OrbitControls.js';
import { GLTFLoader } from 'https://unpkg.com/[email protected]/examples/jsm/loaders/GLTFLoader.js';
// Get the webcam video element
const webcamElement = document.getElementById('webcam');
// Get the Three.js rendering container
const container = document.getElementById('scene-container');
// Variable to track the video state
let isVideo = false;
let glassesModel;
let eyesDetected = false;
// Check if the webcam element is present
if (!webcamElement) {
console.error("Webcam element not found.");
}
const canvas = document.createElement('canvas');
document.getElementById('webgl-canvas').appendChild(canvas);
// Set alpha to true for a transparent background
const renderer = new THREE.WebGLRenderer({ antialias: true, alpha: true });
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.setClearColor(0x000000, 0); // Set background color to transparent
renderer.setPixelRatio(window.devicePixelRatio);
document.body.appendChild(renderer.domElement);
const scene = new THREE.Scene();
const camera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 1, 1000);
camera.position.set(4, 5, 11);
camera.lookAt(0, 0, 0);
const groundGeometry = new THREE.PlaneGeometry(20, 20, 32, 32);
groundGeometry.rotateX(-Math.PI / 2);
const groundMaterial = new THREE.MeshStandardMaterial({
color: 0x555555,
side: THREE.DoubleSide,
transparent: true,
opacity: 0.5
});
const groundMesh = new THREE.Mesh(groundGeometry, groundMaterial);
scene.add(groundMesh);
const spotLight = new THREE.SpotLight(0xffffff, 3, 100, 0.2, 0.5);
spotLight.position.set(0, 25, 0);
scene.add(spotLight);
function loadGlassesModel() {
const loader = new GLTFLoader();
loader.load('/static/models/sport_glasses_b307/scene.gltf', (gltf) => {
const mesh = gltf.scene;
mesh.position.set(0, 2, 2);
const scaleFactor = 0.04; // Adjust this value as needed
mesh.scale.set(scaleFactor, 0.05, scaleFactor);
scene.add(mesh);
});
}
function animate() {
requestAnimationFrame(animate);
controls.update();
renderer.render(scene, camera);
}
const controls = new OrbitControls(camera, renderer.domElement);
controls.enableDamping = true;
controls.dampingFactor = 0.25;
controls.screenSpacePanning = false;
controls.maxPolarAngle = Math.PI / 2;
animate();
// Async function to start the webcam
async function startWebcam() {
const stream = await navigator.mediaDevices.getUserMedia({ video: true });
webcamElement.srcObject = stream;
await new Promise((resolve) => {
webcamElement.onloadedmetadata = () => {
resolve();
};
});
isVideo = true;
await new Promise(resolve => requestAnimationFrame(resolve));
loadGlassesModel();
}
// Handle webcam switch state change
$("#webcam-switch").change(function () {
if (this.checked) {
startWebcam();
} else {
stopWebcam();
}
});
// Function to stop the webcam
function stopWebcam() {
if (isVideo) {
webcamElement.srcObject.getTracks().forEach(track => track.stop());
isVideo = false;
console.log("Webcam stopped");
}
}
// Async function to capture and send the image to Django
async function captureAndSendToDjango() {
if (isVideo && webcamElement) {
try {
const imageData = await webcamToImageData(webcamElement);
console.log("Image data:", imageData);
await sendImageToDjango(imageData);
} catch (error) {
console.error("Error capturing and sending image to Django:", error);
}
}
requestAnimationFrame(captureAndSendToDjango);
}
// Async function to convert webcam data to image data
async function webcamToImageData(webcamElement) {
return new Promise((resolve) => {
const canvas = document.createElement('canvas');
canvas.width = webcamElement.videoWidth;
canvas.height = webcamElement.videoHeight;
const context = canvas.getContext('2d');
context.drawImage(webcamElement, 0, 0, canvas.width, canvas.height);
const base64data = canvas.toDataURL('image/png').split(',')[1];
resolve(base64data);
});
}
// Function to draw landmarks on the canvas
function drawLandmarks(response) {
const canvas = document.getElementById('facemesh-canvas');
const context = canvas.getContext('2d');
context.clearRect(0, 0, canvas.width, canvas.height);
const landmarksArray = response.landmarks;
if (landmarksArray && landmarksArray.length > 0) {
context.fillStyle = 'red';
const cameraOffsetX = 0;
const cameraOffsetY = 0;
let leftEyeLandmarks;
let rightEyeLandmarks;
for (const landmarkList of landmarksArray) {
for (const landmark of landmarkList) {
const x = landmark[0] + cameraOffsetX;
const y = landmark[1] + cameraOffsetY;
context.beginPath();
context.arc(x, y, 1, 0, 2 * Math.PI);
context.fill();
console.log(`Eye coordinates: x=${x}, y=${y}`);
if (landmark[5] && landmark[5] === 'left_eye') {
leftEyeLandmarks = landmarkList;
} else if (landmark[5] && landmark[5] === 'right_eye') {
rightEyeLandmarks = landmarkList;
}
}
}
if (leftEyeLandmarks && rightEyeLandmarks) {
console.log("Eyes detected!");
eyesDetected = true;
const averageEyePosition = [
(leftEyeLandmarks[3][0] + rightEyeLandmarks[0][0]) / 2,
(leftEyeLandmarks[3][1] + rightEyeLandmarks[0][1]) / 2,
];
if (glassesModel) {
glassesModel.position.set(
(averageEyePosition[0] + cameraOffsetX) * scaleFactorX,
(averageEyePosition[1] + cameraOffsetY) * scaleFactorY,
-5
);
if (loadGlassesModel) {
loadGlassesModel.visible = eyesDetected;
}
}
} else {
eyesDetected = false;
if (glassesModel) {
glassesModel.visible = false;
}
}
} else {
console.error("No landmarks found in the response.");
}
}
// Async function to send the image to Django via AJAX
I'm facing issues with setting the glasses model in the correct position and making it follow face movements. Any suggestions on how to fix this problem would be greatly appreciated.