In my application, I intend to run MediaPipe PoseLandmarker while a video plays in Dash Player.
This works, but the Dash Player video keeps buffering because PoseLandmarker is processing and rendering the webcam feed to detect posture.
import { useCallback, useEffect, useRef, useState } from "react";
import { DrawingUtils, FilesetResolver, PoseLandmarker } from "@mediapipe/tasks-vision";
import { Card, CardContent, Typography } from "@mui/material"; // UI components (MUI assumed)
import { useVideoStore } from "./videoStore"; // local video-state store (import path assumed)
const LANDMARKS = {};
export default function PoseDetector() {
const lastVideoTimeRef = useRef(-1); // ref instead of state so the rAF loop always reads the latest value
const [poseLandmarker, setPoseLandmarker] = useState(null);
const webcamRunningRef = useRef(false);
const videoRef = useRef(null);
const canvasRef = useRef(null);
const canvasCtxRef = useRef(null);
const enableWebcamButtonRef = useRef(null);
const [globalMessage, setGlobalMessage] = useState(null);
const [globalScore, setGlobalScore] = useState(0);
const [startDetector, setStartDetector] = useState(false);
const handleScoreUpdate = (value) => {
setGlobalScore((prevScore) => prevScore + value);
};
const [videoStarted] = useVideoStore((state) => [state.videoStarted]);
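// Once the Dash video starts playing, enable the webcam and begin pose detection after a short delay.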
useEffect(() => {
if (videoStarted) {
enableCam();
const timeoutId = setTimeout(() => {
setStartDetector(true);
}, 1000); // 1000 milliseconds = 1 second delay before starting detection
return () => clearTimeout(timeoutId);
}
}, [videoStarted]);
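// Load the MediaPipe pose landmarker model once on mount.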
useEffect(() => {
const createPoseLandmarker = async () => {
const vision = await FilesetResolver.forVisionTasks(
"https://cdn.jsdelivr.net/npm/@mediapipe/tasks-...@0.10.0/wasm"
);
const landmarker = await PoseLandmarker.createFromOptions(vision, {
baseOptions: {
modelAssetPath: `https://storage.googleapis.com/mediapipe-models/pose_landmarker/pose_landmarker_lite/float16/1/pose_landmarker_lite.task`,
delegate: "GPU",
},
runningMode: "VIDEO",
numPoses: 2,
});
setPoseLandmarker(landmarker);
};
createPoseLandmarker();
}, []);
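// Ask for webcam access and attach the stream to the preview <video> element.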
const startWebcam = () => {
const constraints = { video: true };
navigator.mediaDevices.getUserMedia(constraints).then((stream) => {
videoRef.current.srcObject = stream;
videoRef.current.style.display = "block";
videoRef.current.play();
});
};
useEffect(() => {
if (startDetector) {
predictWebcam();
}
}, [startDetector]);
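// Placeholder for per-frame landmark processing (left empty in this snippet).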
const processingFunc = (landmarks) => {};
const speakMessage = useCallback((message) => {
const utterance = new SpeechSynthesisUtterance(message);
window.speechSynthesis.speak(utterance);
}, []);
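// Detection loop: runs pose detection on the current webcam frame, draws the landmarks, then re-schedules itself via requestAnimationFrame.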
const predictWebcam = async () => {
const videoHeight = 360;
const videoWidth = 480;
const canvasElement = canvasRef.current;
const video = videoRef.current;
const canvasCtx = canvasElement.getContext("2d");
const drawingUtils = new DrawingUtils(canvasCtx);
canvasElement.height = videoHeight;
canvasElement.width = videoWidth;
video.height = videoHeight;
video.width = videoWidth;
if (!poseLandmarker) return;
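// Ensure the landmarker is in VIDEO mode; note this setOptions call runs on every loop iteration.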
await poseLandmarker.setOptions({ runningMode: "VIDEO" });
const startTimeMs = performance.now();
if (video.currentTime !== lastVideoTimeRef.current) {
lastVideoTimeRef.current = video.currentTime;
poseLandmarker.detectForVideo(video, startTimeMs, (result) => {
canvasCtx.save();
canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
for (const landmark of result.landmarks) {
processingFunc(landmark);
drawingUtils.drawLandmarks(landmark, {
radius: (data) => DrawingUtils.lerp(data.from.z, -0.15, 0.1, 5, 1),
});
drawingUtils.drawConnectors(
landmark,
PoseLandmarker.POSE_CONNECTIONS
);
}
canvasCtx.restore();
});
}
if (webcamRunningRef.current) {
window.requestAnimationFrame(predictWebcam);
}
};
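// Toggles the webcam-running flag; starts the camera stream the first time it is enabled.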
const enableCam = (event) => {
if (webcamRunningRef.current) {
webcamRunningRef.current = false;
} else {
webcamRunningRef.current = true;
startWebcam();
}
};
return (
<>
<Card>
<CardContent>
<div>
<div>
<h6>Score</h6>
<Typography>
{globalScore.toFixed(2)}
</Typography>
</div>
<div>
<h6>Message</h6>
<Typography>
{globalMessage ? globalMessage : "---"}
</Typography>
</div>
</div>
</CardContent>
</Card>
<div>
<div>
<video
id="webcam"
ref={videoRef}
autoPlay
muted
></video>
<canvas
id="output_canvas"
ref={canvasRef}
></canvas>
</div>
</div>
</>
);
}
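For illustration, here is a minimal sketch of throttling the detection loop (reusing poseLandmarker, videoRef, and webcamRunningRef from the component above) so that detectForVideo is not called on every animation frame; the 100 ms interval is an arbitrary assumption and I have not verified that this alone fixes the buffering:

// Sketch: run detection at roughly 10 fps instead of on every animation frame.
// The 100 ms interval is an arbitrary, untested value.
const lastDetectionMsRef = useRef(0);
const throttledPredict = () => {
const now = performance.now();
if (poseLandmarker && now - lastDetectionMsRef.current >= 100) {
lastDetectionMsRef.current = now;
poseLandmarker.detectForVideo(videoRef.current, now, (result) => {
// ...same landmark drawing as in predictWebcam above...
});
}
if (webcamRunningRef.current) {
window.requestAnimationFrame(throttledPredict);
}
};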