import React, { useState, useRef, Fragment, useEffect } from "react";
import RecordRTC, { invokeSaveAsDialog } from "recordrtc";
import { Listbox, Transition } from "@headlessui/react";
import { CheckIcon, ChevronUpDownIcon } from "@heroicons/react/20/solid";
import { MicrophoneIcon, PauseIcon } from "@heroicons/react/24/outline";
import { ResumeIcon, TrashIcon } from "@radix-ui/react-icons";
import { StopIcon } from "@heroicons/react/24/solid";
import StopTime from "~/components/StopTime";
import axios from "axios";
import dayjs from "dayjs";
import { useRouter } from "next/router";
import { api } from "~/utils/api";
import fixWebmDuration from "fix-webm-duration";
import { TRPCClientError } from "@trpc/client";
import { useAtom } from "jotai/index";
import paywallAtom from "~/atoms/paywallAtom";
import recordVideoModalOpen from "~/atoms/recordVideoModalOpen";
import { usePostHog } from "posthog-js/react";

interface Props {
  // Closes the surrounding modal (owned by the parent).
  closeModal: () => void;
  // Current recording phase; in practice one of "pre" | "in" | "post"
  // (see setStep below), though typed loosely as string here.
  step: string;
  // React state setter for the recording phase, threaded in from the parent.
  setStep: (
    value:
      | ((prevState: "pre" | "in" | "post") => "pre" | "in" | "post")
      | "pre"
      | "in"
      | "post"
  ) => void;
}

/**
 * Screen-recording widget driven by RecordRTC.
 *
 * Flow: "pre" (pick a microphone) -> handleRecording captures the screen
 * (plus optional mic audio) -> "in" (recording, pause/resume/stop) ->
 * "post" (preview; download locally or upload to signed URLs and navigate
 * to the share page).
 *
 * NOTE(review): the JSX inside `return (...)` at the bottom of this file
 * appears to have had its element markup stripped (only the embedded
 * `{...}` expressions survive). Restore it from version control before
 * relying on this copy.
 */
export default function Recorder({ closeModal, step, setStep }: Props) {
  // Combined MediaStream currently being recorded (mic audio + screen video).
  // NOTE(review): "steam" is presumably a typo for "stream" — worth renaming.
  // Also, useState(null) with no type argument infers `null` under strict TS;
  // these likely need explicit type parameters (e.g. useState<MediaStream | null>).
  const [steam, setStream] = useState(null);
  // Final, duration-fixed webm blob produced after stopping.
  const [blob, setBlob] = useState(null);
  // Active RecordRTC instance; null until a recording starts.
  const recorderRef = useRef(null);
  // True while the recording is paused (drives pause/resume toggle).
  const [pause, setPause] = useState(false);
  // All "audioinput" devices found by enumerateDevices().
  const [audioDevices, setAudioDevices] = useState([]);
  // Microphone chosen in the Listbox; defaults to the first device found.
  const [selectedDevice, setSelectedDevice] = useState(null);
  const router = useRouter();
  const [, setRecordOpen] = useAtom(recordVideoModalOpen);
  // True while the upload round-trip is in flight (disables the submit UI).
  const [submitting, setSubmitting] = useState(false);
  const apiUtils = api.useContext();
  const getSignedUrl = api.video.getUploadUrl.useMutation();
  // Elapsed recording time in seconds, used to patch the webm duration header.
  // NOTE(review): setDuration is never called in the visible code — presumably
  // the (stripped) JSX passes it to <StopTime/>; confirm, otherwise the fixed
  // duration is always 0.
  const [duration, setDuration] = useState(0);
  const [, setPaywallOpen] = useAtom(paywallAtom);
  // Ref to the preview <video> element; used to render the upload thumbnail.
  const videoRef = useRef(null);
  const posthog = usePostHog();

  // Ask for screen capture (and mic, best-effort), merge the tracks into one
  // stream, and start RecordRTC. Moves the UI into the "in" phase.
  // NOTE(review): if the user cancels the screen-share prompt,
  // getDisplayMedia rejects and this async handler rethrows unhandled —
  // consider a try/catch. Also, system audio is requested from
  // getDisplayMedia but its audio tracks are never added to mediaStream
  // nor stopped anywhere.
  const handleRecording = async () => {
    const screenStream = await navigator.mediaDevices.getDisplayMedia({
      video: {
        width: 1920,
        height: 1080,
        frameRate: 30,
      },
      audio: {
        echoCancellation: true,
        noiseSuppression: true,
        sampleRate: 44100,
      },
    });
    let micStream;
    try {
      micStream = await navigator.mediaDevices.getUserMedia({
        audio: { deviceId: selectedDevice?.deviceId },
      });
    } catch (error) {
      // Handle the case where microphone permissions are not granted
      console.error("Failed to access microphone:", error);
    }
    // Merge mic audio (if granted) with the screen's video track.
    const mediaStream = new MediaStream();
    if (micStream) {
      micStream
        .getAudioTracks()
        .forEach((track) => mediaStream.addTrack(track));
    }
    screenStream
      .getVideoTracks()
      .forEach((track) => mediaStream.addTrack(track));
    // Stop the recording when the user ends sharing via the browser chrome
    // (the track's "ended" event) rather than our Stop button.
    const firstVideoTrack = screenStream.getVideoTracks()[0];
    if (firstVideoTrack) {
      firstVideoTrack.addEventListener("ended", () => handleStop());
    }
    setStream(mediaStream);
    recorderRef.current = new RecordRTC(mediaStream, { type: "video" });
    recorderRef.current.startRecording();
    setStep("in");
    posthog?.capture("recorder: start video recording");
  };

  // Stop RecordRTC, patch the webm's missing duration metadata (Chrome's
  // MediaRecorder omits it, breaking seeking), stash the result in `blob`,
  // and release the captured tracks. Advances to the "post" phase.
  const handleStop = () => {
    if (recorderRef.current === null) return;
    recorderRef.current.stopRecording(() => {
      if (recorderRef.current) {
        fixWebmDuration(
          recorderRef.current.getBlob(),
          // fix-webm-duration expects milliseconds; `duration` is in seconds.
          duration * 1000,
          (seekableBlob) => {
            setBlob(seekableBlob);
          }
        );
        // NOTE(review): .map used purely for side effects — forEach reads better.
        steam?.getTracks().map((track) => track.stop());
      }
    });
    setStep("post");
    posthog?.capture("recorder: video recording finished");
  };

  // Discard the in-progress recording: stop the recorder, release tracks,
  // clear the blob, close the modal, and reset to the "pre" phase.
  const handleDelete = () => {
    if (recorderRef.current === null) return;
    setBlob(null);
    recorderRef.current.stopRecording(() => {
      steam?.getTracks().map((track) => track.stop());
    });
    closeModal();
    setStep("pre");
    posthog?.capture("recorder: video deleted");
  };

  // Toggle between paused and recording states on the active recorder.
  const handlePause = () => {
    if (recorderRef.current) {
      console.log(recorderRef.current?.state);
      if (pause) {
        recorderRef.current?.resumeRecording();
      } else {
        recorderRef.current.pauseRecording();
      }
      // `pause` here is the value BEFORE the toggle.
      posthog?.capture("recorder: recording paused/resumed", { pause });
      setPause(!pause);
    }
  };

  // On mount: prompt once for mic permission (so device labels are readable),
  // then enumerate audio inputs and preselect the first one.
  useEffect(() => {
    async function getAudioDevices() {
      try {
        const stream = await navigator.mediaDevices.getUserMedia({
          audio: { echoCancellation: false },
        });
        stream.getTracks().forEach((track) => track.stop()); // release the stream
        const devices = await navigator.mediaDevices.enumerateDevices();
        const audioDevices = devices.filter(
          (device) => device.kind === "audioinput"
        );
        setAudioDevices(audioDevices);
        if (audioDevices[0]) setSelectedDevice(audioDevices[0]);
      } catch (error) {
        // Permission denied or no devices; the Listbox will simply be empty.
        console.error(error);
      }
    }
    void getAudioDevices();
  }, []);

  // Download the recorded blob locally as "Recording - <date>.webm".
  const handleSave = () => {
    if (blob) {
      const dateString =
        "Recording - " + dayjs().format("D MMM YYYY") + ".webm";
      invokeSaveAsDialog(blob, dateString);
    }
    // NOTE(review): fires even when there was no blob to download.
    posthog?.capture("recorder: video downloaded");
  };

  // Draw the preview <video>'s current frame onto a canvas and return it as
  // a Blob (canvas.toBlob defaults to image/png, matching the upload header).
  const generateThumbnail = async (video: HTMLVideoElement) => {
    const canvas = document.createElement("canvas");
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    canvas
      .getContext("2d")
      ?.drawImage(video, 0, 0, canvas.width, canvas.height);
    return await new Promise((resolve) => canvas.toBlob(resolve));
  };

  // Upload the recording: get signed PUT URLs from the tRPC backend, upload
  // video then thumbnail, then navigate to the share page. Paywall and
  // auth errors from the mutation are handled specially; other tRPC errors
  // are silently categorized, non-tRPC errors rethrow.
  // NOTE(review): mixes `await` with a `.then()` chain; PUT failures are only
  // console.error'd, so the user gets no feedback and the video cache is
  // still invalidated below.
  const handleUpload = async () => {
    if (!blob || !videoRef.current) return;
    const dateString =
      "Recording - " + dayjs().format("D MMM YYYY") + ".webm";
    setSubmitting(true);
    try {
      const { signedVideoUrl, signedThumbnailUrl, id } =
        await getSignedUrl.mutateAsync({
          key: dateString,
        });
      await axios
        .put(signedVideoUrl, blob.slice(), {
          headers: { "Content-Type": "video/webm" },
        })
        .then(async () => {
          if (!videoRef.current) return;
          return axios.put(
            signedThumbnailUrl,
            await generateThumbnail(videoRef.current),
            {
              headers: { "Content-Type": "image/png" },
            }
          );
        })
        .then(() => {
          void router.push("share/" + id);
          setRecordOpen(false);
          posthog?.capture("recorder: video uploaded");
        })
        .catch((err) => {
          console.error(err);
        });
    } catch (err) {
      if (err instanceof TRPCClientError) {
        // NOTE(review): matching the backend's full error string is brittle —
        // any copy change breaks the paywall detection; prefer an error code.
        if (
          err.message ===
          "Sorry, you have reached the maximum video upload limit on our free tier. Please upgrade to upload more."
        ) {
          posthog?.capture("recorder: video upload paywall hit");
          setPaywallOpen(true);
        } else if (err.message === "UNAUTHORIZED") {
          // Open sign-in in a popup that closes itself after auth.
          window.open(
            `/sign-in?redirect=${encodeURIComponent("/window-close")}`,
            "Sign In",
            "width=500,height=500"
          );
          posthog?.capture("recorder: guest tried to upload");
        }
      } else {
        throw err;
      }
    } finally {
      setSubmitting(false);
    }
    void apiUtils.video.getAll.invalidate();
  };

  // NOTE(review): the JSX below is incomplete — element tags were lost in
  // extraction; only the embedded expressions remain. Do not edit in place;
  // recover the markup from version control.
  return (
{step === "pre" ? (
{audioDevices.map((audioDevice, i) => ( `relative cursor-default select-none py-2 pl-10 pr-4 text-gray-900 ${ active ? "bg-gray-200" : "" }` } value={audioDevice} > {({ selected }) => ( <> {audioDevice.label} {selected ? ( ) : null} )} ))}
) : null} {step === "in" ? (
{pause ? (
) : null} {step === "post" ? (
{blob ? (
) : null}
  );
}