Interactive Blob Reacting to Microphone and File Sound with Controls

Answered using ChatGPT 4o by ChatGPT4o

Question:

An interactive blob that reacts to the microphone's voice and to sound from a file, with play and stop buttons; HTML, CSS, JS

Answer:

Creating an interactive blob that reacts to microphone input and to audio from a file, with play and stop buttons, involves several steps. Below is a simple implementation using HTML, CSS, and JavaScript. This example uses the Web Audio API to capture microphone input and play audio files, and the Canvas API to draw and animate the blob.

HTML

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Interactive Blob</title>
    <link rel="stylesheet" href="styles.css">
</head>
<body>
    <div class="controls">
        <button id="startMic">Start Microphone</button>
        <button id="stopMic">Stop Microphone</button>
        <input type="file" id="audioFile" accept="audio/*">
        <button id="playFile">Play File</button>
        <button id="stopFile">Stop File</button>
    </div>
    <canvas id="blobCanvas"></canvas>
    <script src="script.js"></script>
</body>
</html>

CSS (styles.css)

body {
    display: flex;
    flex-direction: column;
    align-items: center;
    justify-content: center;
    height: 100vh;
    margin: 0;
    background-color: #282c34;
    color: white;
    font-family: Arial, sans-serif;
}

.controls {
    margin-bottom: 20px;
}

canvas {
    border: 1px solid white;
}

JavaScript (script.js)

const canvas = document.getElementById('blobCanvas');
const ctx = canvas.getContext('2d');
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;

// Shared Web Audio state, reused by both microphone and file playback
let audioContext;
let analyser;
let dataArray;
let source;
let animationId;

const startMicButton = document.getElementById('startMic');
const stopMicButton = document.getElementById('stopMic');
const audioFileInput = document.getElementById('audioFile');
const playFileButton = document.getElementById('playFile');
const stopFileButton = document.getElementById('stopFile');

startMicButton.addEventListener('click', startMicrophone);
stopMicButton.addEventListener('click', stopMicrophone);
playFileButton.addEventListener('click', playFile);
stopFileButton.addEventListener('click', stopFile);

function startMicrophone() {
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(stream => {
            audioContext = new (window.AudioContext || window.webkitAudioContext)();
            analyser = audioContext.createAnalyser();
            // Route the mic stream through the analyser only; it is not connected
            // to the destination, so the microphone is not played back aloud
            source = audioContext.createMediaStreamSource(stream);
            source.connect(analyser);
            analyser.fftSize = 256;
            const bufferLength = analyser.frequencyBinCount;
            dataArray = new Uint8Array(bufferLength);
            // Cancel any previous animation loop before starting a new one
            cancelAnimationFrame(animationId);
            animate();
        })
        .catch(err => console.error('Error accessing microphone:', err));
}

function stopMicrophone() {
    // Only the microphone source has a mediaStream; guard so this does nothing
    // if the current source is a file buffer
    if (source && source.mediaStream) {
        source.mediaStream.getTracks().forEach(track => track.stop());
        cancelAnimationFrame(animationId);
    }
}

function playFile() {
    const file = audioFileInput.files[0];
    if (file) {
        const reader = new FileReader();
        reader.onload = function(e) {
            // Close any previous context so repeated plays don't exhaust the
            // browser's limit on simultaneous AudioContexts
            if (audioContext && audioContext.state !== 'closed') {
                audioContext.close();
            }
            audioContext = new (window.AudioContext || window.webkitAudioContext)();
            // Decode the file, then route it through the analyser and on to the speakers
            audioContext.decodeAudioData(e.target.result, buffer => {
                source = audioContext.createBufferSource();
                source.buffer = buffer;
                analyser = audioContext.createAnalyser();
                source.connect(analyser);
                analyser.connect(audioContext.destination);
                analyser.fftSize = 256;
                const bufferLength = analyser.frequencyBinCount;
                dataArray = new Uint8Array(bufferLength);
                source.start();
                // Cancel any previous animation loop before starting a new one
                cancelAnimationFrame(animationId);
                animate();
            });
        };
        reader.readAsArrayBuffer(file);
    }
}

function stopFile() {
    // Only buffer sources have a stop() method; guard so this does nothing
    // if the current source is the microphone
    if (source && typeof source.stop === 'function') {
        source.stop();
        cancelAnimationFrame(animationId);
    }
}

function animate() {
    animationId = requestAnimationFrame(animate);
    analyser.getByteFrequencyData(dataArray);
    ctx.clearRect(0, 0, canvas.width, canvas.height);

    // The blob's radius tracks the loudest frequency bin (each bin is 0-255)
    const radius = Math.max(...dataArray) / 2;
    const centerX = canvas.width / 2;
    const centerY = canvas.height / 2;

    ctx.beginPath();
    ctx.arc(centerX, centerY, radius, 0, Math.PI * 2);
    ctx.fillStyle = 'rgba(0, 150, 255, 0.5)';
    ctx.fill();
    ctx.closePath();
}

Explanation

  1. HTML: The HTML file includes buttons for starting and stopping the microphone, selecting an audio file, and playing/stopping the file. It also includes a canvas element where the blob will be drawn.

  2. CSS: The CSS file styles the page, centering the controls and canvas.

  3. JavaScript: The JavaScript file handles the audio processing and animation:

    • Microphone Input: When the "Start Microphone" button is clicked, the microphone stream is captured with getUserMedia and connected to an AnalyserNode to obtain frequency data. The stream is not connected to the output, so the microphone is not played back aloud.
    • File Input: When an audio file is selected and the "Play File" button is clicked, the file is read, decoded, and played through a buffer source. The source is connected to an AnalyserNode, which in turn connects to the audio output so the file is audible.
    • Animation: The animate function uses the frequency data to draw a circle (blob) on the canvas. The radius of the circle is determined by the maximum frequency value; a smoother alternative is sketched after this list.
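
A minimal sketch of that smoother alternative, assuming you want the blob to pulse with overall loudness rather than the single loudest bin (the computeRadius helper and the smoothedRadius variable are hypothetical additions, not part of the script above):

let smoothedRadius = 0;

function computeRadius(dataArray) {
    // Average the byte frequency bins (each 0-255) instead of taking the maximum
    const average = dataArray.reduce((sum, value) => sum + value, 0) / dataArray.length;
    // Exponential smoothing: move 10% of the way toward the new value each frame
    smoothedRadius += (average - smoothedRadius) * 0.1;
    return smoothedRadius;
}

// In animate(), the line
//   const radius = Math.max(...dataArray) / 2;
// would become
//   const radius = computeRadius(dataArray);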

This example provides a basic implementation. You can further enhance the visual effects and add more features as needed.
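
One possible enhancement is to distort the flat circle into an organic, wobbling blob by pushing each point of the outline outward by one frequency bin. Below is a minimal sketch of such a drawBlob helper; the function name and the base radius of 60 are arbitrary choices, not part of the code above, and it would replace the ctx.beginPath()/ctx.arc(...) block in animate:

function drawBlob(ctx, centerX, centerY, baseRadius, dataArray) {
    const points = 64; // outline points; must not exceed dataArray.length (128 here)
    ctx.beginPath();
    for (let i = 0; i < points; i++) {
        const angle = (i / points) * Math.PI * 2;
        // Each outline point bulges outward by the value of one frequency bin
        const offset = dataArray[i % dataArray.length] / 4;
        const r = baseRadius + offset;
        const x = centerX + Math.cos(angle) * r;
        const y = centerY + Math.sin(angle) * r;
        if (i === 0) {
            ctx.moveTo(x, y);
        } else {
            ctx.lineTo(x, y);
        }
    }
    ctx.closePath();
    ctx.fillStyle = 'rgba(0, 150, 255, 0.5)';
    ctx.fill();
}

// In animate(), replace the circle-drawing block with:
//   drawBlob(ctx, centerX, centerY, 60 + radius, dataArray);

Smoothing each bin over time, in the same way as the radius sketch above, would make the outline move less abruptly.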