Interactive Blob Reacting to Microphone and Audio Files Using Web APIs

Answered using ChatGPT-4o

Question:

An interactive blob that reacts to microphone input and audio-file sounds; HTML, CSS, JS

Answer:

Creating an interactive blob that reacts to microphone input and file-based sounds involves several steps. Below is a basic example using HTML, CSS, and JavaScript. This example will use the Web Audio API to capture microphone input and play audio files, and the Canvas API to draw and animate the blob.
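Before the full files, it may help to see the core pattern in isolation. The short sketch below only illustrates the analyser setup the script relies on (the variable names here are illustrative, not the ones used later):

// Minimal sketch of the analyser pattern this answer builds on.
const audioCtx = new (window.AudioContext || window.webkitAudioContext)();
const analyserNode = audioCtx.createAnalyser();
analyserNode.fftSize = 256;                  // gives 128 frequency bins
const bins = new Uint8Array(analyserNode.frequencyBinCount);

// Any source (mic stream or decoded file) connects the same way:
//   someSourceNode.connect(analyserNode);
// and each animation frame the current spectrum is read with:
//   analyserNode.getByteFrequencyData(bins); // one 0-255 value per bin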

HTML

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Interactive Blob</title>
    <link rel="stylesheet" href="styles.css">
</head>
<body>
    <h1>Interactive Blob</h1>
    <canvas id="blobCanvas"></canvas>
    <input type="file" id="audioFileInput" accept="audio/*">
    <button id="startMic">Start Microphone</button>
    <script src="script.js"></script>
</body>
</html>

CSS (styles.css)

body {
    display: flex;
    flex-direction: column;
    align-items: center;
    justify-content: center;
    height: 100vh;
    margin: 0;
    background-color: #282c34;
    color: white;
    font-family: Arial, sans-serif;
}

canvas {
    border: 1px solid white;
    margin-top: 20px;
}

button, input {
    margin-top: 10px;
}

JavaScript (script.js)

const canvas = document.getElementById('blobCanvas');
const ctx = canvas.getContext('2d');
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;

const audioFileInput = document.getElementById('audioFileInput');
const startMicButton = document.getElementById('startMic');

let audioContext;
let analyser;
let dataArray;
let bufferLength;
let isAnimating = false; // prevents stacking a second animation loop

function setupAudioContext() {
    audioContext = new (window.AudioContext || window.webkitAudioContext)();
    analyser = audioContext.createAnalyser();
    analyser.fftSize = 256; // yields 128 frequency bins
    bufferLength = analyser.frequencyBinCount;
    dataArray = new Uint8Array(bufferLength);
}

function startDrawing() {
    if (isAnimating) return; // the loop is already running
    isAnimating = true;
    drawBlob();
}

function drawBlob() {
    requestAnimationFrame(drawBlob);
    analyser.getByteFrequencyData(dataArray); // current spectrum, one 0-255 value per bin

    ctx.clearRect(0, 0, canvas.width, canvas.height);

    // The radius follows the loudest frequency bin, so the blob pulses with the audio.
    const radius = Math.max(...dataArray) / 2;
    const centerX = canvas.width / 2;
    const centerY = canvas.height / 2;

    // Trace the outline point by point; keeping a per-angle loop makes it easy
    // to perturb individual points later for a wobblier shape.
    ctx.beginPath();
    ctx.moveTo(centerX + radius, centerY);

    for (let i = 0; i <= 360; i++) {
        const angle = i * Math.PI / 180;
        const x = centerX + radius * Math.cos(angle);
        const y = centerY + radius * Math.sin(angle);
        ctx.lineTo(x, y);
    }

    ctx.closePath();
    ctx.fillStyle = 'rgba(0, 150, 255, 0.5)';
    ctx.fill();
}

function handleAudioFile(event) {
    const file = event.target.files[0];
    if (!file) return;

    // The file input can be used before the mic button, so make sure the
    // audio context exists; the 'change' event is a user gesture, which
    // browsers require before audio may start.
    if (!audioContext) {
        setupAudioContext();
    }
    audioContext.resume(); // in case the browser created the context suspended

    const reader = new FileReader();
    reader.onload = function(e) {
        audioContext.decodeAudioData(e.target.result, function(buffer) {
            const source = audioContext.createBufferSource();
            source.buffer = buffer;
            source.connect(analyser);
            analyser.connect(audioContext.destination); // route file audio to the speakers
            source.start(0);
            startDrawing();
        });
    };
    reader.readAsArrayBuffer(file);
}

function handleMicrophone() {
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(stream => {
            const source = audioContext.createMediaStreamSource(stream);
            // The mic is analysed but deliberately not connected to the
            // destination, which avoids a feedback loop through the speakers.
            source.connect(analyser);
            startDrawing();
        })
        .catch(err => console.error('Error accessing microphone:', err));
}

audioFileInput.addEventListener('change', handleAudioFile);
startMicButton.addEventListener('click', () => {
    if (!audioContext) {
        setupAudioContext();
    }
    audioContext.resume(); // autoplay policies may start the context suspended
    handleMicrophone();
});

Explanation

  1. HTML: The HTML file includes a canvas element for drawing the blob, an input for selecting audio files, and a button to start the microphone.
  2. CSS: Basic styling to center the elements and give the canvas a border.
  3. JavaScript:
    • Audio Setup: The setupAudioContext function initializes the Web Audio API context and an AnalyserNode shared by both input paths.
    • Drawing the Blob: The drawBlob function uses the Canvas API to draw a blob whose radius follows the loudest frequency bin, so it pulses with the audio; startDrawing guards against launching the animation loop twice.
    • Handling Audio Files: The handleAudioFile function reads an audio file, decodes it, and plays it through the Web Audio API while feeding the analyser.
    • Handling Microphone Input: The handleMicrophone function captures audio from the microphone and connects it to the analyser only, so the mic is visualized without being played back.
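
One detail the script above leaves out: the canvas is sized once at load, so resizing the window leaves it at the old dimensions. A small optional addition (not part of the original answer) keeps it in sync:

// Optional: keep the canvas matched to the window when it is resized.
window.addEventListener('resize', () => {
    canvas.width = window.innerWidth;
    canvas.height = window.innerHeight;
});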

This example provides a basic interactive blob that reacts to both microphone input and audio files. You can further enhance the visual effects and interactivity as needed; one possible direction is sketched below.
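
For instance, the uniform radius in drawBlob can be replaced so each outline point is pushed outward by its own frequency bin, turning the pulse into an organic wobble. The snippet below is only a sketch of that idea, not part of the original answer: baseRadius and the /4 scaling are arbitrary choices, and it assumes the centerX, centerY, bufferLength, and dataArray values from the script above.

// Sketch: swap this in for the path-building section of drawBlob to get a
// wobbling outline. baseRadius and the wobble scaling are arbitrary choices.
const baseRadius = 80;
ctx.beginPath();
for (let i = 0; i <= 360; i++) {
    const angle = i * Math.PI / 180;
    // Map the angle to a frequency bin; the % wraps i = 360 back to bin 0
    // so the outline closes cleanly.
    const bin = Math.floor(i / 360 * bufferLength) % bufferLength;
    const r = baseRadius + dataArray[bin] / 4; // louder bin -> point pushed further out
    const x = centerX + r * Math.cos(angle);
    const y = centerY + r * Math.sin(angle);
    if (i === 0) ctx.moveTo(x, y);
    else ctx.lineTo(x, y);
}
ctx.closePath();
ctx.fillStyle = 'rgba(0, 150, 255, 0.5)';
ctx.fill();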