<!doctype html>
<html>
<head>
  <meta charset="utf-8" />

  <title>E I E I O</title>
  <meta name="description" content="[i æi i æi ou]">
  <meta name="theme-color" content="#ab8c73">
  <meta name="twitter:card" content="summary_large_image">
  <link rel="icon" type="image/png" href="favicon.png">

  <meta property="og:author" content="ucsd.edu"/>

  <meta name="viewport" content="width=device-width, initial-scale=1" />
  <style type="text/css">
    :root {
      color-scheme: dark;
      accent-color: #87f974;
    }
    body {
      color: #314422;
      background-color: #abcdef;
      margin: 0;
      padding: 0;
      /* a system-ui font stack was declared here too, but this second
         declaration overrode it entirely, so only the winning stack is kept */
      font-family: 'Comic Sans MS', 'Chalkboard SE', 'Comic Neue', cursive;
    }
    #div {
      width: 600px;
      margin: 1in auto;
      padding: 2em;
      background-color: #ebfbab;
      color: #434231;
      border-radius: 0.5em;
    }
    a:link, a:visited {
      color: cyan;
      text-decoration: none;
    }
    @media (max-width: 700px) {
      #div {
        margin: 0 auto;
        width: auto;
      }
    }
    td {
      text-shadow: 5px 0 5px white, -5px 0 5px white, 0 -10px 5px white;
    }
    img {
      max-width: 100%;
    }
  </style>
</head>

<body>
<div id="div">
  <h1>vocal</h1>
  <p>click to start</p>
  <script>
// `context` and `audioCtx` are two names for the same AudioContext; the
// code below uses both interchangeably.
const context = new AudioContext()
const audioCtx = context
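
// Browsers create the AudioContext in the 'suspended' state until a user
// gesture; tryAudio() polls resume() on every animation frame until the
// context is actually running, which is why the page says "click to start"
// even though there is no explicit click handler.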
async function tryAudio () {
  try {
    if (context.state === 'suspended') {
      console.log('trying to resume')
      // race resume() against a 100 ms timeout so the polling loop stays live
      await Promise.race([context.resume(), new Promise(resolve => setTimeout(resolve, 100))])
      console.log('resume resolved', context.state)
    }
  } catch {
    // ignore resume failures; we just poll again on the next frame
  }
  if (context.state === 'running') {
    // const audio = new Audio('./ass/ets/substantial.mp3')
    // audio.play().catch(console.error)
    audioReady()
    return
  }
  window.requestAnimationFrame(tryAudio)
}

async function audioReady () {
  console.log('audioReady')

  const osc = audioCtx.createOscillator();

  // Generate a Liljencrants-Fant-like waveform
  const harmonics = 50; // number of harmonics
  const real = new Float32Array(harmonics);
  const imag = new Float32Array(harmonics);

  for (let i = 1; i < harmonics; i++) {
    real[i] = Math.sin(Math.PI * i / harmonics); // emphasize lower harmonics
    imag[i] = -0.5 * Math.cos(Math.PI * i / harmonics); // asymmetry in phase
  }
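
  // Note on the recipe above: sin(pi*i/harmonics) actually peaks at the
  // middle harmonic (i = harmonics/2) and tapers at both ends, so this gives
  // a broad mid-band hump rather than a strict low-pass tilt; the cosine
  // term on `imag` skews the phases so the time-domain pulse is asymmetric,
  // loosely like a glottal flow derivative. A rough sketch, not a fitted
  // Liljencrants-Fant model.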

  const wave = audioCtx.createPeriodicWave(real, imag, { disableNormalization: false });
  osc.setPeriodicWave(wave);

  osc.connect(audioCtx.destination);
  // osc.start(); // never started, so this oscillator branch stays silent;
  // the looped sample below is the actual voice source

  const sampleRate = audioCtx.sampleRate;
  const f0 = 150; // fundamental frequency (Hz)
  const T0 = 1 / f0; // one pitch period, in seconds
  const frameCount = Math.floor(sampleRate * T0);
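  // frameCount is one pitch period's worth of samples, e.g.
  // floor(44100 / 150) = 294 samples at a 44.1 kHz sample rate.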

  // 1. Create Rosenberg pulse data
  const pulseData = new Float32Array(frameCount);
  const openPhase = Math.floor(0.4 * frameCount);
  const closePhase = Math.floor(0.6 * frameCount);

  for (let i = 0; i < frameCount; i++) {
    if (i < openPhase) {
      pulseData[i] = 0.5 * (1 - Math.cos((Math.PI * i) / openPhase)); // rise
    } else if (i < closePhase) {
      pulseData[i] = 1.0; // open steady
    } else {
      const t = (i - closePhase) / (frameCount - closePhase);
      pulseData[i] = 0.5 * (1 + Math.cos(Math.PI * t)); // fall
    }
  }
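
  // The Rosenberg pulse is a classic piecewise-trig approximation of glottal
  // airflow: a cosine rise while the folds open (the first 40% of the period
  // here), a fully-open plateau, then a cosine fall at closure.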

  // 2. Render a buffer in an OfflineAudioContext
  const offlineCtx = new OfflineAudioContext(1, frameCount, sampleRate);
  const buffer = offlineCtx.createBuffer(1, frameCount, sampleRate);
  buffer.copyToChannel(pulseData, 0);

  // Create a source and play it in the offline context
  const tempSource = offlineCtx.createBufferSource();
  tempSource.buffer = buffer;
  tempSource.connect(offlineCtx.destination);
  tempSource.start();
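
  // Since nothing in the offline graph processes the signal, the render below
  // just reproduces `buffer`; the OfflineAudioContext detour is the general
  // pattern for baking DSP into a buffer ahead of time.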
  // 3. Render, then build the real source in the live AudioContext
  const renderedBuffer = await offlineCtx.startRendering();
  const source = audioCtx.createBufferSource();
  // source.buffer = renderedBuffer; // the synthetic pulse train, currently
  // unused: a looped recorded schwa is the voice source instead
  source.buffer = await context.decodeAudioData(await fetch('./ass/ets/microwave-schwa.wav').then(r => r.arrayBuffer()));
  source.loop = true;
  // source.connect(audioCtx.destination);
  source.start();

  const gainNode = context.createGain()

  // Two peaking filters act as the first two formant resonators (F1 and F2).
  const filter1 = context.createBiquadFilter()
  filter1.type = 'peaking'
  filter1.frequency.value = 200;
  filter1.gain.value = 20; // +20 dB boost at the center frequency

  const filter2 = context.createBiquadFilter()
  filter2.type = 'peaking'
  filter2.frequency.value = 500;
  filter2.gain.value = 20;
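  // A 'peaking' BiquadFilterNode boosts a band around `frequency` and leaves
  // the rest of the spectrum flat, so sweeping the two center frequencies
  // imitates a moving vocal tract over the fixed glottal source.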

  // (disabled experiment: a one-off formant sweep)
  setTimeout(() => {
    // filter1.frequency.linearRampToValueAtTime(900, context.currentTime + 1)
    // filter2.frequency.linearRampToValueAtTime(2300, context.currentTime + 1)
  }, 1000)

  setInterval(() => {
    console.log('change')

    // Formant pairs (F1, F2) below follow the average-vowel table at
    // https://en.wikipedia.org/wiki/Formant#Phonetics

    // filter1.frequency.linearRampToValueAtTime(240, context.currentTime + 1)
    // filter2.frequency.linearRampToValueAtTime(2400, context.currentTime + 1)
    // eeee

    // "E": [i] (F1 240 Hz, F2 2400 Hz)
    source.detune.setValueAtTime(0, context.currentTime)
    gainNode.gain.setValueAtTime(1, context.currentTime)
    filter1.frequency.setValueAtTime(240, context.currentTime)
    filter2.frequency.setValueAtTime(2400, context.currentTime)
    gainNode.gain.setValueAtTime(0, context.currentTime + 0.4)

    // "I": ahhh (roughly [œ], 585/1710)...
    // source.detune.setValueAtTime(200, context.currentTime + 0.5)
    gainNode.gain.setValueAtTime(1, context.currentTime + 0.5)
    filter1.frequency.setValueAtTime(585, context.currentTime + 0.5)
    filter2.frequency.setValueAtTime(1710, context.currentTime + 0.5)
    // ...then an ahh--eee glide into [i]
    filter1.frequency.setValueAtTime(585, context.currentTime + 0.6)
    filter2.frequency.setValueAtTime(1710, context.currentTime + 0.6)
    filter1.frequency.linearRampToValueAtTime(240, context.currentTime + 0.9)
    filter2.frequency.linearRampToValueAtTime(2400, context.currentTime + 0.9)
    gainNode.gain.setValueAtTime(0, context.currentTime + 1)

    // "E" again, detuned 200 cents (a whole tone) down
    source.detune.setValueAtTime(-200, context.currentTime + 1.1)
    gainNode.gain.setValueAtTime(1, context.currentTime + 1.1)
    filter1.frequency.setValueAtTime(240, context.currentTime + 1.1)
    filter2.frequency.setValueAtTime(2400, context.currentTime + 1.1)
    gainNode.gain.setValueAtTime(0, context.currentTime + 1.5)

    // "I" again
    // source.detune.setValueAtTime(200, context.currentTime + 1.6)
    gainNode.gain.setValueAtTime(1, context.currentTime + 1.6)
    filter1.frequency.setValueAtTime(585, context.currentTime + 1.6)
    filter2.frequency.setValueAtTime(1710, context.currentTime + 1.6)
    // ahh--eee
    filter1.frequency.setValueAtTime(585, context.currentTime + 1.7)
    filter2.frequency.setValueAtTime(1710, context.currentTime + 1.7)
    filter1.frequency.linearRampToValueAtTime(240, context.currentTime + 2.0)
    filter2.frequency.linearRampToValueAtTime(2400, context.currentTime + 2.0)
    gainNode.gain.setValueAtTime(0, context.currentTime + 2.1)

    // "O": [o] (360/640) gliding to [u] (250/595), another 200 cents down
    source.detune.setValueAtTime(-400, context.currentTime + 2.2)
    gainNode.gain.setValueAtTime(1, context.currentTime + 2.2)
    filter1.frequency.setValueAtTime(360, context.currentTime + 2.2)
    filter2.frequency.setValueAtTime(640, context.currentTime + 2.2)
    filter1.frequency.setValueAtTime(360, context.currentTime + 2.3)
    filter2.frequency.setValueAtTime(640, context.currentTime + 2.3)
    filter1.frequency.linearRampToValueAtTime(250, context.currentTime + 2.6)
    filter2.frequency.linearRampToValueAtTime(595, context.currentTime + 2.6)
    gainNode.gain.setValueAtTime(0, context.currentTime + 3.3)
  }, 3500)
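
  // Each 3.5 s tick schedules one pass of "E I E I O" ([i æi i æi ou], per
  // the page description): the gain gates each vowel on and off, the filter
  // pair jumps or ramps between formant targets, and detune steps the pitch
  // down across the phrase.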

  // Signal chain: looped schwa -> gain gate -> F1 filter -> F2 filter -> output
  source.connect(gainNode);
  gainNode.connect(filter1);
  filter1.connect(filter2);
  filter2.connect(audioCtx.destination);
  console.log(filter1)
}
tryAudio()

  </script>
</div>
</body>
</html>