diff --git a/core/src/avm1/globals/sound.rs b/core/src/avm1/globals/sound.rs
index 2a4a70adcaf7..11856377e09c 100644
--- a/core/src/avm1/globals/sound.rs
+++ b/core/src/avm1/globals/sound.rs
@@ -113,16 +113,15 @@ fn duration<'gc>(
     this: Object<'gc>,
     _args: &[Value<'gc>],
 ) -> Result<Value<'gc>, Error<'gc>> {
-    if activation.swf_version() >= 6 {
-        if let Some(sound_object) = this.as_sound_object() {
-            return Ok(sound_object
-                .duration()
-                .map_or(Value::Undefined, |d| d.into()));
-        } else {
-            avm_warn!(activation, "Sound.duration: this is not a Sound");
-        }
+    // TODO: Sound.duration was only added in SWFv6, but it is not version gated.
+    // Return undefined for player <6 if we ever add player version emulation.
+    if let Some(sound_object) = this.as_sound_object() {
+        return Ok(sound_object
+            .duration()
+            .map_or(Value::Undefined, |d| d.into()));
+    } else {
+        avm_warn!(activation, "Sound.duration: this is not a Sound");
     }
-
     Ok(Value::Undefined)
 }

@@ -253,18 +252,14 @@ fn position<'gc>(
     this: Object<'gc>,
     _args: &[Value<'gc>],
 ) -> Result<Value<'gc>, Error<'gc>> {
-    if activation.swf_version() >= 6 {
-        if let Some(sound_object) = this.as_sound_object() {
-            // TODO: The position is "sticky"; even if the sound is no longer playing, it should return
-            // the previous valid position.
-            // Needs some audio backend work for this.
-            if sound_object.sound().is_some() {
-                avm_warn!(activation, "Sound.position: Unimplemented");
-                return Ok(sound_object.position().into());
-            }
-        } else {
-            avm_warn!(activation, "Sound.position: this is not a Sound");
+    // TODO: Sound.position was only added in SWFv6, but it is not version gated.
+    // Return undefined for player <6 if we ever add player version emulation.
+    if let Some(sound_object) = this.as_sound_object() {
+        if sound_object.sound().is_some() {
+            return Ok(sound_object.position().into());
         }
+    } else {
+        avm_warn!(activation, "Sound.position: this is not a Sound");
     }
     Ok(Value::Undefined)
 }
diff --git a/core/src/avm2.rs b/core/src/avm2.rs
index 818006844afa..8ffa3891e18e 100644
--- a/core/src/avm2.rs
+++ b/core/src/avm2.rs
@@ -50,7 +50,7 @@ pub use crate::avm2::domain::Domain;
 pub use crate::avm2::events::Event;
 pub use crate::avm2::names::{Namespace, QName};
 pub use crate::avm2::object::{
-    ArrayObject, ClassObject, Object, ScriptObject, StageObject, TObject,
+    ArrayObject, ClassObject, Object, ScriptObject, SoundChannelObject, StageObject, TObject,
 };
 pub use crate::avm2::value::Value;
diff --git a/core/src/avm2/globals/flash/media/sound.rs b/core/src/avm2/globals/flash/media/sound.rs
index 4c156f47f57a..e19d7d65f955 100644
--- a/core/src/avm2/globals/flash/media/sound.rs
+++ b/core/src/avm2/globals/flash/media/sound.rs
@@ -124,7 +124,7 @@ pub fn play<'gc>(
         .get(1)
         .cloned()
         .unwrap_or_else(|| 0.into())
-        .coerce_to_i32(activation)? as u16;
+        .coerce_to_i32(activation)?;
     let sound_transform = args
         .get(2)
         .cloned()
@@ -138,14 +138,8 @@ pub fn play<'gc>(
         }
     }

-    let sample_rate = if let Some(format) = activation.context.audio.get_sound_format(sound) {
-        format.sample_rate
-    } else {
-        return Ok(Value::Null);
-    };
-
     let in_sample = if position > 0.0 {
-        Some((position / 1000.0 * sample_rate as f64) as u32)
+        Some((position / 1000.0 * 44100.0) as u32)
     } else {
         None
     };
@@ -154,7 +148,7 @@ pub fn play<'gc>(
         event: SoundEvent::Start,
         in_sample,
         out_sample: None,
-        num_loops,
+        num_loops: num_loops.max(1) as u16,
         envelope: None,
     };
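The `in_sample` conversion above no longer consults the sound's own sample rate: SWF `SoundInfo` start/end points are expressed in 44.1 kHz sample frames regardless of the source's rate (the web backend below relies on the same convention), so the format lookup can be dropped. Likewise, `num_loops.max(1)` keeps a script-supplied loop count of 0 audible — presumably matching Flash's behavior of playing at least once. A standalone sketch of the millisecond-to-frame conversion (the helper name and `f64` input are illustrative, not from the patch):

```rust
/// Illustrative sketch: convert an AS3 `startTime` in milliseconds to an
/// `in_sample` frame index on the fixed 44.1 kHz timebase that SWF
/// `SoundInfo` start/end points use.
fn start_time_to_in_sample(position_ms: f64) -> Option<u32> {
    if position_ms > 0.0 {
        // e.g. 500 ms -> 22050 sample frames at 44.1 kHz.
        Some((position_ms / 1000.0 * 44100.0) as u32)
    } else {
        None
    }
}

fn main() {
    assert_eq!(start_time_to_in_sample(500.0), Some(22050));
    assert_eq!(start_time_to_in_sample(0.0), None);
}
```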
diff --git a/core/src/avm2/globals/flash/media/soundchannel.rs b/core/src/avm2/globals/flash/media/soundchannel.rs
index 848bba898d49..5d211889c1f9 100644
--- a/core/src/avm2/globals/flash/media/soundchannel.rs
+++ b/core/src/avm2/globals/flash/media/soundchannel.rs
@@ -53,10 +53,13 @@ pub fn right_peak<'gc>(
 /// Impl `SoundChannel.position`
 pub fn position<'gc>(
     _activation: &mut Activation<'_, 'gc, '_>,
-    _this: Option<Object<'gc>>,
+    this: Option<Object<'gc>>,
     _args: &[Value<'gc>],
 ) -> Result<Value<'gc>, Error> {
-    Err("Sound.position is a stub.".into())
+    if let Some(instance) = this.and_then(|this| this.as_sound_channel()) {
+        return Ok(instance.position().into());
+    }
+    Ok(Value::Undefined)
 }

 /// Implements `soundTransform`'s getter
@@ -65,7 +68,10 @@ pub fn sound_transform<'gc>(
     this: Option<Object<'gc>>,
     _args: &[Value<'gc>],
 ) -> Result<Value<'gc>, Error> {
-    if let Some(instance) = this.and_then(|this| this.as_sound_instance()) {
+    if let Some(instance) = this
+        .and_then(|this| this.as_sound_channel())
+        .and_then(|channel| channel.instance())
+    {
         let dobj_st = activation.context.local_sound_transform(instance).cloned();

         if let Some(dobj_st) = dobj_st {
@@ -82,7 +88,10 @@ pub fn set_sound_transform<'gc>(
     this: Option<Object<'gc>>,
     args: &[Value<'gc>],
 ) -> Result<Value<'gc>, Error> {
-    if let Some(instance) = this.and_then(|this| this.as_sound_instance()) {
+    if let Some(instance) = this
+        .and_then(|this| this.as_sound_channel())
+        .and_then(|channel| channel.instance())
+    {
         let as3_st = args
             .get(0)
             .cloned()
@@ -104,7 +113,10 @@ pub fn stop<'gc>(
     this: Option<Object<'gc>>,
     _args: &[Value<'gc>],
 ) -> Result<Value<'gc>, Error> {
-    if let Some(instance) = this.and_then(|this| this.as_sound_instance()) {
+    if let Some(instance) = this
+        .and_then(|this| this.as_sound_channel())
+        .and_then(|channel| channel.instance())
+    {
         activation.context.stop_sound(instance);
     }
diff --git a/core/src/avm2/object.rs b/core/src/avm2/object.rs
index 4d00c79632fa..d34dc5bd264e 100644
--- a/core/src/avm2/object.rs
+++ b/core/src/avm2/object.rs
@@ -1197,7 +1197,7 @@ pub trait TObject<'gc>: 'gc + Collect + Debug + Into<Value<'gc>> + Clone + Copy
     fn set_sound(self, _mc: MutationContext<'gc, '_>, _sound: SoundHandle) {}

     /// Unwrap this object's sound instance handle.
-    fn as_sound_instance(self) -> Option<SoundInstanceHandle> {
+    fn as_sound_channel(self) -> Option<SoundChannelObject<'gc>> {
         None
     }
diff --git a/core/src/avm2/object/soundchannel_object.rs b/core/src/avm2/object/soundchannel_object.rs
index 709005fcba25..5dcb89a7210a 100644
--- a/core/src/avm2/object/soundchannel_object.rs
+++ b/core/src/avm2/object/soundchannel_object.rs
@@ -20,7 +20,11 @@ pub fn soundchannel_allocator<'gc>(

     Ok(SoundChannelObject(GcCell::allocate(
         activation.context.gc_context,
-        SoundChannelObjectData { base, sound: None },
+        SoundChannelObjectData {
+            base,
+            sound: None,
+            position: 0.0,
+        },
     ))
     .into())
 }
@@ -38,6 +42,9 @@ pub struct SoundChannelObjectData<'gc> {
     /// The sound this object holds.
     #[collect(require_static)]
     sound: Option<SoundInstanceHandle>,
+
+    /// Position of the last playing sound in milliseconds.
+    position: f64,
 }

 impl<'gc> SoundChannelObject<'gc> {
@@ -45,7 +52,7 @@ impl<'gc> SoundChannelObject<'gc> {
     pub fn from_sound_instance(
         activation: &mut Activation<'_, 'gc, '_>,
         sound: SoundInstanceHandle,
-    ) -> Result<Object<'gc>, Error> {
+    ) -> Result<Self, Error> {
         let class = activation.avm2().classes().soundchannel;
         let proto = class
             .get_property(
@@ -56,20 +63,35 @@ impl<'gc> SoundChannelObject<'gc> {
                 class.into(),
                 &QName::new(Namespace::public(), "prototype").into(),
                 activation,
             )?
             .coerce_to_object(activation)?;
         let base = ScriptObjectData::base_new(Some(proto), Some(class));

-        let mut sound_object: Object<'gc> = SoundChannelObject(GcCell::allocate(
+        let mut sound_object = SoundChannelObject(GcCell::allocate(
             activation.context.gc_context,
             SoundChannelObjectData {
                 base,
                 sound: Some(sound),
+                position: 0.0,
             },
-        ))
-        .into();
+        ));
         sound_object.install_instance_traits(activation, class)?;
-        class.call_native_init(Some(sound_object), &[], activation, Some(class))?;
+        class.call_native_init(Some(sound_object.into()), &[], activation, Some(class))?;

         Ok(sound_object)
     }
+
+    /// Return the backend handle to the currently playing sound instance.
+    pub fn instance(self) -> Option<SoundInstanceHandle> {
+        self.0.read().sound
+    }
+
+    /// Return the position of the playing sound in milliseconds.
+    pub fn position(self) -> f64 {
+        self.0.read().position
+    }
+
+    /// Set the position of the playing sound in milliseconds.
+    pub fn set_position(self, mc: MutationContext<'gc, '_>, value: f64) {
+        self.0.write(mc).position = value;
+    }
 }

 impl<'gc> TObject<'gc> for SoundChannelObject<'gc> {
@@ -94,13 +116,17 @@ impl<'gc> TObject<'gc> for SoundChannelObject<'gc> {

         Ok(SoundChannelObject(GcCell::allocate(
             activation.context.gc_context,
-            SoundChannelObjectData { base, sound: None },
+            SoundChannelObjectData {
+                base,
+                sound: None,
+                position: 0.0,
+            },
         ))
         .into())
     }

-    fn as_sound_instance(self) -> Option<SoundInstanceHandle> {
-        self.0.read().sound
+    fn as_sound_channel(self) -> Option<SoundChannelObject<'gc>> {
+        Some(self)
     }

     fn set_sound_instance(self, mc: MutationContext<'gc, '_>, sound: SoundInstanceHandle) {
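The new `position` accessors go through `GcCell` reads and writes: the audio manager writes the position each tick, and the AS3 getter reads it back through a copy of the same handle. For readers unfamiliar with the pattern, here is a minimal stand-in using `Rc<RefCell<...>>` in place of gc_arena's `GcCell` — an analogy only, since the real `set_position` additionally requires a `MutationContext`:

```rust
use std::cell::RefCell;
use std::rc::Rc;

// Analogy for the `SoundChannelObject` accessor pattern above:
// a cheaply-cloneable handle around shared, internally-mutable state.
#[derive(Clone)]
struct SoundChannelHandle(Rc<RefCell<SoundChannelData>>);

struct SoundChannelData {
    /// Position of the last playing sound in milliseconds.
    position: f64,
}

impl SoundChannelHandle {
    fn position(&self) -> f64 {
        self.0.borrow().position
    }

    fn set_position(&self, value: f64) {
        self.0.borrow_mut().position = value;
    }
}

fn main() {
    let channel = SoundChannelHandle(Rc::new(RefCell::new(SoundChannelData { position: 0.0 })));
    let getter = channel.clone();
    // The audio manager writes through one handle each tick...
    channel.set_position(1234.5);
    // ...and the AS3 `SoundChannel.position` getter reads through another.
    assert_eq!(getter.position(), 1234.5);
}
```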
diff --git a/core/src/backend/audio.rs b/core/src/backend/audio.rs
index 9348d02ba6cc..c86bb710b878 100644
--- a/core/src/backend/audio.rs
+++ b/core/src/backend/audio.rs
@@ -1,7 +1,7 @@
 use crate::{
     avm1::SoundObject,
     avm2::Event as Avm2Event,
-    avm2::Object as Avm2Object,
+    avm2::SoundChannelObject,
     display_object::{self, DisplayObject, MovieClip, TDisplayObject},
 };
 use downcast_rs::Downcast;
@@ -85,7 +85,7 @@ pub trait AudioBackend: Downcast {
     /// Get the position of a sound instance in milliseconds.
     /// Returns `None` if the sound is not/no longer playing.
-    fn get_sound_position(&self, instance: SoundInstanceHandle) -> Option<u32>;
+    fn get_sound_position(&self, instance: SoundInstanceHandle) -> Option<f64>;

     /// Get the duration of a sound in milliseconds.
     /// Returns `None` if sound is not registered.
@@ -191,8 +191,8 @@ impl AudioBackend for NullAudioBackend {
     fn stop_sound(&mut self, _sound: SoundInstanceHandle) {}
     fn stop_all_sounds(&mut self) {}

-    fn get_sound_position(&self, _instance: SoundInstanceHandle) -> Option<u32> {
-        Some(0)
+    fn get_sound_position(&self, _instance: SoundInstanceHandle) -> Option<f64> {
+        Some(0.0)
     }

     fn get_sound_duration(&self, sound: SoundHandle) -> Option<f64> {
         if let Some(sound) = self.sounds.get(sound) {
@@ -271,12 +271,21 @@ impl<'gc> AudioManager<'gc> {
             if let Some(pos) = audio.get_sound_position(sound.instance) {
                 // Sound is still playing; update position.
                 if let Some(avm1_object) = sound.avm1_object {
-                    avm1_object.set_position(gc_context, pos);
+                    avm1_object.set_position(gc_context, pos.round() as u32);
+                } else if let Some(avm2_object) = sound.avm2_object {
+                    avm2_object.set_position(gc_context, pos);
                 }
                 true
             } else {
-                // Sound ended; fire end event.
+                // Sound ended.
+                let duration = sound
+                    .sound
+                    .and_then(|sound| audio.get_sound_duration(sound))
+                    .unwrap_or_default();
                 if let Some(object) = sound.avm1_object {
+                    object.set_position(gc_context, duration.round() as u32);
+
+                    // Fire soundComplete event.
                     action_queue.queue_actions(
                         root,
                         crate::context::ActionType::Method {
@@ -289,13 +298,15 @@ impl<'gc> AudioManager<'gc> {
                 }

                 if let Some(object) = sound.avm2_object {
+                    object.set_position(gc_context, duration);
+
                     //TODO: AVM2 events are usually not queued, but we can't
                     //hold the update context in the audio manager yet.
                     action_queue.queue_actions(
                         root,
                         crate::context::ActionType::Event2 {
                             event: Avm2Event::new("soundComplete"),
-                            target: object,
+                            target: object.into(),
                         },
                         false,
                     )
@@ -338,7 +349,7 @@ impl<'gc> AudioManager<'gc> {
     pub fn attach_avm2_sound_channel(
         &mut self,
         instance: SoundInstanceHandle,
-        avm2_object: Avm2Object<'gc>,
+        avm2_object: SoundChannelObject<'gc>,
     ) {
         if let Some(i) = self
             .sounds
@@ -548,8 +559,8 @@ pub struct SoundInstance<'gc> {
     /// The AVM1 `Sound` object associated with this sound, if any.
     avm1_object: Option<SoundObject<'gc>>,

-    /// The AVM2 `Sound` object associated with this sound, if any.
-    avm2_object: Option<Avm2Object<'gc>>,
+    /// The AVM2 `SoundChannel` object associated with this sound, if any.
+    avm2_object: Option<SoundChannelObject<'gc>>,
 }

 /// A sound transform for a playing sound, for use by audio backends.
diff --git a/core/src/backend/audio/decoders.rs b/core/src/backend/audio/decoders.rs
index a4043deae159..2aedb48b74de 100644
--- a/core/src/backend/audio/decoders.rs
+++ b/core/src/backend/audio/decoders.rs
@@ -78,6 +78,18 @@ pub fn make_decoder(
     Ok(decoder)
 }

+impl Decoder for Box<dyn Decoder + Send> {
+    #[inline]
+    fn num_channels(&self) -> u8 {
+        self.as_ref().num_channels()
+    }
+
+    /// The sample rate of this audio decoder.
+    fn sample_rate(&self) -> u16 {
+        self.as_ref().sample_rate()
+    }
+}
+
 /// A "stream" sound is a sound that has its data distributed across `SoundStreamBlock` tags,
 /// one per frame of a MovieClip. The sound is synced to the MovieClip's timeline, and will
 /// stop/seek as the MovieClip stops/seeks.
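The forwarding impl above exists because a boxed trait object does not automatically satisfy its own trait's bounds: the boxed decoders that `make_decoder` returns could not be used where a generic `D: Decoder` is required (as the new `DecoderStream<D>` in the mixer does) without it. A minimal self-contained sketch of the idea — the trait is pared down to one method and `Pcm` is a stand-in type, not Ruffle code:

```rust
trait Decoder {
    fn sample_rate(&self) -> u16;
}

// Forward `Decoder` through the box so that `Box<dyn Decoder + Send>`
// itself satisfies `D: Decoder` bounds.
impl Decoder for Box<dyn Decoder + Send> {
    #[inline]
    fn sample_rate(&self) -> u16 {
        self.as_ref().sample_rate()
    }
}

// Stand-in for `DecoderStream<D>`: generic over any decoder.
struct DecoderStream<D: Decoder> {
    decoder: D,
}

struct Pcm;
impl Decoder for Pcm {
    fn sample_rate(&self) -> u16 {
        22050
    }
}

fn main() {
    let boxed: Box<dyn Decoder + Send> = Box::new(Pcm);
    // This compiles only because of the forwarding impl above.
    let stream = DecoderStream { decoder: boxed };
    assert_eq!(stream.decoder.sample_rate(), 22050);
}
```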
diff --git a/core/src/backend/audio/mixer.rs b/core/src/backend/audio/mixer.rs
index c214ab5b47e3..dd79066cc124 100644
--- a/core/src/backend/audio/mixer.rs
+++ b/core/src/backend/audio/mixer.rs
@@ -1,4 +1,6 @@
-use super::decoders::{self, AdpcmDecoder, NellymoserDecoder, PcmDecoder, SeekableDecoder};
+use super::decoders::{
+    self, AdpcmDecoder, Decoder, NellymoserDecoder, PcmDecoder, SeekableDecoder,
+};
 use super::{SoundHandle, SoundInstanceHandle, SoundTransform};
 use crate::tag_utils::SwfSlice;
 use generational_arena::Arena;
@@ -27,11 +29,73 @@ pub struct AudioMixer {
     output_sample_rate: u32,
 }

-/// An iterator for sound decoders that returns stereo samples.
-type Signal = Box<dyn Send + dasp::signal::Signal<Frame = [i16; 2]>>;
-
 type Error = Box<dyn std::error::Error>;

+/// An audio stream.
+trait Stream: dasp::signal::Signal<Frame = [i16; 2]> + Send {
+    /// The position of this stream in sample frames.
+    ///
+    /// For infinite streams, this will be the number of sample frames since the start of the
+    /// stream, starting from 0.
+    /// For finite streams, this will be the sample position in the underlying audio data. This may
+    /// not start from 0 if this sound did not start playing from the beginning.
+    fn source_position(&self) -> u32;
+
+    /// The sample rate of the underlying audio source of this stream. For example, this will return
+    /// 22050 when playing a 22 kHz audio file, even if the output rate is 44 kHz.
+    fn source_sample_rate(&self) -> u16;
+}
+
+/// A stream that wraps a `Decoder`.
+struct DecoderStream<D> {
+    decoder: D,
+    position: u32,
+    is_exhausted: bool,
+}
+
+impl<D: Decoder> DecoderStream<D> {
+    /// Creates a `DecoderStream` using the given decoder as a source.
+    fn new(decoder: D) -> Self {
+        Self {
+            decoder,
+            position: 0,
+            is_exhausted: false,
+        }
+    }
+}
+
+impl<D: Decoder + Send> Stream for DecoderStream<D> {
+    #[inline]
+    fn source_position(&self) -> u32 {
+        self.position
+    }
+
+    #[inline]
+    fn source_sample_rate(&self) -> u16 {
+        self.decoder.sample_rate()
+    }
+}
+
+impl<D: Decoder> dasp::signal::Signal for DecoderStream<D> {
+    type Frame = [i16; 2];
+
+    #[inline]
+    fn next(&mut self) -> [i16; 2] {
+        if let Some(frame) = self.decoder.next() {
+            self.position += 1;
+            frame
+        } else {
+            self.is_exhausted = true;
+            Default::default()
+        }
+    }
+
+    #[inline]
+    fn is_exhausted(&self) -> bool {
+        self.is_exhausted
+    }
+}
+
 /// Contains the data and metadata for a sound in an SWF file.
 ///
 /// A sound is defined by the `DefineSound` SWF tags and contains the audio data for the sound.
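The whole point of the two `Stream` accessors is that position can be reported in the *source's* timebase, independent of the mixer's output rate. A sketch of the arithmetic that `AudioMixer::get_sound_position` performs further down in this diff (the helper name is illustrative):

```rust
// Illustrative: frames and rate both come from the source, so resampling
// to the output rate cannot skew the reported position.
fn position_ms(source_position: u32, source_sample_rate: u16) -> f64 {
    let num_sample_frames = f64::from(source_position);
    let sample_rate = f64::from(source_sample_rate);
    num_sample_frames * 1000.0 / sample_rate
}

fn main() {
    // 11025 frames into a 22050 Hz sound is half a second...
    assert_eq!(position_ms(11025, 22050), 500.0);
    // ...even though the mixer has emitted twice as many 44.1 kHz output frames.
    assert_eq!(position_ms(22050, 44100), 500.0);
}
```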
@@ -68,7 +132,7 @@ struct SoundInstance {
     handle: Option<SoundHandle>,

     /// The audio stream. Call `next()` to yield sample frames.
-    signal: Signal,
+    stream: Box<dyn Stream>,

     /// Flag indicating whether this sound is still playing.
     /// If this flag is false, the sound will be cleaned up during the
@@ -167,93 +231,82 @@ impl AudioMixer {
         Ok(decoder)
     }

-    /// Transforms a `Signal` into a new `Signal` that matches the output sample rate.
-    fn make_resampler<S: dasp::signal::Signal<Frame = [i16; 2]>>(
-        &self,
-        format: &swf::SoundFormat,
-        mut signal: S,
-    ) -> dasp::signal::interpolate::Converter<
-        S,
-        impl dasp::interpolate::Interpolator<Frame = [i16; 2]>,
-    > {
+    /// Transforms a `Stream` into a new `Stream` that matches the output sample rate.
+    fn make_resampler(&self, format: &swf::SoundFormat, mut stream: impl Stream) -> impl Stream {
         // TODO: Allow interpolator to be user-configurable?
-        let left = signal.next();
-        let right = signal.next();
+        let left = stream.next();
+        let right = stream.next();
         let interpolator = dasp::interpolate::linear::Linear::new(left, right);
-        dasp::signal::interpolate::Converter::from_hz_to_hz(
-            signal,
+        ConverterStream(dasp::signal::interpolate::Converter::from_hz_to_hz(
+            stream,
             interpolator,
             format.sample_rate.into(),
             self.output_sample_rate.into(),
-        )
+        ))
     }

-    /// Creates a `Signal` for an "event" sound that decodes and resamples the audio stream to the
+    /// Creates a `Stream` for an "event" sound that decodes and resamples the audio stream to the
     /// output format.
     ///
     /// This also applies the custom envelope, start/end, and looping parameters from `settings`.
-    fn make_signal_from_event_sound(
+    fn make_stream_from_event_sound(
         &self,
         sound: &Sound,
         settings: &swf::SoundInfo,
         data: Cursor<ArcAsRef>,
-    ) -> Result<Box<dyn Send + dasp::signal::Signal<Frame = [i16; 2]>>, Error> {
+    ) -> Result<Box<dyn Stream>, Error> {
         // Instantiate a decoder for the compression that the sound data uses.
         let decoder = Self::make_seekable_decoder(&sound.format, data)?;

-        // Wrap the decoder in the event sound signal (controls looping/envelope)
-        let signal = EventSoundSignal::new_with_settings(
+        // Wrap the decoder into an event sound stream (controls looping/envelope).
+        let stream = EventSoundStream::new_with_settings(
             decoder,
             settings,
             sound.num_sample_frames,
             sound.skip_sample_frames,
         );
-        // Convert the `Decoder` to a `Signal`, and resample it to the output
-        // sample rate.
-        let signal = self.make_resampler(&sound.format, signal);
+        // Resample the stream to the output sample rate.
+        let stream = self.make_resampler(&sound.format, stream);
         if let Some(envelope) = &settings.envelope {
-            use dasp::Signal;
             let envelope_signal = EnvelopeSignal::new(&envelope[..], self.output_sample_rate);
-            Ok(Box::new(signal.mul_amp(envelope_signal)))
+            Ok(Box::new(MulAmpStream::new(stream, envelope_signal)) as Box<dyn Stream>)
         } else {
-            Ok(Box::new(signal))
+            Ok(Box::new(stream) as Box<dyn Stream>)
         }
     }

-    /// Creates a `Signal` for a simple "event" sound that decodes and resamples the audio stream
+    /// Creates a `Stream` for a simple "event" sound that decodes and resamples the audio stream
     /// to the output format.
     ///
     /// This is used for cases where there is no custom envelope or looping on the sound instance.
-    /// Otherwise, `AudioMixer::make_signal_from_event_sound` should be used.
-    fn make_signal_from_simple_event_sound<R: 'static + std::io::Read + Send>(
+    /// Otherwise, `AudioMixer::make_stream_from_event_sound` should be used.
+    fn make_stream_from_simple_event_sound<R: 'static + std::io::Read + Send>(
         &self,
         format: &swf::SoundFormat,
         data_stream: R,
-    ) -> Result<Box<dyn Send + dasp::signal::Signal<Frame = [i16; 2]>>, Error> {
+    ) -> Result<Box<dyn Stream>, Error> {
         // Instantiate a decoder for the compression that the sound data uses.
         let decoder = decoders::make_decoder(format, data_stream)?;

-        // Convert the `Decoder` to a `Signal`, and resample it to the output
-        // sample rate.
-        let signal = dasp::signal::from_iter(decoder);
-        let signal = self.make_resampler(format, signal);
-        Ok(Box::new(signal))
+        // Convert the `Decoder` to a `Stream`, and resample it to the output sample rate.
+        let stream = DecoderStream::new(decoder);
+        let stream = self.make_resampler(format, stream);
+        Ok(Box::new(stream))
     }

-    /// Creates a `Signal` that decodes and resamples a timeline "stream" sound.
-    fn make_signal_from_stream<'a>(
+    /// Creates a `Stream` that decodes and resamples a timeline "stream" sound.
+    fn make_stream_from_swf_slice<'a>(
         &self,
         format: &swf::SoundFormat,
         data_stream: SwfSlice,
-    ) -> Result<Box<dyn 'a + Send + dasp::signal::Signal<Frame = [i16; 2]>>, Error> {
+    ) -> Result<Box<dyn 'a + Stream>, Error> {
         // Instantiate a decoder for the compression that the sound data uses.
         let clip_stream_decoder = decoders::make_stream_decoder(format, data_stream)?;

-        // Convert the `Decoder` to a `Signal`, and resample it to the output
-        // sample rate.
-        let signal = dasp::signal::from_iter(clip_stream_decoder);
-        let signal = Box::new(self.make_resampler(format, signal));
-        Ok(signal)
+        // Convert the `Decoder` to a `Stream`, and resample it to the output sample rate.
+        let stream = DecoderStream::new(clip_stream_decoder);
+        let stream = Box::new(self.make_resampler(format, stream));
+        Ok(stream)
     }

     /// Callback to the audio thread.
@@ -281,8 +334,8 @@ impl AudioMixer {
     {
         let mut output_frame = Stereo::<T::Signed>::EQUILIBRIUM;
         for (_, sound) in sound_instances.iter_mut() {
-            if sound.active && !sound.signal.is_exhausted() {
-                let sound_frame = sound.signal.next();
+            if sound.active && !sound.stream.is_exhausted() {
+                let sound_frame = sound.stream.next();
                 let [left_0, left_1] = sound_frame.mul_amp(sound.left_transform);
                 let [right_0, right_1] = sound_frame.mul_amp(sound.right_transform);
                 let sound_frame: Stereo<T::Signed> = [
@@ -336,12 +389,12 @@ impl AudioMixer {
         // The audio data for stream sounds is distributed among the frames of a
         // movie clip. The stream tag reader will parse through the SWF and
         // feed the decoder audio data on the fly.
-        let signal = self.make_signal_from_stream(format, clip_data)?;
+        let stream = self.make_stream_from_swf_slice(format, clip_data)?;

         let mut sound_instances = self.sound_instances.lock().unwrap();
         let handle = sound_instances.insert(SoundInstance {
             handle: None,
-            signal,
+            stream,
             active: true,
             left_transform: [1.0, 0.0],
             right_transform: [0.0, 1.0],
@@ -359,25 +412,25 @@ impl AudioMixer {
     ) -> Result<SoundInstanceHandle, Error> {
         let sound = &self.sounds[sound_handle];
         let data = Cursor::new(ArcAsRef(Arc::clone(&sound.data)));
-        // Create a signal that decodes and resamples the sound.
-        let signal = if sound.skip_sample_frames == 0
+        // Create a stream that decodes and resamples the sound.
+        let stream = if sound.skip_sample_frames == 0
             && settings.in_sample.is_none()
             && settings.out_sample.is_none()
             && settings.num_loops <= 1
             && settings.envelope.is_none()
         {
-            // For simple event sounds, just use the same signal as streams.
-            self.make_signal_from_simple_event_sound(&sound.format, data)?
+            // For simple event sounds, use a standard decoder stream.
+            self.make_stream_from_simple_event_sound(&sound.format, data)?
         } else {
-            // For event sounds with envelopes/other properties, wrap it in `EventSoundSignal`.
-            self.make_signal_from_event_sound(sound, settings, data)?
+            // For event sounds with envelopes/other properties, wrap it in `EventSoundStream`.
+            self.make_stream_from_event_sound(sound, settings, data)?
         };

         // Add sound instance to active list.
         let mut sound_instances = self.sound_instances.lock().unwrap();
         let handle = sound_instances.insert(SoundInstance {
             handle: Some(sound_handle),
-            signal,
+            stream,
             active: true,
             left_transform: [1.0, 0.0],
             right_transform: [0.0, 1.0],
@@ -406,10 +459,14 @@ impl AudioMixer {
     /// Returns the position of a playing sound in milliseconds.
     ///
     /// Returns `None` if the sound is no longer playing.
-    pub fn get_sound_position(&self, instance: SoundInstanceHandle) -> Option<u32> {
+    pub fn get_sound_position(&self, instance: SoundInstanceHandle) -> Option<f64> {
         let sound_instances = self.sound_instances.lock().unwrap();
-        // TODO: Return actual position
-        sound_instances.get(instance).map(|_| 0)
+        sound_instances.get(instance).map(|instance| {
+            // Get the current sample position from the underlying audio source.
+            let num_sample_frames: f64 = instance.stream.source_position().into();
+            let sample_rate: f64 = instance.stream.source_sample_rate().into();
+            num_sample_frames * 1000.0 / sample_rate
+        })
     }

     /// Returns the duration of a registered sound in milliseconds.
@@ -498,17 +555,18 @@ impl Default for ArcAsRef {
     }
 }

-/// A signal for event sound instances with custom envelopes, start/end point, or loop settings.
-struct EventSoundSignal {
+/// A stream for event sound instances with custom envelopes, start/end point, or loop settings.
+struct EventSoundStream {
     decoder: Box<dyn SeekableDecoder>,
     num_loops: u16,
     start_sample_frame: u32,
     end_sample_frame: Option<u32>,
     cur_sample_frame: u32,
+    skip_sample_frames: u32,
     is_exhausted: bool,
 }

-impl EventSoundSignal {
+impl EventSoundStream {
     fn new_with_settings(
         decoder: Box<dyn SeekableDecoder>,
         settings: &swf::SoundInfo,
         num_sample_frames: u32,
         skip_sample_frames: u16,
     ) -> Self {
@@ -525,16 +583,17 @@ impl EventSoundSignal {
             .unwrap_or(num_sample_frames)
             + skip_sample_frames;

-        let mut signal = Self {
+        let mut stream = Self {
             decoder,
             num_loops: settings.num_loops,
             start_sample_frame,
             end_sample_frame: Some(end_sample_frame),
             cur_sample_frame: start_sample_frame,
+            skip_sample_frames,
             is_exhausted: false,
         };
-        signal.next_loop();
-        signal
+        stream.next_loop();
+        stream
     }

     /// Resets the decoder to the start point of the loop.
@@ -549,9 +608,10 @@
     }
 }

-impl dasp::signal::Signal for EventSoundSignal {
+impl dasp::signal::Signal for EventSoundStream {
     type Frame = [i16; 2];

+    #[inline]
     fn next(&mut self) -> Self::Frame {
         // Loop the sound if necessary, and get the next frame.
         if !self.is_exhausted {
@@ -572,15 +632,123 @@
         }
     }

+    #[inline]
     fn is_exhausted(&self) -> bool {
         self.is_exhausted
     }
 }

+impl Stream for EventSoundStream {
+    #[inline]
+    fn source_position(&self) -> u32 {
+        self.cur_sample_frame
+            .saturating_sub(self.skip_sample_frames)
+    }
+
+    #[inline]
+    fn source_sample_rate(&self) -> u16 {
+        self.decoder.sample_rate()
+    }
+}
+
+/// A stream that converts a source stream to a different sample rate.
+struct ConverterStream<S, I>(dasp::signal::interpolate::Converter<S, I>)
+where
+    S: Stream,
+    I: dasp::interpolate::Interpolator<Frame = [i16; 2]>;
+
+impl<S, I> Stream for ConverterStream<S, I>
+where
+    S: Stream,
+    I: dasp::interpolate::Interpolator<Frame = [i16; 2]> + Send,
+{
+    #[inline]
+    fn source_position(&self) -> u32 {
+        self.0.source().source_position()
+    }
+
+    #[inline]
+    fn source_sample_rate(&self) -> u16 {
+        self.0.source().source_sample_rate()
+    }
+}
+
+impl<S, I> dasp::signal::Signal for ConverterStream<S, I>
+where
+    S: Stream,
+    I: dasp::interpolate::Interpolator<Frame = [i16; 2]> + Send,
+{
+    type Frame = [i16; 2];
+
+    #[inline]
+    fn next(&mut self) -> [i16; 2] {
+        self.0.next()
+    }
+
+    #[inline]
+    fn is_exhausted(&self) -> bool {
+        self.0.is_exhausted()
+    }
+}
+
+/// A stream that multiplies a source stream by an amplitude stream to produce an enveloped stream.
+struct MulAmpStream<S, E>
+where
+    S: Stream,
+    E: dasp::signal::Signal<Frame = [f32; 2]> + Send,
+{
+    stream: S,
+    envelope: E,
+}
+
+impl<S, E> MulAmpStream<S, E>
+where
+    S: Stream,
+    E: dasp::signal::Signal<Frame = [f32; 2]> + Send,
+{
+    fn new(stream: S, envelope: E) -> Self {
+        Self { stream, envelope }
+    }
+}
+
+impl<S, E> Stream for MulAmpStream<S, E>
+where
+    S: Stream,
+    E: dasp::signal::Signal<Frame = [f32; 2]> + Send,
+{
+    #[inline]
+    fn source_position(&self) -> u32 {
+        self.stream.source_position()
+    }
+
+    #[inline]
+    fn source_sample_rate(&self) -> u16 {
+        self.stream.source_sample_rate()
+    }
+}
+
+impl<S, E> dasp::signal::Signal for MulAmpStream<S, E>
+where
+    S: Stream,
+    E: dasp::signal::Signal<Frame = [f32; 2]> + Send,
+{
+    type Frame = [i16; 2];
+
+    #[inline]
+    fn next(&mut self) -> Self::Frame {
+        dasp::frame::Frame::mul_amp(self.stream.next(), self.envelope.next())
+    }
+
+    #[inline]
+    fn is_exhausted(&self) -> bool {
+        self.stream.is_exhausted() || self.envelope.is_exhausted()
+    }
+}
+
 /// A signal that represents the sound envelope for an event sound.
-/// The sound signal gets multiplied by the envelope for volume/panning effects.
+/// The sound stream gets multiplied by the envelope for volume/panning effects.
 struct EnvelopeSignal {
-    /// Iterator through the envelope points specified in the SWWF file.
+    /// Iterator through the envelope points specified in the SWF file.
     envelope: std::vec::IntoIter<swf::SoundEnvelopePoint>,

     /// The starting envelope point.
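Note that `ConverterStream` and `MulAmpStream` add DSP behavior but forward both `Stream` accessors untouched, so position metadata survives any stacking of stages. A pared-down, self-contained sketch of that decorator pattern (toy types, `dasp` omitted):

```rust
// Toy version of the delegation used above: a wrapper implements `Stream`
// by forwarding to its source, so `source_position`/`source_sample_rate`
// still report the source's own timebase after any number of stages.
trait Stream {
    fn source_position(&self) -> u32;
    fn source_sample_rate(&self) -> u16;
}

struct SourceStream {
    position: u32,
}

impl Stream for SourceStream {
    fn source_position(&self) -> u32 {
        self.position
    }
    fn source_sample_rate(&self) -> u16 {
        22050
    }
}

/// Stand-in for `ConverterStream`/`MulAmpStream`: adds behavior, forwards metadata.
struct Wrapper<S: Stream>(S);

impl<S: Stream> Stream for Wrapper<S> {
    fn source_position(&self) -> u32 {
        self.0.source_position()
    }
    fn source_sample_rate(&self) -> u16 {
        self.0.source_sample_rate()
    }
}

fn main() {
    // Two stacked stages still report the source's own frame count and rate.
    let stacked = Wrapper(Wrapper(SourceStream { position: 11025 }));
    assert_eq!(stacked.source_position(), 11025);
    assert_eq!(stacked.source_sample_rate(), 22050);
}
```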
@@ -714,7 +882,7 @@ macro_rules! impl_audio_mixer_backend {
     }

     #[inline]
-    fn get_sound_position(&self, instance: SoundInstanceHandle) -> Option<u32> {
+    fn get_sound_position(&self, instance: SoundInstanceHandle) -> Option<f64> {
         self.$mixer.get_sound_position(instance)
     }
diff --git a/core/src/context.rs b/core/src/context.rs
index 655024ab4a2b..603e20af3dd9 100644
--- a/core/src/context.rs
+++ b/core/src/context.rs
@@ -2,7 +2,9 @@
 use crate::avm1::globals::system::SystemProperties;
 use crate::avm1::{Avm1, Object as Avm1Object, Timers, Value as Avm1Value};
-use crate::avm2::{Avm2, Event as Avm2Event, Object as Avm2Object, Value as Avm2Value};
+use crate::avm2::{
+    Avm2, Event as Avm2Event, Object as Avm2Object, SoundChannelObject, Value as Avm2Value,
+};
 use crate::backend::{
     audio::{AudioBackend, AudioManager, SoundHandle, SoundInstanceHandle},
     locale::LocaleBackend,
@@ -212,7 +214,7 @@ impl<'a, 'gc, 'gc_context> UpdateContext<'a, 'gc, 'gc_context> {
     pub fn attach_avm2_sound_channel(
         &mut self,
         instance: SoundInstanceHandle,
-        avm2_object: Avm2Object<'gc>,
+        avm2_object: SoundChannelObject<'gc>,
     ) {
         self.audio_manager
             .attach_avm2_sound_channel(instance, avm2_object);
diff --git a/web/packages/core/src/ruffle-imports.ts b/web/packages/core/src/ruffle-imports.ts
index 1c26dec8dba7..8fafeadf66d9 100644
--- a/web/packages/core/src/ruffle-imports.ts
+++ b/web/packages/core/src/ruffle-imports.ts
@@ -26,3 +26,15 @@ export function copyToAudioBuffer(
         dstBuffer.set(rightData);
     }
 }
+
+/**
+ * Returns `AudioContext.getOutputTimestamp`, defaulting to `context.currentTime` if
+ * `getOutputTimestamp` is unavailable. This is necessary because `web-sys` does not yet export
+ * `AudioContext.getOutputTimestamp`.
+ *
+ * @internal
+ */
+export function getAudioOutputTimestamp(context: AudioContext): number {
+    const timestamp = context.getOutputTimestamp?.();
+    return timestamp?.contextTime ?? context.currentTime - context.baseLatency;
+}
diff --git a/web/src/audio.rs b/web/src/audio.rs
index d97a3a133695..2b161ebee5a9 100644
--- a/web/src/audio.rs
+++ b/web/src/audio.rs
@@ -16,6 +16,7 @@ pub struct WebAudioBackend {
     sounds: Arena<Sound>,
     left_samples: Vec<f32>,
     right_samples: Vec<f32>,
+    output_time: f64,
     frame_rate: f64,
     min_sample_rate: u16,
     preload_stream_data: FnvHashMap<AudioStreamHandle, StreamData>,
@@ -93,6 +94,22 @@ struct SoundInstance {
     /// either decoded on the fly with Decoder, or pre-decoded
     /// and played with an AudioBufferSourceNode.
     instance_type: SoundInstanceType,
+
+    /// The time in seconds that this buffer started playing.
+    /// This time uses the same origin as `AudioContext.currentTime`.
+    start_time: f64,
+
+    /// The starting point of the sound data in seconds.
+    /// `0.0` means the beginning of the sound.
+    loop_start: f64,
+
+    /// The ending point of the sound data in seconds.
+    /// `f64::MAX` if no end point is specified.
+    loop_end: f64,
+
+    /// The number of times the sound data will loop.
+    /// `1` means the sound plays once.
+    num_loops: u16,
 }

 /// The Drop impl ensures that the sound is stopped and removed from the audio context,
@@ -324,6 +341,7 @@ impl WebAudioBackend {
             next_stream_id: 0,
             left_samples: vec![],
             right_samples: vec![],
+            output_time: 0.0,
             frame_rate: 1.0,
             min_sample_rate,
         })
@@ -350,6 +368,9 @@ impl WebAudioBackend {
         let sound_sample_rate: f64 = sound.format.sample_rate.into();
         let mut is_stereo = sound.format.is_stereo;
+        let mut loop_start = f64::from(sound.skip_sample_frames) / 44100.0;
+        let mut loop_end = std::f64::MAX;
+        let mut num_loops = 1;
         let node: web_sys::AudioNode = match settings {
             Some(settings)
                 if sound.skip_sample_frames > 0
@@ -360,12 +381,12 @@ impl WebAudioBackend {
             {
                 // Event sound with non-default parameters.
                 // Note that start/end values are in 44.1 kHz samples regardless of the sound's sample rate.
-                let start_sample_frame = f64::from(settings.in_sample.unwrap_or(0)) / 44100.0
+                loop_start = f64::from(settings.in_sample.unwrap_or(0)) / 44100.0
                     + f64::from(sound.skip_sample_frames) / sound_sample_rate;
-                node.set_loop(settings.num_loops > 1);
-                node.set_loop_start(start_sample_frame);
-                node.start_with_when_and_grain_offset(0.0, start_sample_frame)
+                num_loops = settings.num_loops;
+                node.set_loop(num_loops > 1);
+                node.set_loop_start(loop_start);
+                node.start_with_when_and_grain_offset(0.0, loop_start)
                     .warn_on_error();

                 let current_time = self.context.current_time();
@@ -373,18 +394,18 @@ impl WebAudioBackend {
                 // The length of the sound in the SWF, or as requested by the script playing it,
                 // doesn't always line up with the actual length of the sound.
                 // Always set a custom end point to make sure we're correct.
-                let end_sample_frame = if let Some(out_sample) = settings.out_sample {
+                loop_end = if let Some(out_sample) = settings.out_sample {
                     f64::from(out_sample) / 44100.0
                 } else {
                     f64::from(sound.num_sample_frames + u32::from(sound.skip_sample_frames))
                         / sound_sample_rate
                 };

+                // `AudioBufferSourceNode.loop` is a bool, so we have to stop the loop at the proper time.
                 // `start_with_when_and_grain_offset_and_grain_duration` unfortunately doesn't work
                 // as you might expect with loops, so we use `stop_with_when` to stop the loop.
-                let total_len =
-                    (end_sample_frame - start_sample_frame) * f64::from(settings.num_loops);
-                node.set_loop_end(end_sample_frame);
+                let total_len = (loop_end - loop_start) * f64::from(settings.num_loops);
+                node.set_loop_end(loop_end);
                 node.stop_with_when(current_time + total_len)
                     .warn_on_error();
@@ -416,6 +437,10 @@ impl WebAudioBackend {
         let instance = SoundInstance {
             handle: Some(handle),
             format: sound.format.clone(),
+            start_time: self.context.current_time(),
+            loop_start,
+            loop_end,
+            num_loops,
             instance_type: SoundInstanceType::AudioBuffer(AudioBufferInstance {
                 envelope_node: node.clone(),
                 envelope_is_stereo: is_stereo,
@@ -473,6 +498,10 @@ impl WebAudioBackend {
         let instance = SoundInstance {
             handle: Some(handle),
             format: sound.format.clone(),
+            start_time: self.context.current_time(),
+            loop_start: 0.0,
+            loop_end: std::f64::MAX,
+            num_loops: 1,
             instance_type: SoundInstanceType::Decoder(decoder),
         };
         SOUND_INSTANCES.with(|instances| {
@@ -1019,11 +1048,24 @@ impl AudioBackend for WebAudioBackend {
         })
     }

-    fn get_sound_position(&self, instance: SoundInstanceHandle) -> Option<u32> {
+    fn get_sound_position(&self, instance: SoundInstanceHandle) -> Option<f64> {
         SOUND_INSTANCES.with(|instances| {
             let instances = instances.borrow();
-            // TODO: Return actual position
-            instances.get(instance).map(|_| 0)
+            instances.get(instance).map(|instance| {
+                // Estimate the position of the sound based on the current AudioContext time.
+                let mut dt = self.output_time - instance.start_time;
+                dt = dt.max(0.0);
+                let loop_time = instance.loop_end - instance.loop_start;
+                let loop_index = (dt / loop_time) as u16;
+                // If the sound is looping, the position cycles between the start and end times,
+                // except on the final loop, where we clamp to the final position.
+                if loop_index < instance.num_loops {
+                    dt = dt.rem_euclid(loop_time);
+                }
+                dt += instance.loop_start;
+                dt = dt.min(instance.loop_end);
+                dt * 1000.0
+            })
         })
     }
@@ -1057,6 +1099,12 @@ impl AudioBackend for WebAudioBackend {
             }
         })
     }
+
+    fn tick(&mut self) {
+        // Update the output timestamp.
+        // We do this once per frame to avoid spamming it in `get_sound_position`.
+        self.output_time = get_audio_output_timestamp(&self.context);
+    }
 }

 #[wasm_bindgen(raw_module = "./ruffle-imports.js")]
@@ -1070,6 +1118,11 @@ extern "C" {
         left_data: Option<&[f32]>,
         right_data: Option<&[f32]>,
     );
+
+    /// Imported JS method to call `AudioContext.getOutputTimestamp` because
+    /// it is not yet available in `web_sys`.
+    #[wasm_bindgen(js_name = "getAudioOutputTimestamp")]
+    fn get_audio_output_timestamp(context: &web_sys::AudioContext) -> f64;
 }

 // Janky resampling code.
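Unlike the mixer, the web backend cannot read a sample counter out of the `AudioBufferSourceNode`, so it estimates position from wall-clock output time, cycling through the `[loop_start, loop_end)` window while loops remain and clamping after the final one. A standalone sketch of that arithmetic with worked numbers (the helper name is illustrative; all inputs are in seconds, the result in milliseconds, mirroring the code above):

```rust
fn estimate_position_ms(
    output_time: f64,
    start_time: f64,
    loop_start: f64,
    loop_end: f64,
    num_loops: u16,
) -> f64 {
    let mut dt = (output_time - start_time).max(0.0);
    let loop_time = loop_end - loop_start;
    let loop_index = (dt / loop_time) as u16;
    // While loops remain, the position cycles through the loop region;
    // after the final loop it clamps at the end point.
    if loop_index < num_loops {
        dt = dt.rem_euclid(loop_time);
    }
    dt += loop_start;
    dt = dt.min(loop_end);
    dt * 1000.0
}

fn main() {
    // A 2-second loop region [0.5, 2.5) played 3 times, observed 3.2 s after
    // start: that's 1.2 s into the second pass -> 0.5 + 1.2 = 1.7 s.
    assert!((estimate_position_ms(3.2, 0.0, 0.5, 2.5, 3) - 1700.0).abs() < 1e-9);
    // 7.0 s in, all 3 loops are done, so the position clamps to the end point.
    assert!((estimate_position_ms(7.0, 0.0, 0.5, 2.5, 3) - 2500.0).abs() < 1e-9);
}
```

For a non-looping sound the fields default to `loop_start = 0.0`, `loop_end = f64::MAX`, `num_loops = 1`, which reduces the whole computation to elapsed time since `start_time`.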