audio: cleanup audio module

Signed-off-by: Stephen Gutekanst <stephen@hexops.com>
Stephen Gutekanst 2024-04-16 11:48:15 -07:00
parent be3300b80a
commit f1dbc3955c
2 changed files with 100 additions and 56 deletions


@@ -27,27 +27,22 @@ pub const global_events = .{
.tick = .{ .handler = tick },
};
tones: std.AutoHashMapUnmanaged(mach.core.Key, mach.EntityID) = .{},
pub const local_events = .{
.init = .{ .handler = init },
.tick = .{ .handler = tick },
};
fn init(audio: *mach.Audio.Mod, piano: *Mod) !void {
audio.init(.{ .allocator = gpa.allocator() });
try audio.state().init();
fn init(audio: *mach.Audio.Mod, piano: *Mod) void {
// Initialize audio module
audio.send(.init, .{});
// Initialize piano module state
piano.init(.{});
inline for (@typeInfo(mach.core.Key).Enum.fields) |field| {
const key: mach.core.Key = @enumFromInt(field.value);
const entity = try audio.newEntity();
try audio.set(entity, .samples, try fillTone(audio, key));
try audio.set(entity, .playing, false);
try audio.set(entity, .index, 0);
try piano.state().tones.put(gpa.allocator(), key, entity);
}
}
pub fn tick(
fn tick(
engine: *mach.Engine.Mod,
audio: *mach.Audio.Mod,
piano: *Mod,
) !void {
var iter = mach.core.pollEvents();
while (iter.next()) |event| {
@@ -55,11 +50,15 @@ pub fn tick(
.key_press => |ev| {
const vol = try audio.state().player.volume();
switch (ev.key) {
// Arrow keys turn volume up/down
.down => try audio.state().player.setVolume(@max(0.0, vol - 0.1)),
.up => try audio.state().player.setVolume(@min(1.0, vol + 0.1)),
else => {
const entity = piano.state().tones.get(ev.key).?;
// Play a new sound
const entity = try audio.newEntity();
try audio.set(entity, .samples, try fillTone(audio, ev.key));
try audio.set(entity, .playing, true);
try audio.set(entity, .index, 0);
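// With .samples, .playing and .index set (and .playing == true), the audio
// module will mix this entity into the next buffer it renders.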
},
}
},
@@ -76,7 +75,7 @@ pub fn tick(
back_buffer_view.release();
}
pub fn fillTone(audio: *mach.Audio.Mod, key: mach.core.Key) ![]const f32 {
fn fillTone(audio: *mach.Audio.Mod, key: mach.core.Key) ![]const f32 {
const frequency = keyToFrequency(key);
const channels = audio.state().player.channels().len;
const sample_rate: f32 = @floatFromInt(audio.state().player.sampleRate());
@@ -105,7 +104,7 @@ pub fn fillTone(audio: *mach.Audio.Mod, key: mach.core.Key) ![]const f32 {
return samples;
}
pub fn keyToFrequency(key: mach.core.Key) f32 {
fn keyToFrequency(key: mach.core.Key) f32 {
// The frequencies here just come from a piano frequencies chart. You can google for them.
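// (Equivalently, in equal temperament key n of an 88-key piano is tuned to
// 440 * 2^((n - 49) / 12) Hz, with A4 being key 49.)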
return switch (key) {
// First row of piano keys, the highest.


@@ -5,71 +5,116 @@ const sysaudio = mach.sysaudio;
pub const name = .mach_audio;
pub const Mod = mach.Mod(@This());
pub const components = .{
.samples = .{ .type = []const f32 },
.playing = .{ .type = bool },
.index = .{ .type = usize },
};
pub const local_events = .{
.init = .{ .handler = init },
.render = .{ .handler = render },
};
allocator: std.mem.Allocator,
ctx: sysaudio.Context = undefined,
player: sysaudio.Player = undefined,
ctx: sysaudio.Context,
player: sysaudio.Player,
mixing_buffer: []f32,
buffer: SampleBuffer = SampleBuffer.init(),
mutex: std.Thread.Mutex = .{},
cond: std.Thread.Condition = .{},
pub const SampleBuffer = std.fifo.LinearFifo(f32, .{ .Static = 4096 });
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
pub fn init(audio: *@This()) !void {
audio.ctx = try sysaudio.Context.init(null, audio.allocator, .{});
try audio.ctx.refresh();
// Enough space to hold 30ms of audio @ 48000hz, f32 audio samples, 6 channels
//
// This buffer is only used to transfer samples from the .render event handler to the audio thread,
// so making it larger than needed introduces no latency, but making it smaller than needed could
// block the .render event handler.
pub const SampleBuffer = std.fifo.LinearFifo(f32, .{ .Static = 48000 * 0.03 * @sizeOf(f32) * 6 });
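// For reference, that capacity evaluates to 48000 * 0.03 * 4 * 6 = 34_560 f32 elements.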
const device = audio.ctx.defaultDevice(.playback) orelse return error.NoDeviceFound;
audio.player = try audio.ctx.createPlayer(device, writeFn, .{ .user_data = audio });
try audio.player.start();
fn init(audio: *Mod) !void {
const allocator = gpa.allocator();
const ctx = try sysaudio.Context.init(null, allocator, .{});
try ctx.refresh();
// TODO(audio): let people handle these errors
// TODO(audio): enable selecting non-default devices
const device = ctx.defaultDevice(.playback) orelse return error.NoDeviceFound;
// TODO(audio): allow us to set user_data after creation of the player, so that we do not need
// __state access.
var player = try ctx.createPlayer(device, writeFn, .{ .user_data = &audio.__state });
const frame_size = @sizeOf(f32) * player.channels().len; // size of an audio frame
const sample_rate = player.sampleRate(); // number of samples per second
const sample_rate_ms = sample_rate / 1000; // number of samples per ms
// A 30ms buffer of audio that we will use to store mixed samples before sending them to the
// audio thread for playback.
//
// TODO(audio): enable the audio rendering loop to run at a different frequency to reduce this
// buffer size and reduce latency.
const mixing_buffer = try allocator.alloc(f32, 30 * sample_rate_ms * frame_size);
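// For example, at an assumed 48000 Hz sample rate with 2 channels this allocates
// 30 * 48 * (4 * 2) = 11_520 f32 elements; the actual size depends on the device.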
audio.init(.{
.allocator = allocator,
.ctx = ctx,
.player = player,
.mixing_buffer = mixing_buffer,
});
try player.start();
}
pub fn deinit(audio: *Mod) void {
fn deinit(audio: *Mod) void {
audio.state().player.deinit();
audio.state().ctx.deinit();
audio.state().allocator.free(audio.state().mixing_buffer);
var iter = audio.entities.entities.valueIterator();
while (iter.next()) |*entity| {
entity.samples.deinit(audio.state().allocator);
var archetypes_iter = audio.entities.query(.{ .all = &.{
.{ .mach_audio = &.{.samples} },
} });
while (archetypes_iter.next()) |archetype| {
const samples = archetype.slice(.mach_audio, .samples);
for (samples) |buf| buf.deinit(audio.state().allocator);
}
}
pub fn render(audio: *Mod) !void {
// Prepare the next 30ms of audio by querying entities and mixing the samples they want to play.
// 48_000 * 0.03 = 1440 = 30ms
var mixing_buffer: [1440]f32 = undefined;
fn render(audio: *Mod) !void {
// Prepare the next buffer of mixed audio by querying entities and mixing the samples they want
// to play.
var mixing_buffer = audio.state().mixing_buffer;
@memset(mixing_buffer, 0);
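// Start from silence: mixSamples mixes each playing entity's samples into this buffer.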
var max_samples: usize = 0;
var iter = audio.entities.query(.{ .all = &.{.{ .mach_audio = &.{ .samples, .playing, .index } }} });
while (iter.next()) |archetype| for (
archetype.slice(.entity, .id),
archetype.slice(.mach_audio, .samples),
archetype.slice(.mach_audio, .playing),
archetype.slice(.mach_audio, .index),
) |id, samples, playing, index| {
if (!playing) continue;
var archetypes_iter = audio.entities.query(.{ .all = &.{
.{ .mach_audio = &.{ .samples, .playing, .index } },
} });
while (archetypes_iter.next()) |archetype| {
for (
archetype.slice(.entity, .id),
archetype.slice(.mach_audio, .samples),
archetype.slice(.mach_audio, .playing),
archetype.slice(.mach_audio, .index),
) |id, samples, playing, index| {
if (!playing) continue;
const to_read = @min(samples.len - index, mixing_buffer.len);
mixSamples(mixing_buffer[0..to_read], samples[index..][0..to_read]);
max_samples = @max(max_samples, to_read);
if (index + to_read >= samples.len) {
try audio.set(id, .playing, false);
try audio.set(id, .index, 0);
continue;
const to_read = @min(samples.len - index, mixing_buffer.len);
mixSamples(mixing_buffer[0..to_read], samples[index..][0..to_read]);
max_samples = @max(max_samples, to_read);
if (index + to_read >= samples.len) {
// No longer playing, we've read all samples
try audio.set(id, .playing, false);
try audio.set(id, .index, 0);
continue;
}
try audio.set(id, .index, index + to_read);
}
}
try audio.set(id, .index, index + to_read);
};
// Write our mixed buffer to the audio thread via the sample buffer.
audio.state().mutex.lock();
defer audio.state().mutex.unlock();
while (audio.state().buffer.writableLength() < max_samples) {
@@ -78,6 +123,7 @@ pub fn render(audio: *Mod) !void {
audio.state().buffer.writeAssumeCapacity(mixing_buffer[0..max_samples]);
}
// Callback invoked on the audio thread.
fn writeFn(audio_opaque: ?*anyopaque, output: []u8) void {
const audio: *@This() = @ptrCast(@alignCast(audio_opaque));
@@ -93,7 +139,6 @@ fn writeFn(audio_opaque: ?*anyopaque, output: []u8) void {
const read_slice = audio.buffer.readableSlice(0);
const read_len = @min(read_slice.len, total_samples - i);
if (read_len == 0) return;
sysaudio.convertTo(
@@ -105,12 +150,12 @@ fn writeFn(audio_opaque: ?*anyopaque, output: []u8) void {
i += read_len;
audio.buffer.discard(read_len);
audio.cond.signal();
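// Wake the .render handler in case it is waiting for writable space in the sample buffer.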
}
}
// TODO: what's this weird behavior in ReleaseFast/Small?
// TODO(audio): remove this switch, currently ReleaseFast/ReleaseSmall have some weird behavior if
// we use suggestVectorLength
const vector_length = switch (builtin.mode) {
.Debug, .ReleaseSafe => std.simd.suggestVectorLength(f32),
else => null,