{sysaudio,examples}: add support for multiple channels
Signed-off-by: Stephen Gutekanst <stephen@hexops.com>
parent 587ea05d1e
commit 1dc3a4cba8

3 changed files with 82 additions and 52 deletions
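
In short: the sysaudio Device is now heap-allocated and carries its resolved DeviceDescriptor, so requestDevice takes an allocator and returns a *Device, deinit takes the same allocator back, and start/pause become fallible. The data callback now receives the device and hands device.descriptor to the example's ToneEngine.render, which writes the rendered sample for each frame to every channel of a planar (channel-major) buffer instead of to a single mono channel.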
@@ -6,33 +6,33 @@ const js = mach.sysjs;
 pub const App = @This();
 
 audio: sysaudio,
-device: sysaudio.Device,
+device: *sysaudio.Device,
 tone_engine: ToneEngine = .{},
 
-pub fn init(app: *App, _: *mach.Core) !void {
+pub fn init(app: *App, core: *mach.Core) !void {
     const audio = try sysaudio.init();
     errdefer audio.deinit();
 
-    const device = try audio.requestDevice(.{ .mode = .output, .channels = 1 });
-    errdefer device.deinit();
+    var device = try audio.requestDevice(core.allocator, .{ .mode = .output, .channels = 2 });
+    errdefer device.deinit(core.allocator);
 
     device.setCallback(callback, app);
-    device.start();
+    try device.start();
 
     app.audio = audio;
     app.device = device;
 }
 
-fn callback(_: *sysaudio.Device, user_data: ?*anyopaque, buffer: []u8) void {
+fn callback(device: *sysaudio.Device, user_data: ?*anyopaque, buffer: []u8) void {
     // TODO(sysaudio): should make user_data pointer type-safe
     const app: *App = @ptrCast(*App, @alignCast(@alignOf(App), user_data));
 
     // Where the magic happens: fill our audio buffer with PCM dat.
-    app.tone_engine.render(buffer);
+    app.tone_engine.render(device.descriptor, buffer);
 }
 
-pub fn deinit(app: *App, _: *mach.Core) void {
-    app.device.deinit();
+pub fn deinit(app: *App, core: *mach.Core) void {
+    app.device.deinit(core.allocator);
     app.audio.deinit();
 }
 
@@ -40,13 +40,12 @@ pub fn update(app: *App, engine: *mach.Core) !void {
     while (engine.pollEvent()) |event| {
         switch (event) {
             .key_press => |ev| {
-                app.device.start();
+                try app.device.start();
                 app.tone_engine.play(ToneEngine.keyToFrequency(ev.key));
             },
             else => {},
         }
     }
-    app.audio.waitEvents();
 }
 
 // A simple tone engine.
 
@@ -70,15 +69,16 @@ pub const ToneEngine = struct {
         duration: usize,
     };
 
-    pub fn render(engine: *ToneEngine, buffer: []u8) void {
+    pub fn render(engine: *ToneEngine, descriptor: sysaudio.DeviceDescriptor, buffer: []u8) void {
         // TODO(sysaudio): demonstrate how to properly handle format of the buffer here.
         // Right now we blindly assume f32 format, which is wrong (but always right in WASM.)
-        //
-        // TODO(sysaudio): get sample rate from callback, don't hard-code it here.
-        const sample_rate = 44100.0;
+        const sample_rate = @intToFloat(f32, descriptor.sample_rate.?);
         const buf = @ptrCast([*]f32, @alignCast(@alignOf(f32), buffer.ptr))[0 .. buffer.len / @sizeOf(f32)];
+        const frames = buf.len / descriptor.channels.?;
 
-        for (buf) |_, i| {
+        var frame: usize = 0;
+        while (frame < frames) : (frame += 1) {
+            // Render the sample for this frame (e.g. for both left and right audio channels.)
             var sample: f32 = 0;
             for (engine.playing) |*tone| {
                 if (tone.sample_counter >= tone.duration) {
@@ -104,11 +104,17 @@ pub const ToneEngine = struct {
                     sample += sine_wave * fade_in * fade_out;
                 }
 
-            buf[i] = sample;
+            // Emit the sample on all channels.
+            var channel: usize = 0;
+            while (channel < descriptor.channels.?) : (channel += 1) {
+                var channel_buf = buf[channel * frames .. (channel + 1) * frames];
+                channel_buf[frame] = sample;
+            }
         }
     }
 
     pub fn play(engine: *ToneEngine, frequency: f32) void {
+        // TODO(sysaudio): get from device
        const sample_rate = 44100.0;
 
        for (engine.playing) |*tone| {
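
Note: the updated ToneEngine.render above writes a planar (channel-major) buffer: all frames of channel 0 come first, then all frames of channel 1, and so on, with the same rendered sample written once per channel. A small standalone sketch of that indexing follows; it is not part of the commit, and planarIndex is an illustrative name:

const std = @import("std");

/// Illustrative only: index of `frame` within `channel` for the planar
/// (channel-major) f32 layout that the updated render() writes.
fn planarIndex(channel: usize, frame: usize, frames_per_channel: usize) usize {
    return channel * frames_per_channel + frame;
}

test "planar layout written by render()" {
    // 2 channels, 4 frames per channel -> 8 samples total:
    // [ L0 L1 L2 L3 | R0 R1 R2 R3 ]
    try std.testing.expectEqual(@as(usize, 0), planarIndex(0, 0, 4)); // first left sample
    try std.testing.expectEqual(@as(usize, 4), planarIndex(1, 0, 4)); // first right sample
    try std.testing.expectEqual(@as(usize, 7), planarIndex(1, 3, 4)); // last right sample
}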
@@ -55,8 +55,8 @@ pub fn waitEvents(self: Audio) void {
     self.backend.waitEvents();
 }
 
-pub fn requestDevice(self: Audio, config: DeviceDescriptor) Error!Device {
-    return self.backend.requestDevice(config);
+pub fn requestDevice(self: Audio, allocator: std.mem.Allocator, config: DeviceDescriptor) Error!*Device {
+    return self.backend.requestDevice(allocator, config);
 }
 
 pub fn inputDeviceIterator(self: Audio) DeviceIterator {
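
Note: with this signature change the caller owns the returned *Device. requestDevice allocates it with the allocator passed in, and that same allocator must later be handed back to Device.deinit, as the example app above does with core.allocator; this is also why the backend error set gains OutOfMemory below.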
@@ -11,24 +11,28 @@ else
     *const fn (device: *Device, user_data: ?*anyopaque, buffer: []u8) void;
 
 pub const Device = struct {
+    descriptor: DeviceDescriptor,
+
+    // Internal fields.
     context: js.Object,
 
-    pub fn deinit(device: Device) void {
+    pub fn deinit(device: *Device, allocator: std.mem.Allocator) void {
         device.context.deinit();
+        allocator.destroy(device);
     }
 
-    pub fn setCallback(device: Device, callback: DataCallback, user_data: ?*anyopaque) void {
-        device.context.set("device", js.createNumber(@intToFloat(f64, @ptrToInt(&device))));
+    pub fn setCallback(device: *Device, callback: DataCallback, user_data: ?*anyopaque) void {
+        device.context.set("device", js.createNumber(@intToFloat(f64, @ptrToInt(device))));
         device.context.set("callback", js.createNumber(@intToFloat(f64, @ptrToInt(callback))));
         if (user_data) |ud|
             device.context.set("user_data", js.createNumber(@intToFloat(f64, @ptrToInt(ud))));
     }
 
-    pub fn pause(device: Device) void {
+    pub fn pause(device: *Device) Error!void {
         _ = device.context.call("suspend", &.{});
     }
 
-    pub fn start(device: Device) void {
+    pub fn start(device: *Device) Error!void {
         _ = device.context.call("resume", &.{});
     }
 };
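
Note: because the device is now heap-allocated, setCallback stores the actual *Device in the JS context as a number (previously it stored @ptrToInt(&device), the address of a by-value parameter copy), and audioProcessEvent below recovers that same stable pointer with @intToPtr(*Device, ...), so the user callback receives a device whose descriptor field is valid.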
@@ -45,6 +49,7 @@ pub const DeviceIterator = struct {
 pub const IteratorError = error{};
 
 pub const Error = error{
+    OutOfMemory,
     AudioUnsupported,
 };
 
@@ -62,13 +67,14 @@ pub fn deinit(audio: Audio) void {
     audio.context_constructor.deinit();
 }
 
+// TODO)sysaudio): implement waitEvents for WebAudio, will a WASM process terminate without this?
 pub fn waitEvents(_: Audio) void {}
 
 const default_channel_count = 2;
 const default_sample_rate = 48000;
-const default_buffer_size = 1024; // 21.33ms
+const default_buffer_size_per_channel = 1024; // 21.33ms
 
-pub fn requestDevice(audio: Audio, config: DeviceDescriptor) Error!Device {
+pub fn requestDevice(audio: Audio, allocator: std.mem.Allocator, config: DeviceDescriptor) Error!*Device {
     // NOTE: WebAudio only supports F32 audio format, so config.format is unused
     const mode = config.mode orelse .output;
     const channels = config.channels orelse default_channel_count;
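
Note on the "// 21.33ms" comment: at the defaults this is 1024 frames per channel at 48000 Hz, i.e. 1024 / 48000 ≈ 0.0213 s of audio per callback.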
@@ -84,7 +90,7 @@ pub fn requestDevice(audio: Audio, config: DeviceDescriptor) Error!Device {
     const input_channels = if (mode == .input) js.createNumber(@intToFloat(f64, channels)) else js.createUndefined();
     const output_channels = if (mode == .output) js.createNumber(@intToFloat(f64, channels)) else js.createUndefined();
 
-    const node = context.call("createScriptProcessor", &.{ js.createNumber(default_buffer_size), input_channels, output_channels }).view(.object);
+    const node = context.call("createScriptProcessor", &.{ js.createNumber(default_buffer_size_per_channel), input_channels, output_channels }).view(.object);
     defer node.deinit();
 
     context.set("node", node.toValue());
@@ -107,7 +113,18 @@ pub fn requestDevice(audio: Audio, config: DeviceDescriptor) Error!Device {
         _ = node.call("connect", &.{destination.toValue()});
     }
 
-    return Device{ .context = context };
+    // TODO(sysaudio): introduce a descriptor type that has non-optional fields.
+    var descriptor = config;
+    descriptor.mode = descriptor.mode orelse .output;
+    descriptor.channels = descriptor.channels orelse default_channel_count;
+    descriptor.sample_rate = descriptor.sample_rate orelse default_sample_rate;
+
+    const device = try allocator.create(Device);
+    device.* = .{
+        .descriptor = descriptor,
+        .context = context,
+    };
+    return device;
 }
 
 fn audioProcessEvent(args: js.Object, _: usize, captures: []js.Value) js.Value {
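
Note: resolving the optional mode, channels, and sample_rate fields before storing the descriptor on the device is what allows downstream code, such as ToneEngine.render in the example, to unwrap descriptor.channels.? and descriptor.sample_rate.? without null checks.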
@@ -117,24 +134,25 @@ fn audioProcessEvent(args: js.Object, _: usize, captures: []js.Value) js.Value {
     defer audio_event.deinit();
     const output_buffer = audio_event.get("outputBuffer").view(.object);
     defer output_buffer.deinit();
+    const num_channels = @floatToInt(usize, output_buffer.get("numberOfChannels").view(.num));
 
-    const buffer_length = default_buffer_size * @sizeOf(f32);
-    var buffer: [buffer_length]u8 = undefined;
+    const buffer_length = default_buffer_size_per_channel * num_channels * @sizeOf(f32);
+    // TODO(sysaudio): reuse buffer, do not allocate in this hot path
+    const buffer = std.heap.page_allocator.alloc(u8, buffer_length) catch unreachable;
+    defer std.heap.page_allocator.free(buffer);
 
     const callback = device_context.get("callback");
     if (!callback.is(.undef)) {
-        // Do not deinit, we are not making a new device, just creating a view to the current one.
-        var dev = Device{ .context = device_context };
+        var dev = @intToPtr(*Device, @floatToInt(usize, device_context.get("device").view(.num)));
         const cb = @intToPtr(DataCallback, @floatToInt(usize, callback.view(.num)));
         const user_data = device_context.get("user_data");
         const ud = if (user_data.is(.undef)) null else @intToPtr(*anyopaque, @floatToInt(usize, user_data.view(.num)));
 
-        var channel: usize = 0;
-        while (channel < @floatToInt(usize, output_buffer.get("numberOfChannels").view(.num))) : (channel += 1) {
-            const source = js.constructType("Uint8Array", &.{js.createNumber(buffer_length)});
+        // TODO(sysaudio): do not reconstruct Uint8Array (expensive)
+        const source = js.constructType("Uint8Array", &.{js.createNumber(@intToFloat(f64, buffer_length))});
         defer source.deinit();
 
-            cb(&dev, ud, buffer[0..]);
+        cb(dev, ud, buffer[0..]);
         source.copyBytes(buffer[0..]);
 
         const float_source = js.constructType("Float32Array", &.{
@@ -148,11 +166,17 @@ fn audioProcessEvent(args: js.Object, _: usize, captures: []js.Value) js.Value {
         js.global().set("float_source", float_source.toValue());
         js.global().set("output_buffer", output_buffer.toValue());
 
-        // TODO: investigate if using copyToChannel would be better?
+        var channel: usize = 0;
+        while (channel < num_channels) : (channel += 1) {
+            // TODO(sysaudio): investigate if using copyToChannel would be better?
             //_ = output_buffer.call("copyToChannel", &.{ float_source.toValue(), js.createNumber(@intToFloat(f64, channel)) });
             const output_data = output_buffer.call("getChannelData", &.{js.createNumber(@intToFloat(f64, channel))}).view(.object);
             defer output_data.deinit();
-            _ = output_data.call("set", &.{float_source.toValue()});
+            const channel_slice = float_source.call("slice", &.{
+                js.createNumber(@intToFloat(f64, channel * default_buffer_size_per_channel)),
+                js.createNumber(@intToFloat(f64, (channel + 1) * default_buffer_size_per_channel)),
+            });
+            _ = output_data.call("set", &.{channel_slice});
         }
     }
 
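
Note: a small standalone sketch (not part of the commit) of the buffer-size and per-channel slice arithmetic used above, for the stereo case; the helper names are illustrative:

const std = @import("std");

// Mirrors the backend's default above.
const default_buffer_size_per_channel = 1024;

/// Byte length of the callback buffer for `num_channels` channels of f32 samples.
fn callbackBufferLength(num_channels: usize) usize {
    return default_buffer_size_per_channel * num_channels * @sizeOf(f32);
}

/// Float range [start, end) handed to getChannelData(channel) via float_source.slice(...).
fn channelFloatRange(channel: usize) [2]usize {
    return .{
        channel * default_buffer_size_per_channel,
        (channel + 1) * default_buffer_size_per_channel,
    };
}

test "stereo callback buffer splits into two 1024-sample slices" {
    try std.testing.expectEqual(@as(usize, 8192), callbackBufferLength(2)); // 2 * 1024 * 4 bytes
    const left = channelFloatRange(0);
    try std.testing.expectEqual(@as(usize, 0), left[0]);
    try std.testing.expectEqual(@as(usize, 1024), left[1]);
    const right = channelFloatRange(1);
    try std.testing.expectEqual(@as(usize, 1024), right[0]);
    try std.testing.expectEqual(@as(usize, 2048), right[1]);
}

Each channel therefore receives exactly default_buffer_size_per_channel floats, the same planar layout that ToneEngine.render writes on the other side of the callback.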