audio: add audio mixing/playback module

parent b0d6c88f3b
commit be3300b80a
4 changed files with 300 additions and 181 deletions

examples/sysaudio/Piano.zig (new file, 151 lines)

@@ -0,0 +1,151 @@
// A simple tone engine.
//
// `keyToFrequency` can be used to convert a keyboard key to a frequency, so that the
// keys asdfghj on your QWERTY keyboard will map to the notes C/D/E/F/G/A/B[4], the
// keys above qwertyu will map to C5 and the keys below zxcvbnm will map to C3.
//
// The duration is hard-coded to 1.5s. To prevent clicking, tones are faded in linearly over
// the first 1/64th duration of the tone. To provide a cool sustained effect, tones are faded
// out using 1-log10(x*10) (google it to see how it looks, it's strong for most of the duration of
// the note then fades out slowly.)
const std = @import("std");
const builtin = @import("builtin");

const mach = @import("mach");
const math = mach.math;
const sysaudio = mach.sysaudio;

pub const App = @This();

var gpa = std.heap.GeneralPurposeAllocator(.{}){};

pub const name = .piano;
pub const Mod = mach.Mod(@This());

pub const global_events = .{
    .init = .{ .handler = init },
    .tick = .{ .handler = tick },
};

tones: std.AutoHashMapUnmanaged(mach.core.Key, mach.EntityID) = .{},

fn init(audio: *mach.Audio.Mod, piano: *Mod) !void {
    audio.init(.{ .allocator = gpa.allocator() });
    try audio.state().init();

    piano.init(.{});
    inline for (@typeInfo(mach.core.Key).Enum.fields) |field| {
        const key: mach.core.Key = @enumFromInt(field.value);
        const entity = try audio.newEntity();
        try audio.set(entity, .samples, try fillTone(audio, key));
        try audio.set(entity, .playing, false);
        try audio.set(entity, .index, 0);
        try piano.state().tones.put(gpa.allocator(), key, entity);
    }
}

pub fn tick(
    engine: *mach.Engine.Mod,
    audio: *mach.Audio.Mod,
    piano: *Mod,
) !void {
    var iter = mach.core.pollEvents();
    while (iter.next()) |event| {
        switch (event) {
            .key_press => |ev| {
                const vol = try audio.state().player.volume();
                switch (ev.key) {
                    .down => try audio.state().player.setVolume(@max(0.0, vol - 0.1)),
                    .up => try audio.state().player.setVolume(@min(1.0, vol + 0.1)),
                    else => {
                        const entity = piano.state().tones.get(ev.key).?;
                        try audio.set(entity, .playing, true);
                    },
                }
            },
            .close => engine.send(.exit, .{}),
            else => {},
        }
    }

    audio.send(.render, .{});

    const back_buffer_view = mach.core.swap_chain.getCurrentTextureView().?;

    mach.core.swap_chain.present();
    back_buffer_view.release();
}

pub fn fillTone(audio: *mach.Audio.Mod, key: mach.core.Key) ![]const f32 {
    const frequency = keyToFrequency(key);
    const channels = audio.state().player.channels().len;
    const sample_rate: f32 = @floatFromInt(audio.state().player.sampleRate());
    const duration: f32 = 1.5 * @as(f32, @floatFromInt(channels)) * sample_rate; // play the tone for 1.5s
    const gain = 0.1;

    const samples = try gpa.allocator().alloc(f32, @intFromFloat(duration));

    var i: usize = 0;
    while (i < samples.len) : (i += channels) {
        const sample_index: f32 = @floatFromInt(i + 1);
        const sine_wave = math.sin(frequency * 2.0 * math.pi * sample_index / sample_rate) * gain;

        // A number ranging from 0.0 to 1.0 in the first 1/64th of the duration of the tone.
        const fade_in = @min(sample_index / (duration / 64.0), 1.0);

        // A number ranging from 1.0 to 0.0 over half the duration of the tone.
        const progression = sample_index / duration; // 0.0 (tone start) to 1.0 (tone end)
        const fade_out = 1.0 - math.clamp(math.log10(progression * 10.0), 0.0, 1.0);

        for (0..channels) |channel| {
            samples[i + channel] = sine_wave * fade_in * fade_out;
        }
    }

    return samples;
}

pub fn keyToFrequency(key: mach.core.Key) f32 {
    // The frequencies here just come from a piano frequencies chart. You can google for them.
    return switch (key) {
        // First row of piano keys, the highest.
        .q => 523.25, // C5
        .w => 587.33, // D5
        .e => 659.26, // E5
        .r => 698.46, // F5
        .t => 783.99, // G5
        .y => 880.0, // A5
        .u => 987.77, // B5
        .i => 1046.5, // C6
        .o => 1174.7, // D6
        .p => 1318.5, // E6
        .left_bracket => 1396.9, // F6
        .right_bracket => 1568.0, // G6

        // Second row of piano keys, the middle.
        .a => 261.63, // C4
        .s => 293.67, // D4
        .d => 329.63, // E4
        .f => 349.23, // F4
        .g => 392.0, // G4
        .h => 440.0, // A4
        .j => 493.88, // B4
        .k => 523.25, // C5
        .l => 587.33, // D5
        .semicolon => 659.26, // E5
        .apostrophe => 698.46, // F5

        // Third row of piano keys, the lowest.
        .z => 130.81, // C3
        .x => 146.83, // D3
        .c => 164.81, // E3
        .v => 174.61, // F3
        .b => 196.00, // G3
        .n => 220.0, // A3
        .m => 246.94, // B3
        .comma => 261.63, // C4
        .period => 293.67, // D4
        .slash => 329.63, // E5
        else => 0.0,
    };
}
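A quick way to sanity-check the fade curve described in the header comment of Piano.zig is to evaluate the envelope on its own, outside the engine. The sketch below is a minimal, self-contained recreation of the fade_in/fade_out math from fillTone (plain Zig using only std; the envelope helper name is illustrative, not part of the example): the linear fade-in completes after 1/64th of the tone, and the 1 - log10(10x) fade-out only begins once roughly 10% of the duration has elapsed, decaying to zero at the end.

const std = @import("std");

// Envelope as applied per sample in fillTone: a linear fade-in over the first
// 1/64th of the tone, multiplied by a 1 - log10(10x) fade-out.
// `progression` is the position within the tone, 0.0 (start) to 1.0 (end).
fn envelope(progression: f32) f32 {
    const fade_in = @min(progression * 64.0, 1.0);
    const fade_out = 1.0 - std.math.clamp(std.math.log10(progression * 10.0), 0.0, 1.0);
    return fade_in * fade_out;
}

test "envelope shape" {
    // Ramps in quickly at the start...
    try std.testing.expect(envelope(0.001) < envelope(1.0 / 64.0));
    // ...sits at full strength at 10% of the duration (log10(10 * 0.1) = 0)...
    try std.testing.expectApproxEqAbs(@as(f32, 1.0), envelope(0.1), 0.001);
    // ...and has fully faded out by the end (log10(10 * 1.0) = 1).
    try std.testing.expectApproxEqAbs(@as(f32, 0.0), envelope(1.0), 0.001);
}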
@@ -1,186 +1,13 @@
-// A simple tone engine.
-//
-// It renders 512 tones simultaneously, each with their own frequency and duration.
-//
-// `keyToFrequency` can be used to convert a keyboard key to a frequency, so that the
-// keys asdfghj on your QWERTY keyboard will map to the notes C/D/E/F/G/A/B[4], the
-// keys above qwertyu will map to C5 and the keys below zxcvbnm will map to C3.
-//
-// The duration is hard-coded to 1.5s. To prevent clicking, tones are faded in linearly over
-// the first 1/64th duration of the tone. To provide a cool sustained effect, tones are faded
-// out using 1-log10(x*10) (google it to see how it looks, it's strong for most of the duration of
-// the note then fades out slowly.)
-const std = @import("std");
-const builtin = @import("builtin");
-
 const mach = @import("mach");
-const math = mach.math;
-const sysaudio = mach.sysaudio;
-
-pub const App = @This();
-
-var gpa = std.heap.GeneralPurposeAllocator(.{}){};
-
-audio_ctx: sysaudio.Context,
-player: sysaudio.Player,
-playing: [512]Tone = std.mem.zeroes([512]Tone),
-
-const Tone = struct {
-    frequency: f32,
-    sample_counter: usize,
-    duration: usize,
-};
-
-pub fn init(app: *App) !void {
-    try mach.core.init(.{});
-
-    app.audio_ctx = try sysaudio.Context.init(null, gpa.allocator(), .{});
-    errdefer app.audio_ctx.deinit();
-    try app.audio_ctx.refresh();
-
-    const device = app.audio_ctx.defaultDevice(.playback) orelse return error.NoDeviceFound;
-    app.player = try app.audio_ctx.createPlayer(device, writeCallback, .{ .user_data = app });
-    try app.player.start();
-}
-
-pub fn deinit(app: *App) void {
-    defer _ = gpa.deinit();
-    defer mach.core.deinit();
-
-    app.player.deinit();
-    app.audio_ctx.deinit();
-}
-
-pub fn update(app: *App) !bool {
-    var iter = mach.core.pollEvents();
-    while (iter.next()) |event| {
-        switch (event) {
-            .key_press => |ev| {
-                const vol = try app.player.volume();
-                switch (ev.key) {
-                    .down => try app.player.setVolume(@max(0.0, vol - 0.1)),
-                    .up => try app.player.setVolume(@min(1.0, vol + 0.1)),
-                    else => {},
-                }
-                app.fillTone(keyToFrequency(ev.key));
-            },
-            .close => return true,
-            else => {},
-        }
-    }
-
-    if (builtin.cpu.arch != .wasm32) {
-        const back_buffer_view = mach.core.swap_chain.getCurrentTextureView().?;
-
-        mach.core.swap_chain.present();
-        back_buffer_view.release();
-    }
-
-    return false;
-}
-
-fn writeCallback(ctx: ?*anyopaque, output: []u8) void {
-    const app: *App = @as(*App, @ptrCast(@alignCast(ctx)));
-
-    // const seconds_per_frame = 1.0 / @as(f32, @floatFromInt(player.sampleRate()));
-    const frame_size = app.player.format().frameSize(@intCast(app.player.channels().len));
-    const frames = output.len / frame_size;
-    _ = frames;
-
-    var frame: usize = 0;
-    while (frame < output.len) : (frame += frame_size) {
-        // Calculate the audio sample we'll play on both channels for this frame
-        var sample: f32 = 0;
-        for (&app.playing) |*tone| {
-            if (tone.sample_counter >= tone.duration) continue;
-
-            tone.sample_counter += 1;
-            const sample_counter = @as(f32, @floatFromInt(tone.sample_counter));
-            const duration = @as(f32, @floatFromInt(tone.duration));
-
-            // The sine wave that plays the frequency.
-            const gain = 0.1;
-            const sine_wave = math.sin(tone.frequency * 2.0 * math.pi * sample_counter / @as(f32, @floatFromInt(app.player.sampleRate()))) * gain;
-
-            // A number ranging from 0.0 to 1.0 in the first 1/64th of the duration of the tone.
-            const fade_in = @min(sample_counter / (duration / 64.0), 1.0);
-
-            // A number ranging from 1.0 to 0.0 over half the duration of the tone.
-            const progression = sample_counter / duration; // 0.0 (tone start) to 1.0 (tone end)
-            const fade_out = 1.0 - math.clamp(math.log10(progression * 10.0), 0.0, 1.0);
-
-            // Mix this tone into the sample we'll actually play on e.g. the speakers, reducing
-            // sine wave intensity if we're fading in or out over the entire duration of the
-            // tone.
-            sample += sine_wave * fade_in * fade_out;
-        }
-
-        // Convert our float sample to the format the audio driver is working in
-        sysaudio.convertTo(
-            f32,
-            // Pass two samples (assume two channel audio)
-            // Note that in a real application this must match app.player.channels().len
-            &.{ sample, sample },
-            app.player.format(),
-            output[frame..][0..frame_size],
-        );
-    }
-}
-
-pub fn fillTone(app: *App, frequency: f32) void {
-    for (&app.playing) |*tone| {
-        if (tone.sample_counter >= tone.duration) {
-            tone.* = Tone{
-                .frequency = frequency,
-                .sample_counter = 0,
-                .duration = @as(usize, @intFromFloat(1.5 * @as(f32, @floatFromInt(app.player.sampleRate())))), // play the tone for 1.5s
-            };
-            return;
-        }
-    }
-}
-
-pub fn keyToFrequency(key: mach.core.Key) f32 {
-    // The frequencies here just come from a piano frequencies chart. You can google for them.
-    return switch (key) {
-        // First row of piano keys, the highest.
-        .q => 523.25, // C5
-        .w => 587.33, // D5
-        .e => 659.26, // E5
-        .r => 698.46, // F5
-        .t => 783.99, // G5
-        .y => 880.0, // A5
-        .u => 987.77, // B5
-        .i => 1046.5, // C6
-        .o => 1174.7, // D6
-        .p => 1318.5, // E6
-        .left_bracket => 1396.9, // F6
-        .right_bracket => 1568.0, // G6
-
-        // Second row of piano keys, the middle.
-        .a => 261.63, // C4
-        .s => 293.67, // D4
-        .d => 329.63, // E4
-        .f => 349.23, // F4
-        .g => 392.0, // G4
-        .h => 440.0, // A4
-        .j => 493.88, // B4
-        .k => 523.25, // C5
-        .l => 587.33, // D5
-        .semicolon => 659.26, // E5
-        .apostrophe => 698.46, // F5
-
-        // Third row of piano keys, the lowest.
-        .z => 130.81, // C3
-        .x => 146.83, // D3
-        .c => 164.81, // E3
-        .v => 174.61, // F3
-        .b => 196.00, // G3
-        .n => 220.0, // A3
-        .m => 246.94, // B3
-        .comma => 261.63, // C4
-        .period => 293.67, // D4
-        .slash => 329.63, // E5
-        else => 0.0,
-    };
-}
+const Piano = @import("Piano.zig");
+
+// The list of modules to be used in our application.
+// Our Piano itself is implemented in our own module called Piano.
+pub const modules = .{
+    mach.Engine,
+    mach.Audio,
+    Piano,
+};
+
+pub const App = mach.App;
src/Audio.zig (new file, 140 lines)

@@ -0,0 +1,140 @@
const std = @import("std");
const builtin = @import("builtin");
const mach = @import("main.zig");
const sysaudio = mach.sysaudio;

pub const name = .mach_audio;
pub const Mod = mach.Mod(@This());
pub const components = .{
    .samples = .{ .type = []const f32 },
    .playing = .{ .type = bool },
    .index = .{ .type = usize },
};
pub const local_events = .{
    .render = .{ .handler = render },
};

allocator: std.mem.Allocator,
ctx: sysaudio.Context = undefined,
player: sysaudio.Player = undefined,
buffer: SampleBuffer = SampleBuffer.init(),
mutex: std.Thread.Mutex = .{},
cond: std.Thread.Condition = .{},

pub const SampleBuffer = std.fifo.LinearFifo(f32, .{ .Static = 4096 });

pub fn init(audio: *@This()) !void {
    audio.ctx = try sysaudio.Context.init(null, audio.allocator, .{});
    try audio.ctx.refresh();

    const device = audio.ctx.defaultDevice(.playback) orelse return error.NoDeviceFound;
    audio.player = try audio.ctx.createPlayer(device, writeFn, .{ .user_data = audio });
    try audio.player.start();
}

pub fn deinit(audio: *Mod) void {
    audio.state().player.deinit();
    audio.state().ctx.deinit();

    var iter = audio.entities.entities.valueIterator();
    while (iter.next()) |*entity| {
        entity.samples.deinit(audio.state().allocator);
    }
}

pub fn render(audio: *Mod) !void {
    // Prepare the next 30ms of audio by querying entities and mixing the samples they want to play.
    // 48_000 * 0.03 = 1440 = 30ms
    var mixing_buffer: [1440]f32 = undefined;
    var max_samples: usize = 0;

    var iter = audio.entities.query(.{ .all = &.{.{ .mach_audio = &.{ .samples, .playing, .index } }} });
    while (iter.next()) |archetype| for (
        archetype.slice(.entity, .id),
        archetype.slice(.mach_audio, .samples),
        archetype.slice(.mach_audio, .playing),
        archetype.slice(.mach_audio, .index),
    ) |id, samples, playing, index| {
        if (!playing) continue;

        const to_read = @min(samples.len - index, mixing_buffer.len);
        mixSamples(mixing_buffer[0..to_read], samples[index..][0..to_read]);
        max_samples = @max(max_samples, to_read);

        if (index + to_read >= samples.len) {
            try audio.set(id, .playing, false);
            try audio.set(id, .index, 0);
            continue;
        }

        try audio.set(id, .index, index + to_read);
    };

    audio.state().mutex.lock();
    defer audio.state().mutex.unlock();
    while (audio.state().buffer.writableLength() < max_samples) {
        audio.state().cond.wait(&audio.state().mutex);
    }
    audio.state().buffer.writeAssumeCapacity(mixing_buffer[0..max_samples]);
}

fn writeFn(audio_opaque: ?*anyopaque, output: []u8) void {
    const audio: *@This() = @ptrCast(@alignCast(audio_opaque));

    // Clear buffer from previous samples
    @memset(output, 0);

    const total_samples = @divExact(output.len, audio.player.format().size());

    var i: usize = 0;
    while (i < total_samples) {
        audio.mutex.lock();
        defer audio.mutex.unlock();

        const read_slice = audio.buffer.readableSlice(0);
        const read_len = @min(read_slice.len, total_samples - i);

        if (read_len == 0) return;

        sysaudio.convertTo(
            f32,
            read_slice[0..read_len],
            audio.player.format(),
            output[i * @sizeOf(f32) ..][0 .. read_len * @sizeOf(f32)],
        );

        i += read_len;
        audio.buffer.discard(read_len);

        audio.cond.signal();
    }
}

// TODO: what's this weird behavior in ReleaseFast/Small?
const vector_length = switch (builtin.mode) {
    .Debug, .ReleaseSafe => std.simd.suggestVectorLength(f32),
    else => null,
};

inline fn mixSamples(a: []f32, b: []const f32) void {
    std.debug.assert(a.len >= b.len);

    var i: usize = 0;

    // use SIMD when available
    if (vector_length) |vec_len| {
        const Vec = @Vector(vec_len, f32);
        const vec_blocks_len = b.len - (b.len % vec_len);

        while (i < vec_blocks_len) : (i += vec_len) {
            const b_vec: Vec = b[i..][0..vec_len].*;
            a[i..][0..vec_len].* += b_vec;
        }
    }

    if (i < b.len) {
        for (a[i..b.len], b[i..]) |*a_sample, b_sample| {
            a_sample.* += b_sample;
        }
    }
}
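mixSamples above is plain additive mixing: each playing entity's samples are summed into the shared mixing buffer, using a SIMD vector width when the build suggests one and a scalar loop for the remainder. Below is a minimal standalone sketch of the same technique (the mixInto name and test values are illustrative, not part of the module):

const std = @import("std");

// Add b into a, SIMD-accelerated when the target suggests a vector width.
const vec_len_hint = std.simd.suggestVectorLength(f32);

fn mixInto(a: []f32, b: []const f32) void {
    std.debug.assert(a.len >= b.len);
    var i: usize = 0;
    if (vec_len_hint) |vec_len| {
        const Vec = @Vector(vec_len, f32);
        const vec_blocks_len = b.len - (b.len % vec_len);
        while (i < vec_blocks_len) : (i += vec_len) {
            const b_vec: Vec = b[i..][0..vec_len].*;
            a[i..][0..vec_len].* += b_vec;
        }
    }
    // Scalar tail (and the whole loop when no SIMD width is suggested).
    for (a[i..b.len], b[i..]) |*a_sample, b_sample| {
        a_sample.* += b_sample;
    }
}

test "additive mixing" {
    var mix = [_]f32{ 0.1, 0.2, 0.3, 0.4 };
    const tone = [_]f32{ 0.5, 0.5, 0.5 };
    mixInto(&mix, &tone);
    try std.testing.expectApproxEqAbs(@as(f32, 0.6), mix[0], 0.0001);
    try std.testing.expectApproxEqAbs(@as(f32, 0.4), mix[3], 0.0001); // untouched past tone.len
}

Because tones are simply summed, overlapping notes can push the mix past [-1.0, 1.0]; that is presumably why each generated tone in this commit uses a gain of 0.1.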
@@ -12,6 +12,7 @@ pub const Core = if (build_options.want_core) @import("Core.zig") else struct {}
 // gamemode requires libc on linux
 pub const gamemode = if (builtin.os.tag != .linux or builtin.link_libc) @import("gamemode.zig");
 pub const gfx = if (build_options.want_mach) @import("gfx/main.zig") else struct {};
+pub const Audio = if (build_options.want_sysaudio) @import("Audio.zig") else struct {};
 pub const math = @import("math/main.zig");
 pub const testing = @import("testing.zig");