Compare commits


23 Commits

Author SHA1 Message Date
Ben Niemann f94fb2fcd8 Disable mypy checking of profutil. 3 years ago
Ben Niemann 70c06f6903 Support FLAC 3 years ago
Ben Niemann d464ba0dd6 Generate test files in various formats during build. 3 years ago
Ben Niemann ee36b59342 Delete samples via context menu.
Also rename stuff (segment vs. sample, import vs. add)
3 years ago
Ben Niemann ef04c76e29 Fix crash when the last sample was removed while it was played. 3 years ago
Ben Niemann f007ab8234 Use QImage instead of QPixmap to render tiles. 3 years ago
Ben Niemann 1e8be00df2 Improve profiler tooling and minor optimization of sample rendering. 3 years ago
Ben Niemann 0607f9b326 Split out an async load_sample() method, and improve test coverage. 3 years ago
Ben Niemann b4a771845f Make uitest event simulation async (instead of calling QApplication::processEvents) 3 years ago
Ben Niemann eac4abc590 Print source_traceback for better debugging of asyncio issues. 3 years ago
Ben Niemann ec44ef29a3 Fix initial properties of TrackEditors. 3 years ago
Ben Niemann 583b3d7910 Use mutagen for file metadata and support AAC files. 3 years ago
Ben Niemann 98f34ea4b1 Support importing MP3 files. 3 years ago
Ben Niemann 105275d5cf Remember dialog settings. 3 years ago
Ben Niemann 9eccdc5d01 Fix moving samples around. 3 years ago
Ben Niemann f5a19601f9 Stereo samples only played back one channel. 3 years ago
Ben Niemann afa0bfb215 Rewrite sample rendering.
- Read sample data from raw files.
- Render multi channel samples.
- Asynchronous rendering.
3 years ago
Ben Niemann 13d444da0a Expose the TimeMapper's sample rate. 3 years ago
Ben Niemann 9ddcd1a336 Gracefully handle non-UTF8 stdout/stdin for subprocesses. 3 years ago
Ben Niemann e867916332 Fix bad key for raw audio files. 3 years ago
Ben Niemann 405aa62e67 Load samples from raw files in SampleTrack. 3 years ago
Ben Niemann f778ef25c9 A loaded AudioFile is always acquired. 3 years ago
Ben Niemann 8a293a0f12 Import samples as raw float arrays into project directory. 3 years ago

@ -0,0 +1,2 @@
from typing import Any
def __getattr__(arrr: str) -> Any: ...

@ -0,0 +1,88 @@
# -*- mode: python -*-
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
import os.path
import subprocess
import sys
from waflib.Configure import conf
from waflib.Task import Task
from waflib import Utils
def configure(ctx):
ctx.find_program('ffmpeg')
class ffmpeg_runner(Task):
def __init__(self, args, **kwargs):
super().__init__(**kwargs)
self.__args = args
def __str__(self):
return self.outputs[0].relpath()
def keyword(self):
return 'Generating'
def run(self):
ctx = self.generator.bld
cwd = ctx.srcnode
cmd = [
ctx.env.FFMPEG[0],
'-y', '-nostdin',
'-i', self.inputs[0].path_from(cwd)
]
cmd.extend(self.__args)
cmd.append(self.outputs[0].path_from(cwd))
kw = {
'cwd': cwd.abspath(),
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
}
ctx.log_command(cmd, kw)
rc, out, _ = Utils.run_process(cmd, kw)
if rc:
sys.stderr.write(out.decode('utf-8'))
return rc
@conf
def run_ffmpeg(ctx, target, source, args, install=None, install_to=None, chmod=0o644):
target = ctx.path.get_bld().make_node(target)
task = ffmpeg_runner(env=ctx.env, args=args)
task.set_inputs(ctx.path.find_resource(source))
task.set_outputs(target)
ctx.add_to_group(task)
if install is None:
install = ctx.in_group(ctx.GRP_BUILD_MAIN)
if install:
if install_to is None:
install_to = os.path.join(
ctx.env.DATADIR, target.parent.path_from(ctx.bldnode.make_node('data')))
ctx.install_files(install_to, target, chmod=chmod)
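
A hedged usage sketch of the new tool from a wscript build() function; the target/source names and codec arguments below are illustrative only, since the actual build targets are not part of this hunk:

def build(ctx):
    # Transcode a WAV fixture into other formats at build time
    # (paths and ffmpeg arguments are assumptions, not repo content).
    ctx.run_ffmpeg(
        target='testdata/future-thunder1.flac',
        source='testdata/future-thunder1.wav',
        args=['-f', 'flac'])
    ctx.run_ffmpeg(
        target='testdata/future-thunder1.mp3',
        source='testdata/future-thunder1.wav',
        args=['-codec:a', 'libmp3lame', '-qscale:a', '2'])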

@ -166,6 +166,7 @@ def configure(ctx):
pip_mgr.check_package(RUNTIME, 'toposort')
pip_mgr.check_package(RUNTIME, 'urwid')
pip_mgr.check_package(RUNTIME, 'fastjsonschema')
pip_mgr.check_package(RUNTIME, 'mutagen', version='1.44.0')
pip_mgr.check_package(BUILD, 'cssutils')
pip_mgr.check_package(BUILD, 'Cython', version='0.29.6')
pip_mgr.check_package(BUILD, 'Jinja2')
@ -180,6 +181,7 @@ def configure(ctx):
pip_mgr.check_package(DEV, 'pyfakefs')
pip_mgr.check_package(DEV, 'pylint', version='2.3.1')
pip_mgr.check_package(DEV, 'unittest-xml-reporting')
pip_mgr.check_package(DEV, 'pyprof2calltree')
# misc sys packages:
sys_mgr.check_package(RUNTIME, 'ffmpeg')

@ -61,7 +61,6 @@ Status ProcessorSoundFile::setup_internal() {
RETURN_IF_ERROR(stor_audio_file);
_audio_file = stor_audio_file.result();
_host_system->audio_file->acquire_audio_file(_audio_file);
_loop = false;
_playing = true;
_pos = 0;

@ -39,6 +39,8 @@ public:
Status setup();
void cleanup();
uint32_t sample_rate() const { return _sample_rate; }
void set_bpm(uint32_t bpm) { _bpm = bpm; }
uint32_t bpm() const { return _bpm; }

@ -35,6 +35,8 @@ cdef extern from "noisicaa/audioproc/public/time_mapper.h" namespace "noisicaa"
Status setup()
void cleanup()
uint32_t sample_rate() const
void set_bpm(uint32_t bpm)
uint32_t bpm() const

@ -31,6 +31,8 @@ class PyTimeMapper(object):
def setup(self, project: music.BaseProject = None) -> None: ...
def cleanup(self) -> None: ...
@property
def sample_rate(self) -> int: ...
@property
def end_time(self) -> PyMusicalTime: ...
@property
def num_samples(self) -> int: ...

@ -59,6 +59,10 @@ cdef class PyTimeMapper(object):
def __on_duration_changed(self, change):
self.duration = change.new_value
@property
def sample_rate(self):
return int(self.__tmap.sample_rate())
@property
def bpm(self):
return int(self.__tmap.bpm())
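
For reference, a minimal sketch of how the newly exposed sample_rate property slots into the existing PyTimeMapper usage pattern; the constructor, setup/cleanup and conversion calls are taken from the render_sample() code removed later in this changeset, while project and begin_time are assumed to be in scope:

tmap = audioproc.TimeMapper(44100)
try:
    tmap.setup(project)
    assert tmap.sample_rate == 44100  # now readable from Python
    begin_samplepos = tmap.musical_to_sample_time(begin_time)
finally:
    tmap.cleanup()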

@ -19,6 +19,7 @@
# @end:license
import enum
import numpy
import types
from typing import Type
@ -101,3 +102,4 @@ class SndFile(object):
@property
def encoding(self) -> Encoding: ...
def get_samples(self) -> memoryview: ...
def read_samples(self, num_samples: int) -> numpy.ndarray: ...

@ -511,3 +511,10 @@ cdef class SndFile(object):
if items_read != num_items:
raise Error("Failed to read all items (%d < %d)" % (items_read, num_items))
return buf
def read_samples(self, num_samples):
cdef numpy.ndarray[float, ndim=2, mode="c"] buf = numpy.ndarray(
shape=(num_samples, self.num_channels), dtype=numpy.float32, order='C')
samples_read = sf_readf_float(self._sf, &buf[0,0], num_samples)
buf = buf[:samples_read]
return buf
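
A short, hedged sketch of the new chunked read API; the file path and chunk size are illustrative, and SndFile(path), close() and num_samples are used the same way by the SndFileReader added in model.py further down:

sf = sndfile.SndFile('/path/to/audio.wav')
try:
    total = 0
    while True:
        chunk = sf.read_samples(10240)  # float32 ndarray, shape (n, num_channels)
        if len(chunk) == 0:
            break
        total += len(chunk)
    assert total == sf.num_samples
finally:
    sf.close()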

@ -174,8 +174,6 @@ Status ProcessorMetronome::set_spec(const pb::MetronomeSpec& spec) {
spec.sample_path());
RETURN_IF_ERROR(stor_audio_file);
_host_system->audio_file->acquire_audio_file(stor_audio_file.result());
// Create the new spec. If anything fails from here on, the audio file must be released!
unique_ptr<Spec> new_spec(new Spec());
new_spec->audio_file = stor_audio_file.result();

@ -52,21 +52,21 @@ class PianoRollTrackEditorTest(track_editor_tests.TrackEditorItemTestMixin, uite
context=self.context,
**kwargs)
def test_segments_changed(self):
async def test_segments_changed(self):
with self._trackItem():
with self.project.apply_mutations('test'):
seg = self.track.create_segment(MT(3, 4), MD(2, 4))
self.processQtEvents()
await self.processQtEvents()
self.renderWidget()
with self.project.apply_mutations('test'):
self.track.remove_segment(seg)
self.processQtEvents()
await self.processQtEvents()
self.renderWidget()
def test_events_changed(self):
async def test_events_changed(self):
with self.project.apply_mutations('test'):
seg = self.track.create_segment(MT(0, 4), MD(4, 4))
seg.segment.add_event(MEVT(MT(0, 4), NOTE_ON(0, 60, 100)))
@ -77,17 +77,17 @@ class PianoRollTrackEditorTest(track_editor_tests.TrackEditorItemTestMixin, uite
seg.segment.add_event(MEVT(MT(1, 4), NOTE_ON(0, 61, 100)))
seg.segment.add_event(MEVT(MT(2, 4), NOTE_OFF(0, 61)))
self.processQtEvents()
await self.processQtEvents()
self.renderWidget()
with self.project.apply_mutations('test'):
while len(seg.segment.events) > 0:
seg.segment.remove_event(seg.segment.events[0])
self.processQtEvents()
await self.processQtEvents()
self.renderWidget()
def test_events_edited(self):
async def test_events_edited(self):
with self.project.apply_mutations('test'):
seg = self.track.create_segment(MT(0, 4), MD(4, 4))
seg.segment.add_event(MEVT(MT(0, 4), NOTE_ON(0, 60, 100)))
@ -102,23 +102,23 @@ class PianoRollTrackEditorTest(track_editor_tests.TrackEditorItemTestMixin, uite
with grid.collect_mutations():
grid.addEvent(MEVT(MT(1, 4), NOTE_ON(0, 61, 100)))
grid.addEvent(MEVT(MT(2, 4), NOTE_OFF(0, 61)))
self.processQtEvents()
await self.processQtEvents()
self.renderWidget()
self.assertEqual(len(seg.segment.events), 4)
def test_scroll(self):
async def test_scroll(self):
with self.project.apply_mutations('test'):
self.track.create_segment(MT(0, 4), MD(2, 4))
with self._trackItem() as ti:
yoff = ti.yOffset()
self.scrollWheel(-1)
await self.scrollWheel(-1)
self.assertGreater(ti.yOffset(), yoff)
self.scrollWheel(1)
await self.scrollWheel(1)
self.assertEqual(ti.yOffset(), yoff)
def test_playback_pos(self):
async def test_playback_pos(self):
with self.project.apply_mutations('test'):
self.track.create_segment(MT(0, 4), MD(2, 4))
self.track.create_segment(MT(3, 4), MD(2, 4))
@ -128,16 +128,16 @@ class PianoRollTrackEditorTest(track_editor_tests.TrackEditorItemTestMixin, uite
t = MT(0, 1)
while t < MT(8, 4):
ti.setPlaybackPosition(t)
self.processQtEvents()
await self.processQtEvents()
t += MD(1, 32)
def test_change_row_height(self):
async def test_change_row_height(self):
with self.project.apply_mutations('test'):
self.track.create_segment(MT(0, 4), MD(4, 4))
with self._trackItem() as ti:
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(2, 4)), ti.height() // 2))
menu = self.openContextMenu()
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(2, 4)), ti.height() // 2))
menu = await self.openContextMenu()
incr_button = menu.findChild(QtWidgets.QAbstractButton, 'incr-row-height')
assert incr_button is not None
decr_button = menu.findChild(QtWidgets.QAbstractButton, 'decr-row-height')
@ -149,70 +149,70 @@ class PianoRollTrackEditorTest(track_editor_tests.TrackEditorItemTestMixin, uite
decr_button.click()
self.assertEqual(ti.gridYSize(), h)
def test_move_segment(self):
async def test_move_segment(self):
with self.project.apply_mutations('test'):
seg = self.track.create_segment(MT(0, 4), MD(4, 4))
with self._trackItem() as ti:
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(1, 4)), ti.height() // 2))
self.pressMouseButton(Qt.LeftButton)
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(3, 4)), ti.height() // 2))
self.releaseMouseButton(Qt.LeftButton)
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(1, 4)), ti.height() // 2))
await self.pressMouseButton(Qt.LeftButton)
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(3, 4)), ti.height() // 2))
await self.releaseMouseButton(Qt.LeftButton)
self.assertEqual(seg.time, MT(2, 4))
def test_resize_segment(self):
async def test_resize_segment(self):
with self.project.apply_mutations('test'):
seg = self.track.create_segment(MT(0, 4), MD(4, 4))
with self._trackItem() as ti:
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(4, 4)), ti.height() // 2))
self.pressMouseButton(Qt.LeftButton)
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(5, 4)), ti.height() // 2))
self.releaseMouseButton(Qt.LeftButton)
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(4, 4)), ti.height() // 2))
await self.pressMouseButton(Qt.LeftButton)
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(5, 4)), ti.height() // 2))
await self.releaseMouseButton(Qt.LeftButton)
self.assertEqual(seg.time, MT(0, 4))
self.assertEqual(seg.segment.duration, MD(5, 4))
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(0, 4)), ti.height() // 2))
self.pressMouseButton(Qt.LeftButton)
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(2, 4)), ti.height() // 2))
self.releaseMouseButton(Qt.LeftButton)
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(0, 4)), ti.height() // 2))
await self.pressMouseButton(Qt.LeftButton)
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(2, 4)), ti.height() // 2))
await self.releaseMouseButton(Qt.LeftButton)
self.assertEqual(seg.time, MT(2, 4))
self.assertEqual(seg.segment.duration, MD(3, 4))
def test_add_segment(self):
async def test_add_segment(self):
assert len(self.track.segments) == 0
with self._trackItem() as ti:
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(2, 4)), ti.height() // 2))
menu = self.openContextMenu()
self.triggerMenuAction(menu, 'add-segment')
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(2, 4)), ti.height() // 2))
menu = await self.openContextMenu()
await self.triggerMenuAction(menu, 'add-segment')
self.assertEqual(len(self.track.segments), 1)
self.assertEqual(self.track.segments[0].time, MT(2, 4))
def test_delete_segment(self):
async def test_delete_segment(self):
with self.project.apply_mutations('test'):
self.track.create_segment(MT(0, 4), MD(4, 4))
with self._trackItem() as ti:
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(2, 4)), ti.height() // 2))
menu = self.openContextMenu()
self.triggerMenuAction(menu, 'delete-segment')
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(2, 4)), ti.height() // 2))
menu = await self.openContextMenu()
await self.triggerMenuAction(menu, 'delete-segment')
self.assertEqual(len(self.track.segments), 0)
def test_split_segment(self):
async def test_split_segment(self):
with self.project.apply_mutations('test'):
self.track.create_segment(MT(0, 4), MD(4, 4))
with self._trackItem() as ti:
ti.setPlaybackPosition(MT(3, 4))
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(3, 4)), ti.height() // 2))
menu = self.openContextMenu()
self.triggerMenuAction(menu, 'split-segment')
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(3, 4)), ti.height() // 2))
menu = await self.openContextMenu()
await self.triggerMenuAction(menu, 'split-segment')
self.assertEqual(len(self.track.segments), 2)
self.assertEqual(self.track.segments[0].time, MT(0, 4))
@ -220,7 +220,7 @@ class PianoRollTrackEditorTest(track_editor_tests.TrackEditorItemTestMixin, uite
self.assertEqual(self.track.segments[1].time, MT(3, 4))
self.assertEqual(self.track.segments[1].segment.duration, MD(1, 4))
def test_select_segments(self):
async def test_select_segments(self):
with self.project.apply_mutations('test'):
ref1 = self.track.create_segment(MT(0, 4), MD(4, 4))
ref2 = self.track.create_segment(MT(6, 4), MD(4, 4))
@ -231,51 +231,51 @@ class PianoRollTrackEditorTest(track_editor_tests.TrackEditorItemTestMixin, uite
selected = lambda: {segment.segmentRef().id for segment in ti.selection()}
self.assertEqual(selected(), set())
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(2, 4)), ti.height() // 2))
self.clickMouseButton(Qt.LeftButton)
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(2, 4)), ti.height() // 2))
await self.clickMouseButton(Qt.LeftButton)
self.assertEqual(selected(), {ref1.id})
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(8, 4)), ti.height() // 2))
self.clickMouseButton(Qt.LeftButton)
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(8, 4)), ti.height() // 2))
await self.clickMouseButton(Qt.LeftButton)
self.assertEqual(selected(), {ref2.id})
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(14, 4)), ti.height() // 2))
self.pressKey(Qt.Key_Control)
self.clickMouseButton(Qt.LeftButton)
self.releaseKey(Qt.Key_Control)
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(14, 4)), ti.height() // 2))
await self.pressKey(Qt.Key_Control)
await self.clickMouseButton(Qt.LeftButton)
await self.releaseKey(Qt.Key_Control)
self.assertEqual(selected(), {ref2.id, ref3.id})
self.pressKey(Qt.Key_Control)
self.clickMouseButton(Qt.LeftButton)
self.releaseKey(Qt.Key_Control)
await self.pressKey(Qt.Key_Control)
await self.clickMouseButton(Qt.LeftButton)
await self.releaseKey(Qt.Key_Control)
self.assertEqual(selected(), {ref2.id})
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(11, 4)), ti.height() // 2))
self.clickMouseButton(Qt.LeftButton)
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(11, 4)), ti.height() // 2))
await self.clickMouseButton(Qt.LeftButton)
self.assertEqual(selected(), set())
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(8, 4)), ti.height() // 2))
self.clickMouseButton(Qt.LeftButton)
self.moveMouse(QtCore.QPoint(ti.timeToX(MT(20, 4)), ti.height() // 2))
self.pressKey(Qt.Key_Shift)
self.clickMouseButton(Qt.LeftButton)
self.releaseKey(Qt.Key_Shift)
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(8, 4)), ti.height() // 2))
await self.clickMouseButton(Qt.LeftButton)
await self.moveMouse(QtCore.QPoint(ti.timeToX(MT(20, 4)), ti.height() // 2))
await self.pressKey(Qt.Key_Shift)
await self.clickMouseButton(Qt.LeftButton)
await self.releaseKey(Qt.Key_Shift)
self.assertEqual(selected(), {ref2.id, ref3.id, ref4.id})
def test_select_all_segment(self):
async def test_select_all_segment(self):
with self.project.apply_mutations('test'):
ref1 = self.track.create_segment(MT(0, 4), MD(4, 4))
ref2 = self.track.create_segment(MT(6, 4), MD(4, 4))
with self._trackItem() as ti:
menu = self.openContextMenu()
self.triggerMenuAction(menu, 'select-all')
menu = await self.openContextMenu()
await self.triggerMenuAction(menu, 'select-all')
self.assertEqual(
{segment.segmentRef().id for segment in ti.selection()},
{ref1.id, ref2.id})
def test_clear_selection_segment(self):
async def test_clear_selection_segment(self):
with self.project.apply_mutations('test'):
self.track.create_segment(MT(0, 4), MD(4, 4))
self.track.create_segment(MT(6, 4), MD(4, 4))
@ -283,7 +283,7 @@ class PianoRollTrackEditorTest(track_editor_tests.TrackEditorItemTestMixin, uite
with self._trackItem() as ti:
ti.addToSelection(ti.segments[0])
menu = self.openContextMenu()
self.triggerMenuAction(menu, 'clear-selection')
menu = await self.openContextMenu()
await self.triggerMenuAction(menu, 'clear-selection')
self.assertEqual(len(ti.selection()), 0)
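
All of the awaited helpers above (processQtEvents, moveMouse, openContextMenu, scrollWheel, ...) come from the uitest mixins, which this changeset turns into coroutines instead of synchronous QApplication.processEvents() calls. The mixin itself is not part of the hunks shown here; the following is only a rough sketch of the assumed shape:

import asyncio

class UITestEventsMixin:
    async def processQtEvents(self) -> None:
        # Yield to the asyncio/Qt event-loop integration so queued Qt events
        # get dispatched, rather than draining them synchronously via
        # QApplication.processEvents().
        await asyncio.sleep(0)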

@ -1,37 +0,0 @@
/*
* @begin:license
*
* Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* @end:license
*/
syntax = "proto2";
import "noisicaa/value_types/value_types.proto";
package noisicaa.pb;
message RenderSampleRequest {
required uint64 sample_id = 1;
optional Fraction scale_x = 2;
}
message RenderSampleResponse {
optional bool broken = 1;
repeated float rms = 2;
}

@ -20,21 +20,26 @@
#
# @end:license
import fractions
import asyncio
import base64
import contextlib
import logging
import random
from typing import Any, Dict, Optional, Callable
import os
import os.path
import subprocess
import time as time_lib
from typing import Any, Optional, List, Callable, Iterator
import mutagen
import numpy
from noisicaa.core.typing_extra import down_cast
from noisicaa import audioproc
from noisicaa import music
from noisicaa import core
from noisicaa import node_db
from noisicaa.bindings import sndfile
from noisicaa.music import node_connector
from noisicaa.music import rms
from noisicaa.music import samples as samples_lib
from . import ipc_pb2
from . import processor_messages
from . import node_description
from . import _model
@ -42,50 +47,6 @@ from . import _model
logger = logging.getLogger(__name__)
async def render_sample(
sample_ref: 'SampleRef',
scale_x: fractions.Fraction,
) -> ipc_pb2.RenderSampleResponse:
response = ipc_pb2.RenderSampleResponse()
sample = down_cast(samples_lib.Sample, sample_ref.sample)
try:
smpls = sample.samples
except sndfile.Error:
response.broken = True
return response
smpls = sample.samples[..., 0] # type: ignore
tmap = audioproc.TimeMapper(44100)
try:
tmap.setup(sample.project)
begin_time = sample_ref.time
begin_samplepos = tmap.musical_to_sample_time(begin_time)
num_samples = min(tmap.num_samples - begin_samplepos, len(smpls))
end_samplepos = begin_samplepos + num_samples
end_time = tmap.sample_to_musical_time(end_samplepos)
finally:
tmap.cleanup()
width = int(scale_x * (end_time - begin_time).fraction)
if width < num_samples / 10:
for p in range(0, width):
p_start = p * num_samples // width
p_end = (p + 1) * num_samples // width
s = smpls[p_start:p_end]
response.rms.append(rms.rms(s))
else:
response.broken = True
return response
class SampleTrackConnector(node_connector.NodeConnector):
_node = None # type: SampleTrack
@ -95,7 +56,6 @@ class SampleTrackConnector(node_connector.NodeConnector):
self.__node_id = self._node.pipeline_node_id
self.__listeners = core.ListenerMap[str]()
self.add_cleanup_function(self.__listeners.cleanup)
self.__sample_ids = {} # type: Dict[int, int]
def _init_internal(self) -> None:
for sample_ref in self._node.samples:
@ -115,13 +75,15 @@ class SampleTrackConnector(node_connector.NodeConnector):
raise TypeError("Unsupported change type %s" % type(change))
def __add_sample(self, sample_ref: 'SampleRef') -> None:
sample_id = self.__sample_ids[sample_ref.id] = random.getrandbits(64)
self._emit_message(processor_messages.add_sample(
node_id=self.__node_id,
id=sample_id,
id=sample_ref.id,
time=sample_ref.time,
sample_path=sample_ref.sample.path))
sample_rate=sample_ref.sample.sample_rate,
num_samples=sample_ref.sample.num_samples,
channel_paths=[
os.path.join(self._node.project.data_dir, channel.raw_path)
for channel in sample_ref.sample.channels]))
self.__listeners['cp:%s:time' % sample_ref.id] = sample_ref.time_changed.add(
lambda _: self.__sample_changed(sample_ref))
@ -130,26 +92,26 @@ class SampleTrackConnector(node_connector.NodeConnector):
lambda _: self.__sample_changed(sample_ref))
def __remove_sample(self, sample_ref: 'SampleRef') -> None:
sample_id = self.__sample_ids[sample_ref.id]
self._emit_message(processor_messages.remove_sample(
node_id=self.__node_id,
id=sample_id))
id=sample_ref.id))
del self.__listeners['cp:%s:time' % sample_ref.id]
del self.__listeners['cp:%s:sample' % sample_ref.id]
def __sample_changed(self, sample_ref: 'SampleRef') -> None:
sample_id = self.__sample_ids[sample_ref.id]
self._emit_message(processor_messages.remove_sample(
node_id=self.__node_id,
id=sample_id))
id=sample_ref.id))
self._emit_message(processor_messages.add_sample(
node_id=self.__node_id,
id=sample_id,
id=sample_ref.id,
time=sample_ref.time,
sample_path=sample_ref.sample.path))
sample_rate=sample_ref.sample.sample_rate,
num_samples=sample_ref.sample.num_samples,
channel_paths=[
os.path.join(self._node.project.data_dir, channel.raw_path)
for channel in sample_ref.sample.channels]))
class SampleRef(_model.SampleRef):
@ -164,6 +126,116 @@ class SampleRef(_model.SampleRef):
self.sample = sample
class SampleLoadError(Exception):
pass
class SampleReader(object):
def __init__(self) -> None:
self.sample_rate = None # type: int
self.num_samples = None # type: int
self.num_channels = None # type: int
def close(self) -> None:
pass
def read_samples(self, count: int) -> numpy.ndarray:
raise NotImplementedError
class SndFileReader(SampleReader):
mime_types = {
'audio/x-wav',
'audio/x-flac',
}
def __init__(self, path: str) -> None:
super().__init__()
try:
self.__sf = sndfile.SndFile(path)
except sndfile.Error as exc:
raise SampleLoadError(str(exc)) from None
self.sample_rate = self.__sf.sample_rate
self.num_samples = self.__sf.num_samples
self.num_channels = self.__sf.num_channels
def close(self) -> None:
self.__sf.close()
def read_samples(self, count: int) -> numpy.ndarray:
return self.__sf.read_samples(count)
class FFMpegReader(SampleReader):
mime_types = {
'audio/mpeg',
'audio/x-hx-aac-adts',
}
def __init__(self, path: str) -> None:
super().__init__()
info = mutagen.File(path).info
self.sample_rate = info.sample_rate
self.num_samples = int(info.length * info.sample_rate)
self.num_channels = info.channels
cmd = ['/usr/bin/ffmpeg', '-nostdin', '-y', '-i', path, '-f', 'f32le', '-']
self.__proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
def close(self) -> None:
self.__proc.kill()
self.__proc.wait()
def read_samples(self, count: int) -> numpy.ndarray:
buf = self.__proc.stdout.read(4 * count * self.num_channels)
if not buf:
self.__proc.wait()
assert self.__proc.returncode == 0, self.__proc.returncode
samples = numpy.frombuffer(buf, dtype=numpy.float32)
count = len(samples) // self.num_channels
samples = samples.reshape(count, self.num_channels)
return samples
@contextlib.contextmanager
def open_sample(path: str) -> Iterator[SampleReader]:
mtype = subprocess.check_output(
['/usr/bin/file', '--mime-type', '--brief', path]).decode('ascii').strip()
reader = None # type: SampleReader
if mtype in SndFileReader.mime_types:
reader = SndFileReader(path)
elif mtype in FFMpegReader.mime_types:
reader = FFMpegReader(path)
else:
raise SampleLoadError("Unsupported file type '%s'" % mtype)
try:
yield reader
finally:
reader.close()
class LoadedSample(object):
def __init__(self, data_dir: str) -> None:
self.__data_dir = data_dir
self.path = None # type: str
self.raw_paths = None # type: List[str]
self.sample_rate = None # type: int
self.num_samples = None # type: int
def discard(self) -> None:
for raw_path in self.raw_paths:
raw_path = os.path.join(self.__data_dir, raw_path)
if os.path.exists(raw_path):
os.unlink(raw_path)
class SampleTrack(_model.SampleTrack):
def create_node_connector(
self, message_cb: Callable[[audioproc.ProcessorMessage], None],
@ -176,8 +248,83 @@ class SampleTrack(_model.SampleTrack):
def description(self) -> node_db.NodeDescription:
return node_description.SampleTrackDescription
def create_sample(self, time: audioproc.MusicalTime, path: str) -> SampleRef:
smpl = self._pool.create(samples_lib.Sample, path=path)
async def load_sample(
self,
path: str,
event_loop: asyncio.AbstractEventLoop,
progress_cb: Callable[[float], None] = None,
) -> LoadedSample:
smpl = LoadedSample(self.project.data_dir)
smpl.path = path
sample_name_base = base64.b32encode(os.urandom(15)).decode('ascii')
sample_path_base = os.path.join('samples', sample_name_base)
os.makedirs(
os.path.dirname(os.path.join(self.project.data_dir, sample_path_base)),
exist_ok=True)
logger.info("Importing sample from '%s' as '%s'...", path, sample_name_base)
t0 = time_lib.time()
next_progress = t0 + 0.5
with open_sample(path) as reader:
logger.info("Sample rate: %d", reader.sample_rate)
logger.info("Num samples: approx. %d", reader.num_samples)
logger.info("Num channels: %d", reader.num_channels)
smpl.sample_rate = reader.sample_rate
smpl.raw_paths = [
sample_path_base + '-ch%02d.raw' % ch
for ch in range(reader.num_channels)]
raw_fps = []
try:
for raw_path in smpl.raw_paths:
raw_path = os.path.join(self.project.data_dir, raw_path)
raw_fps.append(open(raw_path, 'wb'))
smpl.num_samples = 0
while True:
data = reader.read_samples(10240)
if len(data) == 0:
break
smpl.num_samples += len(data)
data = data.transpose()
assert len(data) == len(raw_fps), (len(data), len(raw_fps))
for fp, samples in zip(raw_fps, data):
fp.write(samples.tobytes('C'))
if progress_cb is not None and time_lib.time() >= next_progress:
progress_cb(min(1.0, float(smpl.num_samples) / reader.num_samples))
next_progress = time_lib.time() + 0.1
await asyncio.sleep(0, loop=event_loop)
except:
smpl.discard()
raise
finally:
for fp in raw_fps:
fp.close()
logger.info("Sample imported in %.3fsec", time_lib.time() - t0)
return smpl
def create_sample(
self,
time: audioproc.MusicalTime,
loaded_sample: LoadedSample,
) -> SampleRef:
smpl = self._pool.create(
samples_lib.Sample,
path=loaded_sample.path,
sample_rate=loaded_sample.sample_rate,
num_samples=loaded_sample.num_samples)
for raw_path in loaded_sample.raw_paths:
smpl_channel = self._pool.create(samples_lib.SampleChannel, raw_path=raw_path)
smpl.channels.append(smpl_channel)
self.project.samples.append(smpl)
smpl_ref = self._pool.create(
@ -185,6 +332,7 @@ class SampleTrack(_model.SampleTrack):
time=time,
sample=smpl)
self.samples.append(smpl_ref)
return smpl_ref
def delete_sample(self, smpl_ref: SampleRef) -> None:
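
Taken together, importing a sample is now a two-step flow: an awaitable load_sample() that decodes the file into per-channel raw float32 files under the project's data directory, followed by create_sample() inside a mutation. A condensed sketch mirroring the tests below (the path and event loop are placeholders):

loaded = await track.load_sample('/path/to/file.mp3', event_loop)
with project.apply_mutations('import sample'):
    smpl_ref = track.create_sample(audioproc.MusicalTime(1, 4), loaded)
assert smpl_ref.sample.num_samples == loaded.num_samples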

@ -20,7 +20,6 @@
#
# @end:license
import fractions
import os.path
from typing import List
@ -45,10 +44,14 @@ class SampleTrackConnectorTest(unittest_mixins.NodeDBMixin, unittest.AsyncTestCa
self.sample1 = self.pool.create(
samples.Sample,
sample_rate=44100,
num_samples=12344,
path=os.path.join(unittest.TESTDATA_DIR, 'future-thunder1.wav'))
self.project.samples.append(self.sample1)
self.sample2 = self.pool.create(
samples.Sample,
sample_rate=44100,
num_samples=12344,
path=os.path.join(unittest.TESTDATA_DIR, 'kick-gettinglaid.wav'))
self.project.samples.append(self.sample2)
@ -145,33 +148,59 @@ class SampleTrackTest(base_track_test.TrackTestMixin, unittest.AsyncTestCase):
node_uri = 'builtin://sample-track'
track_cls = model.SampleTrack
async def test_load_sample_wav(self):
path = os.path.join(unittest.TESTDATA_DIR, 'future-thunder1.wav')
track = await self._add_track()
loaded_sample = await track.load_sample(path, self.loop)
self.assertEqual(loaded_sample.path, path)
self.assertEqual(loaded_sample.num_samples, 126208)
self.assertEqual(loaded_sample.sample_rate, 44100)
self.assertEqual(len(loaded_sample.raw_paths), 2)
async def test_load_sample_flac(self):
path = os.path.join(unittest.TESTDATA_DIR, 'future-thunder1.flac')
track = await self._add_track()
loaded_sample = await track.load_sample(path, self.loop)
self.assertEqual(loaded_sample.path, path)
self.assertEqual(loaded_sample.num_samples, 126208)
self.assertEqual(loaded_sample.sample_rate, 44100)
self.assertEqual(len(loaded_sample.raw_paths), 2)
async def test_load_sample_mp3(self):
path = os.path.join(unittest.TESTDATA_DIR, 'future-thunder1.mp3')
track = await self._add_track()
loaded_sample = await track.load_sample(path, self.loop)
self.assertEqual(loaded_sample.path, path)
self.assertEqual(loaded_sample.num_samples, 126208)
self.assertEqual(loaded_sample.sample_rate, 44100)
self.assertEqual(len(loaded_sample.raw_paths), 2)
async def test_load_sample_aac(self):
path = os.path.join(unittest.TESTDATA_DIR, 'future-thunder1.aac')
track = await self._add_track()
loaded_sample = await track.load_sample(path, self.loop)
self.assertEqual(loaded_sample.path, path)
# The converted AAC apparently does not have the same length as the original WAV.
#self.assertEqual(loaded_sample.num_samples, 126208)
self.assertEqual(loaded_sample.sample_rate, 44100)
self.assertEqual(len(loaded_sample.raw_paths), 2)
async def test_create_sample(self):
track = await self._add_track()
loaded_sample = await track.load_sample(
os.path.join(unittest.TESTDATA_DIR, 'future-thunder1.wav'), self.loop)
with self.project.apply_mutations('test'):
track.create_sample(
audioproc.MusicalTime(1, 4),
os.path.join(unittest.TESTDATA_DIR, 'future-thunder1.wav'))
track.create_sample(audioproc.MusicalTime(1, 4), loaded_sample)
self.assertEqual(track.samples[0].time, audioproc.MusicalTime(1, 4))
async def test_delete_sample(self):
track = await self._add_track()
loaded_sample = await track.load_sample(
os.path.join(unittest.TESTDATA_DIR, 'future-thunder1.wav'), self.loop)
with self.project.apply_mutations('test'):
sample = track.create_sample(
audioproc.MusicalTime(1, 4),
os.path.join(unittest.TESTDATA_DIR, 'future-thunder1.wav'))
sample = track.create_sample(audioproc.MusicalTime(1, 4), loaded_sample)
with self.project.apply_mutations('test'):
track.delete_sample(sample)
self.assertEqual(len(track.samples), 0)
async def test_render_sample(self):
track = await self._add_track()
with self.project.apply_mutations('test'):
sample = track.create_sample(
audioproc.MusicalTime(1, 4),
os.path.join(unittest.TESTDATA_DIR, 'future-thunder1.wav'))
response = await model.render_sample(sample, fractions.Fraction(100, 1))
self.assertFalse(response.broken)
self.assertGreater(len(response.rms), 0)

@ -62,20 +62,22 @@ void SampleScript::apply_mutation(Logger* logger, pb::ProcessorMessage* msg) {
msg->GetExtension(pb::sample_script_add_sample);
StatusOr<AudioFile*> stor_audio_file =
_host_system->audio_file->load_audio_file(m.sample_path());
_host_system->audio_file->load_raw_file(
m.sample_rate(),
m.num_samples(),
{m.channel_paths().begin(), m.channel_paths().end()});
if (!stor_audio_file.is_error()) {
Sample sample;
sample.id = m.id();
sample.time = m.time();
sample.audio_file = stor_audio_file.result();
_host_system->audio_file->acquire_audio_file(sample.audio_file);
auto it = lower_bound(samples.begin(), samples.end(), sample, sample_comp);
samples.insert(it, sample);
} else {
_logger->warning(
"Failed to load audio file '%s': %s",
m.sample_path().c_str(), stor_audio_file.message());
m.channel_paths(0).c_str(), stor_audio_file.message());
}
} else if (msg->HasExtension(pb::sample_script_remove_sample)) {
const pb::SampleScriptRemoveSample& m =
@ -160,6 +162,7 @@ Status ProcessorSampleScript::process_block_internal(BlockContext* ctxt, TimeMap
// - Do a binary search to find the new script->offset.
script->offset = 0;
script->current_audio_file = nullptr;
while ((size_t)script->offset < script->samples.size()) {
const Sample& sample = script->samples[script->offset];
@ -176,7 +179,6 @@ Status ProcessorSampleScript::process_block_internal(BlockContext* ctxt, TimeMap
break;
} else if (sample.time >= stime->start_time) {
// We seeked into some empty space before an audio file.
script->current_audio_file = nullptr;
break;
}

@ -29,7 +29,10 @@ package noisicaa.pb;
message SampleScriptAddSample {
required uint64 id = 1;
required MusicalTime time = 2;
required string sample_path = 3;
required uint32 sample_rate = 3;
required uint32 num_samples = 4;
repeated string channel_paths = 5;
}
message SampleScriptRemoveSample {

@ -20,6 +20,8 @@
#
# @end:license
from typing import List
from noisicaa import audioproc
from noisicaa.builtin_nodes import processor_message_registry_pb2
@ -27,13 +29,17 @@ def add_sample(
node_id: str,
id: int, # pylint: disable=redefined-builtin
time: audioproc.MusicalTime,
sample_path: str
sample_rate: int,
num_samples: int,
channel_paths: List[str],
) -> audioproc.ProcessorMessage:
msg = audioproc.ProcessorMessage(node_id=node_id)
pb = msg.Extensions[processor_message_registry_pb2.sample_script_add_sample]
pb.id = id
pb.time.CopyFrom(time.to_proto())
pb.sample_path = sample_path
pb.sample_rate = sample_rate
pb.num_samples = num_samples
pb.channel_paths.extend(channel_paths)
return msg
def remove_sample(

@ -21,9 +21,11 @@
import math
import os
import os.path
import struct
from noisidev import unittest
from noisidev import unittest_processor_mixins
from noisicaa.constants import TEST_OPTS
from noisicaa.audioproc.public import musical_time
from . import processor_messages
@ -32,8 +34,17 @@ class ProcessorSampleScriptTest(
unittest_processor_mixins.ProcessorTestMixin,
unittest.TestCase):
def setup_testcase(self):
self.sample1_path = os.path.join(unittest.TESTDATA_DIR, 'future-thunder1.wav')
self.sample2_path = os.path.join(unittest.TESTDATA_DIR, 'kick-gettinglaid.wav')
self.sample1_path = os.path.join(TEST_OPTS.TMP_DIR, 'sample1.raw')
self.sample2_path = os.path.join(TEST_OPTS.TMP_DIR, 'sample2.raw')
self.sample_rate = 44100
self.num_samples = 1 * self.sample_rate
for freq, path in [(200, self.sample1_path), (300, self.sample2_path)]:
with open(path, 'wb') as fp:
f = freq / self.sample_rate * math.pi / 180
for n in range(self.num_samples):
fp.write(struct.pack('@f', math.sin(f * n)))
self.host_system.set_block_size(4096)
@ -50,7 +61,9 @@ class ProcessorSampleScriptTest(
node_id='123',
id=0x0001,
time=musical_time.PyMusicalTime(2048, 44100),
sample_path=self.sample1_path))
sample_rate=self.sample_rate,
num_samples=self.num_samples,
channel_paths=[self.sample1_path]))
self.process_block()
self.assertTrue(all(math.isclose(v, 0.0) for v in self.buffers['out:left'][:2048]))
@ -61,12 +74,16 @@ class ProcessorSampleScriptTest(
node_id='123',
id=0x0001,
time=musical_time.PyMusicalTime(1024, 44100),
sample_path=self.sample1_path))
sample_rate=self.sample_rate,
num_samples=self.num_samples,
channel_paths=[self.sample1_path]))
self.processor.handle_message(processor_messages.add_sample(
node_id='123',
id=0x0002,
time=musical_time.PyMusicalTime(3072, 44100),
sample_path=self.sample2_path))
sample_rate=self.sample_rate,
num_samples=self.num_samples,
channel_paths=[self.sample2_path]))
self.process_block()
self.assertTrue(all(math.isclose(v, 0.0) for v in self.buffers['out:left'][:1024]))
@ -77,12 +94,16 @@ class ProcessorSampleScriptTest(
node_id='123',
id=0x0001,
time=musical_time.PyMusicalTime(2048, 44100),
sample_path=self.sample1_path))
sample_rate=self.sample_rate,
num_samples=self.num_samples,
channel_paths=[self.sample1_path]))
self.processor.handle_message(processor_messages.add_sample(
node_id='123',
id=0x0002,
time=musical_time.PyMusicalTime(1024, 44100),
sample_path=self.sample2_path))
sample_rate=self.sample_rate,
num_samples=self.num_samples,
channel_paths=[self.sample2_path]))
self.processor.handle_message(processor_messages.remove_sample(
node_id='123',
id=0x0002))
@ -97,7 +118,9 @@ class ProcessorSampleScriptTest(
node_id='123',
id=0x0001,
time=musical_time.PyMusicalTime(0, 1),
sample_path=self.sample1_path))
sample_rate=self.sample_rate,
num_samples=self.num_samples,
channel_paths=[self.sample1_path]))
self.ctxt.clear_time_map(self.host_system.block_size)
it = self.time_mapper.find(musical_time.PyMusicalTime(1, 16))
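
The fixtures written in setup_testcase() use the same on-disk format as the per-channel files produced by SampleTrack.load_sample(): a bare stream of native float32 samples with no header. A quick sanity check for such a file might look like this (numpy usage here is illustrative, not part of the diff):

import numpy

# sample1_path as created above: one second of a sine at 44100 Hz
channel = numpy.fromfile(sample1_path, dtype=numpy.float32)
assert len(channel) == 44100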

@ -21,11 +21,19 @@
# @end:license
import asyncio
import concurrent.futures
import fractions
import functools
import logging
from typing import Any, List, Tuple, Sequence
import math
import mmap
import os.path
import random
import time as time_lib
import traceback
from typing import Any, BinaryIO, Dict, List, Tuple
import numpy
from PyQt5.QtCore import Qt
from PyQt5 import QtCore
from PyQt5 import QtGui
@ -38,7 +46