//std::audio example 1 "single process"
//std::audio example 1 "single process"
void example_1(){
    double sample_rate = 44100;
    std::size_t frame_size = 2;
    std::size_t buffer_size = 128;
    std::audio_context<float> ctx{sample_rate, buffer_size, frame_size};//construct from values
    //NOTE(fix): the lambda body uses ctx, so the lambda must capture it;
    //the original snippet had an empty capture list, which would not compile
    std::astream_process<float> proc(ctx, [&ctx](std::iastream const& input, std::oastream& output){
        std::frame_buffer<float>& buff = ctx.borrow_buffer();//borrow a buffer from the context for usage
        //prevents the need for dynamic allocation of a temporary buffer
        input >> buff;//stream data into buffer for manipulation
        for(auto&& frame : buff){
            frame = 0.0;//do something with audio
        }
        output << buff;//stream to output
    });//dsp object
    //uses implied routing equivalent to
    //std::aout<<proc<<std::ain;
    proc.start();
    //do other stuff
    proc.stop();
}
//std::audio example 2 "process group"
//std::audio example 2 "process group"
void example_2()
{
    std::audio_context<float> ctx;//default context created with std::default_* values
    //version 1: capture context via lambda
    std::astream_process<float> proc1(ctx, [&ctx](std::iastream const& input, std::oastream& output){
        std::frame_buffer<float>& buff = ctx.borrow_buffer();
        input >> buff;
        for(auto&& frame : buff){
            frame *= 0.5;//attenuate by half
        }
        output << buff;
    });//dsp object
    //version 2: have the context passed as an argument
    //NOTE(fix): the body must use the 'context' parameter; the original
    //referred to 'ctx', which this non-capturing lambda cannot see
    std::astream_process<float> proc2(ctx, [](std::iastream const& input, std::oastream& output, std::audio_context<float> const& context){
        std::frame_buffer<float>& buff = context.borrow_buffer();
        input >> buff;
        for(auto&& frame : buff){
            frame *= 2.0;//double the amplitude
        }
        output << buff;
    });
    std::process_group<float> pgroup;//a group of processes that will happen consecutively
    pgroup.push(proc1);//add to group
    pgroup.push(proc2);//add to group
    //configure stream relationships in terms of std::ain / std::aout manually
    //std::ain/std::aout are std::astream globals that refer to the default audio inputs and outputs supplied by the context in use
    //std::ain/std::aout will route the audio to the endpoint specified by the context reference held by the process that is streaming the data
    std::aout << proc1 << proc2 << std::ain;//method 1
    //std::ain>>proc2>>proc1>>std::aout;//method 2
    pgroup.start();
    //do other stuff
    pgroup.stop();
}
//std::audio example 3 "audio files"
//std::audio example 3 "audio files"
void example_3()
{
    std::audio_context<float> ctx;
    //NOTE(fix): capture ctx — the original lambda used it without capturing it
    std::astream_process<float> proc(ctx, [&ctx](std::iafstream const& input, std::oafstream& output){
        std::frame_buffer<float>& buff = ctx.borrow_buffer();
        input >> buff;
        for(auto&& frame : buff){
            frame = 0.0;//silence the signal (placeholder for real processing)
        }
        output << buff;
    });//dsp object
    std::iafstream audio_file1(ctx, "filename1.extension");//an audio file handle
    std::oafstream audio_file2(ctx, "filename2.extension");//an audio file handle
    //routing
    audio_file2 << proc << audio_file1;//take input from file and write to file
    //audio_file1>>proc>>audio_file2;//equivalent syntax
    proc.start();
    //do other stuff
    proc.stop();
}
//std::audio example 4 "combination routing"
//std::audio example 4 "combination routing"
//NOTE(fix): renamed from example_3 — the original redefined example_3
//(already declared above for example 3), which is a compile error
void example_4()
{
    std::audio_context<float> ctx;
    //manually select hardware endpoints
    std::size_t device_id = ctx.default_device_id();
    std::iastream input_device = ctx.get_device<std::input_device>(device_id);
    std::oastream output_device = ctx.get_device<std::output_device>(device_id);
    //NOTE(fix): capture ctx — the original lambda used it without capturing it
    std::astream_process<float> proc(ctx, [&ctx](std::iastream const& input,
                                                 std::oastream& output,
                                                 std::iafstream const& input_file,
                                                 std::oafstream& output_file){
        std::frame_buffer<float>& buff = ctx.borrow_buffer();
        (input + input_file) >> buff;//add streams to perform a sum before writing to the buffer
        //or you could use separate buffers, like this:
        /*
        std::frame_buffer<float> buff1;
        std::frame_buffer<float> buff2;
        input>>buff1;
        input_file>>buff2;
        buff1+=buff2;//buffer arithmetic
        */
        output << buff;//send the contents of buff to the hardware out and the file out
        output_file << buff;
    });
    std::iafstream audio_file1(ctx, "filename1.extension");//the actual files to be used above
    std::oafstream audio_file2(ctx, "filename2.extension");
    //connect the files to the process
    //connect the hardware device to the process
    audio_file2 << proc << audio_file1;//take input from file
    output_device << proc << input_device;//also take from hardware
    proc.start();
    //do other stuff
    proc.stop();
}
namespace std{
inline namespace audio{
    //working context for audio flow
    template<typename>
    class audio_context;
    /*
     *The context in which all audio data is centered.
     *Contains: sampling rate, buffer size, frame size, etc...
     *The values of ain,aout,afin,afout refer to the endpoints defined by the context, when applied to routing on a process tied to the context
     *think of a context as the program level driver object
     */
    //audio streams (think like std::fstream and its friends)
    class astream;//audio stream
    class oastream;//output audio stream
    class iastream;//input audio stream
    class oafstream;//output audio file stream
    class iafstream;//input audio file stream
    //stream endpoints
    class ain;//audio input endpoint
    class aout;//audio output endpoint
    class afin;//audio file input endpoint
    class afout;//audio file output endpoint (NOTE(fix): semicolon was missing in the original)
    //stream processing
    template<typename>
    class astream_process;//a dsp process applied to a stream
    template<typename>
    class process_group;//a group of processes that will act as one
    //containers
    template<typename>
    class frame_buffer;//a sequence container that is resizeable at runtime, but only with explicit resize calls. contains frames(see below)
    /*Implementation note on frame_buffer
     *frame_buffer is intended to hold N number of frames which themselves can hold M number of samples
     *meaning that the total size in samples of frame_buffer = N * M
     *ideally frame_buffer's representation of its sample data will be contiguous in memory
     */
    template<typename>
    class frame;//a container that holds samples, thin array wrapper
    //hardware representation
    class device;//an audio device as recognized by the OS
    class input_device;//an input device
    class output_device;//an output device
    // audio file formats
    enum class afformat{
        raw,//raw headerless audio bytes, interpreted only by the settings of the context.
            //best used for temporary storage within the life of a context
        wav,
        flac//etc...
    };//NOTE(fix): semicolon was missing after the enum in the original
}
}
--
You received this message because you are subscribed to the Google Groups "ISO C++ Standard - Future Proposals" group.
To unsubscribe from this group and stop receiving emails from it, send an email to std-proposal...@isocpp.org.
To post to this group, send email to std-pr...@isocpp.org.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/f490fd21-cf07-4d7b-8936-c71c97a1ab9d%40isocpp.org.
On Friday, 3 June 2016 20:04:59 MSK Bjorn Reese wrote:
> I would approach an audio API differently.
>
> The basic audio primitives are playing and recording.
I disagree. I would say the basic primitives are a sample and a frame. Samples can be obtained and processed in different ways, including recording, reading from file or generating algorithmically.
The processing is often represented as a pipeline or graph with producer, filter and consumer nodes. This is a higher level framework that builds upon the basic blocks of frames and digital signal processing algorithms.
From: Andrey Semashev Sent: Friday, June 3, 2016 1:10 PM Reply To: std-pr...@isocpp.org Subject: Re: [std-proposals] Any interest to adding audio support to the std library? |
That was the basis of my original idea for the library, but I found it to be too broad, so I constrained the idea to an audio context for the time being. I am planning on working up an alternative version that does what you are suggesting. The same concepts that I use in my library can be applied to any type of signal, so long as the program can receive that data from the OS.
--
You received this message because you are subscribed to the Google Groups "ISO C++ Standard - Future Proposals" group.
To unsubscribe from this group and stop receiving emails from it, send an email to std-proposal...@isocpp.org.
To post to this group, send email to std-pr...@isocpp.org.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/d9ecb64d-87e6-492c-9d87-0636aed66d9e%40isocpp.org.
This time I will skip the usage examples and write a specification of the types and behaviors needed for the library, as well as the library's relation to the platform that it is being used on. Hopefully that will make my intent for the library clearer and easier to develop.
On sábado, 4 de junho de 2016 02:20:15 BRT Andrey Semashev wrote:
> What is needed is a standardized interface for these different modules
> to work with each other. The interface should also allow me, the
> developer, to work with the media (e.g. create my own audio or image
> filter or a new codec or a new device driver).
That I agree with.
But should the standard mandate that there should be at least one? How does
someone write a plugin to the C++ Standard Library? Will we now mandate this
kind of ability?
If not, then will there be a requirement that the C++ library vendor provide
it? If so, please think carefully how Apple should code libc++ to work on iOS.
Again, I think this is not material for the C++ Standard Library.
--
You received this message because you are subscribed to the Google Groups "ISO C++ Standard - Future Proposals" group.
To unsubscribe from this group and stop receiving emails from it, send an email to std-proposal...@isocpp.org.
To post to this group, send email to std-pr...@isocpp.org.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/62b7649a-5de5-4330-9bac-708375d36e2c%40isocpp.org.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/a27c2580-c766-4488-bc26-a7b15783e0fd%40isocpp.org.
On Saturday, 4 June 2016 11:43:17 MSK Thiago Macieira wrote:
> On sábado, 4 de junho de 2016 02:20:15 BRT Andrey Semashev wrote:
> > What is needed is a standardized interface for these different modules
> > to work with each other. The interface should also allow me, the
> > developer, to work with the media (e.g. create my own audio or image
> > filter or a new codec or a new device driver).
>
> That I agree with.
>
> But should the standard mandate that there should be at least one? How does
> someone write a plugin to the C++ Standard Library? Will we now mandate this
> kind of ability?
>
> If not, then will there be a requirement that the C++ library vendor provide
> it? If so, please think carefully how Apple should code libc++ to work on
> iOS.
I think there should not be a requirement to provide a single module. But there should be a standard way to enumerate the available devices and codecs. The standard should allow for implementation to have no hardware audio capability.
//std::audio example 1 "single process"
//std::audio example 1 "single process"
void example_1(){
    double sample_rate = 44100;
    std::size_t frame_size = 2;
    std::size_t buffer_size = 128;
    //construct the working context from explicit values
    std::audio_context<float> ctx{sample_rate, buffer_size, frame_size};
    //pass-through process: copy every input frame to the output at unity gain
    std::astream_process<float> proc(ctx, [](std::frame_buffer<float> const& input, std::frame_buffer<float>& output){
        std::size_t idx = 0;
        while(idx < input.size()){
            output[idx] = input[idx] * 1.0;
            ++idx;
        }
    });
    proc.start();
    //do other stuff
    proc.stop();
}
//std::audio example 1 "single process"
void example_1(){
double sample_rate = 44100;
std::size_t frame_size =2;
std::size_t buffer_size=128;
std::audio_context<float> ctx{sample_rate,buffer_size,frame_size};//contruct from values
std::astream_process<float> proc(ctx,[](std::frame_buffer<float> const& input, std::frame_buffer<float>& output){
output=intput;//note the copy required
for(auto&& i: output){
i*=1.0;
#include <type_traits>
//trait: is T an acceptable sample type?
//policy chosen here: any floating-point type, or any *signed* integer type
//NOTE(fix): the original defined is_sample twice (a struct and then an alias
//with the same name) — the two alternatives cannot coexist in one translation
//unit; the stricter policy is kept and the looser one preserved as a comment
template<typename T>
struct is_sample
    : public std::integral_constant<bool,
          std::is_floating_point<T>::value ||
          (std::is_integral<T>::value && std::is_signed<T>::value)>{};
//alternative (looser) policy — accept any arithmetic type:
//template<typename T>
//using is_sample = std::is_arithmetic<T>;
From: alexande...@gmail.com Sent: Saturday, June 4, 2016 10:41 AM To: ISO C++ Standard - Future Proposals Reply To: std-pr...@isocpp.org Subject: Re: [std-proposals] Any interest to adding audio support to the std library? |
Is this based on an existing library? We're much more likely to adopt
a proposal that's been used widely than one that was invented for the
standard.
On Fri, Jun 3, 2016 at 2:37 AM, <alexande...@gmail.com> wrote:
> I have drafted some ideas on how I think the c++ std library could support
> audio functionality.
>
> I know that audio functionality is a very operating specific problem, but
> with the recent trend towards implementing a file-system library and
> possibly a graphics library I believe that audio would not be too much of a
> reach anymore.
>
> Here are some of the ideas I have so far. I have both some code examples of
> the intended usage as well as a list of the types needed to implement the
> given examples.
>
> Please keep in mind my drafts are still very rough.
>
>
> CODE EXAMPLES
>
> //std::audio example 1 "single process"
> void example_1(){
> double sample_rate = 44100;
> std::size_t frame_size =2;
> std::size_t buffer_size=128;
>
> std::audio_context<float>
> ctx{sample_rate,buffer_size,frame_size};//contruct from values
>
> std::astream_process<float> proc(ctx,[](std::iastream const& input,
> std::oastream& output){
> std::frame_buffer<float>& buff = ctx.borrow_buffer();//borrow a
> buffer from the context for usage
> //prevents the need for dynamic allocation of a temporary buffer
> input>>buff;//stream data into buffer for manipulation
> for(auto&& frame: buff){
> frame=0.0;//do something with audio
> }
> output<<buff;//stream to output
> });//dsp object
> //uses implied routing equivilent to
> //std::aout<<proc<<std::ain;
> //
>
> proc.start();
> //do other stuff
> proc.stop();
> }
>
> //std::audio example 2 "process group"
> void example_2(){
>
> std::audio_context<float> ctx;//default context created with
> std::default_* values
>
> //version 1: capture context via lambda
> std::astream_process<float> proc1(ctx,[&ctx](std::iastream const& input,
> std::oastream& output){
> std::frame_buffer<float>& buff = ctx.borrow_buffer();
> input>>buff;
> for(auto&& frame: buff){
> frame*=0.5;
> }
> output<<buff;
> });//dsp object
>
> //version 2: have context passed as argument
> std::astream_process<float> proc2(ctx,[](std::iastream const& input,
> std::oastream& output,std::audio_context<float> const& context){
> std::frame_buffer<float>& buff = ctx.borrow_buffer();
> input>>buff;
> for(auto&& frame: buff){
> frame*=2.0;
> }
> output<<buff;
> });
>
> std::process_group<float> pgroup;//a group of processes that will happen
> consecutivley
> pgroup.push(proc1);//add to group
> pgroup.push(proc2);//add to group
>
> //configure stream relationships in terms of std::ain / std:aout
> manually
> //std::ain/std::aout are std::astream globals that refer to the default
> audio inputs and outputs supplied by the context in use
> //std::ain/std::aout will route the audio to the enpoint specified by
> the context reference held by the process that is streaming the data
> std::aout<<proc1<<proc2<<std::ain;//method 1
> //std::ain>>proc2>>proc1>>std::aout;//method 2
>
> pgroup.start();
> //do other stuff
> pgroup.stop();
>
> }
>
>
> //std::audio example 3 "audio files"
> void example_3(){
>
> std::audio_context<float> ctx;
>
> std::astream_process<float> proc(ctx,[](std::iafstream const& input,
> std::oafstream& output){
> std::frame_buffer<float>& buff = ctx.borrow_buffer();
> input>>buff;
> for(auto&& frame: buff){
> frame=0.0;
> }
> output<<buff;
> });//dsp object
>
> std::iafstream audio_file1(ctx,"filename1.extension");//an audio file
> handle
> std::oafstream audio_file2(ctx,"filename2.extension");//an audio file
> handle
>
> //routing
> audio_file2<<proc<<audio_file1;//take input from file nad write to file
> //audio_file1>>proc>>audio_file2;//equivilent syntax
> proc.start();
> //do other stuff
> proc.stop();
> }
>
>
> //std::audio example 4 "combination routing"
> void example_3(){
>
> std::audio_context<float> ctx;
> //manually select hardware endpoints
> std::size_t device_id = ctx.default_device_id();
> std::iastream input_device =
> ctx.get_device<std::input_device>(device_id);
> std::oastream output_device =
> ctx.get_device<std::output_device>(device_id);
>
> std::astream_process<float> proc(ctx,[](std::iastream const& input,
> std::oastream& output,
> std::iafstream const&
> input_file,
> std::oafstream& output_file){
> std::frame_buffer<float>& buff = ctx.borrow_buffer();
> (input + input_file)>>buff;//add streams to perform sum before
> writing to buffer
> //or you could use seperate buffers
> //like this
> /*
> std::frame_buffer<float> buff1;
> std::frame_buffer<float> buff2;
>
> input>>buff1;
> input_file>>buff2;
> buff1+=buff2;//buffer arithmatic
> */
> output<<buff;//send the contents of buff to the hardware out and the
> file out
> output_file<<buff;
> });
>
> std::iafstream audio_file1(ctx,"filename1.extension");//the actual files
> to be used above
> std::oafstream audio_file2(ctx,"filename2.extension");
>
> //connect the files to the process
> //connect the hardware device to the process
> audio_file2<<proc<<audio_file1;//take input from file
> output_device<<proc<<input_device;//also take from hardware
> proc.start();
> //do other stuff
> proc.stop();
> }
>
>
>
> REQUIRED LIBRARY MEMBERS
>
>
> namespace std{
> inline namespace audio{
> //working context for audio flow
> template<typename>
> class audio_context;
> /*
> *The context in which all audio data is centered.
> *Contains: sampling rate, buffer size, frame size, etc...
> *The values of ain,aout,afin,afout refer to the endpoints defined by
> the context, when applied to routing on a porocess tied to the context
> *think of a context as the program level driver object
> */
>
> //audio streams (think like std::fstream and its friends)
> class astream;//audio stream
> class oastream;//output audio stream
> class iastream;//input audio stream
> class oafstream;//output audio file stream
> class iafstream;//input audio file stream
>
>
> //stream endpoints
> class ain;//audio input endpoint
> class aout;//audio output endpoint
> class afin;//audio file input endpoint
> class afout//audio file output endpoint
>
> //stream processing
> template<typename>
> class astream_process;//a dsp process applied to a stream
>
> template<typename>
> class process_group;//a group of processes that will act as one
>
> //containers
> template<typename>
> class frame_buffer;//a sequence container that is resizeable at
> runtime, but only with explicit resize calls. contains frames(see below)
> /*Implementation note on frame_buffer
> *frame_buffer is intended to hold N number of frames which
> themselves can hold M number of samples
> *meaning that the total size in samples if frame_buffer = N * M
> *ideally frame_buffers representation of its sample data will be
> continuous in memory
> */
>
> template<typename>
> class frame;//a container that holds samples, thin array wrapper
>
>
> //hardware representation
> class device;//an audio device as recognized by the OS
> class input_device;//an input device
> class output_device;//an output device
>
> // audio file formats
> enum class afformat{
> raw,//raw headerless audio bytes, interpreted only by the
> settings of the context.
> //best used for temporary storage within the life of a context
> wav,
> flac//etc...
> }
> }
> }
>
>
>
> --
> You received this message because you are subscribed to the Google Groups
> "ISO C++ Standard - Future Proposals" group.
> To unsubscribe from this group and stop receiving emails from it, send an
> email to std-proposal...@isocpp.org.
> To post to this group, send email to std-pr...@isocpp.org.
> To view this discussion on the web visit
Do you have any suggestions as to how an explicit notion of time should be handled in this context?
class frame_ticks // or some name that better describes this...
{
public: //NOTE(fix): class members default to private; the arithmetic sketched
        //below (fend - fstart, etc.) requires these to be accessible
    uint64_t fstart; //frame counter value at the start of the interval
    uint64_t fend;   //frame counter value at the end of the interval
    uint64_t frate;  //frame rate used to convert counts to seconds
    // ...
};
int number_of_frames = fend - fstart;
double seconds_elapsed = number_of_frames/frate;
double start = fstart/frate;//timepoint that corresponds to the start of the frame
double end = fend/frate;
double elapsed = end-start;//duration of frame?
std::audio::sample_clock;//a clock type that counts in frames(or samples)
/*
-has internal member of "rate" that is used for conversion
-keeps track of number of frames(or samples) elapsed
-can produce a time_point that is a real number of seconds from epoch.
-epoch can be reset
-clock will be synced with hardware??? if possible?? or good idea??
*/
std::chrono::time_point<std::audio::sample_clock> foo;//the current time since epoch in seconds
--
You received this message because you are subscribed to the Google Groups "ISO C++ Standard - Future Proposals" group.
To unsubscribe from this group and stop receiving emails from it, send an email to std-proposal...@isocpp.org.
To post to this group, send email to std-pr...@isocpp.org.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/83016a47-e902-4292-b109-652e811d17fb%40isocpp.org.
> Do you have any suggestions as far as an explicit notion of time should be in this context?
On Friday, 10 June 2016 13:11:14 MSK alexande...@gmail.com wrote:
> I suppose we could use the std::chrono library as a basis, and define a
> clock type that has a dynamically set epoch. This would allow a time-point
> to have a value relative to when the stream was started?
>
> So maybe the introduction of:
>
> std::audio::sample_clock;//a clock type that counts in frames(or samples)
> /*
> -has internal member of "rate" that is used for conversion
> -keeps track of number of frames(or samples) elapsed
> -can produce a time_point that is a real number of seconds from epoch.
> -epoch can be reset
> -clock will be synced with hardware??? if possible?? or good idea??
>
> */
> std::chrono::time_point<std::audio::sample_clock> foo;//the current time
> since epoch in seconds
+1 for using chrono.
+1 for monotonic clock (as Ross suggested). At least, by default.
-1 for using an arbitrary epoch clock.
In my practice I found it useful to be able to synchronize multiple streams, which may not have started at the same time. Or in the same process at all.
I'm not sure there is much use in binding particular frames to the real world clock, at least not in audio processing domain. In video/image processing this could be useful, e.g. to present an image to the user at the given time. However, I feel it would still be useful to allow specifying a custom clock to the audio processing framework as well.
One use case I have in mind is providing a custom clock which is guaranteed to be equivalent to CLOCK_MONOTONIC on POSIX systems. Unlike std::chrono::steady_clock, this custom clock would be useful in interfacing with OS primitives like condition variables or events. Having such clock time points in audio frames would be useful.
One other thing that could be useful is a clock adaptor, which implements the usual clock interface, but provides time points in sample rate units. The actual time readings would be obtained from an underlying clock. Something like this:
//Clock adaptor: presents an underlying clock's time in sample-rate units,
//i.e. one tick of this clock's duration == one sample period at SampleRate Hz.
//NOTE(fix): the original was missing the std:: / std::chrono:: qualifications
//on ratio and duration_cast, and the is_steady member required of a clock type.
template< typename BaseClock, unsigned int SampleRate >
class sample_rate_clock
{
public:
    typedef BaseClock base_clock;
    static constexpr unsigned int sample_rate = SampleRate;
    typedef std::ratio< 1, sample_rate > period;       //tick period = 1/SampleRate seconds
    typedef typename base_clock::rep rep;
    typedef std::chrono::duration< rep, period > duration;
    typedef std::chrono::time_point< sample_rate_clock > time_point;
    static constexpr bool is_steady = base_clock::is_steady; //monotonic iff the base clock is
    // ...etc. - other clock members as usual,
    // imported from BaseClock as needed
    static time_point now()
    {
        //re-express the base clock's current time since its epoch in sample ticks
        return time_point(std::chrono::duration_cast< duration >(
            base_clock::now().time_since_epoch()));
    }
};
Or, on the second thought, this might be a generic tool, not related to audio processing at all...
On Friday, 10 June 2016 13:36:48 MSK alexande...@gmail.com wrote:
> Or are we trying to define how to determine a specific timepoint that a
> specific frame corresponds to?
>
> double start = fstart/frate;//timepoint that corresponds to the start of
> the frame
> double end = fend/frate;
>
> double elapsed = end-start;//duration of frame?
Floating point types are definitely not the way to go for timestamps.
--
You received this message because you are subscribed to a topic in the Google Groups "ISO C++ Standard - Future Proposals" group.
To unsubscribe from this topic, visit https://groups.google.com/a/isocpp.org/d/topic/std-proposals/Hkdh02Ejx6s/unsubscribe.
To unsubscribe from this group and all its topics, send an email to std-proposal...@isocpp.org.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/2271827.u9FbM9WbyY%40lastique-pc.
On Saturday, 11 June 2016 10:55:17 MSK alexande...@gmail.com wrote:
> I lie your thought on the clock idea and +1 for the example. A few thoughts
> though, I would like to explain the distinction that I draw between an
> "arbitrary epoch" and a "dynamic epoch" as I had proposed. It would not be
> arbitrary in that it would be set to some random point, but rather would be
> set too a significant time point in terms of the current instance of the
> program... It would be reset to the time when the stream was started. In
> port audio it would be set to the time when Pa_StartStream was called,
> having that as a reference point would allow you to manage the local time
> within that stream easily.
If I understood you right, that's exactly what I'm arguing against.
> As a second note the sample rate will most likely need to be a runtime
> parameter and cannot be a template parameter because of such( unless we
> specify the need for a factory to create clocks of different sample rates)
> we will probably not know the desired sample rate or even the supported
> sampling rates at compile time
A runtime-set precision cannot be implemented with chrono. I would suggest to just avoid the sample rate based clocks then and use timestamps in conventional units (ms, us, ns...).
On Saturday, 11 June 2016 11:03:41 MSK alexande...@gmail.com wrote:
> Or at least store a time point each time the stream is started using a clock
> with a well defined epoch, so that we might be able to find the length of
> time that the stream has been active and have meaningful measurements of
> time within the stream.
There is no need to have the epoch bound to the beginning of the stream to calculate its duration. Or any particular epoch at all. All you need is the timestamp of its first and last frames.
A fixed epoch becomes important when you try to synchronize multiple streams together. But even then it's not important what exactly is the epoch; what is important is that it has to be the same for all streams you are processing.
Sorry for top posting, or if this has already been pointed out.
Is there any reason this hasn't been submitted to Boost and let it mature there for few years?
--
You received this message because you are subscribed to the Google Groups "ISO C++ Standard - Future Proposals" group.
To unsubscribe from this group and stop receiving emails from it, send an email to std-proposal...@isocpp.org.
To post to this group, send email to std-pr...@isocpp.org.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/f490fd21-cf07-4d7b-8936-c71c97a1ab9d%40isocpp.org.
It doesn't have to go through boost, but I would like to see a public repository with a bunch of users.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/CAFdMc-0Dj4BGW0v8ORrcgGM54D5_ZunT2At0OWFEDFr-3CYWBw%40mail.gmail.com.
--
You received this message because you are subscribed to a topic in the Google Groups "ISO C++ Standard - Future Proposals" group.
To unsubscribe from this topic, visit https://groups.google.com/a/isocpp.org/d/topic/std-proposals/Hkdh02Ejx6s/unsubscribe.
To unsubscribe from this group and all its topics, send an email to std-proposal...@isocpp.org.
To post to this group, send email to std-pr...@isocpp.org.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/3560828.vAATzG8HgS%40lastique-pc.
#include <chrono>
#include <type_traits>
//audio_clock: the monotonic clock used for audio timestamps.
//Selects std::chrono::high_resolution_clock when that clock is steady
//(is_steady == true), and falls back to std::chrono::steady_clock otherwise.
//Either branch of the conditional yields a monotonic clock, but on platforms
//where high_resolution_clock is steady we get its (potentially finer) resolution.
using audio_clock = typename std::conditional<std::chrono::high_resolution_clock::is_steady,
std::chrono::high_resolution_clock,
std::chrono::steady_clock>::type;
In that case I would argue for the use of "std::chrono::steady_clock", based on the fact that it is specified to be a monotonic clock. Or a clock defined in a similar manner that counts in samples. I would also imagine that our clock would need to have "is_steady" always be true?
On Saturday, 11 June 2016 11:03:41 MSK alexande...@gmail.com wrote:
> Or at least store a time point each time the stream is started using a clock
> with a well defined epoch, so that we might be able to find the length of
> time that the stream has been active and have meaningful measurements of
> time within the stream.
There is no need to have the epoch bound to the beginning of the stream to calculate its duration. Or any particular epoch at all. All you need is the timestamp of its first and last frames.
A fixed epoch becomes important when you try to synchronize multiple streams together. But even then it's not important what exactly is the epoch; what is important is that it has to be the same for all streams you are processing.
--
You received this message because you are subscribed to a topic in the Google Groups "ISO C++ Standard - Future Proposals" group.
To unsubscribe from this topic, visit https://groups.google.com/a/isocpp.org/d/topic/std-proposals/Hkdh02Ejx6s/unsubscribe.
To unsubscribe from this group and all its topics, send an email to std-proposals+unsubscribe@isocpp.org.
As I suggested earlier, I think the clock should be specified by the user (probably as a template parameter for the audio frame).
--
You received this message because you are subscribed to the Google Groups "ISO C++ Standard - Future Proposals" group.
To unsubscribe from this group and stop receiving emails from it, send an email to std-proposal...@isocpp.org.
To post to this group, send email to std-pr...@isocpp.org.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/2897526.WjqoZ8n3I9%40lastique-pc.
You received this message because you are subscribed to a topic in the Google Groups "ISO C++ Standard - Future Proposals" group.
To unsubscribe from this topic, visit https://groups.google.com/a/isocpp.org/d/topic/std-proposals/Hkdh02Ejx6s/unsubscribe.
To unsubscribe from this group and all its topics, send an email to std-proposal...@isocpp.org.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/CAOcFa%3DdvmLW0u8R3MesuJMc7rbkewr%2Bmbv1qjVSQ3ip_U%3DsbZA%40mail.gmail.com.
--
You received this message because you are subscribed to a topic in the Google Groups "ISO C++ Standard - Future Proposals" group.
To unsubscribe from this topic, visit https://groups.google.com/a/isocpp.org/d/topic/std-proposals/Hkdh02Ejx6s/unsubscribe.
To unsubscribe from this group and all its topics, send an email to std-proposal...@isocpp.org.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/2897526.WjqoZ8n3I9%40lastique-pc.
On Monday, 13 June 2016 14:55:43 MSK alexande...@gmail.com wrote:
> If that should be the case we would have to write a type trait that makes
> sure that the user supplied clock was an acceptable clock source.
I'm not sure we need a trait for checking the clock for acceptance.
> I suggest
> using the built in ones for the mean time because the std makes certain
> guarantees about those clocks that can make sure that thy meet our needs.
> Not to mention that a default would be needed even if we allowed user
> supplied clocks
The problem I have with std::chrono::steady_clock is that it's not guaranteed to be equivalent to CLOCK_MONOTONIC. Or any other POSIX clock type. It may work as a default clock type, should we decide one is needed, but IMO it should be customizable from the start.
--
You received this message because you are subscribed to a topic in the Google Groups "ISO C++ Standard - Future Proposals" group.
To unsubscribe from this topic, visit https://groups.google.com/a/isocpp.org/d/topic/std-proposals/Hkdh02Ejx6s/unsubscribe.
To unsubscribe from this group and all its topics, send an email to std-proposal...@isocpp.org.
To post to this group, send email to std-pr...@isocpp.org.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/8842319.5S1MoY7pmH%40lastique-pc.
Since ultimately, the timing of audio frames could be used to synchronize audio from sources not sharing a clock, the resolution of the timestamp must be substantially larger than "number of samples ", I propose what was mentioned earlier, a steady clock with nanosecond resolution.
/R
You received this message because you are subscribed to the Google Groups "ISO C++ Standard - Future Proposals" group.
To unsubscribe from this group and stop receiving emails from it, send an email to std-proposal...@isocpp.org.
To post to this group, send email to std-pr...@isocpp.org.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/8AB38C1F-590D-4F81-9839-D7A4604E1831%40gmail.com.
To view this discussion on the web visit https://groups.google.com/a/isocpp.org/d/msgid/std-proposals/CAEvHzA0emfwFJxzU%2BYyKog0H52MLnq6BnyuOzC27QVNUA_8Thw%40mail.gmail.com.