#include "aattachmentpoint.h"
#include "amodule.h"
#include "arender.h"
#include "automation.h"
#include "clip.h"
#include "edl.h"
#include "edlsession.h"
#include "floatautos.h"
#include "panauto.h"
#include "plugin.h"
#include "renderengine.h"
#include "track.h"
#include "transition.h"
#include "transportque.h"
#include "units.h"
#include "virtualaconsole.h"
#include "virtualanode.h"

#include <math.h>

VirtualANode::VirtualANode(RenderEngine *renderengine,
	VirtualConsole *vconsole,
	Module *real_module,
	Plugin *real_plugin,
	Track *track,
	VirtualNode *parent_module)
 : VirtualNode(renderengine, vconsole, real_module, real_plugin, track, parent_module)
{
	for(int i = 0; i < MAXCHANNELS; i++)
	{
		pan_before[i] = pan_after[i] = 0;
	}
}
VirtualANode::~VirtualANode()
{
}
VirtualNode* VirtualANode::create_module(Plugin *real_plugin,
	Module *real_module,
	Track *track)
{
	return new VirtualANode(renderengine,
		vconsole,
		real_module,
		real_plugin,
		track,
		this);
}
VirtualNode* VirtualANode::create_plugin(Plugin *real_plugin)
{
	return new VirtualANode(renderengine,
		vconsole,
		0,
		real_plugin,
		track,
		this);
}

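// Fill output_temp with this node's input: the previous plugin on the
// parent module, the parent module itself, or the track's module when this
// node is the root of the tree.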
int VirtualANode::read_data(double *output_temp,
	int64_t start_position,
	int64_t len,
	int sample_rate)
{
	VirtualNode *previous_plugin = 0;

// This is a plugin on the parent module with a preceding effect.
// Get data from the preceding effect on the parent module.
	if(parent_node &&
		(previous_plugin = parent_node->get_previous_plugin(this)))
	{
		((VirtualANode*)previous_plugin)->render(output_temp,
			start_position,
			len,
			sample_rate);
	}
	else
// First plugin on the parent module.
// Read data from the parent module.
	if(parent_node)
	{
		((VirtualANode*)parent_node)->read_data(output_temp,
			start_position,
			len,
			sample_rate);
	}
	else
// This is the first node in the tree
	{
		((AModule*)real_module)->render(output_temp,
			start_position,
			len,
			renderengine->command->get_direction(),
			sample_rate);
	}
	return 0;
}

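// Render this node. Module nodes mix their track into the console output;
// plugin nodes process output_temp in place.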
int VirtualANode::render(double *output_temp,
	int64_t start_position,
	int64_t len,
	int sample_rate)
{
	ARender *arender = ((VirtualAConsole*)vconsole)->arender;

	if(real_module)
	{
		render_as_module(arender->audio_out,
			output_temp,
			start_position,
			len,
			sample_rate);
	}
	else
	if(real_plugin)
	{
		render_as_plugin(output_temp,
			start_position,
			len,
			sample_rate);
	}
	return 0;
}

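// Run the attached audio plugin over this fragment of output_temp.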
void VirtualANode::render_as_plugin(double *output_temp,
	int64_t start_position,
	int64_t len,
	int sample_rate)
{
	if(!attachment ||
		!real_plugin ||
		!real_plugin->on) return;

// If we're the first plugin in the parent module, data needs to be read from
// what comes before the parent module. Otherwise, data needs to come from the
// previous plugin.
	((AAttachmentPoint*)attachment)->render(
		output_temp,
		plugin_buffer_number,
		start_position,
		len,
		sample_rate);
}

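// Render a module node: pull data through the subnode chain, apply fade,
// update the level meters, then pan unmuted fragments into the output channels.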
int VirtualANode::render_as_module(double **audio_out,
	double *output_temp,
	int64_t start_position,
	int64_t len,
	int sample_rate)
{
	int direction = renderengine->command->get_direction();
	EDL *edl = vconsole->renderengine->edl;

// Process last subnode. This calls read_data, propagates up the chain
// of subnodes, and finishes the chain.
	if(subnodes.total)
	{
		VirtualANode *node = (VirtualANode*)subnodes.values[subnodes.total - 1];
		node->render(output_temp,
			start_position,
			len,
			sample_rate);
	}
	else
// Read data from previous entity
	{
		read_data(output_temp,
			start_position,
			len,
			sample_rate);
	}

	render_fade(output_temp,
		len,
		start_position,
		sample_rate,
		track->automation->autos[AUTOMATION_FADE],
		direction,
		0);

// Get the peak but don't limit
// Calculate position relative to project for meters
	int64_t project_sample_rate = edl->session->sample_rate;
	int64_t start_position_project = start_position *
		project_sample_rate /
		sample_rate;
	if(real_module && renderengine->command->realtime)
	{
		ARender *arender = ((VirtualAConsole*)vconsole)->arender;
// Starting sample of meter block
		int64_t meter_render_start;
// Ending sample of meter block
		int64_t meter_render_end;
// Number of samples in each meter fragment normalized to requested rate
		int meter_render_fragment = arender->meter_render_fragment *
			sample_rate /
			project_sample_rate;

// Scan fragment in meter sized fragments
		for(int i = 0; i < len; )
		{
			int current_level = ((AModule*)real_module)->current_level;
			double peak = 0;
			meter_render_start = i;
			meter_render_end = i + meter_render_fragment;
			if(meter_render_end > len)
				meter_render_end = len;
// Number of samples into the fragment this meter sized fragment is,
// normalized to project sample rate.
			int64_t meter_render_start_project = meter_render_start *
				project_sample_rate /
				sample_rate;

// Scan meter sized fragment
			for( ; i < meter_render_end; i++)
			{
				double sample = fabs(output_temp[i]);
				if(sample > peak) peak = sample;
			}

			((AModule*)real_module)->level_history[current_level] =
				peak;
			((AModule*)real_module)->level_samples[current_level] =
				(direction == PLAY_FORWARD) ?
				(start_position_project + meter_render_start_project) :
				(start_position_project - meter_render_start_project);
			((AModule*)real_module)->current_level =
				arender->get_next_peak(current_level);
		}
	}

// Process pans and copy the output to the output channels.
// Keep rendering unmuted fragments until finished.
	int mute_constant;
	int mute_position = 0;

	for(int i = 0; i < len; )
	{
		int mute_fragment = len - i;
		int mute_fragment_project = mute_fragment *
			project_sample_rate /
			sample_rate;
		start_position_project = start_position +
			((direction == PLAY_FORWARD) ? i : -i);
		start_position_project = start_position_project *
			project_sample_rate /
			sample_rate;

// How many samples until the next mute?
		get_mute_fragment(start_position_project,
			mute_constant,
			mute_fragment_project,
			(Autos*)track->automation->autos[AUTOMATION_MUTE],
			direction,
			0);
		mute_fragment = mute_fragment_project *
			sample_rate /
			project_sample_rate;

// Fragment is playable
		if(!mute_constant)
		{
			for(int j = 0; j < MAXCHANNELS; j++)
			{
				if(audio_out[j])
				{
					double *buffer = audio_out[j];

					render_pan(output_temp + mute_position,
						buffer + mute_position,
						mute_fragment,
						start_position + ((direction == PLAY_FORWARD) ?
							mute_position : -mute_position),
						sample_rate,
						(Autos*)track->automation->autos[AUTOMATION_PAN],
						j,
						direction,
						0);
				}
			}
		}

		len -= mute_fragment;
		i += mute_fragment;
		mute_position += mute_fragment;
	}
	return 0;
}

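// Apply fade automation to buffer. Fade values are in dB; values at or
// below INFINITYGAIN are rendered as silence.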
int VirtualANode::render_fade(double *buffer,
	int64_t len,
	int64_t input_position,
	int sample_rate,
	Autos *autos,
	int direction,
	int use_nudge)
{
	double value, fade_value;
	FloatAuto *previous = 0;
	FloatAuto *next = 0;
	EDL *edl = vconsole->renderengine->edl;
	int64_t project_sample_rate = edl->session->sample_rate;
	if(use_nudge) input_position += track->nudge *
		sample_rate /
		project_sample_rate;

// Normalize input position to project sample rate here.
// Automation functions are general to video and audio, so they
// can't do the normalization themselves.
	int64_t input_position_project = input_position *
		project_sample_rate /
		sample_rate;
	int64_t len_project = len *
		project_sample_rate /
		sample_rate;

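// If the fade is constant over this span, apply a single gain to the whole
// buffer. Otherwise evaluate the automation for every sample.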
	if(((FloatAutos*)autos)->automation_is_constant(input_position_project,
		len_project,
		direction,
		fade_value))
	{
		if(fade_value <= INFINITYGAIN)
			value = 0;
		else
			value = DB::fromdb(fade_value);

		for(int64_t i = 0; i < len; i++)
		{
			buffer[i] *= value;
		}
	}
	else
	{
		for(int64_t i = 0; i < len; i++)
		{
			int64_t slope_len = len - i;
			input_position_project = input_position *
				project_sample_rate /
				sample_rate;
			fade_value = ((FloatAutos*)autos)->get_value(input_position_project,
				direction,
				previous,
				next);

			if(fade_value <= INFINITYGAIN)
				value = 0;
			else
				value = DB::fromdb(fade_value);

			buffer[i] *= value;

			if(direction == PLAY_FORWARD)
				input_position++;
			else
				input_position--;
		}
	}
	return 0;
}

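// Pan one channel: scale the input by the pan automation for this output
// channel and accumulate it into the output buffer.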
int VirtualANode::render_pan(double *input, // start of input fragment
	double *output, // start of output fragment
	int64_t fragment_len, // fragment length in input scale
	int64_t input_position, // starting sample of input buffer in project
	int64_t sample_rate, // sample rate of input_position
	Autos *autos,
	int channel,
	int direction,
	int use_nudge)
{
	double slope = 0;
	double value = 0;
	double intercept = 1.0;
	EDL *edl = vconsole->renderengine->edl;
	int64_t project_sample_rate = edl->session->sample_rate;
	if(use_nudge) input_position += track->nudge *
		sample_rate /
		project_sample_rate;

	for(int i = 0; i < fragment_len; )
	{
		int64_t slope_len = (fragment_len - i) *
			project_sample_rate /
			sample_rate;

// Get slope intercept formula for next fragment
		get_pan_automation(slope,
			intercept,
			input_position *
				project_sample_rate /
				sample_rate,
			slope_len,
			autos,
			channel,
			direction);

		slope_len = slope_len * sample_rate / project_sample_rate;
		slope = slope * sample_rate / project_sample_rate;
		slope_len = MIN(slope_len, fragment_len - i);

//printf("VirtualANode::render_pan 3 %d %lld %f %p %p\n", i, slope_len, slope, output, input);
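// Apply a linear ramp when the pan changes over this slope, otherwise a
// constant gain.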
		if(!EQUIV(slope, 0))
		{
			for(double j = 0; j < slope_len; j++, i++)
			{
				value = slope * j + intercept;
				output[i] += input[i] * value;
			}
		}
		else
		{
			for(int j = 0; j < slope_len; j++, i++)
			{
				output[i] += input[i] * intercept;
			}
		}

		if(direction == PLAY_FORWARD)
			input_position += slope_len;
		else
			input_position -= slope_len;

//printf("VirtualANode::render_pan 4\n");
	}
	return 0;
}

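// Get the linear slope/intercept of the pan automation for one channel
// starting at input_position, clamping slope_len to the next keyframe.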
void VirtualANode::get_pan_automation(double &slope,
	double &intercept,
	int64_t input_position,
	int64_t &slope_len,
	Autos *autos,
	int channel,
	int direction)
{
	slope = 0;
	PanAuto *prev_keyframe = 0;
	PanAuto *next_keyframe = 0;
	prev_keyframe = (PanAuto*)autos->get_prev_auto(input_position,
		direction,
		(Auto* &)prev_keyframe);
	next_keyframe = (PanAuto*)autos->get_next_auto(input_position,
		direction,
		(Auto* &)next_keyframe);

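// Interpolate between the keyframes on either side of input_position.
// The same math applies in reverse, with the comparisons mirrored.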
	if(direction == PLAY_FORWARD)
	{
// Two distinct automation points within range
		if(next_keyframe->position > prev_keyframe->position)
		{
			slope = ((double)next_keyframe->values[channel] - prev_keyframe->values[channel]) /
				((double)next_keyframe->position - prev_keyframe->position);
			intercept = ((double)input_position - prev_keyframe->position) * slope +
				prev_keyframe->values[channel];

			if(next_keyframe->position < input_position + slope_len)
				slope_len = next_keyframe->position - input_position;
		}
		else
// One automation point within range
		{
			intercept = prev_keyframe->values[channel];
		}
	}
	else
	{
// Two distinct automation points within range
		if(next_keyframe->position < prev_keyframe->position)
		{
			slope = ((double)next_keyframe->values[channel] - prev_keyframe->values[channel]) /
				((double)next_keyframe->position - prev_keyframe->position);
			intercept = ((double)input_position - prev_keyframe->position) * slope +
				prev_keyframe->values[channel];

			if(next_keyframe->position > input_position - slope_len)
				slope_len = input_position - next_keyframe->position;
		}
		else
// One automation point within range
		{
			intercept = next_keyframe->values[channel];
		}
	}
}