OpenShot Audio Library | OpenShotAudio 0.3.2
juce_Convolution.cpp
1/*
2 ==============================================================================
3
4 This file is part of the JUCE library.
5 Copyright (c) 2017 - ROLI Ltd.
6
7 JUCE is an open source library subject to commercial or open-source
8 licensing.
9
10 By using JUCE, you agree to the terms of both the JUCE 5 End-User License
11 Agreement and JUCE 5 Privacy Policy (both updated and effective as of the
12 27th April 2017).
13
14 End User License Agreement: www.juce.com/juce-5-licence
15 Privacy Policy: www.juce.com/juce-5-privacy-policy
16
17 Or: You may also use this code under the terms of the GPL v3 (see
18 www.gnu.org/licenses).
19
20 JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
21 EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
22 DISCLAIMED.
23
24 ==============================================================================
25*/
26
27namespace juce
28{
29namespace dsp
30{
31
35struct ConvolutionEngine
36{
37 ConvolutionEngine() = default;
38
39 //==============================================================================
40 struct ProcessingInformation
41 {
42 enum class SourceType
43 {
44 sourceBinaryData,
45 sourceAudioFile,
46 sourceAudioBuffer,
47 sourceNone
48 };
49
50 SourceType sourceType = SourceType::sourceNone;
51
52 const void* sourceData;
53 int sourceDataSize;
54 File fileImpulseResponse;
55
56 double originalSampleRate;
57 int originalSize = 0;
58 int originalNumChannels = 1;
59
60 AudioBuffer<float>* buffer;
61
62 bool wantsStereo = true;
63 bool wantsTrimming = true;
64 bool wantsNormalisation = true;
65 int64 wantedSize = 0;
66 int finalSize = 0;
67
68 double sampleRate = 0;
69 size_t maximumBufferSize = 0;
70 };
71
72 //==============================================================================
73 void reset()
74 {
75 bufferInput.clear();
76 bufferOverlap.clear();
77 bufferTempOutput.clear();
78
79 for (auto i = 0; i < buffersInputSegments.size(); ++i)
80 buffersInputSegments.getReference (i).clear();
81
82 currentSegment = 0;
83 inputDataPos = 0;
84 }
85
87 void initializeConvolutionEngine (ProcessingInformation& info, int channel)
88 {
89 blockSize = (size_t) nextPowerOfTwo ((int) info.maximumBufferSize);
90
91 FFTSize = blockSize > 128 ? 2 * blockSize
92 : 4 * blockSize;
93
94 numSegments = ((size_t) info.finalSize) / (FFTSize - blockSize) + 1u;
95
96 numInputSegments = (blockSize > 128 ? numSegments : 3 * numSegments);
97
98 FFTobject.reset (new FFT (roundToInt (std::log2 (FFTSize))));
99
100 bufferInput.setSize (1, static_cast<int> (FFTSize));
101 bufferOutput.setSize (1, static_cast<int> (FFTSize * 2));
102 bufferTempOutput.setSize (1, static_cast<int> (FFTSize * 2));
103 bufferOverlap.setSize (1, static_cast<int> (FFTSize));
104
105 buffersInputSegments.clear();
106 buffersImpulseSegments.clear();
107 bufferOutput.clear();
108
109 for (size_t i = 0; i < numInputSegments; ++i)
110 {
111 AudioBuffer<float> newInputSegment;
112 newInputSegment.setSize (1, static_cast<int> (FFTSize * 2));
113 buffersInputSegments.add (newInputSegment);
114 }
115
116 for (auto i = 0u; i < numSegments; ++i)
117 {
118 AudioBuffer<float> newImpulseSegment;
119 newImpulseSegment.setSize (1, static_cast<int> (FFTSize * 2));
120 buffersImpulseSegments.add (newImpulseSegment);
121 }
122
123 std::unique_ptr<FFT> FFTTempObject (new FFT (roundToInt (std::log2 (FFTSize))));
124
125 auto* channelData = info.buffer->getWritePointer (channel);
126
127 for (size_t n = 0; n < numSegments; ++n)
128 {
129 buffersImpulseSegments.getReference (static_cast<int> (n)).clear();
130
131 auto* impulseResponse = buffersImpulseSegments.getReference (static_cast<int> (n)).getWritePointer (0);
132
133 if (n == 0)
134 impulseResponse[0] = 1.0f;
135
136 for (size_t i = 0; i < FFTSize - blockSize; ++i)
137 if (i + n * (FFTSize - blockSize) < (size_t) info.finalSize)
138 impulseResponse[i] = channelData[i + n * (FFTSize - blockSize)];
139
140 FFTTempObject->performRealOnlyForwardTransform (impulseResponse);
141 prepareForConvolution (impulseResponse);
142 }
143
144 reset();
145
146 isReady = true;
147 }
148
150 void copyStateFromOtherEngine (const ConvolutionEngine& other)
151 {
152 if (FFTSize != other.FFTSize)
153 {
154 FFTobject.reset (new FFT (roundToInt (std::log2 (other.FFTSize))));
155 FFTSize = other.FFTSize;
156 }
157
158 currentSegment = other.currentSegment;
159 numInputSegments = other.numInputSegments;
160 numSegments = other.numSegments;
161 blockSize = other.blockSize;
162 inputDataPos = other.inputDataPos;
163
164 bufferInput = other.bufferInput;
165 bufferTempOutput = other.bufferTempOutput;
166 bufferOutput = other.bufferOutput;
167
168 buffersInputSegments = other.buffersInputSegments;
169 buffersImpulseSegments = other.buffersImpulseSegments;
170 bufferOverlap = other.bufferOverlap;
171
172 isReady = true;
173 }
174
176 void processSamples (const float* input, float* output, size_t numSamples)
177 {
178 if (! isReady)
179 return;
180
181 // Overlap-add, zero latency convolution algorithm with uniform partitioning
182 size_t numSamplesProcessed = 0;
183
184 auto indexStep = numInputSegments / numSegments;
185
186 auto* inputData = bufferInput.getWritePointer (0);
187 auto* outputTempData = bufferTempOutput.getWritePointer (0);
188 auto* outputData = bufferOutput.getWritePointer (0);
189 auto* overlapData = bufferOverlap.getWritePointer (0);
190
191 while (numSamplesProcessed < numSamples)
192 {
193 const bool inputDataWasEmpty = (inputDataPos == 0);
194 auto numSamplesToProcess = jmin (numSamples - numSamplesProcessed, blockSize - inputDataPos);
195
196 // copy the input samples
197 FloatVectorOperations::copy (inputData + inputDataPos, input + numSamplesProcessed, static_cast<int> (numSamplesToProcess));
198
199 auto* inputSegmentData = buffersInputSegments.getReference (static_cast<int> (currentSegment)).getWritePointer (0);
200 FloatVectorOperations::copy (inputSegmentData, inputData, static_cast<int> (FFTSize));
201
202 // Forward FFT
203 FFTobject->performRealOnlyForwardTransform (inputSegmentData);
204 prepareForConvolution (inputSegmentData);
205
206 // Complex multiplication
207 if (inputDataWasEmpty)
208 {
209 FloatVectorOperations::fill (outputTempData, 0, static_cast<int> (FFTSize + 1));
210
211 auto index = currentSegment;
212
213 for (size_t i = 1; i < numSegments; ++i)
214 {
215 index += indexStep;
216
217 if (index >= numInputSegments)
218 index -= numInputSegments;
219
220 convolutionProcessingAndAccumulate (buffersInputSegments.getReference (static_cast<int> (index)).getWritePointer (0),
221 buffersImpulseSegments.getReference (static_cast<int> (i)).getWritePointer (0),
222 outputTempData);
223 }
224 }
225
226 FloatVectorOperations::copy (outputData, outputTempData, static_cast<int> (FFTSize + 1));
227
228 convolutionProcessingAndAccumulate (buffersInputSegments.getReference (static_cast<int> (currentSegment)).getWritePointer (0),
229 buffersImpulseSegments.getReference (0).getWritePointer (0),
230 outputData);
231
232 // Inverse FFT
233 updateSymmetricFrequencyDomainData (outputData);
234 FFTobject->performRealOnlyInverseTransform (outputData);
235
236 // Add overlap
237 for (size_t i = 0; i < numSamplesToProcess; ++i)
238 output[i + numSamplesProcessed] = outputData[inputDataPos + i] + overlapData[inputDataPos + i];
239
240 // Input buffer full => Next block
241 inputDataPos += numSamplesToProcess;
242
243 if (inputDataPos == blockSize)
244 {
245 // Input buffer is empty again now
246 FloatVectorOperations::fill (inputData, 0.0f, static_cast<int> (FFTSize));
247
248 inputDataPos = 0;
249
250 // Extra step for segSize > blockSize
251 FloatVectorOperations::add (&(outputData[blockSize]), &(overlapData[blockSize]), static_cast<int> (FFTSize - 2 * blockSize));
252
253 // Save the overlap
254 FloatVectorOperations::copy (overlapData, &(outputData[blockSize]), static_cast<int> (FFTSize - blockSize));
255
256 // Update current segment
257 currentSegment = (currentSegment > 0) ? (currentSegment - 1) : (numInputSegments - 1);
258 }
259
260 numSamplesProcessed += numSamplesToProcess;
261 }
262 }
263
265 void prepareForConvolution (float *samples) noexcept
266 {
267 auto FFTSizeDiv2 = FFTSize / 2;
268
269 for (size_t i = 0; i < FFTSizeDiv2; i++)
270 samples[i] = samples[2 * i];
271
272 samples[FFTSizeDiv2] = 0;
273
274 for (size_t i = 1; i < FFTSizeDiv2; i++)
275 samples[i + FFTSizeDiv2] = -samples[2 * (FFTSize - i) + 1];
276 }
277
279 void convolutionProcessingAndAccumulate (const float *input, const float *impulse, float *output)
280 {
281 auto FFTSizeDiv2 = FFTSize / 2;
282
283 FloatVectorOperations::addWithMultiply (output, input, impulse, static_cast<int> (FFTSizeDiv2));
284 FloatVectorOperations::subtractWithMultiply (output, &(input[FFTSizeDiv2]), &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));
285
286 FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), input, &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));
287 FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), &(input[FFTSizeDiv2]), impulse, static_cast<int> (FFTSizeDiv2));
288
289 output[FFTSize] += input[FFTSize] * impulse[FFTSize];
290 }
291
296 void updateSymmetricFrequencyDomainData (float *samples) noexcept
297 {
298 auto FFTSizeDiv2 = FFTSize / 2;
299
300 for (size_t i = 1; i < FFTSizeDiv2; i++)
301 {
302 samples[2 * (FFTSize - i)] = samples[i];
303 samples[2 * (FFTSize - i) + 1] = -samples[FFTSizeDiv2 + i];
304 }
305
306 samples[1] = 0.f;
307
308 for (size_t i = 1; i < FFTSizeDiv2; i++)
309 {
310 samples[2 * i] = samples[2 * (FFTSize - i)];
311 samples[2 * i + 1] = -samples[2 * (FFTSize - i) + 1];
312 }
313 }
314
315 //==============================================================================
316 std::unique_ptr<FFT> FFTobject;
317
318 size_t FFTSize = 0;
319 size_t currentSegment = 0, numInputSegments = 0, numSegments = 0, blockSize = 0, inputDataPos = 0;
320
321 AudioBuffer<float> bufferInput, bufferOutput, bufferTempOutput, bufferOverlap;
322 Array<AudioBuffer<float>> buffersInputSegments, buffersImpulseSegments;
323
324 bool isReady = false;
325
326 //==============================================================================
327 JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (ConvolutionEngine)
328};
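Editorial note: the engine above implements uniformly partitioned overlap-add convolution. initializeConvolutionEngine() chooses the partition sizes, and convolutionProcessingAndAccumulate() performs the complex multiply-accumulate on data that prepareForConvolution() has rearranged into a split real/imaginary layout. The following standalone sketch is not part of juce_Convolution.cpp; it only illustrates that arithmetic, and the numeric inputs (a 441-sample host buffer, a 48000-sample impulse response, the toy spectra) are assumptions chosen for the example.

#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    // Partition sizes, mirroring initializeConvolutionEngine() above.
    const size_t maximumBufferSize = 441;            // assumed host block size
    size_t blockSize = 1;
    while (blockSize < maximumBufferSize)            // nextPowerOfTwo (441) == 512
        blockSize *= 2;

    const size_t FFTSize     = blockSize > 128 ? 2 * blockSize : 4 * blockSize;  // 1024
    const size_t irLength    = 48000;                // assumed impulse response length in samples
    const size_t numSegments = irLength / (FFTSize - blockSize) + 1u;            // 94 partitions

    std::printf ("blockSize=%zu FFTSize=%zu numSegments=%zu\n", blockSize, FFTSize, numSegments);

    // Split-layout complex multiply-accumulate, mirroring convolutionProcessingAndAccumulate().
    // prepareForConvolution() stores real parts in [0, N/2) and imaginary parts in [N/2, N),
    // so (a + ib) * (c + id) becomes four vector passes: +ac, -bd, +ad, +bc.
    const size_t half = 4;                           // a toy "FFTSize / 2"
    const std::vector<float> in { 1, 2, 3, 4,  5, 6, 7, 8 };   // re..., im...
    const std::vector<float> ir { 1, 1, 1, 1,  2, 2, 2, 2 };   // re..., im...
    std::vector<float> out (2 * half, 0.0f);

    for (size_t i = 0; i < half; ++i)
    {
        out[i]        += in[i]        * ir[i];          // += ac
        out[i]        -= in[half + i] * ir[half + i];   // -= bd   (real part)
        out[half + i] += in[i]        * ir[half + i];   // += ad
        out[half + i] += in[half + i] * ir[i];          // += bc   (imaginary part)
    }

    for (auto v : out)
        std::printf ("%g ", v);
    std::printf ("\n");
    return 0;
}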
329
330
331
332//==============================================================================
337struct Convolution::Pimpl : private Thread
338{
339 enum class ChangeRequest
340 {
341 changeEngine = 0,
342 changeSampleRate,
343 changeMaximumBufferSize,
344 changeSource,
345 changeImpulseResponseSize,
346 changeStereo,
347 changeTrimming,
348 changeNormalisation,
349 changeIgnore,
350 numChangeRequestTypes
351 };
352
353 using SourceType = ConvolutionEngine::ProcessingInformation::SourceType;
354
355 //==============================================================================
356 Pimpl() : Thread ("Convolution"), abstractFifo (fifoSize)
357 {
358 abstractFifo.reset();
359 fifoRequestsType.resize (fifoSize);
360 fifoRequestsParameter.resize (fifoSize);
361
362 requestsType.resize (fifoSize);
363 requestsParameter.resize (fifoSize);
364
365 for (auto i = 0; i < 4; ++i)
366 engines.add (new ConvolutionEngine());
367
368 currentInfo.maximumBufferSize = 0;
369 currentInfo.buffer = &impulseResponse;
370
371 temporaryBuffer.setSize (2, static_cast<int> (maximumTimeInSamples), false, false, true);
372 impulseResponseOriginal.setSize (2, static_cast<int> (maximumTimeInSamples), false, false, true);
373 impulseResponse.setSize (2, static_cast<int> (maximumTimeInSamples), false, false, true);
374 }
375
376 ~Pimpl() override
377 {
378 stopThread (10000);
379 }
380
381 //==============================================================================
383 void initProcessing (int maximumBufferSize)
384 {
385 stopThread (1000);
386
387 interpolationBuffer.setSize (1, maximumBufferSize, false, false, true);
388 mustInterpolate = false;
389 }
390
391 //==============================================================================
393 void addToFifo (ChangeRequest type, juce::var parameter)
394 {
395 int start1, size1, start2, size2;
396 abstractFifo.prepareToWrite (1, start1, size1, start2, size2);
397
398 // If you hit this assertion then you have requested more impulse response
399 // changes than the Convolution class can handle.
400 jassert (size1 + size2 > 0);
401
402 if (size1 > 0)
403 {
404 fifoRequestsType.setUnchecked (start1, type);
405 fifoRequestsParameter.setUnchecked (start1, parameter);
406 }
407
408 if (size2 > 0)
409 {
410 fifoRequestsType.setUnchecked (start2, type);
411 fifoRequestsParameter.setUnchecked (start2, parameter);
412 }
413
414 abstractFifo.finishedWrite (size1 + size2);
415 }
416
418 void addToFifo (ChangeRequest* types, juce::var* parameters, int numEntries)
419 {
420 int start1, size1, start2, size2;
421 abstractFifo.prepareToWrite (numEntries, start1, size1, start2, size2);
422
423 // If you hit this assertion then you have requested more impulse response
424 // changes than the Convolution class can handle.
425 jassert (numEntries > 0 && size1 + size2 > 0);
426
427 if (size1 > 0)
428 {
429 for (auto i = 0; i < size1; ++i)
430 {
431 fifoRequestsType.setUnchecked (start1 + i, types[i]);
432 fifoRequestsParameter.setUnchecked (start1 + i, parameters[i]);
433 }
434 }
435
436 if (size2 > 0)
437 {
438 for (auto i = 0; i < size2; ++i)
439 {
440 fifoRequestsType.setUnchecked (start2 + i, types[i + size1]);
441 fifoRequestsParameter.setUnchecked (start2 + i, parameters[i + size1]);
442 }
443 }
444
445 abstractFifo.finishedWrite (size1 + size2);
446 }
447
449 void readFromFifo (ChangeRequest& type, juce::var& parameter)
450 {
451 int start1, size1, start2, size2;
452 abstractFifo.prepareToRead (1, start1, size1, start2, size2);
453
454 if (size1 > 0)
455 {
456 type = fifoRequestsType[start1];
457 parameter = fifoRequestsParameter[start1];
458 }
459
460 if (size2 > 0)
461 {
462 type = fifoRequestsType[start2];
463 parameter = fifoRequestsParameter[start2];
464 }
465
466 abstractFifo.finishedRead (size1 + size2);
467 }
468
470 int getNumRemainingEntries() const noexcept
471 {
472 return abstractFifo.getNumReady();
473 }
474
475 //==============================================================================
482 void processFifo()
483 {
484 if (getNumRemainingEntries() == 0 || isThreadRunning() || mustInterpolate)
485 return;
486
487 auto numRequests = 0;
488
489 // retrieve the information from the FIFO for processing
490 while (getNumRemainingEntries() > 0 && numRequests < fifoSize)
491 {
492 ChangeRequest type = ChangeRequest::changeEngine;
493 juce::var parameter;
494
495 readFromFifo (type, parameter);
496
497 requestsType.setUnchecked (numRequests, type);
498 requestsParameter.setUnchecked (numRequests, parameter);
499
500 numRequests++;
501 }
502
503 // remove any useless messages
504 for (auto i = 0; i < (int) ChangeRequest::numChangeRequestTypes; ++i)
505 {
506 bool exists = false;
507
508 for (auto n = numRequests; --n >= 0;)
509 {
510 if (requestsType[n] == (ChangeRequest) i)
511 {
512 if (! exists)
513 exists = true;
514 else
515 requestsType.setUnchecked (n, ChangeRequest::changeIgnore);
516 }
517 }
518 }
519
520 changeLevel = 0;
521
522 for (auto n = 0; n < numRequests; ++n)
523 {
524 switch (requestsType[n])
525 {
526 case ChangeRequest::changeEngine:
527 changeLevel = 3;
528 break;
529
530 case ChangeRequest::changeSampleRate:
531 {
532 double newSampleRate = requestsParameter[n];
533
534 if (currentInfo.sampleRate != newSampleRate)
535 changeLevel = 3;
536
537 currentInfo.sampleRate = newSampleRate;
538 }
539 break;
540
541 case ChangeRequest::changeMaximumBufferSize:
542 {
543 int newMaximumBufferSize = requestsParameter[n];
544
545 if (currentInfo.maximumBufferSize != (size_t) newMaximumBufferSize)
546 changeLevel = 3;
547
548 currentInfo.maximumBufferSize = (size_t) newMaximumBufferSize;
549 }
550 break;
551
552 case ChangeRequest::changeSource:
553 {
554 auto* arrayParameters = requestsParameter[n].getArray();
555 auto newSourceType = static_cast<SourceType> (static_cast<int> (arrayParameters->getUnchecked (0)));
556
557 if (currentInfo.sourceType != newSourceType)
558 changeLevel = jmax (2, changeLevel);
559
560 if (newSourceType == SourceType::sourceBinaryData)
561 {
562 auto& prm = arrayParameters->getReference (1);
563 auto* newMemoryBlock = prm.getBinaryData();
564
565 auto* newPtr = newMemoryBlock->getData();
566 auto newSize = (int) newMemoryBlock->getSize();
567
568 if (currentInfo.sourceData != newPtr || currentInfo.sourceDataSize != newSize)
569 changeLevel = jmax (2, changeLevel);
570
571 currentInfo.sourceType = SourceType::sourceBinaryData;
572 currentInfo.sourceData = newPtr;
573 currentInfo.sourceDataSize = newSize;
574 currentInfo.fileImpulseResponse = File();
575 }
576 else if (newSourceType == SourceType::sourceAudioFile)
577 {
578 File newFile (arrayParameters->getUnchecked (1).toString());
579
580 if (currentInfo.fileImpulseResponse != newFile)
581 changeLevel = jmax (2, changeLevel);
582
583 currentInfo.sourceType = SourceType::sourceAudioFile;
584 currentInfo.fileImpulseResponse = newFile;
585 currentInfo.sourceData = nullptr;
586 currentInfo.sourceDataSize = 0;
587 }
588 else if (newSourceType == SourceType::sourceAudioBuffer)
589 {
590 double originalSampleRate (arrayParameters->getUnchecked (1));
591 changeLevel = jmax (2, changeLevel);
592
593 currentInfo.sourceType = SourceType::sourceAudioBuffer;
594 currentInfo.originalSampleRate = originalSampleRate;
595 currentInfo.fileImpulseResponse = File();
596 currentInfo.sourceData = nullptr;
597 currentInfo.sourceDataSize = 0;
598 }
599 }
600 break;
601
602 case ChangeRequest::changeImpulseResponseSize:
603 {
604 int64 newSize = requestsParameter[n];
605
606 if (currentInfo.wantedSize != newSize)
607 changeLevel = jmax (1, changeLevel);
608
609 currentInfo.wantedSize = newSize;
610 }
611 break;
612
613 case ChangeRequest::changeStereo:
614 {
615 bool newWantsStereo = requestsParameter[n];
616
617 if (currentInfo.wantsStereo != newWantsStereo)
618 changeLevel = jmax (0, changeLevel);
619
620 currentInfo.wantsStereo = newWantsStereo;
621 }
622 break;
623
624 case ChangeRequest::changeTrimming:
625 {
626 bool newWantsTrimming = requestsParameter[n];
627
628 if (currentInfo.wantsTrimming != newWantsTrimming)
629 changeLevel = jmax (1, changeLevel);
630
631 currentInfo.wantsTrimming = newWantsTrimming;
632 }
633 break;
634
635 case ChangeRequest::changeNormalisation:
636 {
637 bool newWantsNormalisation = requestsParameter[n];
638
639 if (currentInfo.wantsNormalisation != newWantsNormalisation)
640 changeLevel = jmax (1, changeLevel);
641
642 currentInfo.wantsNormalisation = newWantsNormalisation;
643 }
644 break;
645
646 case ChangeRequest::changeIgnore:
647 break;
648
649 default:
650 jassertfalse;
651 break;
652 }
653 }
654
655 if (currentInfo.sourceType == SourceType::sourceNone)
656 {
657 currentInfo.sourceType = SourceType::sourceAudioBuffer;
658
659 if (currentInfo.sampleRate == 0)
660 currentInfo.sampleRate = 44100;
661
662 if (currentInfo.maximumBufferSize == 0)
663 currentInfo.maximumBufferSize = 128;
664
665 currentInfo.originalSampleRate = currentInfo.sampleRate;
666 currentInfo.wantedSize = 1;
667 currentInfo.fileImpulseResponse = File();
668 currentInfo.sourceData = nullptr;
669 currentInfo.sourceDataSize = 0;
670
671 AudioBuffer<float> newBuffer;
672 newBuffer.setSize (1, 1);
673 newBuffer.setSample (0, 0, 1.f);
674
675 copyBufferToTemporaryLocation (dsp::AudioBlock<float> (newBuffer));
676 }
677
678 // action depending on the change level
679 if (changeLevel == 3)
680 {
681 loadImpulseResponse();
682 processImpulseResponse();
683 initializeConvolutionEngines();
684 }
685 else if (changeLevel > 0)
686 {
687 startThread();
688 }
689 }
690
691 //==============================================================================
695 void copyBufferToTemporaryLocation (dsp::AudioBlock<float> block)
696 {
697 const SpinLock::ScopedLockType sl (processLock);
698
699 currentInfo.originalNumChannels = (block.getNumChannels() > 1 ? 2 : 1);
700 currentInfo.originalSize = (int) jmin ((size_t) maximumTimeInSamples, block.getNumSamples());
701
702 for (auto channel = 0; channel < currentInfo.originalNumChannels; ++channel)
703 temporaryBuffer.copyFrom (channel, 0, block.getChannelPointer ((size_t) channel), (int) currentInfo.originalSize);
704 }
705
706 //==============================================================================
708 void reset()
709 {
710 for (auto* e : engines)
711 e->reset();
712
713 mustInterpolate = false;
714
715 processFifo();
716 }
717
721 void processSamples (const AudioBlock<const float>& input, AudioBlock<float>& output)
722 {
723 processFifo();
724
725 size_t numChannels = jmin (input.getNumChannels(), (size_t) (currentInfo.wantsStereo ? 2 : 1));
726 size_t numSamples = jmin (input.getNumSamples(), output.getNumSamples());
727
728 if (mustInterpolate == false)
729 {
730 for (size_t channel = 0; channel < numChannels; ++channel)
731 engines[(int) channel]->processSamples (input.getChannelPointer (channel), output.getChannelPointer (channel), numSamples);
732 }
733 else
734 {
735 auto interpolated = dsp::AudioBlock<float> (interpolationBuffer).getSubBlock (0, numSamples);
736
737 for (size_t channel = 0; channel < numChannels; ++channel)
738 {
739 auto&& buffer = output.getSingleChannelBlock (channel);
740
741 interpolationBuffer.copyFrom (0, 0, input.getChannelPointer (channel), (int) numSamples);
742
743 engines[(int) channel]->processSamples (input.getChannelPointer (channel), buffer.getChannelPointer (0), numSamples);
744 changeVolumes[channel].applyGain (buffer.getChannelPointer (0), (int) numSamples);
745
746 auto* interPtr = interpolationBuffer.getWritePointer (0);
747 engines[(int) channel + 2]->processSamples (interPtr, interPtr, numSamples);
748 changeVolumes[channel + 2].applyGain (interPtr, (int) numSamples);
749
750 buffer += interpolated;
751 }
752
753 if (input.getNumChannels() > 1 && currentInfo.wantsStereo == false)
754 {
755 auto&& buffer = output.getSingleChannelBlock (1);
756
757 changeVolumes[1].applyGain (buffer.getChannelPointer (0), (int) numSamples);
758 changeVolumes[3].applyGain (buffer.getChannelPointer (0), (int) numSamples);
759 }
760
761 if (changeVolumes[0].isSmoothing() == false)
762 {
763 mustInterpolate = false;
764
765 for (auto channel = 0; channel < 2; ++channel)
766 engines[channel]->copyStateFromOtherEngine (*engines[channel + 2]);
767 }
768 }
769
770 if (input.getNumChannels() > 1 && currentInfo.wantsStereo == false)
771 output.getSingleChannelBlock (1).copyFrom (output.getSingleChannelBlock (0));
772 }
773
774 //==============================================================================
775 const int64 maximumTimeInSamples = 10 * 96000;
776
777private:
778 //==============================================================================
782 void run() override
783 {
784 if (changeLevel == 2)
785 {
786 loadImpulseResponse();
787
788 if (isThreadRunning() && threadShouldExit())
789 return;
790 }
791
792 processImpulseResponse();
793
794 if (isThreadRunning() && threadShouldExit())
795 return;
796
797 initializeConvolutionEngines();
798 }
799
801 void loadImpulseResponse()
802 {
803 if (currentInfo.sourceType == SourceType::sourceBinaryData)
804 {
805 if (! (copyAudioStreamInAudioBuffer (new MemoryInputStream (currentInfo.sourceData, (size_t) currentInfo.sourceDataSize, false))))
806 return;
807 }
808 else if (currentInfo.sourceType == SourceType::sourceAudioFile)
809 {
810 if (! (copyAudioStreamInAudioBuffer (new FileInputStream (currentInfo.fileImpulseResponse))))
811 return;
812 }
813 else if (currentInfo.sourceType == SourceType::sourceAudioBuffer)
814 {
815 copyBufferFromTemporaryLocation();
816 }
817 }
818
822 void processImpulseResponse()
823 {
824 trimAndResampleImpulseResponse (currentInfo.originalNumChannels, currentInfo.originalSampleRate, currentInfo.wantsTrimming);
825
826 if (isThreadRunning() && threadShouldExit())
827 return;
828
829 if (currentInfo.wantsNormalisation)
830 {
831 if (currentInfo.originalNumChannels > 1)
832 {
833 normaliseImpulseResponse (currentInfo.buffer->getWritePointer (0), (int) currentInfo.finalSize, 1.0);
834 normaliseImpulseResponse (currentInfo.buffer->getWritePointer (1), (int) currentInfo.finalSize, 1.0);
835 }
836 else
837 {
838 normaliseImpulseResponse (currentInfo.buffer->getWritePointer (0), (int) currentInfo.finalSize, 1.0);
839 }
840 }
841
842 if (currentInfo.originalNumChannels == 1)
843 currentInfo.buffer->copyFrom (1, 0, *currentInfo.buffer, 0, 0, (int) currentInfo.finalSize);
844 }
845
849 bool copyAudioStreamInAudioBuffer (InputStream* stream)
850 {
851 AudioFormatManager manager;
852 manager.registerBasicFormats();
853 std::unique_ptr<AudioFormatReader> formatReader (manager.createReaderFor (stream));
854
855 if (formatReader != nullptr)
856 {
857 currentInfo.originalNumChannels = formatReader->numChannels > 1 ? 2 : 1;
858 currentInfo.originalSampleRate = formatReader->sampleRate;
859 currentInfo.originalSize = static_cast<int> (jmin (maximumTimeInSamples, formatReader->lengthInSamples));
860
861 impulseResponseOriginal.clear();
862 formatReader->read (&(impulseResponseOriginal), 0, (int) currentInfo.originalSize, 0, true, currentInfo.originalNumChannels > 1);
863
864 return true;
865 }
866
867 return false;
868 }
869
873 void copyBufferFromTemporaryLocation()
874 {
875 const SpinLock::ScopedLockType sl (processLock);
876
877 for (auto channel = 0; channel < currentInfo.originalNumChannels; ++channel)
878 impulseResponseOriginal.copyFrom (channel, 0, temporaryBuffer, channel, 0, (int) currentInfo.originalSize);
879 }
880
882 void trimAndResampleImpulseResponse (int numChannels, double srcSampleRate, bool mustTrim)
883 {
884 auto thresholdTrim = Decibels::decibelsToGain (-80.0f);
885 auto indexStart = 0;
886 auto indexEnd = currentInfo.originalSize - 1;
887
888 if (mustTrim)
889 {
890 indexStart = currentInfo.originalSize - 1;
891 indexEnd = 0;
892
893 for (auto channel = 0; channel < numChannels; ++channel)
894 {
895 auto localIndexStart = 0;
896 auto localIndexEnd = currentInfo.originalSize - 1;
897
898 auto* channelData = impulseResponseOriginal.getReadPointer (channel);
899
900 while (localIndexStart < currentInfo.originalSize - 1
901 && channelData[localIndexStart] <= thresholdTrim
902 && channelData[localIndexStart] >= -thresholdTrim)
903 ++localIndexStart;
904
905 while (localIndexEnd >= 0
906 && channelData[localIndexEnd] <= thresholdTrim
907 && channelData[localIndexEnd] >= -thresholdTrim)
908 --localIndexEnd;
909
910 indexStart = jmin (indexStart, localIndexStart);
911 indexEnd = jmax (indexEnd, localIndexEnd);
912 }
913
914 if (indexStart > 0)
915 {
916 for (auto channel = 0; channel < numChannels; ++channel)
917 {
918 auto* channelData = impulseResponseOriginal.getWritePointer (channel);
919
920 for (auto i = 0; i < indexEnd - indexStart + 1; ++i)
921 channelData[i] = channelData[i + indexStart];
922
923 for (auto i = indexEnd - indexStart + 1; i < currentInfo.originalSize - 1; ++i)
924 channelData[i] = 0.0f;
925 }
926 }
927 }
928
929 if (currentInfo.sampleRate == srcSampleRate)
930 {
931 // No resampling
932 currentInfo.finalSize = jmin (static_cast<int> (currentInfo.wantedSize), indexEnd - indexStart + 1);
933
934 impulseResponse.clear();
935
936 for (auto channel = 0; channel < numChannels; ++channel)
937 impulseResponse.copyFrom (channel, 0, impulseResponseOriginal, channel, 0, (int) currentInfo.finalSize);
938 }
939 else
940 {
941 // Resampling
942 auto factorReading = srcSampleRate / currentInfo.sampleRate;
943 currentInfo.finalSize = jmin (static_cast<int> (currentInfo.wantedSize), roundToInt ((indexEnd - indexStart + 1) / factorReading));
944
945 impulseResponse.clear();
946
947 MemoryAudioSource memorySource (impulseResponseOriginal, false);
948 ResamplingAudioSource resamplingSource (&memorySource, false, (int) numChannels);
949
950 resamplingSource.setResamplingRatio (factorReading);
951 resamplingSource.prepareToPlay ((int) currentInfo.finalSize, currentInfo.sampleRate);
952
953 AudioSourceChannelInfo info;
954 info.startSample = 0;
955 info.numSamples = (int) currentInfo.finalSize;
956 info.buffer = &impulseResponse;
957
958 resamplingSource.getNextAudioBlock (info);
959 }
960
961 // Filling the second channel with the first if necessary
962 if (numChannels == 1)
963 impulseResponse.copyFrom (1, 0, impulseResponse, 0, 0, (int) currentInfo.finalSize);
964 }
965
967 void normaliseImpulseResponse (float* samples, int numSamples, double factorResampling) const
968 {
969 auto magnitude = 0.0f;
970
971 for (auto i = 0; i < numSamples; ++i)
972 magnitude += samples[i] * samples[i];
973
974 auto magnitudeInv = 1.0f / (4.0f * std::sqrt (magnitude)) * 0.5f * static_cast <float> (factorResampling);
975
976 for (auto i = 0; i < numSamples; ++i)
977 samples[i] *= magnitudeInv;
978 }
979
980 // ================================================================================================================
984 void initializeConvolutionEngines()
985 {
986 if (currentInfo.maximumBufferSize == 0)
987 return;
988
989 if (changeLevel == 3)
990 {
991 for (auto i = 0; i < 2; ++i)
992 engines[i]->initializeConvolutionEngine (currentInfo, i);
993
994 mustInterpolate = false;
995 }
996 else
997 {
998 for (auto i = 0; i < 2; ++i)
999 {
1000 engines[i + 2]->initializeConvolutionEngine (currentInfo, i);
1001 engines[i + 2]->reset();
1002
1003 if (isThreadRunning() && threadShouldExit())
1004 return;
1005 }
1006
1007 for (auto i = 0; i < 2; ++i)
1008 {
1009 changeVolumes[i].setTargetValue (1.0f);
1010 changeVolumes[i].reset (currentInfo.sampleRate, 0.05);
1011 changeVolumes[i].setTargetValue (0.0f);
1012
1013 changeVolumes[i + 2].setTargetValue (0.0f);
1014 changeVolumes[i + 2].reset (currentInfo.sampleRate, 0.05);
1015 changeVolumes[i + 2].setTargetValue (1.0f);
1016
1017 }
1018
1019 mustInterpolate = true;
1020 }
1021 }
1022
1023
1024 //==============================================================================
1025 static constexpr int fifoSize = 1024; // the size of the fifo which handles all the change requests
1026 AbstractFifo abstractFifo; // the abstract fifo
1027
1028 Array<ChangeRequest> fifoRequestsType; // an array of ChangeRequest
1029 Array<juce::var> fifoRequestsParameter; // an array of change parameters
1030
1031 Array<ChangeRequest> requestsType; // an array of ChangeRequest
1032 Array<juce::var> requestsParameter; // an array of change parameters
1033
1034 int changeLevel = 0; // the current level of requested change in the convolution engine
1035
1036 //==============================================================================
1037 ConvolutionEngine::ProcessingInformation currentInfo; // the information about the impulse response to load
1038
1039 AudioBuffer<float> temporaryBuffer; // a temporary buffer that is used when the function copyAndLoadImpulseResponse is called in the main API
1040 SpinLock processLock; // a necessary lock to use with this temporary buffer
1041
1042 AudioBuffer<float> impulseResponseOriginal; // a buffer with the original impulse response
1043 AudioBuffer<float> impulseResponse; // a buffer with the impulse response trimmed, resampled, resized and normalised
1044
1045 //==============================================================================
1046 OwnedArray<ConvolutionEngine> engines; // the 4 convolution engines being used
1047
1048 AudioBuffer<float> interpolationBuffer; // a buffer to do the interpolation between the convolution engines 0-1 and 2-3
1049 LogRampedValue<float> changeVolumes[4]; // the volumes for each convolution engine during interpolation
1050
1051 bool mustInterpolate = false; // tells if the convolution engines outputs must be currently interpolated
1052
1053 //==============================================================================
1054 JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (Pimpl)
1055};
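Editorial note: when a queued change only needs a partial update, Pimpl::processSamples() above keeps engines 0-1 running on the old impulse response while engines 2-3 are initialised with the new one, crossfades between them with the four changeVolumes ramps, and finally copies the new state back with copyStateFromOtherEngine(). The sketch below is not library code; it only illustrates the crossfade itself, with plain linear ramps standing in for the LogRampedValue members and made-up constant signals standing in for the two engine outputs.

#include <cstdio>
#include <vector>

int main()
{
    const int numSamples = 8;
    std::vector<float> oldEngineOut (numSamples, 1.0f);   // pretend output of engines[0] (old IR)
    std::vector<float> newEngineOut (numSamples, 0.5f);   // pretend output of engines[2] (new IR)
    std::vector<float> mixed (numSamples, 0.0f);

    for (int i = 0; i < numSamples; ++i)
    {
        const float fadeIn  = float (i + 1) / float (numSamples);  // like changeVolumes[2]: 0 -> 1
        const float fadeOut = 1.0f - fadeIn;                       // like changeVolumes[0]: 1 -> 0
        mixed[i] = oldEngineOut[i] * fadeOut + newEngineOut[i] * fadeIn;
    }

    // Once the ramps have finished, the real implementation copies the state of
    // engines 2-3 into engines 0-1 and stops interpolating.
    for (auto s : mixed)
        std::printf ("%f\n", s);
    return 0;
}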
1056
1057
1058//==============================================================================
1059Convolution::Convolution()
1060{
1061 pimpl.reset (new Pimpl());
1062 pimpl->addToFifo (Convolution::Pimpl::ChangeRequest::changeEngine, juce::var (0));
1063}
1064
1065Convolution::~Convolution()
1066{
1067}
1068
1069void Convolution::loadImpulseResponse (const void* sourceData, size_t sourceDataSize,
1070 bool wantsStereo, bool wantsTrimming, size_t size,
1071 bool wantsNormalisation)
1072{
1073 if (sourceData == nullptr)
1074 return;
1075
1076 auto maximumSamples = (size_t) pimpl->maximumTimeInSamples;
1077 auto wantedSize = (size == 0 ? maximumSamples : jmin (size, maximumSamples));
1078
1079 Pimpl::ChangeRequest types[] = { Pimpl::ChangeRequest::changeSource,
1080 Pimpl::ChangeRequest::changeImpulseResponseSize,
1081 Pimpl::ChangeRequest::changeStereo,
1082 Pimpl::ChangeRequest::changeTrimming,
1083 Pimpl::ChangeRequest::changeNormalisation };
1084
1085 Array<juce::var> sourceParameter;
1086
1087 sourceParameter.add (juce::var ((int) ConvolutionEngine::ProcessingInformation::SourceType::sourceBinaryData));
1088 sourceParameter.add (juce::var (sourceData, sourceDataSize));
1089
1090 juce::var parameters[] = { juce::var (sourceParameter),
1091 juce::var (static_cast<int64> (wantedSize)),
1092 juce::var (wantsStereo),
1093 juce::var (wantsTrimming),
1094 juce::var (wantsNormalisation) };
1095
1096 pimpl->addToFifo (types, parameters, 5);
1097}
1098
1099void Convolution::loadImpulseResponse (const File& fileImpulseResponse, bool wantsStereo,
1100 bool wantsTrimming, size_t size, bool wantsNormalisation)
1101{
1102 if (! fileImpulseResponse.existsAsFile())
1103 return;
1104
1105 auto maximumSamples = (size_t) pimpl->maximumTimeInSamples;
1106 auto wantedSize = (size == 0 ? maximumSamples : jmin (size, maximumSamples));
1107
1108 Pimpl::ChangeRequest types[] = { Pimpl::ChangeRequest::changeSource,
1109 Pimpl::ChangeRequest::changeImpulseResponseSize,
1110 Pimpl::ChangeRequest::changeStereo,
1111 Pimpl::ChangeRequest::changeTrimming,
1112 Pimpl::ChangeRequest::changeNormalisation };
1113
1114 Array<juce::var> sourceParameter;
1115
1116 sourceParameter.add (juce::var ((int) ConvolutionEngine::ProcessingInformation::SourceType::sourceAudioFile));
1117 sourceParameter.add (juce::var (fileImpulseResponse.getFullPathName()));
1118
1119 juce::var parameters[] = { juce::var (sourceParameter),
1120 juce::var (static_cast<int64> (wantedSize)),
1121 juce::var (wantsStereo),
1122 juce::var (wantsTrimming),
1123 juce::var (wantsNormalisation) };
1124
1125 pimpl->addToFifo (types, parameters, 5);
1126}
1127
1128void Convolution::copyAndLoadImpulseResponseFromBuffer (AudioBuffer<float>& buffer,
1129 double bufferSampleRate, bool wantsStereo, bool wantsTrimming, bool wantsNormalisation, size_t size)
1130{
1131 copyAndLoadImpulseResponseFromBlock (AudioBlock<float> (buffer), bufferSampleRate,
1132 wantsStereo, wantsTrimming, wantsNormalisation, size);
1133}
1134
1135void Convolution::copyAndLoadImpulseResponseFromBlock (AudioBlock<float> block, double bufferSampleRate,
1136 bool wantsStereo, bool wantsTrimming, bool wantsNormalisation, size_t size)
1137{
1138 jassert (bufferSampleRate > 0);
1139
1140 if (block.getNumSamples() == 0)
1141 return;
1142
1143 auto maximumSamples = (size_t) pimpl->maximumTimeInSamples;
1144 auto wantedSize = (size == 0 ? maximumSamples : jmin (size, maximumSamples));
1145
1146 pimpl->copyBufferToTemporaryLocation (block);
1147
1148 Pimpl::ChangeRequest types[] = { Pimpl::ChangeRequest::changeSource,
1149 Pimpl::ChangeRequest::changeImpulseResponseSize,
1150 Pimpl::ChangeRequest::changeStereo,
1151 Pimpl::ChangeRequest::changeTrimming,
1152 Pimpl::ChangeRequest::changeNormalisation };
1153
1154 Array<juce::var> sourceParameter;
1155 sourceParameter.add (juce::var ((int) ConvolutionEngine::ProcessingInformation::SourceType::sourceAudioBuffer));
1156 sourceParameter.add (juce::var (bufferSampleRate));
1157
1158 juce::var parameters[] = { juce::var (sourceParameter),
1159 juce::var (static_cast<int64> (wantedSize)),
1160 juce::var (wantsStereo),
1161 juce::var (wantsTrimming),
1162 juce::var (wantsNormalisation) };
1163
1164 pimpl->addToFifo (types, parameters, 5);
1165}
1166
1167void Convolution::prepare (const ProcessSpec& spec)
1168{
1169 jassert (isPositiveAndBelow (spec.numChannels, static_cast<uint32> (3))); // only mono and stereo are supported
1170
1171 Pimpl::ChangeRequest types[] = { Pimpl::ChangeRequest::changeSampleRate,
1172 Pimpl::ChangeRequest::changeMaximumBufferSize };
1173
1174 juce::var parameters[] = { juce::var (spec.sampleRate),
1175 juce::var (static_cast<int> (spec.maximumBlockSize)) };
1176
1177 pimpl->addToFifo (types, parameters, 2);
1178 pimpl->initProcessing (static_cast<int> (spec.maximumBlockSize));
1179
1180 for (size_t channel = 0; channel < spec.numChannels; ++channel)
1181 {
1182 volumeDry[channel].reset (spec.sampleRate, 0.05);
1183 volumeWet[channel].reset (spec.sampleRate, 0.05);
1184 }
1185
1186 sampleRate = spec.sampleRate;
1187 dryBuffer = AudioBlock<float> (dryBufferStorage,
1188 jmin (spec.numChannels, 2u),
1189 spec.maximumBlockSize);
1190
1191 isActive = true;
1192}
1193
1194void Convolution::reset() noexcept
1195{
1196 dryBuffer.clear();
1197 pimpl->reset();
1198}
1199
1200void Convolution::processSamples (const AudioBlock<const float>& input, AudioBlock<float>& output, bool isBypassed) noexcept
1201{
1202 if (! isActive)
1203 return;
1204
1205 jassert (input.getNumChannels() == output.getNumChannels());
1206 jassert (isPositiveAndBelow (input.getNumChannels(), static_cast<size_t> (3))); // only mono and stereo are supported
1207
1208 auto numChannels = jmin (input.getNumChannels(), (size_t) 2);
1209 auto numSamples = jmin (input.getNumSamples(), output.getNumSamples());
1210
1211 auto dry = dryBuffer.getSubsetChannelBlock (0, numChannels);
1212
1213 if (volumeDry[0].isSmoothing())
1214 {
1215 dry.copyFrom (input);
1216
1217 for (size_t channel = 0; channel < numChannels; ++channel)
1218 volumeDry[channel].applyGain (dry.getChannelPointer (channel), (int) numSamples);
1219
1220 pimpl->processSamples (input, output);
1221
1222 for (size_t channel = 0; channel < numChannels; ++channel)
1223 volumeWet[channel].applyGain (output.getChannelPointer (channel), (int) numSamples);
1224
1225 output += dry;
1226 }
1227 else
1228 {
1229 if (! currentIsBypassed)
1230 pimpl->processSamples (input, output);
1231
1232 if (isBypassed != currentIsBypassed)
1233 {
1234 currentIsBypassed = isBypassed;
1235
1236 for (size_t channel = 0; channel < numChannels; ++channel)
1237 {
1238 volumeDry[channel].setTargetValue (isBypassed ? 0.0f : 1.0f);
1239 volumeDry[channel].reset (sampleRate, 0.05);
1240 volumeDry[channel].setTargetValue (isBypassed ? 1.0f : 0.0f);
1241
1242 volumeWet[channel].setTargetValue (isBypassed ? 1.0f : 0.0f);
1243 volumeWet[channel].reset (sampleRate, 0.05);
1244 volumeWet[channel].setTargetValue (isBypassed ? 0.0f : 1.0f);
1245 }
1246 }
1247 }
1248}
1249
1250} // namespace dsp
1251} // namespace juce
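
Editorial note: for orientation, here is a hedged usage sketch of the public API defined above. It assumes the JUCE modules are on the include path, that the templated Convolution::process() helper declared in juce_Convolution.h forwards to the processSamples() overload shown at the end of this file, and that the impulse-response path and the wrapper struct name are placeholders.

#include <juce_dsp/juce_dsp.h>

struct ConvolutionReverbSection
{
    void prepare (const juce::dsp::ProcessSpec& spec)
    {
        // Queues changeSampleRate / changeMaximumBufferSize requests on the Pimpl FIFO.
        convolution.prepare (spec);

        // Queues a changeSource request; the impulse response is then loaded, trimmed,
        // resampled and normalised away from the audio thread.
        convolution.loadImpulseResponse (juce::File ("/path/to/impulse.wav"),   // placeholder path
                                         true,     // wantsStereo
                                         true,     // wantsTrimming
                                         0,        // size: 0 == keep the full length
                                         true);    // wantsNormalisation
    }

    void process (juce::AudioBuffer<float>& buffer)
    {
        juce::dsp::AudioBlock<float> block (buffer);
        convolution.process (juce::dsp::ProcessContextReplacing<float> (block));
    }

    juce::dsp::Convolution convolution;
};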