From 774031a7a936ed81df58d7d71225078ce824a7d6 Mon Sep 17 00:00:00 2001 From: essej Date: Tue, 26 Apr 2022 17:58:45 -0400 Subject: [PATCH] Separate AAX from the normal build so that pffft can be used instead of FFTW for licensing purposes. Prevent the file position from jumping when changing prebuffering. Eliminate unnecessary FFT size calculations --- CMakeLists.txt | 62 +- Source/PS_Source/Stretch.cpp | 58 +- Source/PS_Source/Stretch.h | 21 + Source/PS_Source/StretchSource.cpp | 13 +- Source/PS_Source/StretchSource.h | 2 + Source/PS_Source/globals.h | 2 +- Source/PluginEditor.cpp | 24 +- Source/PluginProcessor.cpp | 34 +- Source/PluginProcessor.h | 3 +- Source/pffft/pffft.c | 1884 ++++++++++++++++++++++++++++ Source/pffft/pffft.h | 177 +++ release/distmac.sh | 5 +- release/distwin.sh | 4 +- 13 files changed, 2253 insertions(+), 36 deletions(-) create mode 100644 Source/pffft/pffft.c create mode 100644 Source/pffft/pffft.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 5775030..47ad08b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -100,7 +100,7 @@ if (AAX_SDK_PATH) juce_set_aax_sdk_path (${AAX_SDK_PATH}) if (APPLE OR (NOT ("${CMAKE_VS_PLATFORM_NAME}" STREQUAL "Win32"))) - list (APPEND FormatsToBuild AAX) + list (APPEND AaxFormatsToBuild AAX) endif() endif() @@ -135,7 +135,7 @@ set (MacPList " # the formats specified by the FORMATS arguments. This function accepts many optional arguments. # Check the readme at `docs/CMake API.md` in the JUCE repo for the full list. -function(sono_add_custom_plugin_target target_name product_name formats is_instrument plugincode) +function(sono_add_custom_plugin_target target_name product_name formats is_instrument no_fftw plugincode) if (is_instrument) set (vst3cats Instrument Network) @@ -197,16 +197,28 @@ function(sono_add_custom_plugin_target target_name product_name formats is_instr ) + set (FFTW_DEPLIBS "") + + # platform specific stuff if (APPLE) list (APPEND HEADER_INCLUDES deps/mac/include) list (APPEND LIB_PATHS ${CMAKE_CURRENT_SOURCE_DIR}/deps/mac/lib) list (APPEND PlatSourceFiles Source/CrossPlatformUtilsMac.mm) + + if (no_fftw) + # use vDSP + list (APPEND PLAT_COMPILE_DEFS + PS_USE_VDSP_FFT=1 + ) + else() + list (APPEND FFTW_DEPLIBS fftw3f) + endif() elseif (WIN32) list (APPEND HEADER_INCLUDES deps/windows ../asiosdk/common) list (APPEND PlatSourceFiles Source/CrossPlatformUtilsWindows.cpp) - message (STATUS "Win generator platform is: ${CMAKE_VS_PLATFORM_NAME}" ) + message (STATUS "Win generator platform is: ${CMAKE_VS_PLATFORM_NAME}" ) if ("${CMAKE_VS_PLATFORM_NAME}" STREQUAL "Win32") list (APPEND LIB_PATHS $<$<CONFIG:Debug>:${CMAKE_CURRENT_SOURCE_DIR}/deps/windows/Debug32> @@ -225,11 +237,36 @@ function(sono_add_custom_plugin_target target_name product_name formats is_instr WINVER=0x0601 _WIN32_WINNT=0x0601 NOMINMAX) + + if (no_fftw) + # use pffft + list (APPEND PLAT_COMPILE_DEFS + PS_USE_PFFFT=1 + ) + else() + list (APPEND FFTW_DEPLIBS + fftw3f + mingwex + gcc + mingw32 + msvcrt + ) + endif() + else() # Linux list (APPEND PlatSourceFiles Source/CrossPlatformUtilsLinux.cpp) list ( APPEND PLAT_COMPILE_DEFS JUCE_USE_MP3AUDIOFORMAT=1 ) + + if (no_fftw) + # use pffft + list (APPEND PLAT_COMPILE_DEFS + PS_USE_PFFFT=1 + ) + else() + list (APPEND FFTW_DEPLIBS fftw3f) + endif() endif() @@ -277,7 +314,9 @@ function(sono_add_custom_plugin_target target_name product_name formats is_instr Source/WDL/resample.cpp Source/WDL/wdltypes.h Source/WDL/denormal.h - Source/WDL/heapbuf.h + Source/WDL/heapbuf.h + Source/pffft/pffft.h + Source/pffft/pffft.c ) target_sources("${target_name}"
PRIVATE @@ -369,6 +408,8 @@ function(sono_add_custom_plugin_target target_name product_name formats is_instr set_target_properties(${target_name}_PSData PROPERTIES FOLDER "Targets") + + if (UNIX AND NOT APPLE) target_compile_options("${target_name}_PSData" PRIVATE @@ -413,11 +454,7 @@ function(sono_add_custom_plugin_target target_name product_name formats is_instr ${target_name}_PSData - fftw3f - $<$:mingwex> - $<$:gcc> - $<$:mingw32> - $<$:msvcrt> + ${FFTW_DEPLIBS} PUBLIC juce::juce_recommended_config_flags @@ -428,7 +465,12 @@ function(sono_add_custom_plugin_target target_name product_name formats is_instr endfunction() # most of the targets -sono_add_custom_plugin_target(PaulXStretch PaulXStretch "${FormatsToBuild}" FALSE "Pxst") +sono_add_custom_plugin_target(PaulXStretch PaulXStretch "${FormatsToBuild}" FALSE FALSE "Pxst") + +if (AaxFormatsToBuild) + # AAX builds without fftw + sono_add_custom_plugin_target(PaulXStretchAAX PaulXStretch "${AaxFormatsToBuild}" FALSE TRUE "Pxst") +endif() # Mobile targets #sono_add_custom_plugin_target(PaulXStretch "AUv3 Standalone" FALSE "NBus") diff --git a/Source/PS_Source/Stretch.cpp b/Source/PS_Source/Stretch.cpp index 2b0c9ba..6394310 100644 --- a/Source/PS_Source/Stretch.cpp +++ b/Source/PS_Source/Stretch.cpp @@ -58,6 +58,13 @@ FFT::FFT(int nsamples_, bool no_inverse) m_workReal.resize(nsamples,false); m_workImag.resize(nsamples,false); + //Logger::writeToLog("fftsize: " + String(nsamples) + " log2N: " + String(log2N)); +#elif PS_USE_PFFFT + + planpffft = pffft_new_setup(nsamples, PFFFT_REAL); + + m_work.resize(2*nsamples,false); + //Logger::writeToLog("fftsize: " + String(nsamples) + " log2N: " + String(log2N)); #else @@ -88,6 +95,10 @@ FFT::~FFT() { #if PS_USE_VDSP_FFT vDSP_destroy_fftsetup((FFTSetup)planfft); +#elif PS_USE_PFFFT + if (planpffft) { + pffft_destroy_setup(planpffft); + } #else fftwf_destroy_plan(planfftw); if (planifftw!=nullptr) @@ -139,6 +150,24 @@ void FFT::smp2freq() freq[0] = 0.0; +#elif PS_USE_PFFFT + const int halfsamples = nsamples / 2; + auto * databuf = data.data(); + + pffft_transform_ordered(planpffft, smp.data(), databuf, m_work.data(), PFFFT_FORWARD); + + data[1] = 0.0f; + + // compute magnitude + + FloatVectorOperations::multiply(databuf, databuf, nsamples); + + for (int k=1, l=2; k < halfsamples; ++k, l+=2) { + freq[k] = sqrt(databuf[l] + databuf[l+1]); + } + + freq[0] = 0.0; + #else for (int i=0;i #include @@ -107,6 +115,11 @@ private: buf = (float*)malloc(size*sizeof(float)); else buf = (double*)malloc(size * sizeof(double)); +#elif PS_USE_PFFFT + if constexpr (std::is_same::value) + buf = (float*)pffft_aligned_malloc(size*sizeof(float)); + else + buf = (double*)pffft_aligned_malloc(size * sizeof(double)); #else if constexpr (std::is_same::value) buf = (float*)fftwf_malloc(size*sizeof(float)); @@ -123,6 +136,11 @@ private: free(buf); else free(buf); +#elif PS_USE_PFFFT + if constexpr (std::is_same::value) + pffft_aligned_free(buf); + else + pffft_aligned_free(buf); #else if constexpr (std::is_same::value) fftwf_free(buf); @@ -159,6 +177,9 @@ class FFT int log2N; FFTWBuffer m_workReal; FFTWBuffer m_workImag; +#elif PS_USE_PFFFT + PFFFT_Setup *planpffft = nullptr; + FFTWBuffer m_work; #else fftwf_plan planfftw,planifftw; #endif diff --git a/Source/PS_Source/StretchSource.cpp b/Source/PS_Source/StretchSource.cpp index f044cc2..2c60a25 100644 --- a/Source/PS_Source/StretchSource.cpp +++ b/Source/PS_Source/StretchSource.cpp @@ -599,6 +599,14 @@ void StretchAudioSource::playDrySound(const AudioSourceChannelInfo & 
bufferToFil bufs[i][j + bufferToFill.startSample] = maingain * m_resampler_outbuf[j*m_num_outchans + i]; } +double StretchAudioSource::getLastSourcePositionPercent() +{ + if (m_inputfile == nullptr || m_inputfile->info.nsamples == 0) + return 0.0; + return (1.0/m_inputfile->info.nsamples)*m_last_filepos; +} + + double StretchAudioSource::getInfilePositionPercent() { if (m_inputfile == nullptr || m_inputfile->info.nsamples == 0) @@ -686,6 +694,8 @@ void StretchAudioSource::setFFTSize(int size, bool force) jassert(size>0); if (force || (m_xfadetask.state == 0 && (m_process_fftsize == 0 || size != m_process_fftsize))) { + DBG("Using FFT size: " << size); + ScopedLock locker(m_cs); if (m_xfadetask.buffer.getNumChannels() < m_num_outchans) { @@ -704,7 +714,8 @@ void StretchAudioSource::setFFTSize(int size, bool force) m_process_fftsize = size; initObjects(); } - + + ++m_param_change_count; } } diff --git a/Source/PS_Source/StretchSource.h b/Source/PS_Source/StretchSource.h index 74f3c23..bc57cfd 100644 --- a/Source/PS_Source/StretchSource.h +++ b/Source/PS_Source/StretchSource.h @@ -100,6 +100,8 @@ public: double getLastSeekPos() const { return m_seekpos; } CriticalSection* getMutex() { return &m_cs; } int64_t getLastSourcePosition() const { return m_last_filepos; } + double getLastSourcePositionPercent(); + int m_prebuffersize = 0; void setSpectralOrderPreset(int id); diff --git a/Source/PS_Source/globals.h b/Source/PS_Source/globals.h index f10150e..48f06e5 100644 --- a/Source/PS_Source/globals.h +++ b/Source/PS_Source/globals.h @@ -288,7 +288,7 @@ inline String secondsToString2(double secs) result.preallocateBytes(32); bool empty = true; if ((int)rt.inHours()>0) - result << String((int)rt.inHours() % 24).paddedLeft('0', empty ? 1 : 2) << ':'; + result << String((int)rt.inHours()).paddedLeft('0', empty ? 1 : 2) << ':'; result << String((int)rt.inMinutes() % 60).paddedLeft('0', 2) << ':'; result << String((int)rt.inSeconds() % 60).paddedLeft('0', 2); auto millis = (int)rt.inMilliseconds() % 1000; diff --git a/Source/PluginEditor.cpp b/Source/PluginEditor.cpp index da70c82..82d5f81 100644 --- a/Source/PluginEditor.cpp +++ b/Source/PluginEditor.cpp @@ -554,7 +554,7 @@ void PaulstretchpluginAudioProcessorEditor::showRenderDialog() int prefh = jmin(contentraw->getPreferredHeight(), getHeight() - 10); contentraw->setSize(prefw, prefh); std::unique_ptr content(contentraw); - auto & cb = CallOutBox::launchAsynchronously(std::move(content), m_render_button.getBounds(), this); + auto & cb = CallOutBox::launchAsynchronously(std::move(content), m_render_button.getBounds(), this, false); cb.setDismissalMouseClicksAreAlwaysConsumed(true); } @@ -1247,7 +1247,11 @@ void PaulstretchpluginAudioProcessorEditor::showSettingsMenu() void PaulstretchpluginAudioProcessorEditor::showAbout() { String fftlib; -#if !PS_USE_VDSP_FFT +#if PS_USE_VDSP_FFT + fftlib = "vDSP"; +#elif PS_USE_PFFFT + fftlib = "pffft"; +#else fftlib = fftwf_version; #endif String juceversiontxt = String("JUCE ") + String(JUCE_MAJOR_VERSION) + "." 
+ String(JUCE_MINOR_VERSION); @@ -1272,11 +1276,17 @@ void PaulstretchpluginAudioProcessorEditor::showAbout() if (fftlib.isNotEmpty()) text += String("Using ") + fftlib + String(" for FFT\n\n"); + #if !JUCE_IOS - text += juceversiontxt + String(" used under the GPL license.\n\n"); + if (PluginHostType::getPluginLoadedAs() == AudioProcessor::wrapperType_AAX) { + text += juceversiontxt + String("\n\n"); + } + else { + text += juceversiontxt + String(" used under the GPL license.\n\n"); + } #endif - // text += String("GPL licensed source code for this plugin at : https://bitbucket.org/xenakios/paulstretchplugin/overview\n"); + text += String("GPL licensed source code at : https://github.com/essej/paulxstretch\n"); if (host.type != juce::PluginHostType::UnknownHost) { text += String("Running in : ") + host.getHostDescription()+ String("\n"); @@ -1300,7 +1310,7 @@ void PaulstretchpluginAudioProcessorEditor::showAbout() wrap->setSize(jmin(defWidth, getWidth() - 20), jmin(defHeight, getHeight() - 24)); auto bounds = getLocalArea(nullptr, m_settings_button.getScreenBounds()); - auto & cb = CallOutBox::launchAsynchronously(std::move(wrap), bounds, this); + auto & cb = CallOutBox::launchAsynchronously(std::move(wrap), bounds, this, false); cb.setDismissalMouseClicksAreAlwaysConsumed(true); } @@ -1496,6 +1506,7 @@ void WaveformComponent::paint(Graphics & g) g.setColour(Colours::white); if (CursorPosCallback) { + /* double timediff = (Time::getMillisecondCounterHiRes() - m_last_source_pos_update_time)*(1.0/m_sas->getRate()); double curpos = ((double)m_last_source_pos / m_sas->getOutputSamplerate()); double prebufoffset = (double)m_sas->m_prebuffersize / m_sas->getOutputSamplerate(); @@ -1503,7 +1514,8 @@ void WaveformComponent::paint(Graphics & g) curpos = 1.0 / m_sas->getInfileLengthSeconds()*(curpos+(timediff / 1000.0)); //g.fillRect(normalizedToViewX(curpos), m_topmargin, 1, getHeight() - m_topmargin); //g.drawText(String(curpos), 1, 30, 200,30, Justification::left); - g.fillRect(normalizedToViewX(CursorPosCallback()), m_topmargin, 1, getHeight() - m_topmargin); + */ + g.fillRect(normalizedToViewX(CursorPosCallback()), m_topmargin, 1, getHeight() - m_topmargin); } if (m_rec_pos >= 0.0) { diff --git a/Source/PluginProcessor.cpp b/Source/PluginProcessor.cpp index fff9606..051341f 100644 --- a/Source/PluginProcessor.cpp +++ b/Source/PluginProcessor.cpp @@ -20,8 +20,11 @@ int get_optimized_updown(int n, bool up) { while (true) { n = orig_n; +#if PS_USE_VDSP_FFT // only powers of two allowed if using VDSP FFT -#if !PS_USE_VDSP_FFT +#elif PS_USE_PFFFT + // only powers of two allowed if using pffft +#else while (!(n % 11)) n /= 11; while (!(n % 7)) n /= 7; while (!(n % 5)) n /= 5; @@ -320,9 +323,11 @@ void PaulstretchpluginAudioProcessor::setPreBufferAmount(int x) m_cur_num_out_chans = *m_outchansparam; //Logger::writeToLog("Switching to use " + String(m_cur_num_out_chans) + " out channels"); String err; + setFFTSize(*getFloatParameter(cpi_fftsize), true); startplay({ *getFloatParameter(cpi_soundstart),*getFloatParameter(cpi_soundend) }, m_cur_num_out_chans, m_curmaxblocksize, err); - + m_stretch_source->seekPercent(m_stretch_source->getLastSourcePositionPercent()); + m_prebuffering_inited = true; } } @@ -545,15 +550,20 @@ void PaulstretchpluginAudioProcessor::parameterGestureChanged(int parameterIndex { } -void PaulstretchpluginAudioProcessor::setFFTSize(double size, bool force) +void PaulstretchpluginAudioProcessor::setFFTSize(float size, bool force) { - if (m_prebuffer_amount == 5) - m_fft_size_to_use 
= pow(2, 7.0 + size * 14.5); - else m_fft_size_to_use = pow(2, 7.0 + size * 10.0); // chicken out from allowing huge FFT sizes if not enough prebuffering - int optim = optimizebufsize(m_fft_size_to_use); - m_fft_size_to_use = optim; - m_stretch_source->setFFTSize(optim, force); - //Logger::writeToLog(String(m_fft_size_to_use)); + if (fabsf(m_last_fftsizeparamval - size) > 0.00001f || force) { + + if (m_prebuffer_amount == 5) + m_fft_size_to_use = pow(2, 7.0 + size * 14.5); + else m_fft_size_to_use = pow(2, 7.0 + size * 10.0); // chicken out from allowing huge FFT sizes if not enough prebuffering + int optim = optimizebufsize(m_fft_size_to_use); + m_fft_size_to_use = optim; + m_stretch_source->setFFTSize(optim, force); + + m_last_fftsizeparamval = size; + //Logger::writeToLog(String(m_fft_size_to_use)); + } } void PaulstretchpluginAudioProcessor::startplay(Range playrange, int numoutchans, int maxBlockSize, String& err) @@ -575,7 +585,7 @@ void PaulstretchpluginAudioProcessor::startplay(Range playrange, int num m_bufferingthread.startThread(); } m_stretch_source->setNumOutChannels(numoutchans); - m_stretch_source->setFFTSize(m_fft_size_to_use); + m_stretch_source->setFFTSize(m_fft_size_to_use, true); m_stretch_source->setProcessParameters(&m_ppar); m_stretch_source->m_prebuffersize = bufamt; @@ -854,7 +864,7 @@ void PaulstretchpluginAudioProcessor::prepareToPlay(double sampleRate, int sampl } if (m_prebuffering_inited == false) { - setFFTSize(*getFloatParameter(cpi_fftsize)); + setFFTSize(*getFloatParameter(cpi_fftsize), true); m_stretch_source->setProcessParameters(&m_ppar); m_stretch_source->setFFTWindowingType(1); String err; diff --git a/Source/PluginProcessor.h b/Source/PluginProcessor.h index fae44d4..00d572f 100644 --- a/Source/PluginProcessor.h +++ b/Source/PluginProcessor.h @@ -276,6 +276,7 @@ private: double m_smoothed_prebuffer_ready = 0.0; SignalSmoother m_prebufsmoother; int m_fft_size_to_use = 1024; + float m_last_fftsizeparamval = -1.0f; double m_last_outpos_pos = 0.0; double m_last_in_pos = 0.0; std::vector m_bufamounts{ 4096,8192,16384,32768,65536,262144 }; @@ -283,7 +284,7 @@ private: int mPluginWindowWidth = 820; int mPluginWindowHeight = 710; - void setFFTSize(double size, bool force=false); + void setFFTSize(float size, bool force=false); void startplay(Range playrange, int numoutchans, int maxBlockSize, String& err); SharedResourcePointer m_thumbcache; AudioParameterInt* m_outchansparam = nullptr; diff --git a/Source/pffft/pffft.c b/Source/pffft/pffft.c new file mode 100644 index 0000000..cf75b37 --- /dev/null +++ b/Source/pffft/pffft.c @@ -0,0 +1,1884 @@ +/* Copyright (c) 2013 Julien Pommier ( pommier@modartt.com ) + + Based on original fortran 77 code from FFTPACKv4 from NETLIB + (http://www.netlib.org/fftpack), authored by Dr Paul Swarztrauber + of NCAR, in 1985. + + As confirmed by the NCAR fftpack software curators, the following + FFTPACKv5 license applies to FFTPACKv4 sources. My changes are + released under the same terms. + + FFTPACK license: + + http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html + + Copyright (c) 2004 the University Corporation for Atmospheric + Research ("UCAR"). All rights reserved. Developed by NCAR's + Computational and Information Systems Laboratory, UCAR, + www.cisl.ucar.edu. 
+ + Redistribution and use of the Software in source and binary forms, + with or without modification, is permitted provided that the + following conditions are met: + + - Neither the names of NCAR's Computational and Information Systems + Laboratory, the University Corporation for Atmospheric Research, + nor the names of its sponsors or contributors may be used to + endorse or promote products derived from this Software without + specific prior written permission. + + - Redistributions of source code must retain the above copyright + notices, this list of conditions, and the disclaimer below. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions, and the disclaimer below in the + documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN + ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + + PFFFT : a Pretty Fast FFT. + + This file is largerly based on the original FFTPACK implementation, modified in + order to take advantage of SIMD instructions of modern CPUs. +*/ + +/* + ChangeLog: + - 2011/10/02, version 1: This is the very first release of this file. +*/ + +#include "pffft.h" +#include +#include +#include +#include + +/* detect compiler flavour */ +#if defined(_MSC_VER) +# define COMPILER_MSVC +#elif defined(__GNUC__) +# define COMPILER_GCC +#endif + +#if defined(COMPILER_GCC) +# define ALWAYS_INLINE(return_type) inline return_type __attribute__ ((always_inline)) +# define NEVER_INLINE(return_type) return_type __attribute__ ((noinline)) +# define RESTRICT __restrict +# define VLA_ARRAY_ON_STACK(type__, varname__, size__) type__ varname__[size__]; +#elif defined(COMPILER_MSVC) +# define ALWAYS_INLINE(return_type) __forceinline return_type +# define NEVER_INLINE(return_type) __declspec(noinline) return_type +# define RESTRICT __restrict +# define VLA_ARRAY_ON_STACK(type__, varname__, size__) type__ *varname__ = (type__*)_alloca(size__ * sizeof(type__)) +#endif + + +/* + vector support macros: the rest of the code is independant of + SSE/Altivec/NEON -- adding support for other platforms with 4-element + vectors should be limited to these macros +*/ + + +// define PFFFT_SIMD_DISABLE if you want to use scalar code instead of simd code +//#define PFFFT_SIMD_DISABLE + +/* + Altivec support macros +*/ +#if !defined(PFFFT_SIMD_DISABLE) && (defined(__ppc__) || defined(__ppc64__) || defined(__powerpc__) || defined(__powerpc64__)) +#include +typedef vector float v4sf; +# define SIMD_SZ 4 +# define VZERO() ((vector float) vec_splat_u8(0)) +# define VMUL(a,b) vec_madd(a,b, VZERO()) +# define VADD(a,b) vec_add(a,b) +# define VMADD(a,b,c) vec_madd(a,b,c) +# define VSUB(a,b) vec_sub(a,b) +inline v4sf ld_ps1(const float *p) { v4sf v=vec_lde(0,p); return vec_splat(vec_perm(v, v, vec_lvsl(0, p)), 0); } +# define LD_PS1(p) ld_ps1(&p) +# define INTERLEAVE2(in1, in2, out1, out2) { v4sf tmp__ = vec_mergeh(in1, in2); out2 = vec_mergel(in1, in2); out1 = tmp__; } +# define UNINTERLEAVE2(in1, in2, out1, out2) { \ + vector unsigned char vperm1 = (vector 
unsigned char){0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27}; \ + vector unsigned char vperm2 = (vector unsigned char){4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31}; \ + v4sf tmp__ = vec_perm(in1, in2, vperm1); out2 = vec_perm(in1, in2, vperm2); out1 = tmp__; \ + } +# define VTRANSPOSE4(x0,x1,x2,x3) { \ + v4sf y0 = vec_mergeh(x0, x2); \ + v4sf y1 = vec_mergel(x0, x2); \ + v4sf y2 = vec_mergeh(x1, x3); \ + v4sf y3 = vec_mergel(x1, x3); \ + x0 = vec_mergeh(y0, y2); \ + x1 = vec_mergel(y0, y2); \ + x2 = vec_mergeh(y1, y3); \ + x3 = vec_mergel(y1, y3); \ + } +# define VSWAPHL(a,b) vec_perm(a,b, (vector unsigned char){16,17,18,19,20,21,22,23,8,9,10,11,12,13,14,15}) +# define VALIGNED(ptr) ((((long long)(ptr)) & 0xF) == 0) + +/* + SSE1 support macros +*/ +#elif !defined(PFFFT_SIMD_DISABLE) && (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(i386) || defined(_M_IX86)) + +#include +typedef __m128 v4sf; +# define SIMD_SZ 4 // 4 floats by simd vector -- this is pretty much hardcoded in the preprocess/finalize functions anyway so you will have to work if you want to enable AVX with its 256-bit vectors. +# define VZERO() _mm_setzero_ps() +# define VMUL(a,b) _mm_mul_ps(a,b) +# define VADD(a,b) _mm_add_ps(a,b) +# define VMADD(a,b,c) _mm_add_ps(_mm_mul_ps(a,b), c) +# define VSUB(a,b) _mm_sub_ps(a,b) +# define LD_PS1(p) _mm_set1_ps(p) +# define INTERLEAVE2(in1, in2, out1, out2) { v4sf tmp__ = _mm_unpacklo_ps(in1, in2); out2 = _mm_unpackhi_ps(in1, in2); out1 = tmp__; } +# define UNINTERLEAVE2(in1, in2, out1, out2) { v4sf tmp__ = _mm_shuffle_ps(in1, in2, _MM_SHUFFLE(2,0,2,0)); out2 = _mm_shuffle_ps(in1, in2, _MM_SHUFFLE(3,1,3,1)); out1 = tmp__; } +# define VTRANSPOSE4(x0,x1,x2,x3) _MM_TRANSPOSE4_PS(x0,x1,x2,x3) +# define VSWAPHL(a,b) _mm_shuffle_ps(b, a, _MM_SHUFFLE(3,2,1,0)) +# define VALIGNED(ptr) ((((long long)(ptr)) & 0xF) == 0) + +/* + ARM NEON support macros +*/ +#elif !defined(PFFFT_SIMD_DISABLE) && (defined(__arm__) || defined(__aarch64__) || defined(__arm64__)) +# include +typedef float32x4_t v4sf; +# define SIMD_SZ 4 +# define VZERO() vdupq_n_f32(0) +# define VMUL(a,b) vmulq_f32(a,b) +# define VADD(a,b) vaddq_f32(a,b) +# define VMADD(a,b,c) vmlaq_f32(c,a,b) +# define VSUB(a,b) vsubq_f32(a,b) +# define LD_PS1(p) vld1q_dup_f32(&(p)) +# define INTERLEAVE2(in1, in2, out1, out2) { float32x4x2_t tmp__ = vzipq_f32(in1,in2); out1=tmp__.val[0]; out2=tmp__.val[1]; } +# define UNINTERLEAVE2(in1, in2, out1, out2) { float32x4x2_t tmp__ = vuzpq_f32(in1,in2); out1=tmp__.val[0]; out2=tmp__.val[1]; } +# define VTRANSPOSE4(x0,x1,x2,x3) { \ + float32x4x2_t t0_ = vzipq_f32(x0, x2); \ + float32x4x2_t t1_ = vzipq_f32(x1, x3); \ + float32x4x2_t u0_ = vzipq_f32(t0_.val[0], t1_.val[0]); \ + float32x4x2_t u1_ = vzipq_f32(t0_.val[1], t1_.val[1]); \ + x0 = u0_.val[0]; x1 = u0_.val[1]; x2 = u1_.val[0]; x3 = u1_.val[1]; \ + } +// marginally faster version +//# define VTRANSPOSE4(x0,x1,x2,x3) { asm("vtrn.32 %q0, %q1;\n vtrn.32 %q2,%q3\n vswp %f0,%e2\n vswp %f1,%e3" : "+w"(x0), "+w"(x1), "+w"(x2), "+w"(x3)::); } +# define VSWAPHL(a,b) vcombine_f32(vget_low_f32(b), vget_high_f32(a)) +# define VALIGNED(ptr) ((((long long)(ptr)) & 0x3) == 0) +#else +# if !defined(PFFFT_SIMD_DISABLE) +# warning "building with simd disabled !\n"; +# define PFFFT_SIMD_DISABLE // fallback to scalar code +# endif +#endif + +// fallback mode for situations where SSE/Altivec are not available, use scalar mode instead +#ifdef PFFFT_SIMD_DISABLE +typedef float v4sf; +# define SIMD_SZ 1 +# define VZERO() 0.f +# define VMUL(a,b) ((a)*(b)) 
+# define VADD(a,b) ((a)+(b)) +# define VMADD(a,b,c) ((a)*(b)+(c)) +# define VSUB(a,b) ((a)-(b)) +# define LD_PS1(p) (p) +# define VALIGNED(ptr) ((((long long)(ptr)) & 0x3) == 0) +#endif + +// shortcuts for complex multiplcations +#define VCPLXMUL(ar,ai,br,bi) { v4sf tmp; tmp=VMUL(ar,bi); ar=VMUL(ar,br); ar=VSUB(ar,VMUL(ai,bi)); ai=VMUL(ai,br); ai=VADD(ai,tmp); } +#define VCPLXMULCONJ(ar,ai,br,bi) { v4sf tmp; tmp=VMUL(ar,bi); ar=VMUL(ar,br); ar=VADD(ar,VMUL(ai,bi)); ai=VMUL(ai,br); ai=VSUB(ai,tmp); } +#ifndef SVMUL +// multiply a scalar with a vector +#define SVMUL(f,v) VMUL(LD_PS1(f),v) +#endif + +#if !defined(PFFFT_SIMD_DISABLE) +typedef union v4sf_union { + v4sf v; + float f[4]; +} v4sf_union; + +#include + +#define assertv4(v,f0,f1,f2,f3) assert(v.f[0] == (f0) && v.f[1] == (f1) && v.f[2] == (f2) && v.f[3] == (f3)) + +/* detect bugs with the vector support macros */ +void validate_pffft_simd(void) { + float f[16] = { 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 }; + v4sf_union a0, a1, a2, a3, t, u; + memcpy(a0.f, f, 4*sizeof(float)); + memcpy(a1.f, f+4, 4*sizeof(float)); + memcpy(a2.f, f+8, 4*sizeof(float)); + memcpy(a3.f, f+12, 4*sizeof(float)); + + t = a0; u = a1; t.v = VZERO(); + printf("VZERO=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); assertv4(t, 0, 0, 0, 0); + t.v = VADD(a1.v, a2.v); + printf("VADD(4:7,8:11)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); assertv4(t, 12, 14, 16, 18); + t.v = VMUL(a1.v, a2.v); + printf("VMUL(4:7,8:11)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); assertv4(t, 32, 45, 60, 77); + t.v = VMADD(a1.v, a2.v,a0.v); + printf("VMADD(4:7,8:11,0:3)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); assertv4(t, 32, 46, 62, 80); + + INTERLEAVE2(a1.v,a2.v,t.v,u.v); + printf("INTERLEAVE2(4:7,8:11)=[%2g %2g %2g %2g] [%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3], u.f[0], u.f[1], u.f[2], u.f[3]); + assertv4(t, 4, 8, 5, 9); assertv4(u, 6, 10, 7, 11); + UNINTERLEAVE2(a1.v,a2.v,t.v,u.v); + printf("UNINTERLEAVE2(4:7,8:11)=[%2g %2g %2g %2g] [%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3], u.f[0], u.f[1], u.f[2], u.f[3]); + assertv4(t, 4, 6, 8, 10); assertv4(u, 5, 7, 9, 11); + + t.v=LD_PS1(f[15]); + printf("LD_PS1(15)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); + assertv4(t, 15, 15, 15, 15); + t.v = VSWAPHL(a1.v, a2.v); + printf("VSWAPHL(4:7,8:11)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); + assertv4(t, 8, 9, 6, 7); + VTRANSPOSE4(a0.v, a1.v, a2.v, a3.v); + printf("VTRANSPOSE4(0:3,4:7,8:11,12:15)=[%2g %2g %2g %2g] [%2g %2g %2g %2g] [%2g %2g %2g %2g] [%2g %2g %2g %2g]\n", + a0.f[0], a0.f[1], a0.f[2], a0.f[3], a1.f[0], a1.f[1], a1.f[2], a1.f[3], + a2.f[0], a2.f[1], a2.f[2], a2.f[3], a3.f[0], a3.f[1], a3.f[2], a3.f[3]); + assertv4(a0, 0, 4, 8, 12); assertv4(a1, 1, 5, 9, 13); assertv4(a2, 2, 6, 10, 14); assertv4(a3, 3, 7, 11, 15); +} +#else +void validate_pffft_simd() {} // allow test_pffft.c to call this function even when simd is not available.. +#endif //!PFFFT_SIMD_DISABLE + +/* SSE and co like 16-bytes aligned pointers */ +#define MALLOC_V4SF_ALIGNMENT 64 // with a 64-byte alignment, we are even aligned on L2 cache lines... 
+void *pffft_aligned_malloc(size_t nb_bytes) { + void *p, *p0 = malloc(nb_bytes + MALLOC_V4SF_ALIGNMENT); + if (!p0) return (void *) 0; + p = (void *) (((size_t) p0 + MALLOC_V4SF_ALIGNMENT) & (~((size_t) (MALLOC_V4SF_ALIGNMENT-1)))); + *((void **) p - 1) = p0; + return p; +} + +void pffft_aligned_free(void *p) { + if (p) free(*((void **) p - 1)); +} + +int pffft_simd_size() { return SIMD_SZ; } + +/* + passf2 and passb2 has been merged here, fsign = -1 for passf2, +1 for passb2 +*/ +static NEVER_INLINE(void) passf2_ps(int ido, int l1, const v4sf *cc, v4sf *ch, const float *wa1, float fsign) { + int k, i; + int l1ido = l1*ido; + if (ido <= 2) { + for (k=0; k < l1ido; k += ido, ch += ido, cc+= 2*ido) { + ch[0] = VADD(cc[0], cc[ido+0]); + ch[l1ido] = VSUB(cc[0], cc[ido+0]); + ch[1] = VADD(cc[1], cc[ido+1]); + ch[l1ido + 1] = VSUB(cc[1], cc[ido+1]); + } + } else { + for (k=0; k < l1ido; k += ido, ch += ido, cc += 2*ido) { + for (i=0; i 2); + for (k=0; k< l1ido; k += ido, cc+= 3*ido, ch +=ido) { + for (i=0; i 2); + for (k = 0; k < l1; ++k, cc += 5*ido, ch += ido) { + for (i = 0; i < ido-1; i += 2) { + ti5 = VSUB(cc_ref(i , 2), cc_ref(i , 5)); + ti2 = VADD(cc_ref(i , 2), cc_ref(i , 5)); + ti4 = VSUB(cc_ref(i , 3), cc_ref(i , 4)); + ti3 = VADD(cc_ref(i , 3), cc_ref(i , 4)); + tr5 = VSUB(cc_ref(i-1, 2), cc_ref(i-1, 5)); + tr2 = VADD(cc_ref(i-1, 2), cc_ref(i-1, 5)); + tr4 = VSUB(cc_ref(i-1, 3), cc_ref(i-1, 4)); + tr3 = VADD(cc_ref(i-1, 3), cc_ref(i-1, 4)); + ch_ref(i-1, 1) = VADD(cc_ref(i-1, 1), VADD(tr2, tr3)); + ch_ref(i , 1) = VADD(cc_ref(i , 1), VADD(ti2, ti3)); + cr2 = VADD(cc_ref(i-1, 1), VADD(SVMUL(tr11, tr2),SVMUL(tr12, tr3))); + ci2 = VADD(cc_ref(i , 1), VADD(SVMUL(tr11, ti2),SVMUL(tr12, ti3))); + cr3 = VADD(cc_ref(i-1, 1), VADD(SVMUL(tr12, tr2),SVMUL(tr11, tr3))); + ci3 = VADD(cc_ref(i , 1), VADD(SVMUL(tr12, ti2),SVMUL(tr11, ti3))); + cr5 = VADD(SVMUL(ti11, tr5), SVMUL(ti12, tr4)); + ci5 = VADD(SVMUL(ti11, ti5), SVMUL(ti12, ti4)); + cr4 = VSUB(SVMUL(ti12, tr5), SVMUL(ti11, tr4)); + ci4 = VSUB(SVMUL(ti12, ti5), SVMUL(ti11, ti4)); + dr3 = VSUB(cr3, ci4); + dr4 = VADD(cr3, ci4); + di3 = VADD(ci3, cr4); + di4 = VSUB(ci3, cr4); + dr5 = VADD(cr2, ci5); + dr2 = VSUB(cr2, ci5); + di5 = VSUB(ci2, cr5); + di2 = VADD(ci2, cr5); + wr1=wa1[i]; wi1=fsign*wa1[i+1]; wr2=wa2[i]; wi2=fsign*wa2[i+1]; + wr3=wa3[i]; wi3=fsign*wa3[i+1]; wr4=wa4[i]; wi4=fsign*wa4[i+1]; + VCPLXMUL(dr2, di2, LD_PS1(wr1), LD_PS1(wi1)); + ch_ref(i - 1, 2) = dr2; + ch_ref(i, 2) = di2; + VCPLXMUL(dr3, di3, LD_PS1(wr2), LD_PS1(wi2)); + ch_ref(i - 1, 3) = dr3; + ch_ref(i, 3) = di3; + VCPLXMUL(dr4, di4, LD_PS1(wr3), LD_PS1(wi3)); + ch_ref(i - 1, 4) = dr4; + ch_ref(i, 4) = di4; + VCPLXMUL(dr5, di5, LD_PS1(wr4), LD_PS1(wi4)); + ch_ref(i - 1, 5) = dr5; + ch_ref(i, 5) = di5; + } + } +#undef ch_ref +#undef cc_ref +} + +static NEVER_INLINE(void) radf2_ps(int ido, int l1, const v4sf * RESTRICT cc, v4sf * RESTRICT ch, const float *wa1) { + static const float minus_one = -1.f; + int i, k, l1ido = l1*ido; + for (k=0; k < l1ido; k += ido) { + v4sf a = cc[k], b = cc[k + l1ido]; + ch[2*k] = VADD(a, b); + ch[2*(k+ido)-1] = VSUB(a, b); + } + if (ido < 2) return; + if (ido != 2) { + for (k=0; k < l1ido; k += ido) { + for (i=2; i 5) { + wa[i1-1] = wa[i-1]; + wa[i1] = wa[i]; + } + } + l1 = l2; + } +} /* cffti1 */ + + +v4sf *cfftf1_ps(int n, const v4sf *input_readonly, v4sf *work1, v4sf *work2, const float *wa, const int *ifac, int isign) { + v4sf *in = (v4sf*)input_readonly; + v4sf *out = (in == work2 ? 
work1 : work2); + int nf = ifac[1], k1; + int l1 = 1; + int iw = 0; + assert(in != out && work1 != work2); + for (k1=2; k1<=nf+1; k1++) { + int ip = ifac[k1]; + int l2 = ip*l1; + int ido = n / l2; + int idot = ido + ido; + switch (ip) { + case 5: { + int ix2 = iw + idot; + int ix3 = ix2 + idot; + int ix4 = ix3 + idot; + passf5_ps(idot, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4], isign); + } break; + case 4: { + int ix2 = iw + idot; + int ix3 = ix2 + idot; + passf4_ps(idot, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3], isign); + } break; + case 2: { + passf2_ps(idot, l1, in, out, &wa[iw], isign); + } break; + case 3: { + int ix2 = iw + idot; + passf3_ps(idot, l1, in, out, &wa[iw], &wa[ix2], isign); + } break; + default: + assert(0); + } + l1 = l2; + iw += (ip - 1)*idot; + if (out == work2) { + out = work1; in = work2; + } else { + out = work2; in = work1; + } + } + + return in; /* this is in fact the output .. */ +} + + +struct PFFFT_Setup { + int N; + int Ncvec; // nb of complex simd vectors (N/4 if PFFFT_COMPLEX, N/8 if PFFFT_REAL) + int ifac[15]; + pffft_transform_t transform; + v4sf *data; // allocated room for twiddle coefs + float *e; // points into 'data' , N/4*3 elements + float *twiddle; // points into 'data', N/4 elements +}; + +PFFFT_Setup *pffft_new_setup(int N, pffft_transform_t transform) { + PFFFT_Setup *s = (PFFFT_Setup*)malloc(sizeof(PFFFT_Setup)); + int k, m; + /* unfortunately, the fft size must be a multiple of 16 for complex FFTs + and 32 for real FFTs -- a lot of stuff would need to be rewritten to + handle other cases (or maybe just switch to a scalar fft, I don't know..) */ + if (transform == PFFFT_REAL) { assert((N%(2*SIMD_SZ*SIMD_SZ))==0 && N>0); } + if (transform == PFFFT_COMPLEX) { assert((N%(SIMD_SZ*SIMD_SZ))==0 && N>0); } + //assert((N % 32) == 0); + s->N = N; + s->transform = transform; + /* nb of complex simd vectors */ + s->Ncvec = (transform == PFFFT_REAL ? 
N/2 : N)/SIMD_SZ; + s->data = (v4sf*)pffft_aligned_malloc(2*s->Ncvec * sizeof(v4sf)); + s->e = (float*)s->data; + s->twiddle = (float*)(s->data + (2*s->Ncvec*(SIMD_SZ-1))/SIMD_SZ); + + if (transform == PFFFT_REAL) { + for (k=0; k < s->Ncvec; ++k) { + int i = k/SIMD_SZ; + int j = k%SIMD_SZ; + for (m=0; m < SIMD_SZ-1; ++m) { + float A = -2*M_PI*(m+1)*k / N; + s->e[(2*(i*3 + m) + 0) * SIMD_SZ + j] = cos(A); + s->e[(2*(i*3 + m) + 1) * SIMD_SZ + j] = sin(A); + } + } + rffti1_ps(N/SIMD_SZ, s->twiddle, s->ifac); + } else { + for (k=0; k < s->Ncvec; ++k) { + int i = k/SIMD_SZ; + int j = k%SIMD_SZ; + for (m=0; m < SIMD_SZ-1; ++m) { + float A = -2*M_PI*(m+1)*k / N; + s->e[(2*(i*3 + m) + 0)*SIMD_SZ + j] = cos(A); + s->e[(2*(i*3 + m) + 1)*SIMD_SZ + j] = sin(A); + } + } + cffti1_ps(N/SIMD_SZ, s->twiddle, s->ifac); + } + + /* check that N is decomposable with allowed prime factors */ + for (k=0, m=1; k < s->ifac[1]; ++k) { m *= s->ifac[2+k]; } + if (m != N/SIMD_SZ) { + pffft_destroy_setup(s); s = 0; + } + + return s; +} + + +void pffft_destroy_setup(PFFFT_Setup *s) { + pffft_aligned_free(s->data); + free(s); +} + +#if !defined(PFFFT_SIMD_DISABLE) + +/* [0 0 1 2 3 4 5 6 7 8] -> [0 8 7 6 5 4 3 2 1] */ +static void reversed_copy(int N, const v4sf *in, int in_stride, v4sf *out) { + v4sf g0, g1; + int k; + INTERLEAVE2(in[0], in[1], g0, g1); in += in_stride; + + *--out = VSWAPHL(g0, g1); // [g0l, g0h], [g1l g1h] -> [g1l, g0h] + for (k=1; k < N; ++k) { + v4sf h0, h1; + INTERLEAVE2(in[0], in[1], h0, h1); in += in_stride; + *--out = VSWAPHL(g1, h0); + *--out = VSWAPHL(h0, h1); + g1 = h1; + } + *--out = VSWAPHL(g1, g0); +} + +static void unreversed_copy(int N, const v4sf *in, v4sf *out, int out_stride) { + v4sf g0, g1, h0, h1; + int k; + g0 = g1 = in[0]; ++in; + for (k=1; k < N; ++k) { + h0 = *in++; h1 = *in++; + g1 = VSWAPHL(g1, h0); + h0 = VSWAPHL(h0, h1); + UNINTERLEAVE2(h0, g1, out[0], out[1]); out += out_stride; + g1 = h1; + } + h0 = *in++; h1 = g0; + g1 = VSWAPHL(g1, h0); + h0 = VSWAPHL(h0, h1); + UNINTERLEAVE2(h0, g1, out[0], out[1]); +} + +void pffft_zreorder(PFFFT_Setup *setup, const float *in, float *out, pffft_direction_t direction) { + int k, N = setup->N, Ncvec = setup->Ncvec; + const v4sf *vin = (const v4sf*)in; + v4sf *vout = (v4sf*)out; + assert(in != out); + if (setup->transform == PFFFT_REAL) { + int k, dk = N/32; + if (direction == PFFFT_FORWARD) { + for (k=0; k < dk; ++k) { + INTERLEAVE2(vin[k*8 + 0], vin[k*8 + 1], vout[2*(0*dk + k) + 0], vout[2*(0*dk + k) + 1]); + INTERLEAVE2(vin[k*8 + 4], vin[k*8 + 5], vout[2*(2*dk + k) + 0], vout[2*(2*dk + k) + 1]); + } + reversed_copy(dk, vin+2, 8, (v4sf*)(out + N/2)); + reversed_copy(dk, vin+6, 8, (v4sf*)(out + N)); + } else { + for (k=0; k < dk; ++k) { + UNINTERLEAVE2(vin[2*(0*dk + k) + 0], vin[2*(0*dk + k) + 1], vout[k*8 + 0], vout[k*8 + 1]); + UNINTERLEAVE2(vin[2*(2*dk + k) + 0], vin[2*(2*dk + k) + 1], vout[k*8 + 4], vout[k*8 + 5]); + } + unreversed_copy(dk, (v4sf*)(in + N/4), (v4sf*)(out + N - 6*SIMD_SZ), -8); + unreversed_copy(dk, (v4sf*)(in + 3*N/4), (v4sf*)(out + N - 2*SIMD_SZ), -8); + } + } else { + if (direction == PFFFT_FORWARD) { + for (k=0; k < Ncvec; ++k) { + int kk = (k/4) + (k%4)*(Ncvec/4); + INTERLEAVE2(vin[k*2], vin[k*2+1], vout[kk*2], vout[kk*2+1]); + } + } else { + for (k=0; k < Ncvec; ++k) { + int kk = (k/4) + (k%4)*(Ncvec/4); + UNINTERLEAVE2(vin[kk*2], vin[kk*2+1], vout[k*2], vout[k*2+1]); + } + } + } +} + +void pffft_cplx_finalize(int Ncvec, const v4sf *in, v4sf *out, const v4sf *e) { + int k, dk = Ncvec/SIMD_SZ; // number of 4x4 
matrix blocks + v4sf r0, i0, r1, i1, r2, i2, r3, i3; + v4sf sr0, dr0, sr1, dr1, si0, di0, si1, di1; + assert(in != out); + for (k=0; k < dk; ++k) { + r0 = in[8*k+0]; i0 = in[8*k+1]; + r1 = in[8*k+2]; i1 = in[8*k+3]; + r2 = in[8*k+4]; i2 = in[8*k+5]; + r3 = in[8*k+6]; i3 = in[8*k+7]; + VTRANSPOSE4(r0,r1,r2,r3); + VTRANSPOSE4(i0,i1,i2,i3); + VCPLXMUL(r1,i1,e[k*6+0],e[k*6+1]); + VCPLXMUL(r2,i2,e[k*6+2],e[k*6+3]); + VCPLXMUL(r3,i3,e[k*6+4],e[k*6+5]); + + sr0 = VADD(r0,r2); dr0 = VSUB(r0, r2); + sr1 = VADD(r1,r3); dr1 = VSUB(r1, r3); + si0 = VADD(i0,i2); di0 = VSUB(i0, i2); + si1 = VADD(i1,i3); di1 = VSUB(i1, i3); + + /* + transformation for each column is: + + [1 1 1 1 0 0 0 0] [r0] + [1 0 -1 0 0 -1 0 1] [r1] + [1 -1 1 -1 0 0 0 0] [r2] + [1 0 -1 0 0 1 0 -1] [r3] + [0 0 0 0 1 1 1 1] * [i0] + [0 1 0 -1 1 0 -1 0] [i1] + [0 0 0 0 1 -1 1 -1] [i2] + [0 -1 0 1 1 0 -1 0] [i3] + */ + + r0 = VADD(sr0, sr1); i0 = VADD(si0, si1); + r1 = VADD(dr0, di1); i1 = VSUB(di0, dr1); + r2 = VSUB(sr0, sr1); i2 = VSUB(si0, si1); + r3 = VSUB(dr0, di1); i3 = VADD(di0, dr1); + + *out++ = r0; *out++ = i0; *out++ = r1; *out++ = i1; + *out++ = r2; *out++ = i2; *out++ = r3; *out++ = i3; + } +} + +void pffft_cplx_preprocess(int Ncvec, const v4sf *in, v4sf *out, const v4sf *e) { + int k, dk = Ncvec/SIMD_SZ; // number of 4x4 matrix blocks + v4sf r0, i0, r1, i1, r2, i2, r3, i3; + v4sf sr0, dr0, sr1, dr1, si0, di0, si1, di1; + assert(in != out); + for (k=0; k < dk; ++k) { + r0 = in[8*k+0]; i0 = in[8*k+1]; + r1 = in[8*k+2]; i1 = in[8*k+3]; + r2 = in[8*k+4]; i2 = in[8*k+5]; + r3 = in[8*k+6]; i3 = in[8*k+7]; + + sr0 = VADD(r0,r2); dr0 = VSUB(r0, r2); + sr1 = VADD(r1,r3); dr1 = VSUB(r1, r3); + si0 = VADD(i0,i2); di0 = VSUB(i0, i2); + si1 = VADD(i1,i3); di1 = VSUB(i1, i3); + + r0 = VADD(sr0, sr1); i0 = VADD(si0, si1); + r1 = VSUB(dr0, di1); i1 = VADD(di0, dr1); + r2 = VSUB(sr0, sr1); i2 = VSUB(si0, si1); + r3 = VADD(dr0, di1); i3 = VSUB(di0, dr1); + + VCPLXMULCONJ(r1,i1,e[k*6+0],e[k*6+1]); + VCPLXMULCONJ(r2,i2,e[k*6+2],e[k*6+3]); + VCPLXMULCONJ(r3,i3,e[k*6+4],e[k*6+5]); + + VTRANSPOSE4(r0,r1,r2,r3); + VTRANSPOSE4(i0,i1,i2,i3); + + *out++ = r0; *out++ = i0; *out++ = r1; *out++ = i1; + *out++ = r2; *out++ = i2; *out++ = r3; *out++ = i3; + } +} + + +static ALWAYS_INLINE(void) pffft_real_finalize_4x4(const v4sf *in0, const v4sf *in1, const v4sf *in, + const v4sf *e, v4sf *out) { + v4sf r0, i0, r1, i1, r2, i2, r3, i3; + v4sf sr0, dr0, sr1, dr1, si0, di0, si1, di1; + r0 = *in0; i0 = *in1; + r1 = *in++; i1 = *in++; r2 = *in++; i2 = *in++; r3 = *in++; i3 = *in++; + VTRANSPOSE4(r0,r1,r2,r3); + VTRANSPOSE4(i0,i1,i2,i3); + + /* + transformation for each column is: + + [1 1 1 1 0 0 0 0] [r0] + [1 0 -1 0 0 -1 0 1] [r1] + [1 0 -1 0 0 1 0 -1] [r2] + [1 -1 1 -1 0 0 0 0] [r3] + [0 0 0 0 1 1 1 1] * [i0] + [0 -1 0 1 -1 0 1 0] [i1] + [0 -1 0 1 1 0 -1 0] [i2] + [0 0 0 0 -1 1 -1 1] [i3] + */ + + //cerr << "matrix initial, before e , REAL:\n 1: " << r0 << "\n 1: " << r1 << "\n 1: " << r2 << "\n 1: " << r3 << "\n"; + //cerr << "matrix initial, before e, IMAG :\n 1: " << i0 << "\n 1: " << i1 << "\n 1: " << i2 << "\n 1: " << i3 << "\n"; + + VCPLXMUL(r1,i1,e[0],e[1]); + VCPLXMUL(r2,i2,e[2],e[3]); + VCPLXMUL(r3,i3,e[4],e[5]); + + //cerr << "matrix initial, real part:\n 1: " << r0 << "\n 1: " << r1 << "\n 1: " << r2 << "\n 1: " << r3 << "\n"; + //cerr << "matrix initial, imag part:\n 1: " << i0 << "\n 1: " << i1 << "\n 1: " << i2 << "\n 1: " << i3 << "\n"; + + sr0 = VADD(r0,r2); dr0 = VSUB(r0,r2); + sr1 = VADD(r1,r3); dr1 = VSUB(r3,r1); + si0 = VADD(i0,i2); di0 = 
VSUB(i0,i2); + si1 = VADD(i1,i3); di1 = VSUB(i3,i1); + + r0 = VADD(sr0, sr1); + r3 = VSUB(sr0, sr1); + i0 = VADD(si0, si1); + i3 = VSUB(si1, si0); + r1 = VADD(dr0, di1); + r2 = VSUB(dr0, di1); + i1 = VSUB(dr1, di0); + i2 = VADD(dr1, di0); + + *out++ = r0; + *out++ = i0; + *out++ = r1; + *out++ = i1; + *out++ = r2; + *out++ = i2; + *out++ = r3; + *out++ = i3; + +} + +static NEVER_INLINE(void) pffft_real_finalize(int Ncvec, const v4sf *in, v4sf *out, const v4sf *e) { + int k, dk = Ncvec/SIMD_SZ; // number of 4x4 matrix blocks + /* fftpack order is f0r f1r f1i f2r f2i ... f(n-1)r f(n-1)i f(n)r */ + + v4sf_union cr, ci, *uout = (v4sf_union*)out; + v4sf save = in[7], zero=VZERO(); + float xr0, xi0, xr1, xi1, xr2, xi2, xr3, xi3; + static const float s = M_SQRT2/2; + + cr.v = in[0]; ci.v = in[Ncvec*2-1]; + assert(in != out); + pffft_real_finalize_4x4(&zero, &zero, in+1, e, out); + + /* + [cr0 cr1 cr2 cr3 ci0 ci1 ci2 ci3] + + [Xr(1)] ] [1 1 1 1 0 0 0 0] + [Xr(N/4) ] [0 0 0 0 1 s 0 -s] + [Xr(N/2) ] [1 0 -1 0 0 0 0 0] + [Xr(3N/4)] [0 0 0 0 1 -s 0 s] + [Xi(1) ] [1 -1 1 -1 0 0 0 0] + [Xi(N/4) ] [0 0 0 0 0 -s -1 -s] + [Xi(N/2) ] [0 -1 0 1 0 0 0 0] + [Xi(3N/4)] [0 0 0 0 0 -s 1 -s] + */ + + xr0=(cr.f[0]+cr.f[2]) + (cr.f[1]+cr.f[3]); uout[0].f[0] = xr0; + xi0=(cr.f[0]+cr.f[2]) - (cr.f[1]+cr.f[3]); uout[1].f[0] = xi0; + xr2=(cr.f[0]-cr.f[2]); uout[4].f[0] = xr2; + xi2=(cr.f[3]-cr.f[1]); uout[5].f[0] = xi2; + xr1= ci.f[0] + s*(ci.f[1]-ci.f[3]); uout[2].f[0] = xr1; + xi1=-ci.f[2] - s*(ci.f[1]+ci.f[3]); uout[3].f[0] = xi1; + xr3= ci.f[0] - s*(ci.f[1]-ci.f[3]); uout[6].f[0] = xr3; + xi3= ci.f[2] - s*(ci.f[1]+ci.f[3]); uout[7].f[0] = xi3; + + for (k=1; k < dk; ++k) { + v4sf save_next = in[8*k+7]; + pffft_real_finalize_4x4(&save, &in[8*k+0], in + 8*k+1, + e + k*6, out + k*8); + save = save_next; + } + +} + +static ALWAYS_INLINE(void) pffft_real_preprocess_4x4(const v4sf *in, + const v4sf *e, v4sf *out, int first) { + v4sf r0=in[0], i0=in[1], r1=in[2], i1=in[3], r2=in[4], i2=in[5], r3=in[6], i3=in[7]; + /* + transformation for each column is: + + [1 1 1 1 0 0 0 0] [r0] + [1 0 0 -1 0 -1 -1 0] [r1] + [1 -1 -1 1 0 0 0 0] [r2] + [1 0 0 -1 0 1 1 0] [r3] + [0 0 0 0 1 -1 1 -1] * [i0] + [0 -1 1 0 1 0 0 1] [i1] + [0 0 0 0 1 1 -1 -1] [i2] + [0 1 -1 0 1 0 0 1] [i3] + */ + + v4sf sr0 = VADD(r0,r3), dr0 = VSUB(r0,r3); + v4sf sr1 = VADD(r1,r2), dr1 = VSUB(r1,r2); + v4sf si0 = VADD(i0,i3), di0 = VSUB(i0,i3); + v4sf si1 = VADD(i1,i2), di1 = VSUB(i1,i2); + + r0 = VADD(sr0, sr1); + r2 = VSUB(sr0, sr1); + r1 = VSUB(dr0, si1); + r3 = VADD(dr0, si1); + i0 = VSUB(di0, di1); + i2 = VADD(di0, di1); + i1 = VSUB(si0, dr1); + i3 = VADD(si0, dr1); + + VCPLXMULCONJ(r1,i1,e[0],e[1]); + VCPLXMULCONJ(r2,i2,e[2],e[3]); + VCPLXMULCONJ(r3,i3,e[4],e[5]); + + VTRANSPOSE4(r0,r1,r2,r3); + VTRANSPOSE4(i0,i1,i2,i3); + + if (!first) { + *out++ = r0; + *out++ = i0; + } + *out++ = r1; + *out++ = i1; + *out++ = r2; + *out++ = i2; + *out++ = r3; + *out++ = i3; +} + +static NEVER_INLINE(void) pffft_real_preprocess(int Ncvec, const v4sf *in, v4sf *out, const v4sf *e) { + int k, dk = Ncvec/SIMD_SZ; // number of 4x4 matrix blocks + /* fftpack order is f0r f1r f1i f2r f2i ... 
f(n-1)r f(n-1)i f(n)r */ + + v4sf_union Xr, Xi, *uout = (v4sf_union*)out; + float cr0, ci0, cr1, ci1, cr2, ci2, cr3, ci3; + static const float s = M_SQRT2; + assert(in != out); + for (k=0; k < 4; ++k) { + Xr.f[k] = ((float*)in)[8*k]; + Xi.f[k] = ((float*)in)[8*k+4]; + } + + pffft_real_preprocess_4x4(in, e, out+1, 1); // will write only 6 values + + /* + [Xr0 Xr1 Xr2 Xr3 Xi0 Xi1 Xi2 Xi3] + + [cr0] [1 0 2 0 1 0 0 0] + [cr1] [1 0 0 0 -1 0 -2 0] + [cr2] [1 0 -2 0 1 0 0 0] + [cr3] [1 0 0 0 -1 0 2 0] + [ci0] [0 2 0 2 0 0 0 0] + [ci1] [0 s 0 -s 0 -s 0 -s] + [ci2] [0 0 0 0 0 -2 0 2] + [ci3] [0 -s 0 s 0 -s 0 -s] + */ + for (k=1; k < dk; ++k) { + pffft_real_preprocess_4x4(in+8*k, e + k*6, out-1+k*8, 0); + } + + cr0=(Xr.f[0]+Xi.f[0]) + 2*Xr.f[2]; uout[0].f[0] = cr0; + cr1=(Xr.f[0]-Xi.f[0]) - 2*Xi.f[2]; uout[0].f[1] = cr1; + cr2=(Xr.f[0]+Xi.f[0]) - 2*Xr.f[2]; uout[0].f[2] = cr2; + cr3=(Xr.f[0]-Xi.f[0]) + 2*Xi.f[2]; uout[0].f[3] = cr3; + ci0= 2*(Xr.f[1]+Xr.f[3]); uout[2*Ncvec-1].f[0] = ci0; + ci1= s*(Xr.f[1]-Xr.f[3]) - s*(Xi.f[1]+Xi.f[3]); uout[2*Ncvec-1].f[1] = ci1; + ci2= 2*(Xi.f[3]-Xi.f[1]); uout[2*Ncvec-1].f[2] = ci2; + ci3=-s*(Xr.f[1]-Xr.f[3]) - s*(Xi.f[1]+Xi.f[3]); uout[2*Ncvec-1].f[3] = ci3; +} + + +void pffft_transform_internal(PFFFT_Setup *setup, const float *finput, float *foutput, v4sf *scratch, + pffft_direction_t direction, int ordered) { + int k, Ncvec = setup->Ncvec; + int nf_odd = (setup->ifac[1] & 1); + + // temporary buffer is allocated on the stack if the scratch pointer is NULL + int stack_allocate = (scratch == 0 ? Ncvec*2 : 1); + VLA_ARRAY_ON_STACK(v4sf, scratch_on_stack, stack_allocate); + + const v4sf *vinput = (const v4sf*)finput; + v4sf *voutput = (v4sf*)foutput; + v4sf *buff[2] = { voutput, scratch ? scratch : scratch_on_stack }; + int ib = (nf_odd ^ ordered ? 1 : 0); + + assert(VALIGNED(finput) && VALIGNED(foutput)); + + //assert(finput != foutput); + if (direction == PFFFT_FORWARD) { + ib = !ib; + if (setup->transform == PFFFT_REAL) { + ib = (rfftf1_ps(Ncvec*2, vinput, buff[ib], buff[!ib], + setup->twiddle, &setup->ifac[0]) == buff[0] ? 0 : 1); + pffft_real_finalize(Ncvec, buff[ib], buff[!ib], (v4sf*)setup->e); + } else { + v4sf *tmp = buff[ib]; + for (k=0; k < Ncvec; ++k) { + UNINTERLEAVE2(vinput[k*2], vinput[k*2+1], tmp[k*2], tmp[k*2+1]); + } + ib = (cfftf1_ps(Ncvec, buff[ib], buff[!ib], buff[ib], + setup->twiddle, &setup->ifac[0], -1) == buff[0] ? 0 : 1); + pffft_cplx_finalize(Ncvec, buff[ib], buff[!ib], (v4sf*)setup->e); + } + if (ordered) { + pffft_zreorder(setup, (float*)buff[!ib], (float*)buff[ib], PFFFT_FORWARD); + } else ib = !ib; + } else { + if (vinput == buff[ib]) { + ib = !ib; // may happen when finput == foutput + } + if (ordered) { + pffft_zreorder(setup, (float*)vinput, (float*)buff[ib], PFFFT_BACKWARD); + vinput = buff[ib]; ib = !ib; + } + if (setup->transform == PFFFT_REAL) { + pffft_real_preprocess(Ncvec, vinput, buff[ib], (v4sf*)setup->e); + ib = (rfftb1_ps(Ncvec*2, buff[ib], buff[0], buff[1], + setup->twiddle, &setup->ifac[0]) == buff[0] ? 0 : 1); + } else { + pffft_cplx_preprocess(Ncvec, vinput, buff[ib], (v4sf*)setup->e); + ib = (cfftf1_ps(Ncvec, buff[ib], buff[0], buff[1], + setup->twiddle, &setup->ifac[0], +1) == buff[0] ? 
0 : 1); + for (k=0; k < Ncvec; ++k) { + INTERLEAVE2(buff[ib][k*2], buff[ib][k*2+1], buff[ib][k*2], buff[ib][k*2+1]); + } + } + } + + if (buff[ib] != voutput) { + /* extra copy required -- this situation should only happen when finput == foutput */ + assert(finput==foutput); + for (k=0; k < Ncvec; ++k) { + v4sf a = buff[ib][2*k], b = buff[ib][2*k+1]; + voutput[2*k] = a; voutput[2*k+1] = b; + } + ib = !ib; + } + assert(buff[ib] == voutput); +} + +void pffft_zconvolve_accumulate(PFFFT_Setup *s, const float *a, const float *b, float *ab, float scaling) { + int Ncvec = s->Ncvec; + const v4sf * RESTRICT va = (const v4sf*)a; + const v4sf * RESTRICT vb = (const v4sf*)b; + v4sf * RESTRICT vab = (v4sf*)ab; + +#ifdef __arm__ + __builtin_prefetch(va); + __builtin_prefetch(vb); + __builtin_prefetch(vab); + __builtin_prefetch(va+2); + __builtin_prefetch(vb+2); + __builtin_prefetch(vab+2); + __builtin_prefetch(va+4); + __builtin_prefetch(vb+4); + __builtin_prefetch(vab+4); + __builtin_prefetch(va+6); + __builtin_prefetch(vb+6); + __builtin_prefetch(vab+6); +# ifndef __clang__ +# define ZCONVOLVE_USING_INLINE_NEON_ASM +# endif +#endif + + float ar, ai, br, bi, abr, abi; +#ifndef ZCONVOLVE_USING_INLINE_ASM + v4sf vscal = LD_PS1(scaling); + int i; +#endif + + assert(VALIGNED(a) && VALIGNED(b) && VALIGNED(ab)); + ar = ((v4sf_union*)va)[0].f[0]; + ai = ((v4sf_union*)va)[1].f[0]; + br = ((v4sf_union*)vb)[0].f[0]; + bi = ((v4sf_union*)vb)[1].f[0]; + abr = ((v4sf_union*)vab)[0].f[0]; + abi = ((v4sf_union*)vab)[1].f[0]; + +#ifdef ZCONVOLVE_USING_INLINE_ASM // inline asm version, unfortunately miscompiled by clang 3.2, at least on ubuntu.. so this will be restricted to gcc + const float *a_ = a, *b_ = b; float *ab_ = ab; + int N = Ncvec; + asm volatile("mov r8, %2 \n" + "vdup.f32 q15, %4 \n" + "1: \n" + "pld [%0,#64] \n" + "pld [%1,#64] \n" + "pld [%2,#64] \n" + "pld [%0,#96] \n" + "pld [%1,#96] \n" + "pld [%2,#96] \n" + "vld1.f32 {q0,q1}, [%0,:128]! \n" + "vld1.f32 {q4,q5}, [%1,:128]! \n" + "vld1.f32 {q2,q3}, [%0,:128]! \n" + "vld1.f32 {q6,q7}, [%1,:128]! \n" + "vld1.f32 {q8,q9}, [r8,:128]! \n" + + "vmul.f32 q10, q0, q4 \n" + "vmul.f32 q11, q0, q5 \n" + "vmul.f32 q12, q2, q6 \n" + "vmul.f32 q13, q2, q7 \n" + "vmls.f32 q10, q1, q5 \n" + "vmla.f32 q11, q1, q4 \n" + "vld1.f32 {q0,q1}, [r8,:128]! \n" + "vmls.f32 q12, q3, q7 \n" + "vmla.f32 q13, q3, q6 \n" + "vmla.f32 q8, q10, q15 \n" + "vmla.f32 q9, q11, q15 \n" + "vmla.f32 q0, q12, q15 \n" + "vmla.f32 q1, q13, q15 \n" + "vst1.f32 {q8,q9},[%2,:128]! \n" + "vst1.f32 {q0,q1},[%2,:128]! \n" + "subs %3, #2 \n" + "bne 1b \n" + : "+r"(a_), "+r"(b_), "+r"(ab_), "+r"(N) : "r"(scaling) : "r8", "q0","q1","q2","q3","q4","q5","q6","q7","q8","q9", "q10","q11","q12","q13","q15","memory"); +#else // default routine, works fine for non-arm cpus with current compilers + for (i=0; i < Ncvec; i += 2) { + v4sf ar, ai, br, bi; + ar = va[2*i+0]; ai = va[2*i+1]; + br = vb[2*i+0]; bi = vb[2*i+1]; + VCPLXMUL(ar, ai, br, bi); + vab[2*i+0] = VMADD(ar, vscal, vab[2*i+0]); + vab[2*i+1] = VMADD(ai, vscal, vab[2*i+1]); + ar = va[2*i+2]; ai = va[2*i+3]; + br = vb[2*i+2]; bi = vb[2*i+3]; + VCPLXMUL(ar, ai, br, bi); + vab[2*i+2] = VMADD(ar, vscal, vab[2*i+2]); + vab[2*i+3] = VMADD(ai, vscal, vab[2*i+3]); + } +#endif + if (s->transform == PFFFT_REAL) { + ((v4sf_union*)vab)[0].f[0] = abr + ar*br*scaling; + ((v4sf_union*)vab)[1].f[0] = abi + ai*bi*scaling; + } +} + + +#else // defined(PFFFT_SIMD_DISABLE) + +// standard routine using scalar floats, without SIMD stuff. 
+ +#define pffft_zreorder_nosimd pffft_zreorder +void pffft_zreorder_nosimd(PFFFT_Setup *setup, const float *in, float *out, pffft_direction_t direction) { + int k, N = setup->N; + if (setup->transform == PFFFT_COMPLEX) { + for (k=0; k < 2*N; ++k) out[k] = in[k]; + return; + } + else if (direction == PFFFT_FORWARD) { + float x_N = in[N-1]; + for (k=N-1; k > 1; --k) out[k] = in[k-1]; + out[0] = in[0]; + out[1] = x_N; + } else { + float x_N = in[1]; + for (k=1; k < N-1; ++k) out[k] = in[k+1]; + out[0] = in[0]; + out[N-1] = x_N; + } +} + +#define pffft_transform_internal_nosimd pffft_transform_internal +void pffft_transform_internal_nosimd(PFFFT_Setup *setup, const float *input, float *output, float *scratch, + pffft_direction_t direction, int ordered) { + int Ncvec = setup->Ncvec; + int nf_odd = (setup->ifac[1] & 1); + + // temporary buffer is allocated on the stack if the scratch pointer is NULL + int stack_allocate = (scratch == 0 ? Ncvec*2 : 1); + VLA_ARRAY_ON_STACK(v4sf, scratch_on_stack, stack_allocate); + float *buff[2]; + int ib; + if (scratch == 0) scratch = scratch_on_stack; + buff[0] = output; buff[1] = scratch; + + if (setup->transform == PFFFT_COMPLEX) ordered = 0; // it is always ordered. + ib = (nf_odd ^ ordered ? 1 : 0); + + if (direction == PFFFT_FORWARD) { + if (setup->transform == PFFFT_REAL) { + ib = (rfftf1_ps(Ncvec*2, input, buff[ib], buff[!ib], + setup->twiddle, &setup->ifac[0]) == buff[0] ? 0 : 1); + } else { + ib = (cfftf1_ps(Ncvec, input, buff[ib], buff[!ib], + setup->twiddle, &setup->ifac[0], -1) == buff[0] ? 0 : 1); + } + if (ordered) { + pffft_zreorder(setup, buff[ib], buff[!ib], PFFFT_FORWARD); ib = !ib; + } + } else { + if (input == buff[ib]) { + ib = !ib; // may happen when finput == foutput + } + if (ordered) { + pffft_zreorder(setup, input, buff[!ib], PFFFT_BACKWARD); + input = buff[!ib]; + } + if (setup->transform == PFFFT_REAL) { + ib = (rfftb1_ps(Ncvec*2, input, buff[ib], buff[!ib], + setup->twiddle, &setup->ifac[0]) == buff[0] ? 0 : 1); + } else { + ib = (cfftf1_ps(Ncvec, input, buff[ib], buff[!ib], + setup->twiddle, &setup->ifac[0], +1) == buff[0] ? 
0 : 1); + } + } + if (buff[ib] != output) { + int k; + // extra copy required -- this situation should happens only when finput == foutput + assert(input==output); + for (k=0; k < Ncvec; ++k) { + float a = buff[ib][2*k], b = buff[ib][2*k+1]; + output[2*k] = a; output[2*k+1] = b; + } + ib = !ib; + } + assert(buff[ib] == output); +} + +#define pffft_zconvolve_accumulate_nosimd pffft_zconvolve_accumulate +void pffft_zconvolve_accumulate_nosimd(PFFFT_Setup *s, const float *a, const float *b, + float *ab, float scaling) { + int i, Ncvec = s->Ncvec; + + if (s->transform == PFFFT_REAL) { + // take care of the fftpack ordering + ab[0] += a[0]*b[0]*scaling; + ab[2*Ncvec-1] += a[2*Ncvec-1]*b[2*Ncvec-1]*scaling; + ++ab; ++a; ++b; --Ncvec; + } + for (i=0; i < Ncvec; ++i) { + float ar, ai, br, bi; + ar = a[2*i+0]; ai = a[2*i+1]; + br = b[2*i+0]; bi = b[2*i+1]; + VCPLXMUL(ar, ai, br, bi); + ab[2*i+0] += ar*scaling; + ab[2*i+1] += ai*scaling; + } +} + +#endif // defined(PFFFT_SIMD_DISABLE) + +void pffft_transform(PFFFT_Setup *setup, const float *input, float *output, float *work, pffft_direction_t direction) { + pffft_transform_internal(setup, input, output, (v4sf*)work, direction, 0); +} + +void pffft_transform_ordered(PFFFT_Setup *setup, const float *input, float *output, float *work, pffft_direction_t direction) { + pffft_transform_internal(setup, input, output, (v4sf*)work, direction, 1); +} diff --git a/Source/pffft/pffft.h b/Source/pffft/pffft.h new file mode 100644 index 0000000..5db3d11 --- /dev/null +++ b/Source/pffft/pffft.h @@ -0,0 +1,177 @@ +/* Copyright (c) 2013 Julien Pommier ( pommier@modartt.com ) + + Based on original fortran 77 code from FFTPACKv4 from NETLIB, + authored by Dr Paul Swarztrauber of NCAR, in 1985. + + As confirmed by the NCAR fftpack software curators, the following + FFTPACKv5 license applies to FFTPACKv4 sources. My changes are + released under the same terms. + + FFTPACK license: + + http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html + + Copyright (c) 2004 the University Corporation for Atmospheric + Research ("UCAR"). All rights reserved. Developed by NCAR's + Computational and Information Systems Laboratory, UCAR, + www.cisl.ucar.edu. + + Redistribution and use of the Software in source and binary forms, + with or without modification, is permitted provided that the + following conditions are met: + + - Neither the names of NCAR's Computational and Information Systems + Laboratory, the University Corporation for Atmospheric Research, + nor the names of its sponsors or contributors may be used to + endorse or promote products derived from this Software without + specific prior written permission. + + - Redistributions of source code must retain the above copyright + notices, this list of conditions, and the disclaimer below. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions, and the disclaimer below in the + documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN + ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. +*/ + +/* + PFFFT : a Pretty Fast FFT. + + This is basically an adaptation of the single precision fftpack + (v4) as found on netlib taking advantage of SIMD instruction found + on cpus such as intel x86 (SSE1), powerpc (Altivec), and arm (NEON). + + For architectures where no SIMD instruction is available, the code + falls back to a scalar version. + + Restrictions: + + - 1D transforms only, with 32-bit single precision. + + - supports only transforms for inputs of length N of the form + N=(2^a)*(3^b)*(5^c), a >= 5, b >=0, c >= 0 (32, 48, 64, 96, 128, + 144, 160, etc are all acceptable lengths). Performance is best for + 128<=N<=8192. + + - all (float*) pointers in the functions below are expected to + have an "simd-compatible" alignment, that is 16 bytes on x86 and + powerpc CPUs. + + You can allocate such buffers with the functions + pffft_aligned_malloc / pffft_aligned_free (or with stuff like + posix_memalign..) + +*/ + +#ifndef PFFFT_H +#define PFFFT_H + +#include // for size_t + +#ifdef __cplusplus +extern "C" { +#endif + + /* opaque struct holding internal stuff (precomputed twiddle factors) + this struct can be shared by many threads as it contains only + read-only data. + */ + typedef struct PFFFT_Setup PFFFT_Setup; + + /* direction of the transform */ + typedef enum { PFFFT_FORWARD, PFFFT_BACKWARD } pffft_direction_t; + + /* type of transform */ + typedef enum { PFFFT_REAL, PFFFT_COMPLEX } pffft_transform_t; + + /* + prepare for performing transforms of size N -- the returned + PFFFT_Setup structure is read-only so it can safely be shared by + multiple concurrent threads. + */ + PFFFT_Setup *pffft_new_setup(int N, pffft_transform_t transform); + void pffft_destroy_setup(PFFFT_Setup *); + /* + Perform a Fourier transform , The z-domain data is stored in the + most efficient order for transforming it back, or using it for + convolution. If you need to have its content sorted in the + "usual" way, that is as an array of interleaved complex numbers, + either use pffft_transform_ordered , or call pffft_zreorder after + the forward fft, and before the backward fft. + + Transforms are not scaled: PFFFT_BACKWARD(PFFFT_FORWARD(x)) = N*x. + Typically you will want to scale the backward transform by 1/N. + + The 'work' pointer should point to an area of N (2*N for complex + fft) floats, properly aligned. If 'work' is NULL, then stack will + be used instead (this is probably the best strategy for small + FFTs, say for N < 16384). + + input and output may alias. + */ + void pffft_transform(PFFFT_Setup *setup, const float *input, float *output, float *work, pffft_direction_t direction); + + /* + Similar to pffft_transform, but makes sure that the output is + ordered as expected (interleaved complex numbers). This is + similar to calling pffft_transform and then pffft_zreorder. + + input and output may alias. + */ + void pffft_transform_ordered(PFFFT_Setup *setup, const float *input, float *output, float *work, pffft_direction_t direction); + + /* + call pffft_zreorder(.., PFFFT_FORWARD) after pffft_transform(..., + PFFFT_FORWARD) if you want to have the frequency components in + the correct "canonical" order, as interleaved complex numbers. 
+ + (for real transforms, both 0-frequency and half frequency + components, which are real, are assembled in the first entry as + F(0)+i*F(n/2+1). Note that the original fftpack did place + F(n/2+1) at the end of the arrays). + + input and output should not alias. + */ + void pffft_zreorder(PFFFT_Setup *setup, const float *input, float *output, pffft_direction_t direction); + + /* + Perform a multiplication of the frequency components of dft_a and + dft_b and accumulate them into dft_ab. The arrays should have + been obtained with pffft_transform(.., PFFFT_FORWARD) and should + *not* have been reordered with pffft_zreorder (otherwise just + perform the operation yourself as the dft coefs are stored as + interleaved complex numbers). + + the operation performed is: dft_ab += (dft_a * fdt_b)*scaling + + The dft_a, dft_b and dft_ab pointers may alias. + */ + void pffft_zconvolve_accumulate(PFFFT_Setup *setup, const float *dft_a, const float *dft_b, float *dft_ab, float scaling); + + /* + the float buffers must have the correct alignment (16-byte boundary + on intel and powerpc). This function may be used to obtain such + correctly aligned buffers. + */ + void *pffft_aligned_malloc(size_t nb_bytes); + void pffft_aligned_free(void *); + + /* return 4 or 1 wether support SSE/Altivec instructions was enable when building pffft.c */ + int pffft_simd_size(void); + +#ifdef __cplusplus +} +#endif + +#endif // PFFFT_H diff --git a/release/distmac.sh b/release/distmac.sh index d1977d1..1789f17 100755 --- a/release/distmac.sh +++ b/release/distmac.sh @@ -12,7 +12,7 @@ MAINNAME="PaulXStretch" #BUILDDIR=../Builds/MacOSX/build/Release BUILDDIR=../build/${MAINNAME}_artefacts/Release -#INSTBUILDDIR=../build/${MAINNAME}Inst_artefacts/Release +AAXBUILDDIR=../build/${MAINNAME}AAX_artefacts/Release rm -rf ${MAINNAME} @@ -24,7 +24,8 @@ mkdir -p ${MAINNAME} cp -pLRv ${BUILDDIR}/Standalone/${MAINNAME}.app ${MAINNAME}/ cp -pLRv ${BUILDDIR}/AU/${MAINNAME}.component ${MAINNAME}/ cp -pLRv ${BUILDDIR}/VST3/${MAINNAME}.vst3 ${MAINNAME}/ -cp -pRHv ${BUILDDIR}/AAX/${MAINNAME}.aaxplugin ${MAINNAME}/ + +cp -pRHv ${AAXBUILDDIR}/AAX/${MAINNAME}.aaxplugin ${MAINNAME}/ #cp -pLRv ${BUILDDIR}/${MAINNAME}.app ${MAINNAME}/ #cp -pLRv ${BUILDDIR}/${MAINNAME}.component ${MAINNAME}/ diff --git a/release/distwin.sh b/release/distwin.sh index 41312ab..e96dc55 100755 --- a/release/distwin.sh +++ b/release/distwin.sh @@ -17,6 +17,7 @@ if [ -z "$CERTFILE" ] ; then fi BUILDDIR="../build/${BASENAME}_artefacts/Release" +AAXBUILDDIR="../build/${BASENAME}AAX_artefacts/Release" BUILDDIR32="../build32/${BASENAME}_artefacts/Release" #BUILDDIR='../Builds/VisualStudio2019/x64/Release' @@ -30,7 +31,8 @@ mkdir -p ${BASENAME}/Plugins/AAX cp -v ${BUILDDIR}/Standalone/${BASENAME}.exe ${BASENAME}/ cp -pHLRv ${BUILDDIR}/VST3/${BASENAME}.vst3 ${BASENAME}/Plugins/VST3/ #cp -v ${BUILDDIR}/VST/SonoBus.dll ${BASENAME}/Plugins/ -cp -pHLRv ${BUILDDIR}/AAX/${BASENAME}.aaxplugin ${BASENAME}/Plugins/AAX/ + +cp -pHLRv ${AAXBUILDDIR}/AAX/${BASENAME}.aaxplugin ${BASENAME}/Plugins/AAX/ #mkdir -p SonoBus/Plugins32
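A note on what the new no_fftw argument actually does: it only toggles compile definitions (PS_USE_VDSP_FFT on macOS, PS_USE_PFFFT on Windows and Linux) and drops fftw3f from the link libraries; the sources then pick the FFT backend with the preprocessor. A minimal sketch of the resulting dispatch, with comments marking which build each branch serves:

    // Compile-time FFT backend selection driven by the CMake no_fftw flag;
    // exactly one branch is active in any given target.
    #if PS_USE_VDSP_FFT
        // AAX build on macOS: Apple's vDSP FFT (powers of two only).
    #elif PS_USE_PFFFT
        // AAX build on Windows/Linux: the vendored pffft, whose FFTPACK-style
        // license avoids shipping GPL FFTW code in the proprietary AAX format.
    #else
        // Normal builds: keep linking fftw3f, as before.
    #endif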
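Stretch.h swaps the FFTWBuffer allocator to pffft's aligned variants because pffft requires SIMD-compatible (16-byte) alignment for every float buffer it touches, per pffft.h. A minimal RAII sketch of the same idea (wrapper name hypothetical):

    #include <cstddef>
    #include "pffft.h"

    // RAII wrapper over pffft's aligned allocator; mirrors what the patched
    // FFTWBuffer does in PS_USE_PFFFT builds. pffft_aligned_free(nullptr) is safe.
    struct AlignedFloatBuffer {
        float* buf = nullptr;
        explicit AlignedFloatBuffer(size_t n)
            : buf((float*) pffft_aligned_malloc(n * sizeof(float))) {}
        ~AlignedFloatBuffer() { pffft_aligned_free(buf); }
        AlignedFloatBuffer(const AlignedFloatBuffer&) = delete;
        AlignedFloatBuffer& operator=(const AlignedFloatBuffer&) = delete;
    };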
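The new pffft path in FFT::smp2freq() leans on pffft_transform_ordered(), whose real-transform output packs F(0) and the Nyquist term into the first two floats, followed by interleaved re/im pairs. A self-contained sketch of the same magnitude computation, assuming the setup came from pffft_new_setup(n, PFFFT_REAL) and every float buffer is from pffft_aligned_malloc (work holds 2*n floats, matching the m_work sizing in the patch):

    #include <cmath>
    #include "pffft.h"

    // Forward real FFT plus per-bin magnitudes, following the patched
    // FFT::smp2freq(). 'mags' needs n/2 entries.
    void spectrumMagnitudes(PFFFT_Setup* setup, int n,
                            const float* input, float* spectrum,
                            float* work, float* mags)
    {
        pffft_transform_ordered(setup, input, spectrum, work, PFFFT_FORWARD);
        spectrum[1] = 0.0f;   // discard the Nyquist term, as the patch does
        mags[0] = 0.0f;       // DC bin is likewise zeroed
        for (int k = 1, l = 2; k < n / 2; ++k, l += 2)
            mags[k] = std::sqrt(spectrum[l] * spectrum[l]
                              + spectrum[l + 1] * spectrum[l + 1]);
    }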
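Going the other way, pffft transforms are unscaled -- pffft.h specifies PFFFT_BACKWARD(PFFFT_FORWARD(x)) = N*x -- so a resynthesis path has to fold in a 1/N factor itself. Sketch under the same buffer assumptions:

    #include "pffft.h"

    // Ordered inverse transform plus the 1/N normalization that pffft
    // (like FFTW's real transforms) leaves to the caller.
    void resynthesize(PFFFT_Setup* setup, int n,
                      const float* spectrum, float* output, float* work)
    {
        pffft_transform_ordered(setup, spectrum, output, work, PFFFT_BACKWARD);
        const float norm = 1.0f / (float) n;
        for (int i = 0; i < n; ++i)
            output[i] *= norm;
    }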
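On the new early-return branches in get_optimized_updown(): pffft itself accepts any N = 2^a * 3^b * 5^c that is a multiple of 32 for real transforms (per pffft.h and the asserts in pffft_new_setup), but the patch takes the simpler route of treating pffft like vDSP and allowing only powers of two. The rounding that restriction implies, as a sketch (helper name hypothetical):

    // Round an FFT size up to a power of two, starting from 32, the
    // smallest real-transform size pffft accepts.
    static int roundUpPow2(int n)
    {
        int p = 32;
        while (p < n)
            p *= 2;
        return p;
    }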
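Not exercised by the plugin code in this patch, but worth noting since the whole API is being vendored: pffft's unordered transforms pair with pffft_zconvolve_accumulate() for FFT convolution with no reordering cost. A hedged usage sketch, with aligned n-float buffers and a PFFFT_REAL setup assumed:

    #include <cstring>
    #include "pffft.h"

    // dft_ab += (dft_a * dft_b) * scaling, per the pffft.h docs; using
    // scaling = 1/n makes the backward transform come out as the convolution.
    void convolve(PFFFT_Setup* setup, int n,
                  const float* x, const float* h, float* y,
                  float* X, float* H, float* Y, float* work)
    {
        pffft_transform(setup, x, X, work, PFFFT_FORWARD);
        pffft_transform(setup, h, H, work, PFFFT_FORWARD);
        std::memset(Y, 0, n * sizeof(float));  // accumulator must start at zero
        pffft_zconvolve_accumulate(setup, X, H, Y, 1.0f / (float) n);
        pffft_transform(setup, Y, y, work, PFFFT_BACKWARD);
    }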
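Back in the plugin: the setFFTSize() rewrite keeps the existing exponential mapping from the normalized parameter to a buffer size but adds an early-out (fabsf(m_last_fftsizeparamval - size) > 0.00001f || force), so the mapping and the downstream m_stretch_source->setFFTSize() call only run when the parameter actually changed -- the "eliminate unnecessary FFT size calculations" part of the subject line. The mapping itself, as a worked sketch (the real code then rounds the result through optimizebufsize()):

    #include <cmath>

    // Normalized parameter [0..1] -> FFT size, before backend-specific rounding:
    //   size = 0.0 -> 2^7  = 128
    //   size = 1.0 -> 2^17 = 131072        (normal prebuffering)
    //   size = 1.0 -> 2^21.5 ~= 2965821    (largest prebuffer setting)
    int fftSizeForParam(float size, bool hugePrebuffer)
    {
        const double range = hugePrebuffer ? 14.5 : 10.0;
        return (int) std::pow(2.0, 7.0 + size * range);
    }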
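The "prevent file position jump" fix is the tail of setPreBufferAmount(): changing prebuffering restarts playback, which used to lose the read position, so the patch adds a getLastSourcePositionPercent() accessor (m_last_filepos divided by total samples) and seeks back after the restart. Condensed from the patch, plugin member types assumed:

    // m_last_filepos survives the restart, so the source can be seeked
    // straight back to where it was before the prebuffer change.
    setFFTSize(*getFloatParameter(cpi_fftsize), true);  // re-apply, forced
    startplay({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) },
              m_cur_num_out_chans, m_curmaxblocksize, err);
    m_stretch_source->seekPercent(m_stretch_source->getLastSourcePositionPercent());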
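One small fix rides along in globals.h: secondsToString2() no longer wraps hours with % 24, so durations of a day or more display their true length:

    // Before: String((int)rt.inHours() % 24)  -> 90000 s rendered as "01:00:00"
    // After:  String((int)rt.inHours())       -> 90000 s rendered as "25:00:00"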