Skip to content

Commit

Permalink
Small fixes and compiler wrangling
Browse files Browse the repository at this point in the history
  • Loading branch information
jatinchowdhury18 committed Nov 25, 2023
1 parent 3e203a2 commit b9b0df5
Show file tree
Hide file tree
Showing 7 changed files with 17 additions and 12 deletions.
2 changes: 1 addition & 1 deletion modules/JUCE
Submodule JUCE updated 1435 files
3 changes: 3 additions & 0 deletions modules/cmake/WarningFlags.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,9 @@ if(WIN32)
-Wno-cast-function-type
-Wno-range-loop-bind-reference
-Wno-sign-conversion
-Wno-implicit-int-float-conversion
-Wno-implicit-const-int-float-conversion
-Wno-header-hygiene
)
elseif((CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") OR (CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC"))
message(STATUS "Setting MSVC compiler flags")
Expand Down
2 changes: 1 addition & 1 deletion src/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -167,11 +167,11 @@ if (NOT(${JAI_COMPILER} STREQUAL "JAI_COMPILER-NOTFOUND"))
endif()

# AVX/SSE files for accelerated neural nets
make_lib_simd_runtime(rnn_accelerated processors/drive/neural_utils/RNNAccelerated.cpp)
if(IS_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/../modules/math_approx")
message(STATUS "Using RTNeural with math_approx")
add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/../modules/math_approx" math_approx)
endif()
make_lib_simd_runtime(rnn_accelerated processors/drive/neural_utils/RNNAccelerated.cpp)
foreach(target IN ITEMS rnn_accelerated_sse_or_arm rnn_accelerated_avx)
target_link_libraries(${target} PRIVATE config_flags juce::juce_recommended_lto_flags warning_flags)
target_include_directories(${target}
Expand Down
2 changes: 1 addition & 1 deletion src/jai/SharedJaiContext.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ struct JaiContextWrapper
JaiContextWrapper();
~JaiContextWrapper();

operator jai::Context*() { return internal; }; // NOLINT
// Implicit conversion to the wrapped context pointer. Non-owning view of
// `internal` (a raw jai::Context* member, initialized to nullptr), so the
// result may be null if construction has not populated it — callers must
// not delete it. NOLINT suppresses the non-explicit-conversion lint.
operator jai::Context*() { return internal; } // NOLINT

private:
jai::Context* internal = nullptr;
Expand Down
4 changes: 4 additions & 0 deletions src/pch.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,11 @@ JUCE_BEGIN_IGNORE_WARNINGS_GCC_LIKE ("-Wzero-as-null-pointer-constant",
#include <RTNeural/RTNeural.h>
JUCE_END_IGNORE_WARNINGS_GCC_LIKE

JUCE_BEGIN_IGNORE_WARNINGS_GCC_LIKE ("-Wshadow-field-in-constructor")
#include <chowdsp_wdf/chowdsp_wdf.h>
JUCE_END_IGNORE_WARNINGS_GCC_LIKE


#include <ea_variant/ea_variant.h>
#include <sst/cpputils.h>

Expand Down
14 changes: 6 additions & 8 deletions src/processors/drive/neural_utils/RNNAccelerated.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,9 @@ struct ApproxMathsProvider
return math_approx::sigmoid<9> (x);
}
};
using RNNMathsProvider = ApproxMathsProvider;
#else
using RNNMathsProvider = RTNEURAL_NAMESPACE::DefaultMathsProvider;
#endif

#include "model_loaders.h"
Expand All @@ -34,7 +37,7 @@ struct ApproxMathsProvider
#pragma GCC diagnostic pop
#endif

#if __AVX__ // Intel/AVX
#if (__MMX__ || __SSE__ || __amd64__) && BYOD_COMPILING_WITH_AVX // INTEL + AVX
namespace rnn_avx
#else
namespace rnn_sse_arm
Expand All @@ -47,13 +50,8 @@ template <int inputSize, int hiddenSize, int RecurrentLayerType, int SRCMode>
struct RNNAccelerated<inputSize, hiddenSize, RecurrentLayerType, SRCMode>::Internal
{
using RecurrentLayerTypeComplete = std::conditional_t<RecurrentLayerType == RecurrentLayerType::LSTMLayer,
#if RTNEURAL_USE_MATH_APPROX
RTNEURAL_NAMESPACE::LSTMLayerT<float, inputSize, hiddenSize, (RTNEURAL_NAMESPACE::SampleRateCorrectionMode) SRCMode, ApproxMathsProvider>,
RTNEURAL_NAMESPACE::GRULayerT<float, inputSize, hiddenSize, (RTNEURAL_NAMESPACE::SampleRateCorrectionMode) SRCMode, ApproxMathsProvider>>;
#else
RTNEURAL_NAMESPACE::LSTMLayerT<float, inputSize, hiddenSize, (RTNEURAL_NAMESPACE::SampleRateCorrectionMode) SRCMode>,
RTNEURAL_NAMESPACE::GRULayerT<float, inputSize, hiddenSize, (RTNEURAL_NAMESPACE::SampleRateCorrectionMode) SRCMode>>;
#endif
RTNEURAL_NAMESPACE::LSTMLayerT<float, inputSize, hiddenSize, (RTNEURAL_NAMESPACE::SampleRateCorrectionMode) SRCMode, RNNMathsProvider>,
RTNEURAL_NAMESPACE::GRULayerT<float, inputSize, hiddenSize, (RTNEURAL_NAMESPACE::SampleRateCorrectionMode) SRCMode, RNNMathsProvider>>;
using DenseLayerType = RTNEURAL_NAMESPACE::DenseT<float, hiddenSize, 1>;
RTNEURAL_NAMESPACE::ModelT<float, inputSize, 1, RecurrentLayerTypeComplete, DenseLayerType> model;
};
Expand Down
2 changes: 1 addition & 1 deletion src/processors/drive/neural_utils/RNNAccelerated.h
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ class RNNAccelerated
};
} // namespace rnn_sse_arm

#if __AVX__ // Intel/AVX
#if __MMX__ || __SSE__ || __amd64__ // INTEL
namespace rnn_avx
{
template <int inputSize, int hiddenSize, int RecurrentLayerType, int SRCMode>
Expand Down

0 comments on commit b9b0df5

Please sign in to comment.