#ifndef AMREX_PARSER_H_
#define AMREX_PARSER_H_

#include <AMReX_Arena.H>
#include <AMReX_Array.H>
#include <AMReX_GpuDevice.H>
#include <AMReX_Parser_Exe.H>
#include <AMReX_REAL.H>
#include <AMReX_Vector.H>

#include <memory>
#include <string>
#include <set>

namespace amrex {

template <int N>
struct ParserExecutor
{
    template <int M=N, std::enable_if_t<M==0,int> = 0>
    [[nodiscard]] AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
    double operator() () const noexcept
    {
        AMREX_IF_ON_DEVICE((return parser_exe_eval(m_device_executor, nullptr);))
        AMREX_IF_ON_HOST((return parser_exe_eval(m_host_executor, nullptr);))
    }

    template <typename... Ts>
    [[nodiscard]] AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
    std::enable_if_t<sizeof...(Ts) == N && !amrex::Same<float,Ts...>::value, double>
    operator() (Ts... var) const noexcept
    {
        amrex::GpuArray<double,N> l_var{var...};
        AMREX_IF_ON_DEVICE((return parser_exe_eval(m_device_executor, l_var.data());))
        AMREX_IF_ON_HOST((return parser_exe_eval(m_host_executor, l_var.data());))
    }

    template <typename... Ts>
    [[nodiscard]] AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
    std::enable_if_t<sizeof...(Ts) == N && amrex::Same<float,Ts...>::value, float>
    operator() (Ts... var) const noexcept
    {
        amrex::GpuArray<double,N> l_var{var...};
        AMREX_IF_ON_DEVICE((return static_cast<float>(parser_exe_eval(m_device_executor, l_var.data()));))
        AMREX_IF_ON_HOST((return static_cast<float>(parser_exe_eval(m_host_executor, l_var.data()));))
    }

    [[nodiscard]] AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
    double operator() (GpuArray<double,N> const& var) const noexcept
    {
        AMREX_IF_ON_DEVICE((return parser_exe_eval(m_device_executor, var.data());))
        AMREX_IF_ON_HOST((return parser_exe_eval(m_host_executor, var.data());))
    }

    AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
    explicit operator bool () const {
        AMREX_IF_ON_DEVICE((return m_device_executor != nullptr;))
        AMREX_IF_ON_HOST((return m_host_executor != nullptr;))
    }

    char* m_host_executor = nullptr;
#ifdef AMREX_USE_GPU
    char* m_device_executor = nullptr;
#endif
};

class Parser
{
public:
    Parser (std::string const& func_body);
    Parser () = default;
    void define (std::string const& func_body);

    explicit operator bool () const;

    void setConstant (std::string const& name, double c);

    void registerVariables (Vector<std::string> const& vars);

    void print () const;
    void printExe () const;

    [[nodiscard]] int depth () const;
    [[nodiscard]] int maxStackSize () const;

    [[nodiscard]] std::string expr () const;

    [[nodiscard]] std::set<std::string> symbols () const;

    //! This compiles for both GPU and CPU
    template <int N> [[nodiscard]] ParserExecutor<N> compile () const;

    //! This compiles for CPU only
    template <int N> [[nodiscard]] ParserExecutor<N> compileHost () const;

private:

    struct Data {
        std::string m_expression;
        struct amrex_parser* m_parser = nullptr;
        int m_nvars = 0;
        mutable char* m_host_executor = nullptr;
#ifdef AMREX_USE_GPU
        mutable char* m_device_executor = nullptr;
#endif
        mutable int m_max_stack_size = 0;
        mutable int m_exe_size = 0;
        mutable Vector<char const*> m_locals;
        Data () = default;
        ~Data ();
        Data (Data const&) = delete;
        Data (Data &&) = delete;
        Data& operator= (Data const&) = delete;
        Data& operator= (Data &&) = delete;
    };

    std::shared_ptr<Data> m_data;
    Vector<std::string> m_vars;
};

template <int N>
ParserExecutor<N>
Parser::compileHost () const
{
    if (m_data && m_data->m_parser) {
        AMREX_ASSERT(N == m_data->m_nvars);

        if (!(m_data->m_host_executor)) {
            int stack_size;
            m_data->m_exe_size = static_cast<int>
                (parser_exe_size(m_data->m_parser, m_data->m_max_stack_size,
                                 stack_size));

            if (m_data->m_max_stack_size > AMREX_PARSER_STACK_SIZE) {
                amrex::Abort("amrex::Parser: AMREX_PARSER_STACK_SIZE, "
                             + std::to_string(AMREX_PARSER_STACK_SIZE) + ", is too small for "
                             + m_data->m_expression);
            }
            if (stack_size != 0) {
                amrex::Abort("amrex::Parser: something went wrong with parser stack! "
                             + std::to_string(stack_size));
            }

            m_data->m_host_executor = (char*)The_Pinned_Arena()->alloc(m_data->m_exe_size);

            try {
                m_data->m_locals = parser_compile(m_data->m_parser,
                                                  m_data->m_host_executor);
            } catch (const std::runtime_error& e) {
                throw std::runtime_error(std::string(e.what()) + " in Parser expression \""
                                         + m_data->m_expression + "\"");
            }
        }

#ifdef AMREX_USE_GPU
        return ParserExecutor<N>{m_data->m_host_executor, m_data->m_device_executor};
#else
        return ParserExecutor<N>{m_data->m_host_executor};
#endif
    } else {
        return ParserExecutor<N>{};
    }
}

template <int N>
ParserExecutor<N>
Parser::compile () const
{
    auto exe = compileHost<N>();

#ifdef AMREX_USE_GPU
    if (m_data && m_data->m_parser && !(m_data->m_device_executor)) {
        m_data->m_device_executor = (char*)The_Arena()->alloc(m_data->m_exe_size);
        Gpu::htod_memcpy_async(m_data->m_device_executor, m_data->m_host_executor,
                               m_data->m_exe_size);
        Gpu::streamSynchronize();
        exe.m_device_executor = m_data->m_device_executor;
    }
#endif

    return exe;
}

}

#endif
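
For reference, a minimal usage sketch of the interface declared above, assuming a host-side caller; the expression, constant, and variable names are illustrative and not taken from this header:

// Usage sketch (not part of AMReX_Parser.H); expression and names are hypothetical.
#include <AMReX_Parser.H>

double eval_example ()
{
    amrex::Parser parser("a*x + sin(pi*y)");        // define the expression
    parser.setConstant("a", 2.0);                    // bind symbols that are constants
    parser.setConstant("pi", 3.14159265358979323846);
    parser.registerVariables({"x", "y"});            // remaining symbols become call arguments, in this order
    auto f = parser.compile<2>();                    // ParserExecutor<2>, usable on host and device
    return f(0.5, 0.25);                             // evaluate at x = 0.5, y = 0.25
}

The executor is a trivially copyable handle to memory owned by the Parser, so the Parser object must outlive any ParserExecutor obtained from compile() or compileHost().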