// lanczos_sse2.hpp

// (C) Copyright John Maddock 2006.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_MATH_SPECIAL_FUNCTIONS_LANCZOS_SSE2
#define BOOST_MATH_SPECIAL_FUNCTIONS_LANCZOS_SSE2

#ifdef _MSC_VER
#pragma once
#endif

#include <emmintrin.h>
#include <boost/static_assert.hpp> // for BOOST_STATIC_ASSERT in the MSVC-only section below
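
// ALIGN16 forces 16-byte alignment on the coefficient tables and result buffers
// so that the aligned _mm_load_pd / _mm_store_pd calls below are legal.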
#if defined(__GNUC__) || defined(__PGI) || defined(__SUNPRO_CC)
#define ALIGN16 __attribute__((__aligned__(16)))
#else
#define ALIGN16 __declspec(align(16))
#endif

namespace boost{ namespace math{ namespace lanczos{
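
// SSE2 specialisations of the lanczos13m53 approximation declared in lanczos.hpp.
// Each __m128d below carries two partial sums at once: lane 0 accumulates the
// numerator polynomial and lane 1 the denominator polynomial of the rational
// Lanczos approximation, so both halves of the ratio are evaluated with a
// single instruction stream.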
template <>
inline double lanczos13m53::lanczos_sum<double>(const double& x)
{
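   // Coefficient layout: 13 pairs of doubles, interleaved as
   // { numerator[k], denominator[k] } with the highest power of x first, so one
   // aligned 128-bit load fetches a matching numerator/denominator pair.
   // The denominator coefficients are those of x(x+1)(x+2)...(x+11).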
   static const ALIGN16 double coeff[26] = {
      static_cast<double>(2.506628274631000270164908177133837338626L),
      static_cast<double>(1u),
      static_cast<double>(210.8242777515793458725097339207133627117L),
      static_cast<double>(66u),
      static_cast<double>(8071.672002365816210638002902272250613822L),
      static_cast<double>(1925u),
      static_cast<double>(186056.2653952234950402949897160456992822L),
      static_cast<double>(32670u),
      static_cast<double>(2876370.628935372441225409051620849613599L),
      static_cast<double>(357423u),
      static_cast<double>(31426415.58540019438061423162831820536287L),
      static_cast<double>(2637558u),
      static_cast<double>(248874557.8620541565114603864132294232163L),
      static_cast<double>(13339535u),
      static_cast<double>(1439720407.311721673663223072794912393972L),
      static_cast<double>(45995730u),
      static_cast<double>(6039542586.35202800506429164430729792107L),
      static_cast<double>(105258076u),
      static_cast<double>(17921034426.03720969991975575445893111267L),
      static_cast<double>(150917976u),
      static_cast<double>(35711959237.35566804944018545154716670596L),
      static_cast<double>(120543840u),
      static_cast<double>(42919803642.64909876895789904700198885093L),
      static_cast<double>(39916800u),
      static_cast<double>(23531376880.41075968857200767445163675473L),
      static_cast<double>(0u)
   };
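
   // Above this limit the x^12 terms of the direct evaluation run into the
   // limits of a double, so fall back to evaluating the same ratio as
   // polynomials in z = 1/x (coefficients taken in reverse order), which keeps
   // every intermediate finite.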
   static const double lim = 4.31965e+25; // By experiment, the largest x for which the SSE2 code does not go bad.
   if (x > lim)
   {
      double z = 1 / x;
      return ((((((((((((coeff[24] * z + coeff[22]) * z + coeff[20]) * z + coeff[18]) * z + coeff[16]) * z + coeff[14]) * z + coeff[12]) * z + coeff[10]) * z + coeff[8]) * z + coeff[6]) * z + coeff[4]) * z + coeff[2]) * z + coeff[0])
         / ((((((((((((coeff[25] * z + coeff[23]) * z + coeff[21]) * z + coeff[19]) * z + coeff[17]) * z + coeff[15]) * z + coeff[13]) * z + coeff[11]) * z + coeff[9]) * z + coeff[7]) * z + coeff[5]) * z + coeff[3]) * z + coeff[1]);
   }
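
   // SSE2 path: broadcast x into both lanes and evaluate the even-power and
   // odd-power halves of each polynomial as separate Horner chains in x^2,
   // halving the length of the serial dependency chain; the odd-power half is
   // multiplied by a final x and folded into the even half at the end.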
   __m128d vx = _mm_load1_pd(&x);
   __m128d sum_even = _mm_load_pd(coeff);
   __m128d sum_odd = _mm_load_pd(coeff+2);
   __m128d nc_odd, nc_even;
   __m128d vx2 = _mm_mul_pd(vx, vx);

   sum_even = _mm_mul_pd(sum_even, vx2);
   nc_even = _mm_load_pd(coeff + 4);
   sum_odd = _mm_mul_pd(sum_odd, vx2);
   nc_odd = _mm_load_pd(coeff + 6);
   sum_even = _mm_add_pd(sum_even, nc_even);
   sum_odd = _mm_add_pd(sum_odd, nc_odd);

   sum_even = _mm_mul_pd(sum_even, vx2);
   nc_even = _mm_load_pd(coeff + 8);
   sum_odd = _mm_mul_pd(sum_odd, vx2);
   nc_odd = _mm_load_pd(coeff + 10);
   sum_even = _mm_add_pd(sum_even, nc_even);
   sum_odd = _mm_add_pd(sum_odd, nc_odd);

   sum_even = _mm_mul_pd(sum_even, vx2);
   nc_even = _mm_load_pd(coeff + 12);
   sum_odd = _mm_mul_pd(sum_odd, vx2);
   nc_odd = _mm_load_pd(coeff + 14);
   sum_even = _mm_add_pd(sum_even, nc_even);
   sum_odd = _mm_add_pd(sum_odd, nc_odd);

   sum_even = _mm_mul_pd(sum_even, vx2);
   nc_even = _mm_load_pd(coeff + 16);
   sum_odd = _mm_mul_pd(sum_odd, vx2);
   nc_odd = _mm_load_pd(coeff + 18);
   sum_even = _mm_add_pd(sum_even, nc_even);
   sum_odd = _mm_add_pd(sum_odd, nc_odd);

   sum_even = _mm_mul_pd(sum_even, vx2);
   nc_even = _mm_load_pd(coeff + 20);
   sum_odd = _mm_mul_pd(sum_odd, vx2);
   nc_odd = _mm_load_pd(coeff + 22);
   sum_even = _mm_add_pd(sum_even, nc_even);
   sum_odd = _mm_add_pd(sum_odd, nc_odd);

   sum_even = _mm_mul_pd(sum_even, vx2);
   nc_even = _mm_load_pd(coeff + 24);
   sum_odd = _mm_mul_pd(sum_odd, vx);
   sum_even = _mm_add_pd(sum_even, nc_even);
   sum_even = _mm_add_pd(sum_even, sum_odd);
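
   // Lane 0 now holds the complete numerator sum, lane 1 the denominator sum.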
   double ALIGN16 t[2];
   _mm_store_pd(t, sum_even);
   return t[0] / t[1];
}
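
// Same scheme as lanczos_sum above, but for lanczos_sum_expG_scaled: the
// numerator coefficients are pre-scaled by exp(-g) (g ~= 6.0246800407767 for
// lanczos13m53), which callers rely on to avoid overflow in gamma-function
// ratios.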
template <>
inline double lanczos13m53::lanczos_sum_expG_scaled<double>(const double& x)
{
   static const ALIGN16 double coeff[26] = {
      static_cast<double>(0.006061842346248906525783753964555936883222L),
      static_cast<double>(1u),
      static_cast<double>(0.5098416655656676188125178644804694509993L),
      static_cast<double>(66u),
      static_cast<double>(19.51992788247617482847860966235652136208L),
      static_cast<double>(1925u),
      static_cast<double>(449.9445569063168119446858607650988409623L),
      static_cast<double>(32670u),
      static_cast<double>(6955.999602515376140356310115515198987526L),
      static_cast<double>(357423u),
      static_cast<double>(75999.29304014542649875303443598909137092L),
      static_cast<double>(2637558u),
      static_cast<double>(601859.6171681098786670226533699352302507L),
      static_cast<double>(13339535u),
      static_cast<double>(3481712.15498064590882071018964774556468L),
      static_cast<double>(45995730u),
      static_cast<double>(14605578.08768506808414169982791359218571L),
      static_cast<double>(105258076u),
      static_cast<double>(43338889.32467613834773723740590533316085L),
      static_cast<double>(150917976u),
      static_cast<double>(86363131.28813859145546927288977868422342L),
      static_cast<double>(120543840u),
      static_cast<double>(103794043.1163445451906271053616070238554L),
      static_cast<double>(39916800u),
      static_cast<double>(56906521.91347156388090791033559122686859L),
      static_cast<double>(0u)
   };
   static const double lim = 4.76886e+25; // By experiment, the largest x for which the SSE2 code does not go bad.
   if (x > lim)
   {
      double z = 1 / x;
      return ((((((((((((coeff[24] * z + coeff[22]) * z + coeff[20]) * z + coeff[18]) * z + coeff[16]) * z + coeff[14]) * z + coeff[12]) * z + coeff[10]) * z + coeff[8]) * z + coeff[6]) * z + coeff[4]) * z + coeff[2]) * z + coeff[0])
         / ((((((((((((coeff[25] * z + coeff[23]) * z + coeff[21]) * z + coeff[19]) * z + coeff[17]) * z + coeff[15]) * z + coeff[13]) * z + coeff[11]) * z + coeff[9]) * z + coeff[7]) * z + coeff[5]) * z + coeff[3]) * z + coeff[1]);
   }
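
   // SSE2 path: identical structure to lanczos_sum above; only the numerator
   // coefficients differ.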
   __m128d vx = _mm_load1_pd(&x);
   __m128d sum_even = _mm_load_pd(coeff);
   __m128d sum_odd = _mm_load_pd(coeff+2);
   __m128d nc_odd, nc_even;
   __m128d vx2 = _mm_mul_pd(vx, vx);

   sum_even = _mm_mul_pd(sum_even, vx2);
   nc_even = _mm_load_pd(coeff + 4);
   sum_odd = _mm_mul_pd(sum_odd, vx2);
   nc_odd = _mm_load_pd(coeff + 6);
   sum_even = _mm_add_pd(sum_even, nc_even);
   sum_odd = _mm_add_pd(sum_odd, nc_odd);

   sum_even = _mm_mul_pd(sum_even, vx2);
   nc_even = _mm_load_pd(coeff + 8);
   sum_odd = _mm_mul_pd(sum_odd, vx2);
   nc_odd = _mm_load_pd(coeff + 10);
   sum_even = _mm_add_pd(sum_even, nc_even);
   sum_odd = _mm_add_pd(sum_odd, nc_odd);

   sum_even = _mm_mul_pd(sum_even, vx2);
   nc_even = _mm_load_pd(coeff + 12);
   sum_odd = _mm_mul_pd(sum_odd, vx2);
   nc_odd = _mm_load_pd(coeff + 14);
   sum_even = _mm_add_pd(sum_even, nc_even);
   sum_odd = _mm_add_pd(sum_odd, nc_odd);

   sum_even = _mm_mul_pd(sum_even, vx2);
   nc_even = _mm_load_pd(coeff + 16);
   sum_odd = _mm_mul_pd(sum_odd, vx2);
   nc_odd = _mm_load_pd(coeff + 18);
   sum_even = _mm_add_pd(sum_even, nc_even);
   sum_odd = _mm_add_pd(sum_odd, nc_odd);

   sum_even = _mm_mul_pd(sum_even, vx2);
   nc_even = _mm_load_pd(coeff + 20);
   sum_odd = _mm_mul_pd(sum_odd, vx2);
   nc_odd = _mm_load_pd(coeff + 22);
   sum_even = _mm_add_pd(sum_even, nc_even);
   sum_odd = _mm_add_pd(sum_odd, nc_odd);

   sum_even = _mm_mul_pd(sum_even, vx2);
   nc_even = _mm_load_pd(coeff + 24);
   sum_odd = _mm_mul_pd(sum_odd, vx);
   sum_even = _mm_add_pd(sum_even, nc_even);
   sum_even = _mm_add_pd(sum_even, sum_odd);

   double ALIGN16 t[2];
   _mm_store_pd(t, sum_even);
   return t[0] / t[1];
}
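
// On MSVC long double is the same 64-bit type as double, so the long double
// specialisations simply forward to the double ones; the static assert below
// guards that assumption.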
#ifdef _MSC_VER

BOOST_STATIC_ASSERT(sizeof(double) == sizeof(long double));

template <>
inline long double lanczos13m53::lanczos_sum<long double>(const long double& x)
{
   return lanczos_sum<double>(static_cast<double>(x));
}
template <>
inline long double lanczos13m53::lanczos_sum_expG_scaled<long double>(const long double& x)
{
   return lanczos_sum_expG_scaled<double>(static_cast<double>(x));
}

#endif

} // namespace lanczos
} // namespace math
} // namespace boost

#undef ALIGN16

#endif // BOOST_MATH_SPECIAL_FUNCTIONS_LANCZOS_SSE2