
oasis

Own branch of Oasis Linux (upstream: <https://git.sr.ht/~mcf/oasis/>). Clone with: git clone https://anongit.hacktivis.me/git/oasis.git

0003-Use-__asm__-instead-of-asm-to-compile-with-std-c99.patch (8871B)


From 2c79a37719c4f115ad718adae6856f903f5a1e7b Mon Sep 17 00:00:00 2001
From: Michael Forney <mforney@mforney.org>
Date: Tue, 17 Jan 2023 13:07:11 -0800
Subject: [PATCH] Use __asm__ instead of asm to compile with -std=c99
---
module/zcommon/zfs_fletcher_intel.c | 44 +++++++--------
module/zcommon/zfs_fletcher_sse.c | 86 ++++++++++++++---------------
2 files changed, 65 insertions(+), 65 deletions(-)
diff --git a/module/zcommon/zfs_fletcher_intel.c b/module/zcommon/zfs_fletcher_intel.c
index 34590a155..9213d083c 100644
--- a/module/zcommon/zfs_fletcher_intel.c
+++ b/module/zcommon/zfs_fletcher_intel.c
@@ -83,18 +83,18 @@ fletcher_4_avx2_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
#define FLETCHER_4_AVX2_RESTORE_CTX(ctx) \
{ \
- asm volatile("vmovdqu %0, %%ymm0" :: "m" ((ctx)->avx[0])); \
- asm volatile("vmovdqu %0, %%ymm1" :: "m" ((ctx)->avx[1])); \
- asm volatile("vmovdqu %0, %%ymm2" :: "m" ((ctx)->avx[2])); \
- asm volatile("vmovdqu %0, %%ymm3" :: "m" ((ctx)->avx[3])); \
+ __asm__ volatile("vmovdqu %0, %%ymm0" :: "m" ((ctx)->avx[0])); \
+ __asm__ volatile("vmovdqu %0, %%ymm1" :: "m" ((ctx)->avx[1])); \
+ __asm__ volatile("vmovdqu %0, %%ymm2" :: "m" ((ctx)->avx[2])); \
+ __asm__ volatile("vmovdqu %0, %%ymm3" :: "m" ((ctx)->avx[3])); \
}
#define FLETCHER_4_AVX2_SAVE_CTX(ctx) \
{ \
- asm volatile("vmovdqu %%ymm0, %0" : "=m" ((ctx)->avx[0])); \
- asm volatile("vmovdqu %%ymm1, %0" : "=m" ((ctx)->avx[1])); \
- asm volatile("vmovdqu %%ymm2, %0" : "=m" ((ctx)->avx[2])); \
- asm volatile("vmovdqu %%ymm3, %0" : "=m" ((ctx)->avx[3])); \
+ __asm__ volatile("vmovdqu %%ymm0, %0" : "=m" ((ctx)->avx[0])); \
+ __asm__ volatile("vmovdqu %%ymm1, %0" : "=m" ((ctx)->avx[1])); \
+ __asm__ volatile("vmovdqu %%ymm2, %0" : "=m" ((ctx)->avx[2])); \
+ __asm__ volatile("vmovdqu %%ymm3, %0" : "=m" ((ctx)->avx[3])); \
}
@@ -107,15 +107,15 @@ fletcher_4_avx2_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
FLETCHER_4_AVX2_RESTORE_CTX(ctx);
do {
- asm volatile("vpmovzxdq %0, %%ymm4"::"m" (*ip));
- asm volatile("vpaddq %ymm4, %ymm0, %ymm0");
- asm volatile("vpaddq %ymm0, %ymm1, %ymm1");
- asm volatile("vpaddq %ymm1, %ymm2, %ymm2");
- asm volatile("vpaddq %ymm2, %ymm3, %ymm3");
+ __asm__ volatile("vpmovzxdq %0, %%ymm4"::"m" (*ip));
+ __asm__ volatile("vpaddq %ymm4, %ymm0, %ymm0");
+ __asm__ volatile("vpaddq %ymm0, %ymm1, %ymm1");
+ __asm__ volatile("vpaddq %ymm1, %ymm2, %ymm2");
+ __asm__ volatile("vpaddq %ymm2, %ymm3, %ymm3");
} while ((ip += 2) < ipend);
FLETCHER_4_AVX2_SAVE_CTX(ctx);
- asm volatile("vzeroupper");
+ __asm__ volatile("vzeroupper");
}
static void
@@ -130,20 +130,20 @@ fletcher_4_avx2_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
FLETCHER_4_AVX2_RESTORE_CTX(ctx);
- asm volatile("vmovdqu %0, %%ymm5" :: "m" (mask));
+ __asm__ volatile("vmovdqu %0, %%ymm5" :: "m" (mask));
do {
- asm volatile("vpmovzxdq %0, %%ymm4"::"m" (*ip));
- asm volatile("vpshufb %ymm5, %ymm4, %ymm4");
+ __asm__ volatile("vpmovzxdq %0, %%ymm4"::"m" (*ip));
+ __asm__ volatile("vpshufb %ymm5, %ymm4, %ymm4");
- asm volatile("vpaddq %ymm4, %ymm0, %ymm0");
- asm volatile("vpaddq %ymm0, %ymm1, %ymm1");
- asm volatile("vpaddq %ymm1, %ymm2, %ymm2");
- asm volatile("vpaddq %ymm2, %ymm3, %ymm3");
+ __asm__ volatile("vpaddq %ymm4, %ymm0, %ymm0");
+ __asm__ volatile("vpaddq %ymm0, %ymm1, %ymm1");
+ __asm__ volatile("vpaddq %ymm1, %ymm2, %ymm2");
+ __asm__ volatile("vpaddq %ymm2, %ymm3, %ymm3");
} while ((ip += 2) < ipend);
FLETCHER_4_AVX2_SAVE_CTX(ctx);
- asm volatile("vzeroupper");
+ __asm__ volatile("vzeroupper");
}
static boolean_t fletcher_4_avx2_valid(void)
diff --git a/module/zcommon/zfs_fletcher_sse.c b/module/zcommon/zfs_fletcher_sse.c
index 8ab9b9acb..b942de4a2 100644
--- a/module/zcommon/zfs_fletcher_sse.c
+++ b/module/zcommon/zfs_fletcher_sse.c
@@ -82,18 +82,18 @@ fletcher_4_sse2_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
#define FLETCHER_4_SSE_RESTORE_CTX(ctx) \
{ \
- asm volatile("movdqu %0, %%xmm0" :: "m" ((ctx)->sse[0])); \
- asm volatile("movdqu %0, %%xmm1" :: "m" ((ctx)->sse[1])); \
- asm volatile("movdqu %0, %%xmm2" :: "m" ((ctx)->sse[2])); \
- asm volatile("movdqu %0, %%xmm3" :: "m" ((ctx)->sse[3])); \
+ __asm__ volatile("movdqu %0, %%xmm0" :: "m" ((ctx)->sse[0])); \
+ __asm__ volatile("movdqu %0, %%xmm1" :: "m" ((ctx)->sse[1])); \
+ __asm__ volatile("movdqu %0, %%xmm2" :: "m" ((ctx)->sse[2])); \
+ __asm__ volatile("movdqu %0, %%xmm3" :: "m" ((ctx)->sse[3])); \
}
#define FLETCHER_4_SSE_SAVE_CTX(ctx) \
{ \
- asm volatile("movdqu %%xmm0, %0" : "=m" ((ctx)->sse[0])); \
- asm volatile("movdqu %%xmm1, %0" : "=m" ((ctx)->sse[1])); \
- asm volatile("movdqu %%xmm2, %0" : "=m" ((ctx)->sse[2])); \
- asm volatile("movdqu %%xmm3, %0" : "=m" ((ctx)->sse[3])); \
+ __asm__ volatile("movdqu %%xmm0, %0" : "=m" ((ctx)->sse[0])); \
+ __asm__ volatile("movdqu %%xmm1, %0" : "=m" ((ctx)->sse[1])); \
+ __asm__ volatile("movdqu %%xmm2, %0" : "=m" ((ctx)->sse[2])); \
+ __asm__ volatile("movdqu %%xmm3, %0" : "=m" ((ctx)->sse[3])); \
}
static void
@@ -104,21 +104,21 @@ fletcher_4_sse2_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
FLETCHER_4_SSE_RESTORE_CTX(ctx);
- asm volatile("pxor %xmm4, %xmm4");
+ __asm__ volatile("pxor %xmm4, %xmm4");
do {
- asm volatile("movdqu %0, %%xmm5" :: "m"(*ip));
- asm volatile("movdqa %xmm5, %xmm6");
- asm volatile("punpckldq %xmm4, %xmm5");
- asm volatile("punpckhdq %xmm4, %xmm6");
- asm volatile("paddq %xmm5, %xmm0");
- asm volatile("paddq %xmm0, %xmm1");
- asm volatile("paddq %xmm1, %xmm2");
- asm volatile("paddq %xmm2, %xmm3");
- asm volatile("paddq %xmm6, %xmm0");
- asm volatile("paddq %xmm0, %xmm1");
- asm volatile("paddq %xmm1, %xmm2");
- asm volatile("paddq %xmm2, %xmm3");
+ __asm__ volatile("movdqu %0, %%xmm5" :: "m"(*ip));
+ __asm__ volatile("movdqa %xmm5, %xmm6");
+ __asm__ volatile("punpckldq %xmm4, %xmm5");
+ __asm__ volatile("punpckhdq %xmm4, %xmm6");
+ __asm__ volatile("paddq %xmm5, %xmm0");
+ __asm__ volatile("paddq %xmm0, %xmm1");
+ __asm__ volatile("paddq %xmm1, %xmm2");
+ __asm__ volatile("paddq %xmm2, %xmm3");
+ __asm__ volatile("paddq %xmm6, %xmm0");
+ __asm__ volatile("paddq %xmm0, %xmm1");
+ __asm__ volatile("paddq %xmm1, %xmm2");
+ __asm__ volatile("paddq %xmm2, %xmm3");
} while ((ip += 2) < ipend);
FLETCHER_4_SSE_SAVE_CTX(ctx);
@@ -135,13 +135,13 @@ fletcher_4_sse2_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
do {
uint32_t scratch1 = BSWAP_32(ip[0]);
uint32_t scratch2 = BSWAP_32(ip[1]);
- asm volatile("movd %0, %%xmm5" :: "r"(scratch1));
- asm volatile("movd %0, %%xmm6" :: "r"(scratch2));
- asm volatile("punpcklqdq %xmm6, %xmm5");
- asm volatile("paddq %xmm5, %xmm0");
- asm volatile("paddq %xmm0, %xmm1");
- asm volatile("paddq %xmm1, %xmm2");
- asm volatile("paddq %xmm2, %xmm3");
+ __asm__ volatile("movd %0, %%xmm5" :: "r"(scratch1));
+ __asm__ volatile("movd %0, %%xmm6" :: "r"(scratch2));
+ __asm__ volatile("punpcklqdq %xmm6, %xmm5");
+ __asm__ volatile("paddq %xmm5, %xmm0");
+ __asm__ volatile("paddq %xmm0, %xmm1");
+ __asm__ volatile("paddq %xmm1, %xmm2");
+ __asm__ volatile("paddq %xmm2, %xmm3");
} while ((ip += 2) < ipend);
FLETCHER_4_SSE_SAVE_CTX(ctx);
@@ -179,23 +179,23 @@ fletcher_4_ssse3_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
FLETCHER_4_SSE_RESTORE_CTX(ctx);
- asm volatile("movdqu %0, %%xmm7"::"m" (mask));
- asm volatile("pxor %xmm4, %xmm4");
+ __asm__ volatile("movdqu %0, %%xmm7"::"m" (mask));
+ __asm__ volatile("pxor %xmm4, %xmm4");
do {
- asm volatile("movdqu %0, %%xmm5"::"m" (*ip));
- asm volatile("pshufb %xmm7, %xmm5");
- asm volatile("movdqa %xmm5, %xmm6");
- asm volatile("punpckldq %xmm4, %xmm5");
- asm volatile("punpckhdq %xmm4, %xmm6");
- asm volatile("paddq %xmm5, %xmm0");
- asm volatile("paddq %xmm0, %xmm1");
- asm volatile("paddq %xmm1, %xmm2");
- asm volatile("paddq %xmm2, %xmm3");
- asm volatile("paddq %xmm6, %xmm0");
- asm volatile("paddq %xmm0, %xmm1");
- asm volatile("paddq %xmm1, %xmm2");
- asm volatile("paddq %xmm2, %xmm3");
+ __asm__ volatile("movdqu %0, %%xmm5"::"m" (*ip));
+ __asm__ volatile("pshufb %xmm7, %xmm5");
+ __asm__ volatile("movdqa %xmm5, %xmm6");
+ __asm__ volatile("punpckldq %xmm4, %xmm5");
+ __asm__ volatile("punpckhdq %xmm4, %xmm6");
+ __asm__ volatile("paddq %xmm5, %xmm0");
+ __asm__ volatile("paddq %xmm0, %xmm1");
+ __asm__ volatile("paddq %xmm1, %xmm2");
+ __asm__ volatile("paddq %xmm2, %xmm3");
+ __asm__ volatile("paddq %xmm6, %xmm0");
+ __asm__ volatile("paddq %xmm0, %xmm1");
+ __asm__ volatile("paddq %xmm1, %xmm2");
+ __asm__ volatile("paddq %xmm2, %xmm3");
} while ((ip += 2) < ipend);
FLETCHER_4_SSE_SAVE_CTX(ctx);
--
2.37.3
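
Note (not part of the patch above; the file and identifier names below are illustrative, not taken from ZFS): with GCC, plain `asm` is a GNU extension keyword that is disabled when a strict ISO dialect such as -std=c99 is requested, while the reserved-identifier spelling `__asm__` stays available in every language mode. That is the whole reason for the rename in the two zfs_fletcher files. A minimal sketch of the distinction, assuming GCC or a GCC-compatible compiler on x86:

/*
 * demo.c - illustrative only.
 *
 *   cc -std=gnu99 -c demo.c                 # both spellings accepted
 *   cc -std=c99 -c demo.c                   # builds: only __asm__ is used
 *   cc -std=c99 -DUSE_PLAIN_ASM -c demo.c   # fails on GCC: plain `asm` is
 *                                           # not a keyword in strict ISO mode
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t rdtsc_demo(void)
{
	uint32_t lo, hi;

	/* __asm__ is a reserved identifier, so it is recognized even under
	 * -std=c99; this is the spelling the patch switches to. */
	__asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

#ifdef USE_PLAIN_ASM
static void pause_demo(void)
{
	/* Plain `asm` is only recognized in the gnu* dialects; under a strict
	 * -std=c99 build GCC rejects this statement, which is the failure the
	 * patch removes. */
	asm volatile("pause");
}
#endif

int main(void)
{
	printf("tsc = %llu\n", (unsigned long long)rdtsc_demo());
	return 0;
}

The same effect can be had by building with -std=gnu99 instead of -std=c99, but keeping the strict dialect and using `__asm__` (as the patch does) avoids pulling in the other GNU keyword extensions.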