;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2023 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
; * Redistributions of source code must retain the above copyright
; notice, this list of conditions and the following disclaimer.
; * Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in
; the documentation and/or other materials provided with the
; distribution.
; * Neither the name of Intel Corporation nor the names of its
; contributors may be used to endorse or promote products derived
; from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;
;;; gf_4vect_dot_prod_avx512_gfni(len, vec, *g_tbls, **buffs, **dests);
;;;
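;;;
;;; Arguments (System V order rdi, rsi, rdx, rcx, r8; see the defines below):
;;;  len     - length of each source/dest buffer in bytes
;;;  vec     - number of source vectors ("k" data disks) summed over
;;;  g_tbls  - GF(2^8) multiply constants: 4 consecutive blocks of "vec"
;;;            8-byte GFNI affine matrices, one block per destination
;;;  buffs   - array of "vec" pointers to the source buffers
;;;  dests   - array of 4 pointers to the destination (parity) buffers
;;;
;;; Presumed C prototype, following the other gf_Nvect_dot_prod variants
;;; in isa-l's erasure code API:
;;;  void gf_4vect_dot_prod_avx512_gfni(int len, int vec,
;;;          unsigned char *g_tbls, unsigned char **buffs,
;;;          unsigned char **dests);
;;;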
%include "reg_sizes.asm"
%include "gf_vect_gfni.inc"
%if AS_FEATURE_LEVEL >= 10
%ifidn __OUTPUT_FORMAT__, elf64
%define arg0 rdi
%define arg1 rsi
%define arg2 rdx
%define arg3 rcx
%define arg4 r8
%define arg5 r9
%define tmp r11
%define tmp2 r10
%define tmp3 r13 ; must be saved and restored
%define tmp4 r12 ; must be saved and restored
%define tmp5 r14 ; must be saved and restored
%define tmp6 r15 ; must be saved and restored
%define func(x) x: endbranch
%macro FUNC_SAVE 0
	push	r12
	push	r13
	push	r14
	push	r15
%endmacro

%macro FUNC_RESTORE 0
	pop	r15
	pop	r14
	pop	r13
	pop	r12
%endmacro
%endif
%ifidn __OUTPUT_FORMAT__, win64
%define arg0 rcx
%define arg1 rdx
%define arg2 r8
%define arg3 r9
%define arg4 r12 ; must be saved, loaded and restored
%define arg5 r15 ; must be saved and restored
%define tmp r11
%define tmp2 r10
%define tmp3 r13 ; must be saved and restored
%define tmp4 r14 ; must be saved and restored
%define tmp5 rdi ; must be saved and restored
%define tmp6 rsi ; must be saved and restored
%define stack_size 3*16 + 7*8 ; must be an odd multiple of 8
%define arg(x) [rsp + stack_size + 8 + 8*x]
%define func(x) proc_frame x
%macro FUNC_SAVE 0
	alloc_stack	stack_size
	vmovdqa	[rsp + 0*16], xmm6
	vmovdqa	[rsp + 1*16], xmm7
	vmovdqa	[rsp + 2*16], xmm8
	mov	[rsp + 3*16 + 0*8], r12
	mov	[rsp + 3*16 + 1*8], r13
	mov	[rsp + 3*16 + 2*8], r14
	mov	[rsp + 3*16 + 3*8], r15
	mov	[rsp + 3*16 + 4*8], rdi
	mov	[rsp + 3*16 + 5*8], rsi
	end_prolog
	mov	arg4, arg(4)
%endmacro

%macro FUNC_RESTORE 0
	vmovdqa	xmm6, [rsp + 0*16]
	vmovdqa	xmm7, [rsp + 1*16]
	vmovdqa	xmm8, [rsp + 2*16]
	mov	r12, [rsp + 3*16 + 0*8]
	mov	r13, [rsp + 3*16 + 1*8]
	mov	r14, [rsp + 3*16 + 2*8]
	mov	r15, [rsp + 3*16 + 3*8]
	mov	rdi, [rsp + 3*16 + 4*8]
	mov	rsi, [rsp + 3*16 + 5*8]
	add	rsp, stack_size
%endmacro
%endif
%define len arg0
%define vec arg1
%define mul_array arg2
%define src arg3
%define dest1 arg4
%define ptr arg5
%define vec_i tmp2
%define dest2 tmp3
%define dest3 tmp4
%define dest4 tmp5
%define vskip3 tmp6
%define pos rax
%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR vmovdqu8
 %define XSTR vmovdqu8
%else
;;; Addresses are aligned: use aligned or non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa64
  %define XSTR vmovdqa64
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
%define xgft1 zmm5
%define xgft2 zmm6
%define xgft3 zmm7
%define xgft4 zmm8
%define x0 zmm0
%define xp1 zmm1
%define xp2 zmm2
%define xp3 zmm3
%define xp4 zmm4
default rel
[bits 64]
section .text
;;
;; Encodes 64 bytes from each of the "vec" (k) sources into 64 bytes
;; of each of the 4 destinations (parity disks)
;;
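;;
;; For each destination m in 1..4 this computes, over GF(2^8):
;;   dest_m[pos : pos+64] = XOR over j = 0..vec-1 of
;;                          gf_mul(g_tbls[m*vec + j], src_j[pos : pos+64])
;; using GF_MUL_XOR from gf_vect_gfni.inc (vgf2p8affineqb-based multiply,
;; accumulated into xp1..xp4). When the optional KMASK argument is given,
;; loads and stores are byte-masked so a tail of fewer than 64 bytes can
;; be encoded.
;;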
%macro ENCODE_64B_4 0-1
%define %%KMASK %1

	vpxorq	xp1, xp1, xp1
	vpxorq	xp2, xp2, xp2
	vpxorq	xp3, xp3, xp3
	vpxorq	xp4, xp4, xp4
	mov	tmp, mul_array
	xor	vec_i, vec_i

%%next_vect:
	mov	ptr, [src + vec_i]
%if %0 == 1
	vmovdqu8 x0{%%KMASK}, [ptr + pos]	;Get next source vector (less than 64 bytes)
%else
	XLDR	x0, [ptr + pos]			;Get next source vector (64 bytes)
%endif
	add	vec_i, 8

	vbroadcastf32x2	xgft1, [tmp]		;Load the next 8-byte GF constant for each dest
	vbroadcastf32x2	xgft2, [tmp + vec]
	vbroadcastf32x2	xgft3, [tmp + vec*2]
	vbroadcastf32x2	xgft4, [tmp + vskip3]
	add	tmp, 8

	GF_MUL_XOR	EVEX, x0, xgft1, xgft1, xp1, xgft2, xgft2, xp2, xgft3, xgft3, xp3, \
			xgft4, xgft4, xp4

	cmp	vec_i, vec
	jl	%%next_vect

%if %0 == 1
	vmovdqu8 [dest1 + pos]{%%KMASK}, xp1
	vmovdqu8 [dest2 + pos]{%%KMASK}, xp2
	vmovdqu8 [dest3 + pos]{%%KMASK}, xp3
	vmovdqu8 [dest4 + pos]{%%KMASK}, xp4
%else
	XSTR	[dest1 + pos], xp1
	XSTR	[dest2 + pos], xp2
	XSTR	[dest3 + pos], xp3
	XSTR	[dest4 + pos], xp4
%endif
%endmacro
align 16
mk_global gf_4vect_dot_prod_avx512_gfni, function
func(gf_4vect_dot_prod_avx512_gfni)
	FUNC_SAVE
	xor	pos, pos
	mov	vskip3, vec
	imul	vskip3, 8*3		;vskip3 = vec*3*8, byte offset of the 4th g_tbls block
	shl	vec, 3			;vec *= 8. Make vec_i count by 8
	mov	dest2, [dest1 + 8]	;Unpack the 4 destination pointers
	mov	dest3, [dest1 + 2*8]
	mov	dest4, [dest1 + 3*8]
	mov	dest1, [dest1]

	cmp	len, 64
	jl	.len_lt_64

.loop64:
	ENCODE_64B_4
	add	pos, 64			;Loop on 64 bytes at a time
	sub	len, 64
	cmp	len, 64
	jge	.loop64

.len_lt_64:
	cmp	len, 0
	jle	.exit
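	; Build a k-mask selecting the remaining len (1..63) bytes:
	; tmp = (1 << len) - 1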
	xor	tmp, tmp
	bts	tmp, len
	dec	tmp
	kmovq	k1, tmp
	ENCODE_64B_4 k1

.exit:
	vzeroupper
	FUNC_RESTORE
	ret

endproc_frame
%endif ; if AS_FEATURE_LEVEL >= 10