;*****************************************************************************
;* x86-optimized Float DSP functions
;******************************************************************************
;*****************************************************************************
;* Author: Peter Barfuss <[email protected]>
;*
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;*
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************
%ifdef ARCH_X86_64
%ifidn __OUTPUT_FORMAT__,win64
%define WIN64
%else
%define UNIX64
%endif
%define gprsize 8
%else
%define gprsize 4
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.
%undef PIC
%endif
%ifidn __OUTPUT_FORMAT__,elf
%define BINFMT_IS_ELF
%elifidn __OUTPUT_FORMAT__,elf32
%define BINFMT_IS_ELF
%elifidn __OUTPUT_FORMAT__,elf64
%define BINFMT_IS_ELF
%endif
%ifidn __OUTPUT_FORMAT__,win32
%define mangle(x) _ %+ x
%else
%define mangle(x) x
%endif
%ifdef WIN64
%define PIC
%endif
%ifdef PIC
default rel
%endif
%ifdef WIN64 ; Windows x64 ;=================================================
%define r0 rcx
%define r1 rdx
%define r2 r8
%define r3 r9
%define r4 rdi
%define r5 rsi
%macro LOAD_IF_USED 2 ; reg_id, number_of_args
%if %1 < %2
mov r%1, [rbp + stack_offset + %1*8]
%endif
%endmacro
%macro PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names...
%assign regs_used %2
%if regs_used > 4
push r4
push r5
%assign stack_offset stack_offset+16
%endif
LOAD_IF_USED 4, %1
LOAD_IF_USED 5, %1
%endmacro
%macro RET 0
%if regs_used > 4
pop r5
pop r4
%endif
pop rbp
ret
%endmacro
%elifdef ARCH_X86_64 ; *nix x64 ;=============================================
%define r0 rdi
%define r1 rsi
%define r2 rdx
%define r3 rcx
%define r4 r8
%define r5 r9
%define r6 r10
%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
%endmacro
%macro RET 0
pop rbp
ret
%endmacro
%else ; X86_32 ;==============================================================
%define r0 ecx
%define r1 edx
%define r2 ebx
%define r3 esi
%define r4 edi
%define r5 eax
%define rbp ebp
%define rsp esp
%macro PUSH_IF_USED 1 ; reg_id
%if %1 < regs_used
push r%1
%assign stack_offset stack_offset+4
%endif
%endmacro
%macro POP_IF_USED 1 ; reg_id
%if %1 < regs_used
pop r%1
%endif
%endmacro
%macro LOAD_IF_USED 2 ; reg_id, number_of_args
%if %1 < %2
mov r%1, [esp + stack_offset + 8 + %1*4]
%endif
%endmacro
%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
%assign regs_used %2
PUSH_IF_USED 2
PUSH_IF_USED 3
PUSH_IF_USED 4
PUSH_IF_USED 5
LOAD_IF_USED 0, %1
LOAD_IF_USED 1, %1
LOAD_IF_USED 2, %1
LOAD_IF_USED 3, %1
LOAD_IF_USED 4, %1
LOAD_IF_USED 5, %1
%endmacro
%macro RET 0
POP_IF_USED 5
POP_IF_USED 4
POP_IF_USED 3
POP_IF_USED 2
pop rbp
ret
%endmacro
%endif ;======================================================================
;=============================================================================
; arch-independent part
;=============================================================================
; Symbol prefix for C linkage
%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
cglobal_internal %1 %+ SUFFIX, %2
%endmacro
%macro cglobal_internal 1-2+
%xdefine %1 mangle(%1)
global %1
%ifdef BINFMT_IS_ELF
[type %1 function]
%endif
align 16
%1:
%ifdef WIN64
;%assign stack_offset 8
%assign stack_offset 0
%else
%assign stack_offset 0
%endif
push rbp
mov rbp, rsp
PROLOGUE %2
%endmacro
%macro cendfunc 1
cendfunc_internal %1 %+ SUFFIX
%endmacro
%macro cendfunc_internal 1
%ifdef BINFMT_IS_ELF
%1 %+ size EQU $-%1
[size %1 %1 %+ size]
%endif
%endmacro
; This is needed for ELF, otherwise the GNU linker assumes the stack is
; executable by default.
%ifdef BINFMT_IS_ELF
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%endif
%macro CAT_XDEFINE 3
%xdefine %1%2 %3
%endmacro
%macro CAT_UNDEF 2
%undef %1%2
%endmacro
; base-4 constants for shuffles
%assign i 0
%rep 256
%assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
%if j < 10
CAT_XDEFINE q000, j, i
%elif j < 100
CAT_XDEFINE q00, j, i
%elif j < 1000
CAT_XDEFINE q0, j, i
%else
CAT_XDEFINE q, j, i
%endif
%assign i i+1
%endrep
%undef i
%undef j
section .rodata align=16
pdw_80000000: times 4 dd 0x80000000
pdw_7fffffff: times 4 dd 0x7fffffff
ps_p1m1p1m1: dd 0x0, 0x80000000, 0x0, 0x80000000
ps_p1p1p1m1: dd 0x0, 0x0, 0x0, 0x80000000
align 16
ps_25: times 4 dd 0.25
ps_5: times 4 dd 0.5
section .text align=16
%xdefine SUFFIX _sse
;-----------------------------------------------------------------------------
; void vector_fmul(float *dst, const float *src0, const float *src1, uint32_t len)
;-----------------------------------------------------------------------------
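; Reference (a C sketch of what the loop below computes; len is assumed to be a
; multiple of 8 and the buffers 16-byte aligned):
;   for (uint32_t i = 0; i < len; i++)
;       dst[i] = src0[i] * src1[i];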
cglobal vector_fmul, 4,4,2
lea r3, [r3*4 - 2*0x10]
.loop:
movaps xmm0, [r1 + r3]
movaps xmm1, [r1 + r3 + 0x10]
mulps xmm0, [r2 + r3]
mulps xmm1, [r2 + r3 + 0x10]
movaps [r0 + r3], xmm0
movaps [r0 + r3 + 0x10], xmm1
sub r3, 0x20
jge .loop
RET
cendfunc vector_fmul
;------------------------------------------------------------------------------
; void vector_fmac_scalar(float *dst, const float *src, float mul, uint32_t len)
;------------------------------------------------------------------------------
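; Reference (C sketch; len is assumed to be a multiple of 8, buffers 16-byte aligned):
;   for (uint32_t i = 0; i < len; i++)
;       dst[i] += src[i] * mul;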
%ifdef UNIX64
cglobal vector_fmac_scalar, 3,3,3
%define len r2
%else
cglobal vector_fmac_scalar, 4,4,3
%define len r3
%endif
%ifdef ARCH_X86_64
%ifdef WIN64
movaps xmm0, xmm2
%endif
%else
movss xmm0, [esp + stack_offset + 0x10]
%endif
shufps xmm0, xmm0, 0
lea len, [len*4-2*0x10]
.loop:
movaps xmm1, [r1+len]
movaps xmm2, [r1+len+0x10]
mulps xmm1, xmm0
mulps xmm2, xmm0
addps xmm1, [r0+len]
addps xmm2, [r0+len+0x10]
movaps [r0+len ], xmm1
movaps [r0+len+0x10], xmm2
sub len, 0x20
jge .loop
RET
%undef len
cendfunc vector_fmac_scalar
;------------------------------------------------------------------------------
; void vector_fmul_scalar(float *dst, const float *src, float mul, uint32_t len)
;------------------------------------------------------------------------------
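; Reference (C sketch; len is assumed to be a multiple of 4, buffers 16-byte aligned):
;   for (uint32_t i = 0; i < len; i++)
;       dst[i] = src[i] * mul;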
%ifdef UNIX64
cglobal vector_fmul_scalar, 3,3,2
%define len r2
%else
cglobal vector_fmul_scalar, 4,4,3
%define len r3
%ifdef WIN64
movaps xmm0, xmm2
%else
movss xmm0, [esp + stack_offset + 0x10]
%endif
%endif
shufps xmm0, xmm0, 0
lea len, [len*4-0x10]
.loop:
movaps xmm1, [r1+len]
mulps xmm1, xmm0
movaps [r0+len], xmm1
sub len, 0x10
jge .loop
RET
%undef len
cendfunc vector_fmul_scalar
;-----------------------------------------------------------------------------
; void vector_fmul_copy(float *dst, const float *src, uint32_t len)
;-----------------------------------------------------------------------------
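; Reference (C sketch; len is assumed to be a multiple of 16, buffers 16-byte aligned):
;   for (uint32_t i = 0; i < len; i++)
;       dst[i] = src[i];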
cglobal vector_fmul_copy, 3,4,2
lea r2, [r2*4 - 0x40]
.loop:
movaps xmm0, [r1 + r2]
movaps xmm1, [r1 + r2 + 0x10]
movaps xmm2, [r1 + r2 + 0x20]
movaps xmm3, [r1 + r2 + 0x30]
movaps [r0 + r2], xmm0
movaps [r0 + r2 + 0x10], xmm1
movaps [r0 + r2 + 0x20], xmm2
movaps [r0 + r2 + 0x30], xmm3
sub r2, 0x40
jge .loop
RET
cendfunc vector_fmul_copy
;-----------------------------------------------------------------------------
; void vector_fmul_add(float *dst, const float *src0, const float *src1,
; const float *src2, uint32_t len)
;-----------------------------------------------------------------------------
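; Reference (C sketch; len is assumed to be a multiple of 16, buffers 16-byte aligned):
;   for (uint32_t i = 0; i < len; i++)
;       dst[i] = src0[i] * src1[i] + src2[i];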
cglobal vector_fmul_add, 5,6,2
shl r4, 2
xor r5, r5
.loop:
movaps xmm0, [r1 + r5]
movaps xmm1, [r1 + r5 + 0x10]
movaps xmm2, [r1 + r5 + 0x20]
movaps xmm3, [r1 + r5 + 0x30]
mulps xmm0, [r2 + r5]
mulps xmm1, [r2 + r5 + 0x10]
mulps xmm2, [r2 + r5 + 0x20]
mulps xmm3, [r2 + r5 + 0x30]
addps xmm0, [r3 + r5]
addps xmm1, [r3 + r5 + 0x10]
addps xmm2, [r3 + r5 + 0x20]
addps xmm3, [r3 + r5 + 0x30]
movaps [r0 + r5], xmm0
movaps [r0 + r5 + 0x10], xmm1
movaps [r0 + r5 + 0x20], xmm2
movaps [r0 + r5 + 0x30], xmm3
add r5, 0x40
cmp r5, r4
jl .loop
RET
cendfunc vector_fmul_add
;-----------------------------------------------------------------------------
; void vector_fmul_reverse(float *dst, const float *src0, const float *src1, uint32_t len)
;-----------------------------------------------------------------------------
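; Reference (C sketch; len is assumed to be a multiple of 8, buffers 16-byte aligned):
;   for (uint32_t i = 0; i < len; i++)
;       dst[i] = src0[i] * src1[len - 1 - i];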
cglobal vector_fmul_reverse, 4,4,2
lea r3, [r3*4 - 2*0x10]
.loop:
movaps xmm0, [r2]
movaps xmm1, [r2 + 0x10]
shufps xmm0, xmm0, 0x1b
shufps xmm1, xmm1, 0x1b
mulps xmm0, [r1 + r3 + 0x10]
mulps xmm1, [r1 + r3]
movaps [r0 + r3 + 0x10], xmm0
movaps [r0 + r3], xmm1
add r2, 2*0x10
sub r3, 2*0x10
jge .loop
RET
cendfunc vector_fmul_reverse
;-----------------------------------------------------------------------------
; void vector_fmul_window(float *dst, const float *src0, const float *src1,
; const float *win, uint32_t len)
;-----------------------------------------------------------------------------
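; Reference (C sketch of the overlap-window below; dst and win hold 2*len floats,
; src0 and src1 hold len floats each, len assumed to be a multiple of 4):
;   dst += len; win += len; src0 += len;
;   for (int i = -(int)len, j = (int)len - 1; i < 0; i++, j--) {
;       float s0 = src0[i], s1 = src1[j], wi = win[i], wj = win[j];
;       dst[i] = s0 * wj - s1 * wi;
;       dst[j] = s0 * wi + s1 * wj;
;   }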
cglobal vector_fmul_window, 5,6,6
%ifdef WIN64
movsxd rdi, edi
%endif
shl r4, 0x2
mov r5, r4
neg r5
add r0, r4
add r1, r4
add r3, r4
sub r4, 0x10
.loop:
movaps xmm1, [r3+r4]
movaps xmm0, [r3+r5]
movaps xmm5, [r2+r4]
movaps xmm4, [r1+r5]
shufps xmm1, xmm1, 0x1b
shufps xmm5, xmm5, 0x1b
movaps xmm2, xmm0
movaps xmm3, xmm1
mulps xmm2, xmm4
mulps xmm3, xmm5
mulps xmm1, xmm4
mulps xmm0, xmm5
addps xmm2, xmm3
subps xmm1, xmm0
shufps xmm2, xmm2, 0x1b
movaps [r0+r5], xmm1
movaps [r0+r4], xmm2
sub r4, 0x10
add r5, 0x10
jl .loop
RET
cendfunc vector_fmul_window
;-----------------------------------------------------------------------------
; void butterflies_float(float *src0, float *src1, uint32_t len);
;-----------------------------------------------------------------------------
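; Reference (C sketch; len is assumed to be zero or a multiple of 4, buffers 16-byte aligned):
;   for (uint32_t i = 0; i < len; i++) {
;       float t  = src0[i] - src1[i];
;       src0[i] += src1[i];
;       src1[i]  = t;
;   }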
cglobal butterflies_float, 3,3,3
test r2, r2
jz .end
shl r2, 2
lea r0, [r0 + r2]
lea r1, [r1 + r2]
neg r2
.loop:
movaps xmm0, [r0 + r2]
movaps xmm1, [r1 + r2]
movaps xmm2, xmm0
subps xmm2, xmm1
addps xmm0, xmm1
movaps [r1 + r2], xmm2
movaps [r0 + r2], xmm0
add r2, 0x10
jl .loop
.end:
RET
cendfunc butterflies_float
;-----------------------------------------------------------------------------
; float scalarproduct_float(float *v1, float *v2, uint32_t len)
;-----------------------------------------------------------------------------
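; Reference (C sketch; len is assumed to be a multiple of 4):
;   float sum = 0.0f;
;   for (uint32_t i = 0; i < len; i++)
;       sum += v1[i] * v2[i];
;   return sum;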
cglobal scalarproduct_float, 3,4,3
shl r2, 2
xor r3, r3
xorps xmm0, xmm0
.loop:
movups xmm1, [r0+r3]
movups xmm2, [r1+r3]
mulps xmm1, xmm2
addps xmm0, xmm1
add r3, 0x10
cmp r3, r2
jl .loop
movaps xmm1, xmm0
shufps xmm1, xmm0, 0x1b
addps xmm1, xmm0
movhlps xmm0, xmm1
addss xmm0, xmm1
%ifndef ARCH_X86_64
movss [esp + stack_offset + 8], xmm0
fld dword [esp + stack_offset + 8]
%endif
RET
cendfunc scalarproduct_float
;-----------------------------------------------------------------------------
; void calc_power_spectrum(float *psd, FFTComplex *vin, uint32_t len)
;-----------------------------------------------------------------------------
cglobal calc_power_spectrum, 3,4,6
shl r2, 1
xor r3, r3
.loop:
movups xmm2, [r1+2*r3] ; [r0 i0 r1 i1]
movups xmm5, [r1+2*r3+0x10] ; [r2 i2 r3 i3]
movaps xmm3, xmm2
shufps xmm2, xmm5, q2020 ; {r0,r1,r2,r3}
shufps xmm3, xmm5, q3131 ; {i0,i1,i2,i3}
mulps xmm2, xmm2 ; {r0^2, r1^2, r2^2, r3^2}
mulps xmm3, xmm3 ; {i0^2, i1^2, i2^2, i3^2}
addps xmm3, xmm2 ; {r0^2 + i0^2, r1^2 + i1^2, r2^2 + i2^2, r3^2 + i3^2}
movups [r0+r3], xmm3
add r3, 0x10
cmp r3, r2
jl .loop
RET
cendfunc calc_power_spectrum
;-----------------------------------------------------
;void vector_clipf(float *dst, const float *src, float min, float max, uint32_t len)
;-----------------------------------------------------
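; Reference (C sketch; len is assumed to be a multiple of 16, buffers 16-byte aligned):
;   for (uint32_t i = 0; i < len; i++)
;       dst[i] = fminf(fmaxf(src[i], min), max);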
%ifdef WIN64
cglobal vector_clipf, 5,6,6
movaps xmm0, xmm2 ; min
movaps xmm1, xmm3 ; max
mov r2, r4 ; len
shl r2, 2
xor r3, r3
%else
cglobal vector_clipf, 3,4,6
%ifdef ARCH_X86_32
movss xmm0, [esp + stack_offset + 0x10]
movss xmm1, [esp + stack_offset + 0x14]
mov r2, [esp + stack_offset + 0x18]
%endif
shl r2, 2
xor r3, r3
%endif
shufps xmm0, xmm0, 0x0
shufps xmm1, xmm1, 0x0
.loop:
movaps xmm2, [r1+r3 ]
movaps xmm3, [r1+r3+0x10]
movaps xmm4, [r1+r3+0x20]
movaps xmm5, [r1+r3+0x30]
maxps xmm2, xmm0
maxps xmm3, xmm0
maxps xmm4, xmm0
maxps xmm5, xmm0
minps xmm2, xmm1
minps xmm3, xmm1
minps xmm4, xmm1
minps xmm5, xmm1
movaps [r0+r3 ], xmm2
movaps [r0+r3+0x10], xmm3
movaps [r0+r3+0x20], xmm4
movaps [r0+r3+0x30], xmm5
add r3, 0x40
cmp r3, r2
jl .loop
RET
cendfunc vector_clipf
;-----------------------------------------------------------------------------
; void vector_fmul_cf(FFTComplex *dst, const FFTComplex *src0, const float *src1, uint32_t len)
;-----------------------------------------------------------------------------
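; Reference (C sketch, assuming FFTComplex is a {re, im} pair of floats;
; len is assumed to be a multiple of 4):
;   for (uint32_t i = 0; i < len; i++) {
;       dst[i].re = src0[i].re * src1[i];
;       dst[i].im = src0[i].im * src1[i];
;   }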
cglobal vector_fmul_cf, 4,5,4, dst, src0, src1, len
shl r3, 2
xor r4, r4
.loop:
movaps xmm1, [r1+2*r4] ; [r0 i0 r1 i1]
movaps xmm2, [r1+2*r4+0x10] ; [r2 i2 r3 i3]
movaps xmm0, [r2+r4]
movaps xmm3, xmm1
shufps xmm1, xmm2, q3131 ; {i0,i1,i2,i3}
shufps xmm3, xmm2, q2020 ; {r0,r1,r2,r3}
mulps xmm1, xmm0
mulps xmm3, xmm0
movaps xmm2, xmm3
unpcklps xmm2, xmm1
unpckhps xmm3, xmm1
movaps [r0+2*r4], xmm2 ; [r0 i0 r1 i1]
movaps [r0+2*r4+0x10], xmm3 ; [r2 i2 r3 i3]
add r4, 0x10
cmp r4, r3
jl .loop
RET
cendfunc vector_fmul_cf
;-----------------------------------------------------------------------------
; void sbr_sum64x5(float *z)
;-----------------------------------------------------------------------------
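; Reference (C sketch, derived from the offsets and loop bound used below):
;   for (int k = 0; k < 256; k++)
;       z[k] += z[k + 64] + z[k + 128] + z[k + 192] + z[k + 256];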
cglobal sbr_sum64x5, 1,2,4
xor r1, r1
.loop:
movaps xmm0, [r0+r1+ 0]
addps xmm0, [r0+r1+ 256]
addps xmm0, [r0+r1+ 512]
addps xmm0, [r0+r1+ 768]
addps xmm0, [r0+r1+1024]
movaps [r0+r1], xmm0
add r1, 0x10
cmp r1, 1024
jne .loop
RET
cendfunc sbr_sum64x5
;-----------------------------------------------------------------------------
; void sbrenc_sum128x5(float *z)
;-----------------------------------------------------------------------------
cglobal sbrenc_sum128x5, 1,2,4
xor r1, r1
.loop:
movaps xmm0, [r0+r1+ 0]
addps xmm0, [r0+r1+ 512]
addps xmm0, [r0+r1+1024]
addps xmm0, [r0+r1+1536]
addps xmm0, [r0+r1+2048]
movaps [r0+r1], xmm0
add r1, 0x10
cmp r1, 2048
jne .loop
RET
cendfunc sbrenc_sum128x5
;-----------------------------------------------------------------------------
; void sbr_qmf_pre_shuffle(float *z)
;-----------------------------------------------------------------------------
cglobal sbr_qmf_pre_shuffle, 1,4,6
mov r3, 0x60
xor r1, r1
.loop:
movups xmm0, [r0 + r1 + 0x84]
movups xmm2, [r0 + r1 + 0x94]
movups xmm1, [r0 + r3 + 0x14]
movups xmm3, [r0 + r3 + 0x04]
xorps xmm2, [pdw_80000000]
xorps xmm0, [pdw_80000000]
shufps xmm2, xmm2, 0x1b
shufps xmm0, xmm0, 0x1b
movaps xmm5, xmm2
unpcklps xmm2, xmm3
unpckhps xmm5, xmm3
movaps xmm4, xmm0
unpcklps xmm0, xmm1
unpckhps xmm4, xmm1
movaps [r0 + 2*r3 + 0x100], xmm2
movaps [r0 + 2*r3 + 0x110], xmm5
movaps [r0 + 2*r3 + 0x120], xmm0
movaps [r0 + 2*r3 + 0x130], xmm4
add r1, 0x20
sub r3, 0x20
jge .loop
movaps xmm2, [r0]
movlps [r0 + 0x100], xmm2
RET
cendfunc sbr_qmf_pre_shuffle
;-----------------------------------------------------------------------------
; void sbr_ldqmf_pre_shuffle(float *z)
;-----------------------------------------------------------------------------
cglobal sbr_ldqmf_pre_shuffle, 1,4,6
xor r1, r1
mov r2, 0x70
.loop:
movaps xmm0, [r0+r2+0x40] ; z[47-k]
movaps xmm1, [r0+r1+0xc0] ; z[k+48]
movaps xmm2, [r0+r2 ] ; z[31-k]
movaps xmm3, [r0+r1+ 0] ; z[k]
shufps xmm0, xmm0, 0x1b
shufps xmm2, xmm2, 0x1b
movaps xmm4, xmm1
movaps xmm5, xmm3
addps xmm1, xmm0 ; z[47-k] + z[k+48]
subps xmm0, xmm4 ; z[47-k] - z[k+48]
subps xmm3, xmm2 ; z[31-k] - z[k ]
addps xmm2, xmm5 ; z[31-k] + z[k ]
xorps xmm2, [pdw_80000000]
movaps [r0+r1+0x100], xmm1
movaps [r0+r1+0x140], xmm3
movaps [r0+r1+0x180], xmm0
movaps [r0+r1+0x1c0], xmm2
sub r2, 0x10
add r1, 0x10
cmp r1, 0x100
jne .loop
RET
cendfunc sbr_ldqmf_pre_shuffle
;-----------------------------------------------------------------------------
; void sbr_qmf_post_shuffle_sse(FFTComplex *W, const float *z)
;-----------------------------------------------------------------------------
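; Reference (C sketch; W names the destination as in the prototype above, and is
; written as 64 re/im pairs mirrored around the end of z):
;   for (int n = 0; n < 64; n++) {
;       W[n].re = z[n];
;       W[n].im = z[63 - n];
;   }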
cglobal sbr_qmf_post_shuffle, 2,4,4
lea r2, [r1 + (64-4)*4]
xor r3, r3
.loop:
movaps xmm0, [r1+r3]
movaps xmm1, [r2]
shufps xmm1, xmm1, 0x1b ; [0 1 2 3] -> [3 2 1 0]
movaps xmm2, xmm0
unpcklps xmm2, xmm1 ; [0.0 1.3 0.1 1.2]
unpckhps xmm0, xmm1 ; [0.2 1.1 0.3 1.0]
movaps [r0 + 2*r3 + 0], xmm2
movaps [r0 + 2*r3 + 0x10], xmm0
sub r2, 0x10
add r3, 0x10
cmp r3, 256
jl .loop
RET
cendfunc sbr_qmf_post_shuffle
;-----------------------------------------------------------------------------
; void sbr_qmf_post_shuffle_avx(FFTComplex *W, const float *z)
;-----------------------------------------------------------------------------
cglobal_internal sbr_qmf_post_shuffle_avx, 2,4,4
lea r2, [r1 + (64-8)*4]
xor r3, r3
.loop:
vmovups ymm0, [r1+r3]
vmovups ymm1, [r2]
vperm2f128 ymm1, ymm1, ymm1, 0x01 ; [0 1 2 3 4 5 6 7] -> [4 5 6 7 0 1 2 3]
vshufps ymm1, ymm1, ymm1, 0x1b ; [4 5 6 7 0 1 2 3] -> [7 6 5 4 3 2 1 0]
vunpckhps ymm2, ymm0, ymm1
vunpcklps ymm0, ymm0, ymm1
vextractf128 [r0 + 2*r3 ], ymm0, 0
vextractf128 [r0 + 2*r3 + 0x10], ymm2, 0
vextractf128 [r0 + 2*r3 + 0x20], ymm0, 1
vextractf128 [r0 + 2*r3 + 0x30], ymm2, 1
sub r2, 0x20
add r3, 0x20
cmp r3, 256
jl .loop
vzeroupper
RET
cendfunc_internal sbr_qmf_post_shuffle_avx
;-----------------------------------------------------------------------------
; void sbr_qmf_deint_bfly(float *v, const float *src0, const float *src1)
;-----------------------------------------------------------------------------
cglobal sbr_qmf_deint_bfly, 3,5,8
%ifdef WIN64
sub rsp, 2*16+16
movaps [rsp + 0x20], xmm7
movaps [rsp + 0x10], xmm6
%endif
mov r4, 64*4-32
lea r3, [r0 + 64*4]
.loop:
movaps xmm0, [r1+r4]
movaps xmm4, [r1+r4+0x10]
movaps xmm1, [r2]
movaps xmm5, [r2+0x10]
%ifdef ARCH_X86_64
pshufd xmm2, xmm0, 0x1b
pshufd xmm3, xmm1, 0x1b
pshufd xmm6, xmm4, 0x1b
pshufd xmm7, xmm5, 0x1b
%else
movaps xmm2, xmm0
movaps xmm3, xmm1
shufps xmm2, xmm2, 0x1b
shufps xmm3, xmm3, 0x1b
movaps xmm6, xmm4
movaps xmm7, xmm5
shufps xmm6, xmm6, 0x1b
shufps xmm7, xmm7, 0x1b
%endif
subps xmm5, xmm2
addps xmm7, xmm0
subps xmm1, xmm6
addps xmm3, xmm4
movaps [r3], xmm1
movaps [r3+0x10], xmm5
movaps [r0+r4], xmm7
movaps [r0+r4+0x10], xmm3
add r2, 0x20
add r3, 0x20
sub r4, 0x20
jge .loop
%ifdef WIN64
movaps xmm7, [rsp + 0x20]
movaps xmm6, [rsp + 0x10]
add rsp, 2*16+16
%endif
RET
cendfunc sbr_qmf_deint_bfly
;-----------------------------------------------------------------------------
; void sbr_qmf_deint_neg(float *v, const float *src)
;-----------------------------------------------------------------------------
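; Reference (C sketch of the fixed 64-sample deinterleave below):
;   for (int i = 0; i < 32; i++) {
;       v[i]      =  src[63 - 2*i];
;       v[63 - i] = -src[62 - 2*i];
;   }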
cglobal sbr_qmf_deint_neg, 2,4,4
mov r2, -128
mov r3, 0x70
add r1, 0x100
movaps xmm3, [pdw_80000000]
.loop:
movaps xmm0, [r1 + 2*r2]
movaps xmm1, [r1 + 2*r2 + 0x10]
movaps xmm2, xmm0
shufps xmm2, xmm1, q2020
shufps xmm1, xmm0, q1313
xorps xmm2, xmm3
movaps [r0 + r3], xmm1
movaps [r0 + r2 + 0x100], xmm2
sub r3, 0x10
add r2, 0x10
jl .loop
RET
cendfunc sbr_qmf_deint_neg
;-----------------------------------------------------------------------------
; void sbrenc_qmf_deint_bfly(float *v, const float *src0, const float *src1)
;-----------------------------------------------------------------------------
cglobal sbrenc_qmf_deint_bfly, 3,5,8
%ifdef WIN64
sub rsp, 2*16+16
movaps [rsp + 0x20], xmm7
movaps [rsp + 0x10], xmm6
%endif
mov r4, 64*4-32
lea r3, [r0 + 64*4]
.loop:
movaps xmm0, [r1+r4]
movaps xmm4, [r1+r4+0x10]
movaps xmm1, [r2]
movaps xmm5, [r2+0x10]
%ifdef ARCH_X86_64
pshufd xmm2, xmm0, 0x1b
pshufd xmm3, xmm1, 0x1b
pshufd xmm6, xmm4, 0x1b
pshufd xmm7, xmm5, 0x1b
%else
movaps xmm2, xmm0
movaps xmm3, xmm1
shufps xmm2, xmm2, 0x1b
shufps xmm3, xmm3, 0x1b
movaps xmm6, xmm4
movaps xmm7, xmm5
shufps xmm6, xmm6, 0x1b
shufps xmm7, xmm7, 0x1b
%endif
addps xmm5, xmm2
subps xmm0, xmm7
addps xmm1, xmm6
subps xmm4, xmm3
movaps [r3], xmm1
movaps [r3+0x10], xmm5
movaps [r0+r4], xmm0
movaps [r0+r4+0x10], xmm4
add r2, 0x20
add r3, 0x20
sub r4, 0x20
jge .loop
%ifdef WIN64
movaps xmm7, [rsp + 0x20]
movaps xmm6, [rsp + 0x10]
add rsp, 2*16+16
%endif
RET
cendfunc sbrenc_qmf_deint_bfly
%macro ACSTEP 3 ; x[i], x[i+1], x[i+2]
movaps xmm3, %1
movaps xmm4, %1
mulps xmm3, %2
mulps xmm4, %3
mulps %1, %1
addps xmm6, xmm3 ; r01r += x[i].re * x[i+1].re, x[i].im * x[i+1].im; r01i += x[i].re * x[i+1].im, x[i].im * x[i+1].re;
addps xmm5, xmm4 ; r02r += x[i].re * x[i+2].re, x[i].im * x[i+2].im; r02i += x[i].re * x[i+2].im, x[i].im * x[i+2].re;
addps xmm7, %1 ; r11r += x[i].re * x[i].re, x[i].im * x[i].im;
movlhps %2, %2
%endmacro
cglobal sbr_autocorrelate, 3,4,8
shl r2, 3
movlps xmm7, [r0+8 ]
movlhps xmm7, xmm7
movaps xmm6, xmm7
movaps xmm5, xmm7
add r0, 16
movlps xmm1, [r0 ]
shufps xmm1, xmm1, q0110
movlps xmm2, [r0+8]
shufps xmm2, xmm2, q0110
mulps xmm6, xmm1 ; r01r = x[1].re * x[2].re, x[1].im * x[2].im; r01i = x[1].re * x[2].im, x[1].im * x[2].re
mulps xmm5, xmm2 ; r02r = x[1].re * x[3].re, x[1].im * x[3].im; r02i = x[1].re * x[3].im, x[1].im * x[3].re
mulps xmm7, xmm7 ; r11r = x[1].re * x[1].re, x[1].im * x[1].im;
shufps xmm1, xmm1, q1010
mov r3, 2*8
align 16
.loop:
movlps xmm0, [r0+r3 ]
shufps xmm0, xmm0, q0110
ACSTEP xmm1, xmm2, xmm0
movlps xmm1, [r0+r3+8]
shufps xmm1, xmm1, q0110
ACSTEP xmm2, xmm0, xmm1
movlps xmm2, [r0+r3+16]
shufps xmm2, xmm2, q0110
ACSTEP xmm0, xmm1, xmm2
add r3, 24
cmp r3, r2
jl .loop
xorps xmm5, [ps_p1p1p1m1]
xorps xmm6, [ps_p1p1p1m1]
movaps xmm2, xmm5
movaps xmm0, xmm6
shufps xmm2, xmm2, q0301
shufps xmm0, xmm0, q0301
addps xmm5, xmm2
addps xmm6, xmm0
shufps xmm6, xmm5, q2020
xorps xmm6, [ps_p1m1p1m1]
movaps [r1 ], xmm6
movss xmm2, xmm7
shufps xmm7, xmm7, q0001
addss xmm7, xmm2
movss [r1+0x10], xmm7
RET
cendfunc sbr_autocorrelate
;-----------------------------------------------------------------------------
; void sbr_hf_g_filt(FFTComplex *Y, FFTComplex *X_high[40],
; const float *g_filt, size_t m_max, size_t ixh)
;-----------------------------------------------------------------------------
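; Reference (C sketch, assuming X_high points to rows of 40 FFTComplex values,
; matching the fixed 40*8-byte stride used below):
;   for (size_t m = 0; m < m_max; m++) {
;       Y[m].re = X_high[m][ixh].re * g_filt[m];
;       Y[m].im = X_high[m][ixh].im * g_filt[m];
;   }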
%define STEP 40*4*2
cglobal sbr_hf_g_filt, 5, 5, 5
lea r1, [r1 + 8*r4] ; offset by ixh elements into X_high
mov r4, r3
and r3, 0xFE
lea r2, [r2 + r3*4]
lea r0, [r0 + r3*8]
neg r3
jz .loop1
.loop2:
; two elements are computed at the same time
movlps xmm0, [r2 + 4*r3]
movlps xmm2, [r1 + 0*STEP]
movhps xmm2, [r1 + 1*STEP]
unpcklps xmm0, xmm0
mulps xmm0, xmm2
movups [r0 + 8*r3], xmm0
add r1, 2*STEP
add r3, 2
jnz .loop2
and r4, 1 ; number of single element loops
jz .end
.loop1:
; the final element is handled on its own when m_max is odd
movss xmm0, [r2]
movlps xmm2, [r1]
unpcklps xmm0, xmm0
mulps xmm0, xmm2
movlps [r0], xmm0
.end:
RET
cendfunc sbr_hf_g_filt
;-----------------------------------------------------------------------------
; void sbr_hf_gen(FFTComplex *X_high, FFTComplex *X_low,
; float alpha[4], unsigned int start, unsigned int end)
;-----------------------------------------------------------------------------
cglobal sbr_hf_gen, 5,5,8
%ifdef WIN64
sub rsp, 2*16+16
movaps [rsp + 0x20], xmm7
movaps [rsp + 0x10], xmm6
%endif
movaps xmm2, [r2] ; (a0[0] a0[1])*bw = (a[2] a[3])*bw = (a2 a3)
movhlps xmm1, xmm2 ; (a1[0] a1[1])*bw*bw = (a[0] a[1])*bw*bw = (a0 a1)
movaps xmm3, xmm1 ; (a2 a3)
movaps xmm4, xmm2 ; (a0 a1)
shufps xmm3, xmm3, 0x55 ; (-a3 a3 -a3 a3)
shufps xmm4, xmm4, 0x55 ; (-a1 a1 -a1 a1)
shufps xmm1, xmm1, 0x00 ; (a2 a2 a2 a2)
shufps xmm2, xmm2, 0x00 ; (a0 a0 a0 a0)
xorps xmm3, [ps_p1m1p1m1]
xorps xmm4, [ps_p1m1p1m1]
shl r3, 3
shl r4, 3
lea r1, [r1 - 2*2*4]
movaps xmm0, [r1 + r3]
.loop2:
movups xmm7, [r1 + r3 + 8] ; BbCc
movaps xmm5, xmm7
movaps xmm6, xmm0
shufps xmm0, xmm0, 0xb1 ; aAbB
shufps xmm7, xmm7, 0xb1 ; bBcC
mulps xmm0, xmm4
mulps xmm6, xmm2
mulps xmm7, xmm3
mulps xmm5, xmm1
addps xmm7, xmm0
addps xmm7, xmm5
addps xmm7, xmm6
movaps xmm0, [r1 + r3 +16] ; CcDd