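;
; Chroma upsampling (32-bit AVX2 implementation)
;
; This file provides the h2v1/h2v2 "fancy" (triangle-filter) upsamplers and
; the plain sample-replicating h2v1/h2v2 upsamplers, written against the
; libjpeg-turbo SIMD conventions defined in jsimdext.inc.
;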
%include "jsimdext.inc"

; --------------------------------------------------------------------------
    SECTION     SEG_CONST

    alignz      32
    GLOBAL_DATA(jconst_fancy_upsample_avx2)

EXTN(jconst_fancy_upsample_avx2):

PW_ONE   times 16 dw 1
PW_TWO   times 16 dw 2
PW_THREE times 16 dw 3
PW_SEVEN times 16 dw 7
PW_EIGHT times 16 dw 8

    alignz      32

; --------------------------------------------------------------------------
    SECTION     SEG_TEXT
    BITS        32
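; --------------------------------------------------------------------------
;
; Fancy processing for the common case of 2:1 horizontal and 1:1 vertical.
;
; The upsampling algorithm is linear interpolation between pixel centers
; (a "triangle filter"): each output sample is a 3:1 weighted average of the
; two nearest input samples, and the outermost column simply reuses its own
; value as the missing neighbor.
;
; C prototype, reconstructed from the stack-frame %defines below:
;
; GLOBAL(void)
; jsimd_h2v1_fancy_upsample_avx2(int max_v_samp_factor,
;                                JDIMENSION downsampled_width,
;                                JSAMPARRAY input_data,
;                                JSAMPARRAY *output_data_ptr);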
%define max_v_samp(b)       (b) + 8       ; int max_v_samp_factor
%define downsamp_width(b)   (b) + 12      ; JDIMENSION downsampled_width
%define input_data(b)       (b) + 16      ; JSAMPARRAY input_data
%define output_data_ptr(b)  (b) + 20      ; JSAMPARRAY *output_data_ptr

    align       32
    GLOBAL_FUNCTION(jsimd_h2v1_fancy_upsample_avx2)

EXTN(jsimd_h2v1_fancy_upsample_avx2):
    push        ebp
    mov         ebp, esp
    pushpic     ebx
    push        esi
    push        edi

    get_GOT     ebx                     ; get GOT address

    mov         eax, JDIMENSION [downsamp_width(ebp)]  ; colctr
    test        eax, eax
    jz          near .return

    mov         ecx, INT [max_v_samp(ebp)]  ; rowctr
    test        ecx, ecx
    jz          near .return

    mov         esi, JSAMPARRAY [input_data(ebp)]    ; input_data
    mov         edi, POINTER [output_data_ptr(ebp)]
    mov         edi, JSAMPARRAY [edi]                ; output_data
    alignx      16, 7

.rowloop:
    push        eax                     ; colctr
    push        edi
    push        esi

    mov         esi, JSAMPROW [esi]     ; inptr
    mov         edi, JSAMPROW [edi]     ; outptr

    test        eax, SIZEOF_YMMWORD-1
    jz          short .skip
    mov         dl, JSAMPLE [esi+(eax-1)*SIZEOF_JSAMPLE]
    mov         JSAMPLE [esi+eax*SIZEOF_JSAMPLE], dl  ; insert a dummy sample

.skip:
    vpxor       ymm0, ymm0, ymm0        ; ymm0=(all 0's)
    vpcmpeqb    xmm7, xmm7, xmm7
    vpsrldq     xmm7, xmm7, (SIZEOF_XMMWORD-1)  ; keep only the lowest byte
    vpand       ymm7, ymm7, YMMWORD [esi+0*SIZEOF_YMMWORD]  ; left neighbor of the row's first sample

    add         eax, byte SIZEOF_YMMWORD-1
    and         eax, byte -SIZEOF_YMMWORD
    cmp         eax, byte SIZEOF_YMMWORD
    ja          short .columnloop
    alignx      16, 7

.columnloop_last:
    vpcmpeqb    xmm6, xmm6, xmm6
    vpslldq     xmm6, xmm6, (SIZEOF_XMMWORD-1)
    vperm2i128  ymm6, ymm6, ymm6, 1     ; keep only the highest byte
    vpand       ymm6, ymm6, YMMWORD [esi+0*SIZEOF_YMMWORD]  ; last sample serves as its own right neighbor
    jmp         short .upsample
    alignx      16, 7

.columnloop:
    vmovdqu     ymm6, YMMWORD [esi+1*SIZEOF_YMMWORD]
    vperm2i128  ymm6, ymm0, ymm6, 0x20
    vpslldq     ymm6, ymm6, 15          ; first sample of the next block -> right neighbor
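    ; ymm1 holds 32 source samples; ymm2/ymm3 are built below as the same
    ; samples shifted by one position so that every sample sees its left and
    ; right neighbor (ymm7/ymm6 carry the bytes that cross the block edge).
    ; Each output pair is then (3*s + left + 1) >> 2 for the even column and
    ; (3*s + right + 2) >> 2 for the odd column.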
.upsample:
    vmovdqu     ymm1, YMMWORD [esi+0*SIZEOF_YMMWORD]  ; 32 source samples

    vperm2i128  ymm2, ymm0, ymm1, 0x20
    vpalignr    ymm2, ymm1, ymm2, 15    ; samples shifted right: left neighbors
    vperm2i128  ymm4, ymm0, ymm1, 0x03
    vpalignr    ymm3, ymm4, ymm1, 1     ; samples shifted left: right neighbors

    vpor        ymm2, ymm2, ymm7        ; patch in the left-boundary byte
    vpor        ymm3, ymm3, ymm6        ; patch in the right-boundary byte

    vpsrldq     ymm7, ymm4, (SIZEOF_XMMWORD-1)  ; last sample -> next block's left neighbor

    ; zero-extend samples and neighbors from bytes to words
    vpunpckhbw  ymm4, ymm1, ymm0
    vpunpcklbw  ymm5, ymm1, ymm0
    vperm2i128  ymm1, ymm5, ymm4, 0x20
    vperm2i128  ymm4, ymm5, ymm4, 0x31

    vpunpckhbw  ymm5, ymm2, ymm0
    vpunpcklbw  ymm6, ymm2, ymm0
    vperm2i128  ymm2, ymm6, ymm5, 0x20
    vperm2i128  ymm5, ymm6, ymm5, 0x31

    vpunpckhbw  ymm6, ymm3, ymm0
    vpunpcklbw  ymm0, ymm3, ymm0
    vperm2i128  ymm3, ymm0, ymm6, 0x20
    vperm2i128  ymm6, ymm0, ymm6, 0x31

    vpxor       ymm0, ymm0, ymm0        ; ymm0=(all 0's)

    vpmullw     ymm1, ymm1, [GOTOFF(ebx,PW_THREE)]
    vpmullw     ymm4, ymm4, [GOTOFF(ebx,PW_THREE)]
    vpaddw      ymm2, ymm2, [GOTOFF(ebx,PW_ONE)]
    vpaddw      ymm5, ymm5, [GOTOFF(ebx,PW_ONE)]
    vpaddw      ymm3, ymm3, [GOTOFF(ebx,PW_TWO)]
    vpaddw      ymm6, ymm6, [GOTOFF(ebx,PW_TWO)]

    vpaddw      ymm2, ymm2, ymm1
    vpaddw      ymm5, ymm5, ymm4
    vpsrlw      ymm2, ymm2, 2           ; even outputs
    vpsrlw      ymm5, ymm5, 2
    vpaddw      ymm3, ymm3, ymm1
    vpaddw      ymm6, ymm6, ymm4
    vpsrlw      ymm3, ymm3, 2           ; odd outputs
    vpsrlw      ymm6, ymm6, 2

    vpsllw      ymm3, ymm3, BYTE_BIT    ; re-interleave even/odd results as bytes
    vpsllw      ymm6, ymm6, BYTE_BIT
    vpor        ymm2, ymm2, ymm3
    vpor        ymm5, ymm5, ymm6

    vmovdqu     YMMWORD [edi+0*SIZEOF_YMMWORD], ymm2
    vmovdqu     YMMWORD [edi+1*SIZEOF_YMMWORD], ymm5

    sub         eax, byte SIZEOF_YMMWORD
    add         esi, byte 1*SIZEOF_YMMWORD  ; inptr
    add         edi, byte 2*SIZEOF_YMMWORD  ; outptr
    cmp         eax, byte SIZEOF_YMMWORD
    ja          near .columnloop
    test        eax, eax
    jnz         near .columnloop_last

    pop         esi
    pop         edi
    pop         eax

    add         esi, byte SIZEOF_JSAMPROW   ; input_data
    add         edi, byte SIZEOF_JSAMPROW   ; output_data
    dec         ecx                         ; rowctr
    jg          near .rowloop

.return:
    vzeroupper
    pop         edi
    pop         esi
    poppic      ebx
    pop         ebp
    ret
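; --------------------------------------------------------------------------
;
; Fancy processing for the common case of 2:1 horizontal and 2:1 vertical.
;
; The routine works in two passes per 32-sample block: a vertical pass forms
; the 16-bit sums 3*nearest_row + other_row, and a horizontal pass applies
; the same 3:1 triangle filter to those sums, so each output sample ends up
; weighting the four surrounding input samples 9:3:3:1.
;
; C prototype, reconstructed from the stack-frame %defines below:
;
; GLOBAL(void)
; jsimd_h2v2_fancy_upsample_avx2(int max_v_samp_factor,
;                                JDIMENSION downsampled_width,
;                                JSAMPARRAY input_data,
;                                JSAMPARRAY *output_data_ptr);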
%define max_v_samp(b)       (b) + 8       ; int max_v_samp_factor
%define downsamp_width(b)   (b) + 12      ; JDIMENSION downsampled_width
%define input_data(b)       (b) + 16      ; JSAMPARRAY input_data
%define output_data_ptr(b)  (b) + 20      ; JSAMPARRAY *output_data_ptr

%define original_ebp  ebp + 0
%define wk(i)         ebp - (WK_NUM - (i)) * SIZEOF_YMMWORD  ; ymmword wk[WK_NUM]
%define WK_NUM        4
%define gotptr        wk(0) - SIZEOF_POINTER  ; void *gotptr

    align       32
    GLOBAL_FUNCTION(jsimd_h2v2_fancy_upsample_avx2)

EXTN(jsimd_h2v2_fancy_upsample_avx2):
    push        ebp
    mov         eax, esp                ; eax = original ebp
    sub         esp, byte 4
    and         esp, byte (-SIZEOF_YMMWORD)  ; align to 256 bits
    mov         [esp], eax
    mov         ebp, esp                ; ebp = aligned ebp
    lea         esp, [wk(0)]
    pushpic     eax                     ; make a room for GOT address
    push        ebx
    push        esi
    push        edi

    get_GOT     ebx                     ; get GOT address
    movpic      POINTER [gotptr], ebx   ; save GOT address

    mov         edx, eax                ; edx = original ebp
    mov         eax, JDIMENSION [downsamp_width(edx)]  ; colctr
    test        eax, eax
    jz          near .return

    mov         ecx, INT [max_v_samp(edx)]  ; rowctr
    test        ecx, ecx
    jz          near .return

    mov         esi, JSAMPARRAY [input_data(edx)]    ; input_data
    mov         edi, POINTER [output_data_ptr(edx)]
    mov         edi, JSAMPARRAY [edi]                ; output_data
    alignx      16, 7

.rowloop:
    push        eax                     ; colctr
    push        ecx
    push        edi
    push        esi

    mov         ecx, JSAMPROW [esi-1*SIZEOF_JSAMPROW]  ; inptr1(above)
    mov         ebx, JSAMPROW [esi+0*SIZEOF_JSAMPROW]  ; inptr0
    mov         esi, JSAMPROW [esi+1*SIZEOF_JSAMPROW]  ; inptr1(below)
    mov         edx, JSAMPROW [edi+0*SIZEOF_JSAMPROW]  ; outptr0
    mov         edi, JSAMPROW [edi+1*SIZEOF_JSAMPROW]  ; outptr1

    test        eax, SIZEOF_YMMWORD-1
    jz          short .skip
    push        edx
    mov         dl, JSAMPLE [ecx+(eax-1)*SIZEOF_JSAMPLE]
    mov         JSAMPLE [ecx+eax*SIZEOF_JSAMPLE], dl
    mov         dl, JSAMPLE [ebx+(eax-1)*SIZEOF_JSAMPLE]
    mov         JSAMPLE [ebx+eax*SIZEOF_JSAMPLE], dl
    mov         dl, JSAMPLE [esi+(eax-1)*SIZEOF_JSAMPLE]
    mov         JSAMPLE [esi+eax*SIZEOF_JSAMPLE], dl   ; insert a dummy sample
    pop         edx

.skip:
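    ; Vertical pass for the first block of this row: form the 16-bit sums
    ; 3*nearest_row + other_row for both output rows and park them in the
    ; output buffers, which double as scratch space until the horizontal
    ; pass below rewrites them with the final samples.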
    vmovdqu     ymm0, YMMWORD [ebx+0*SIZEOF_YMMWORD]  ; nearest input row
    vmovdqu     ymm1, YMMWORD [ecx+0*SIZEOF_YMMWORD]  ; row above
    vmovdqu     ymm2, YMMWORD [esi+0*SIZEOF_YMMWORD]  ; row below

    pushpic     ebx
    movpic      ebx, POINTER [gotptr]   ; load GOT address

    vpxor       ymm3, ymm3, ymm3        ; ymm3=(all 0's)

    ; zero-extend the three rows from bytes to words
    vpunpckhbw  ymm4, ymm0, ymm3
    vpunpcklbw  ymm5, ymm0, ymm3
    vperm2i128  ymm0, ymm5, ymm4, 0x20
    vperm2i128  ymm4, ymm5, ymm4, 0x31

    vpunpckhbw  ymm5, ymm1, ymm3
    vpunpcklbw  ymm6, ymm1, ymm3
    vperm2i128  ymm1, ymm6, ymm5, 0x20
    vperm2i128  ymm5, ymm6, ymm5, 0x31

    vpunpckhbw  ymm6, ymm2, ymm3
    vpunpcklbw  ymm3, ymm2, ymm3
    vperm2i128  ymm2, ymm3, ymm6, 0x20
    vperm2i128  ymm6, ymm3, ymm6, 0x31

    vpmullw     ymm0, ymm0, [GOTOFF(ebx,PW_THREE)]
    vpmullw     ymm4, ymm4, [GOTOFF(ebx,PW_THREE)]

    vpcmpeqb    xmm7, xmm7, xmm7
    vpsrldq     xmm7, xmm7, (SIZEOF_XMMWORD-2)  ; keep only the lowest word

    vpaddw      ymm1, ymm1, ymm0        ; 3*nearest + above
    vpaddw      ymm5, ymm5, ymm4
    vpaddw      ymm2, ymm2, ymm0        ; 3*nearest + below
    vpaddw      ymm6, ymm6, ymm4

    ; park the 16-bit vertical sums in the output rows for the second pass
    vmovdqu     YMMWORD [edx+0*SIZEOF_YMMWORD], ymm1
    vmovdqu     YMMWORD [edx+1*SIZEOF_YMMWORD], ymm5
    vmovdqu     YMMWORD [edi+0*SIZEOF_YMMWORD], ymm2
    vmovdqu     YMMWORD [edi+1*SIZEOF_YMMWORD], ymm6

    vpand       ymm1, ymm1, ymm7        ; the row's first column is its own left neighbor
    vpand       ymm2, ymm2, ymm7
    vmovdqa     YMMWORD [wk(0)], ymm1
    vmovdqa     YMMWORD [wk(1)], ymm2

    poppic      ebx

    add         eax, byte SIZEOF_YMMWORD-1
    and         eax, byte -SIZEOF_YMMWORD
    cmp         eax, byte SIZEOF_YMMWORD
    ja          short .columnloop
    alignx      16, 7

.columnloop_last:
    ; last block: the final sums serve as their own right neighbors
    pushpic     ebx
    movpic      ebx, POINTER [gotptr]   ; load GOT address

    vpcmpeqb    xmm1, xmm1, xmm1
    vpslldq     xmm1, xmm1, (SIZEOF_XMMWORD-2)
    vperm2i128  ymm1, ymm1, ymm1, 1     ; keep only the highest word
    vpand       ymm2, ymm1, YMMWORD [edi+1*SIZEOF_YMMWORD]
    vpand       ymm1, ymm1, YMMWORD [edx+1*SIZEOF_YMMWORD]
    vmovdqa     YMMWORD [wk(2)], ymm1
    vmovdqa     YMMWORD [wk(3)], ymm2

    jmp         near .upsample
    alignx      16, 7

.columnloop:
    ; compute the vertical sums for the next block so that its first column
    ; can serve as this block's right neighbor
    vmovdqu     ymm0, YMMWORD [ebx+1*SIZEOF_YMMWORD]
    vmovdqu     ymm1, YMMWORD [ecx+1*SIZEOF_YMMWORD]
    vmovdqu     ymm2, YMMWORD [esi+1*SIZEOF_YMMWORD]

    pushpic     ebx
    movpic      ebx, POINTER [gotptr]   ; load GOT address

    vpxor       ymm3, ymm3, ymm3        ; ymm3=(all 0's)

    vpunpckhbw  ymm4, ymm0, ymm3
    vpunpcklbw  ymm5, ymm0, ymm3
    vperm2i128  ymm0, ymm5, ymm4, 0x20
    vperm2i128  ymm4, ymm5, ymm4, 0x31

    vpunpckhbw  ymm5, ymm1, ymm3
    vpunpcklbw  ymm6, ymm1, ymm3
    vperm2i128  ymm1, ymm6, ymm5, 0x20
    vperm2i128  ymm5, ymm6, ymm5, 0x31

    vpunpckhbw  ymm6, ymm2, ymm3
    vpunpcklbw  ymm7, ymm2, ymm3
    vperm2i128  ymm2, ymm7, ymm6, 0x20
    vperm2i128  ymm6, ymm7, ymm6, 0x31

    vpmullw     ymm0, ymm0, [GOTOFF(ebx,PW_THREE)]
    vpmullw     ymm4, ymm4, [GOTOFF(ebx,PW_THREE)]

    vpaddw      ymm1, ymm1, ymm0
    vpaddw      ymm5, ymm5, ymm4
    vpaddw      ymm2, ymm2, ymm0
    vpaddw      ymm6, ymm6, ymm4

    vmovdqu     YMMWORD [edx+2*SIZEOF_YMMWORD], ymm1
    vmovdqu     YMMWORD [edx+3*SIZEOF_YMMWORD], ymm5
    vmovdqu     YMMWORD [edi+2*SIZEOF_YMMWORD], ymm2
    vmovdqu     YMMWORD [edi+3*SIZEOF_YMMWORD], ymm6

    vperm2i128  ymm1, ymm3, ymm1, 0x20
    vpslldq     ymm1, ymm1, 14          ; next block's first sum -> highest word
    vperm2i128  ymm2, ymm3, ymm2, 0x20
    vpslldq     ymm2, ymm2, 14
    vmovdqa     YMMWORD [wk(2)], ymm1   ; right-boundary words for this block
    vmovdqa     YMMWORD [wk(3)], ymm2
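    ; Horizontal pass: from the 16-bit vertical sums s, each output pair is
    ; (3*s + left + 8) >> 4 for the even column and (3*s + right + 7) >> 4
    ; for the odd column.  wk(0)/wk(1) supply the left neighbors that cross
    ; the block boundary and wk(2)/wk(3) the right neighbors.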
.upsample:
    ; process the upper output row (outptr0)
    vmovdqu     ymm7, YMMWORD [edx+0*SIZEOF_YMMWORD]
    vmovdqu     ymm3, YMMWORD [edx+1*SIZEOF_YMMWORD]

    vpxor       ymm1, ymm1, ymm1        ; ymm1=(all 0's)

    vperm2i128  ymm0, ymm1, ymm7, 0x03
    vpalignr    ymm0, ymm0, ymm7, 2     ; sums shifted left: right neighbors (low half)
    vperm2i128  ymm4, ymm1, ymm3, 0x20
    vpslldq     ymm4, ymm4, 14
    vperm2i128  ymm5, ymm1, ymm7, 0x03
    vpsrldq     ymm5, ymm5, 14
    vperm2i128  ymm6, ymm1, ymm3, 0x20
    vpalignr    ymm6, ymm3, ymm6, 14    ; sums shifted right: left neighbors (high half)

    vpor        ymm0, ymm0, ymm4
    vpor        ymm5, ymm5, ymm6

    vperm2i128  ymm2, ymm1, ymm3, 0x03
    vpalignr    ymm2, ymm2, ymm3, 2     ; right neighbors (high half)
    vperm2i128  ymm4, ymm1, ymm3, 0x03
    vpsrldq     ymm4, ymm4, 14
    vperm2i128  ymm1, ymm1, ymm7, 0x20
    vpalignr    ymm1, ymm7, ymm1, 14    ; left neighbors (low half)

    vpor        ymm1, ymm1, YMMWORD [wk(0)]  ; patch in the left-boundary word
    vpor        ymm2, ymm2, YMMWORD [wk(2)]  ; patch in the right-boundary word

    vmovdqa     YMMWORD [wk(0)], ymm4   ; save left boundary for the next block

    vpmullw     ymm7, ymm7, [GOTOFF(ebx,PW_THREE)]
    vpmullw     ymm3, ymm3, [GOTOFF(ebx,PW_THREE)]
    vpaddw      ymm1, ymm1, [GOTOFF(ebx,PW_EIGHT)]
    vpaddw      ymm5, ymm5, [GOTOFF(ebx,PW_EIGHT)]
    vpaddw      ymm0, ymm0, [GOTOFF(ebx,PW_SEVEN)]
    vpaddw      ymm2, ymm2, [GOTOFF(ebx,PW_SEVEN)]

    vpaddw      ymm1, ymm1, ymm7
    vpaddw      ymm5, ymm5, ymm3
    vpsrlw      ymm1, ymm1, 4           ; even outputs
    vpsrlw      ymm5, ymm5, 4
    vpaddw      ymm0, ymm0, ymm7
    vpaddw      ymm2, ymm2, ymm3
    vpsrlw      ymm0, ymm0, 4           ; odd outputs
    vpsrlw      ymm2, ymm2, 4

    vpsllw      ymm0, ymm0, BYTE_BIT    ; re-interleave even/odd results as bytes
    vpsllw      ymm2, ymm2, BYTE_BIT
    vpor        ymm1, ymm1, ymm0
    vpor        ymm5, ymm5, ymm2

    vmovdqu     YMMWORD [edx+0*SIZEOF_YMMWORD], ymm1
    vmovdqu     YMMWORD [edx+1*SIZEOF_YMMWORD], ymm5
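    ; Repeat the horizontal pass for the lower output row (outptr1), using
    ; wk(1)/wk(3) as the boundary words.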
    vmovdqu     ymm6, YMMWORD [edi+0*SIZEOF_YMMWORD]
    vmovdqu     ymm4, YMMWORD [edi+1*SIZEOF_YMMWORD]

    vpxor       ymm1, ymm1, ymm1        ; ymm1=(all 0's)

    vperm2i128  ymm7, ymm1, ymm6, 0x03
    vpalignr    ymm7, ymm7, ymm6, 2
    vperm2i128  ymm3, ymm1, ymm4, 0x20
    vpslldq     ymm3, ymm3, 14
    vperm2i128  ymm0, ymm1, ymm6, 0x03
    vpsrldq     ymm0, ymm0, 14
    vperm2i128  ymm2, ymm1, ymm4, 0x20
    vpalignr    ymm2, ymm4, ymm2, 14

    vpor        ymm7, ymm7, ymm3
    vpor        ymm0, ymm0, ymm2

    vperm2i128  ymm5, ymm1, ymm4, 0x03
    vpalignr    ymm5, ymm5, ymm4, 2
    vperm2i128  ymm3, ymm1, ymm4, 0x03
    vpsrldq     ymm3, ymm3, 14
    vperm2i128  ymm1, ymm1, ymm6, 0x20
    vpalignr    ymm1, ymm6, ymm1, 14

    vpor        ymm1, ymm1, YMMWORD [wk(1)]  ; patch in the left-boundary word
    vpor        ymm5, ymm5, YMMWORD [wk(3)]  ; patch in the right-boundary word

    vmovdqa     YMMWORD [wk(1)], ymm3   ; save left boundary for the next block

    vpmullw     ymm6, ymm6, [GOTOFF(ebx,PW_THREE)]
    vpmullw     ymm4, ymm4, [GOTOFF(ebx,PW_THREE)]
    vpaddw      ymm1, ymm1, [GOTOFF(ebx,PW_EIGHT)]
    vpaddw      ymm0, ymm0, [GOTOFF(ebx,PW_EIGHT)]
    vpaddw      ymm7, ymm7, [GOTOFF(ebx,PW_SEVEN)]
    vpaddw      ymm5, ymm5, [GOTOFF(ebx,PW_SEVEN)]

    vpaddw      ymm1, ymm1, ymm6
    vpaddw      ymm0, ymm0, ymm4
    vpsrlw      ymm1, ymm1, 4           ; even outputs
    vpsrlw      ymm0, ymm0, 4
    vpaddw      ymm7, ymm7, ymm6
    vpaddw      ymm5, ymm5, ymm4
    vpsrlw      ymm7, ymm7, 4           ; odd outputs
    vpsrlw      ymm5, ymm5, 4

    vpsllw      ymm7, ymm7, BYTE_BIT    ; re-interleave even/odd results as bytes
    vpsllw      ymm5, ymm5, BYTE_BIT
    vpor        ymm1, ymm1, ymm7
    vpor        ymm0, ymm0, ymm5

    vmovdqu     YMMWORD [edi+0*SIZEOF_YMMWORD], ymm1
    vmovdqu     YMMWORD [edi+1*SIZEOF_YMMWORD], ymm0

    poppic      ebx

    sub         eax, byte SIZEOF_YMMWORD
    add         ecx, byte 1*SIZEOF_YMMWORD  ; inptr1(above)
    add         ebx, byte 1*SIZEOF_YMMWORD  ; inptr0
    add         esi, byte 1*SIZEOF_YMMWORD  ; inptr1(below)
    add         edx, byte 2*SIZEOF_YMMWORD  ; outptr0
    add         edi, byte 2*SIZEOF_YMMWORD  ; outptr1
    cmp         eax, byte SIZEOF_YMMWORD
    ja          near .columnloop
    test        eax, eax
    jnz         near .columnloop_last

    pop         esi
    pop         edi
    pop         ecx
    pop         eax

    add         esi, byte 1*SIZEOF_JSAMPROW  ; input_data
    add         edi, byte 2*SIZEOF_JSAMPROW  ; output_data
    sub         ecx, byte 2                  ; rowctr
    jg          near .rowloop

.return:
    vzeroupper
    pop         edi
    pop         esi
    pop         ebx
    mov         esp, ebp                ; esp <- aligned ebp
    pop         esp                     ; esp <- original ebp
    pop         ebp
    ret
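; --------------------------------------------------------------------------
;
; Upsample pixel values of a single component: plain replication for the
; common case of 2:1 horizontal and 1:1 vertical, i.e. each input sample is
; simply duplicated horizontally.
;
; C prototype, reconstructed from the stack-frame %defines below:
;
; GLOBAL(void)
; jsimd_h2v1_upsample_avx2(int max_v_samp_factor, JDIMENSION output_width,
;                          JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr);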
%define max_v_samp(b)       (b) + 8       ; int max_v_samp_factor
%define output_width(b)     (b) + 12      ; JDIMENSION output_width
%define input_data(b)       (b) + 16      ; JSAMPARRAY input_data
%define output_data_ptr(b)  (b) + 20      ; JSAMPARRAY *output_data_ptr

    align       32
    GLOBAL_FUNCTION(jsimd_h2v1_upsample_avx2)

EXTN(jsimd_h2v1_upsample_avx2):
    push        ebp
    mov         ebp, esp
    push        esi
    push        edi

    mov         edx, JDIMENSION [output_width(ebp)]
    add         edx, byte (SIZEOF_YMMWORD-1)
    and         edx, -SIZEOF_YMMWORD    ; round output_width up to a whole YMMWORD
    jz          short .return

    mov         ecx, INT [max_v_samp(ebp)]  ; rowctr
    test        ecx, ecx
    jz          short .return

    mov         esi, JSAMPARRAY [input_data(ebp)]    ; input_data
    mov         edi, POINTER [output_data_ptr(ebp)]
    mov         edi, JSAMPARRAY [edi]                ; output_data
    alignx      16, 7

.rowloop:
    push        edi
    push        esi

    mov         esi, JSAMPROW [esi]     ; inptr
    mov         edi, JSAMPROW [edi]     ; outptr
    mov         eax, edx                ; colctr
    alignx      16, 7

.columnloop:
    cmp         eax, byte SIZEOF_YMMWORD
    ja          near .above_16

    ; 32 or fewer output samples remain: double 16 input samples with XMM ops
    vmovdqu     xmm0, XMMWORD [esi+0*SIZEOF_YMMWORD]
    vpunpckhbw  xmm1, xmm0, xmm0
    vpunpcklbw  xmm0, xmm0, xmm0
    vmovdqu     XMMWORD [edi+0*SIZEOF_XMMWORD], xmm0
    vmovdqu     XMMWORD [edi+1*SIZEOF_XMMWORD], xmm1
    jmp         short .nextrow

.above_16:
    vmovdqu     ymm0, YMMWORD [esi+0*SIZEOF_YMMWORD]
    vpermq      ymm0, ymm0, 0xd8        ; reorder quadwords so the in-lane unpacks keep samples in order
    vpunpckhbw  ymm1, ymm0, ymm0
    vpunpcklbw  ymm0, ymm0, ymm0        ; duplicate each sample horizontally
    vmovdqu     YMMWORD [edi+0*SIZEOF_YMMWORD], ymm0
    vmovdqu     YMMWORD [edi+1*SIZEOF_YMMWORD], ymm1

    sub         eax, byte 2*SIZEOF_YMMWORD
    jz          short .nextrow
    add         esi, byte SIZEOF_YMMWORD
    add         edi, byte 2*SIZEOF_YMMWORD
    jmp         short .columnloop
    alignx      16, 7

.nextrow:
    pop         esi
    pop         edi

    add         esi, byte SIZEOF_JSAMPROW  ; input_data
    add         edi, byte SIZEOF_JSAMPROW  ; output_data
    dec         ecx                        ; rowctr
    jg          short .rowloop

.return:
    vzeroupper
    pop         edi
    pop         esi
    pop         ebp
    ret
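; --------------------------------------------------------------------------
;
; Plain replication upsampler for 2:1 horizontal and 2:1 vertical: each input
; sample is duplicated horizontally, and the doubled row is written to both
; output rows.
;
; C prototype, reconstructed from the stack-frame %defines below:
;
; GLOBAL(void)
; jsimd_h2v2_upsample_avx2(int max_v_samp_factor, JDIMENSION output_width,
;                          JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr);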
%define max_v_samp(b)       (b) + 8       ; int max_v_samp_factor
%define output_width(b)     (b) + 12      ; JDIMENSION output_width
%define input_data(b)       (b) + 16      ; JSAMPARRAY input_data
%define output_data_ptr(b)  (b) + 20      ; JSAMPARRAY *output_data_ptr

    align       32
    GLOBAL_FUNCTION(jsimd_h2v2_upsample_avx2)

EXTN(jsimd_h2v2_upsample_avx2):
    push        ebp
    mov         ebp, esp
    push        ebx
    push        esi
    push        edi

    mov         edx, JDIMENSION [output_width(ebp)]
    add         edx, byte (SIZEOF_YMMWORD-1)
    and         edx, -SIZEOF_YMMWORD    ; round output_width up to a whole YMMWORD
    jz          near .return

    mov         ecx, INT [max_v_samp(ebp)]  ; rowctr
    test        ecx, ecx
    jz          near .return

    mov         esi, JSAMPARRAY [input_data(ebp)]    ; input_data
    mov         edi, POINTER [output_data_ptr(ebp)]
    mov         edi, JSAMPARRAY [edi]                ; output_data
    alignx      16, 7

.rowloop:
    push        edi
    push        esi

    mov         esi, JSAMPROW [esi]                    ; inptr
    mov         ebx, JSAMPROW [edi+0*SIZEOF_JSAMPROW]  ; outptr0
    mov         edi, JSAMPROW [edi+1*SIZEOF_JSAMPROW]  ; outptr1
    mov         eax, edx                               ; colctr
    alignx      16, 7

.columnloop:
    cmp         eax, byte SIZEOF_YMMWORD
    ja          short .above_16

    ; 32 or fewer output samples remain: double 16 input samples with XMM ops
    vmovdqu     xmm0, XMMWORD [esi+0*SIZEOF_XMMWORD]
    vpunpckhbw  xmm1, xmm0, xmm0
    vpunpcklbw  xmm0, xmm0, xmm0
    vmovdqu     XMMWORD [ebx+0*SIZEOF_XMMWORD], xmm0
    vmovdqu     XMMWORD [ebx+1*SIZEOF_XMMWORD], xmm1
    vmovdqu     XMMWORD [edi+0*SIZEOF_XMMWORD], xmm0
    vmovdqu     XMMWORD [edi+1*SIZEOF_XMMWORD], xmm1
    jmp         near .nextrow

.above_16:
    vmovdqu     ymm0, YMMWORD [esi+0*SIZEOF_YMMWORD]
    vpermq      ymm0, ymm0, 0xd8        ; reorder quadwords so the in-lane unpacks keep samples in order
    vpunpckhbw  ymm1, ymm0, ymm0
    vpunpcklbw  ymm0, ymm0, ymm0        ; duplicate each sample horizontally

    vmovdqu     YMMWORD [ebx+0*SIZEOF_YMMWORD], ymm0   ; write the doubled row to both output rows
    vmovdqu     YMMWORD [ebx+1*SIZEOF_YMMWORD], ymm1
    vmovdqu     YMMWORD [edi+0*SIZEOF_YMMWORD], ymm0
    vmovdqu     YMMWORD [edi+1*SIZEOF_YMMWORD], ymm1

    sub         eax, byte 2*SIZEOF_YMMWORD
    jz          short .nextrow
    add         esi, byte SIZEOF_YMMWORD
    add         ebx, 2*SIZEOF_YMMWORD
    add         edi, 2*SIZEOF_YMMWORD
    jmp         short .columnloop
    alignx      16, 7

.nextrow:
    pop         esi
    pop         edi

    add         esi, byte 1*SIZEOF_JSAMPROW  ; input_data
    add         edi, byte 2*SIZEOF_JSAMPROW  ; output_data
    sub         ecx, byte 2                  ; rowctr
    jg          near .rowloop

.return:
    vzeroupper
    pop         edi
    pop         esi
    pop         ebx
    pop         ebp
    ret
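; Keep the end of the text segment padded to a 32-byte boundary (some linkers
; otherwise do not honor the section alignment request).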
    align       32