;
; jcsample.asm - downsampling (64-bit AVX2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2009, 2016, D. R. Commander.
; Copyright (C) 2015, Intel Corporation.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and can *not*
; be assembled with Microsoft's MASM or any compatible assembler (including
; Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208

%include "jsimdext.inc"

; --------------------------------------------------------------------------
    SECTION     SEG_TEXT
    BITS        64

;
; Downsample pixel values of a single component.
; This version handles the common case of 2:1 horizontal and 1:1 vertical,
; without smoothing.
;
; GLOBAL(void)
; jsimd_h2v1_downsample_avx2(JDIMENSION image_width, int max_v_samp_factor,
;                            JDIMENSION v_samp_factor,
;                            JDIMENSION width_in_blocks, JSAMPARRAY input_data,
;                            JSAMPARRAY output_data);
;
; r10d = JDIMENSION image_width
; r11 = int max_v_samp_factor
; r12d = JDIMENSION v_samp_factor
; r13d = JDIMENSION width_in_blocks
; r14 = JSAMPARRAY input_data
; r15 = JSAMPARRAY output_data
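;
; For reference, a scalar sketch of the computation (roughly the
; h2v1_downsample logic in libjpeg's jcsample.c; variable names here are
; illustrative, not taken from this file).  The AVX2 loop below produces the
; same result 32 output samples at a time:
;
;   /* Average each horizontal pixel pair, using an alternating 0/1 bias so
;      that rounding does not drift in one direction. */
;   for (col = 0; col < output_cols; col++) {
;     int bias = col & 1;                               /* 0, 1, 0, 1, ... */
;     outptr[col] = (JSAMPLE)((inptr[2 * col] + inptr[2 * col + 1] + bias) >> 1);
;   }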

    align       32
    GLOBAL_FUNCTION(jsimd_h2v1_downsample_avx2)

EXTN(jsimd_h2v1_downsample_avx2):
    push        rbp
    mov         rax, rsp
    mov         rbp, rsp
    collect_args 6

    mov         ecx, r13d
    shl         rcx, 3                  ; imul rcx, DCTSIZE (rcx = output_cols)
    jz          near .return

    mov         edx, r10d

    ; -- expand_right_edge
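    ; Replicate the last valid sample of each row out to 2 * output_cols
    ; columns, so that the column loop below can read whole YMMWORDs past
    ; image_width without picking up garbage (compare expand_right_edge() in
    ; libjpeg's jcsample.c).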
    push        rcx
    shl         rcx, 1                  ; output_cols * 2
    sub         rcx, rdx
    jle         short .expand_end

    mov         rax, r11
    test        rax, rax
    jle         short .expand_end

    cld
    mov         rsi, r14                ; input_data
.expandloop:
    push        rax
    push        rcx

    mov         rdi, JSAMPROW [rsi]
    add         rdi, rdx
    mov         al, JSAMPLE [rdi-1]

    rep stosb

    pop         rcx
    pop         rax

    add         rsi, byte SIZEOF_JSAMPROW
    dec         rax
    jg          short .expandloop

.expand_end:
    pop         rcx                     ; output_cols

    ; -- h2v1_downsample

    mov         eax, r12d               ; rowctr
    test        eax, eax
    jle         near .return

    mov         rdx, 0x00010000         ; bias pattern
    vmovd       xmm7, edx
    vpshufd     xmm7, xmm7, 0x00        ; xmm7={0, 1, 0, 1, 0, 1, 0, 1}
    vperm2i128  ymm7, ymm7, ymm7, 0     ; ymm7={xmm7, xmm7}
    vpcmpeqw    ymm6, ymm6, ymm6
    vpsrlw      ymm6, ymm6, BYTE_BIT    ; ymm6={0xFF 0x00 0xFF 0x00 ..}
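    ; ymm6 masks each word down to its low (even-column) byte, while the
    ; BYTE_BIT right shift isolates the high (odd-column) byte; ymm7 holds the
    ; alternating 0/1 rounding bias added before the final shift right by 1.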

    mov         rsi, r14                ; input_data
    mov         rdi, r15                ; output_data
.rowloop:
    push        rcx
    push        rdi
    push        rsi

    mov         rsi, JSAMPROW [rsi]     ; inptr
    mov         rdi, JSAMPROW [rdi]     ; outptr

    cmp         rcx, byte SIZEOF_YMMWORD
    jae         short .columnloop
.columnloop_r24:
    ; rcx can possibly be 8, 16, 24
    cmp         rcx, 24
    jne         .columnloop_r16
    vmovdqu     ymm0, YMMWORD [rsi+0*SIZEOF_YMMWORD]
    vmovdqu     xmm1, XMMWORD [rsi+1*SIZEOF_YMMWORD]
    mov         rcx, SIZEOF_YMMWORD
    jmp         short .downsample
.columnloop_r16:
    cmp         rcx, 16
    jne         .columnloop_r8
    vmovdqu     ymm0, YMMWORD [rsi+0*SIZEOF_YMMWORD]
    vpxor       ymm1, ymm1, ymm1
    mov         rcx, SIZEOF_YMMWORD
    jmp         short .downsample
.columnloop_r8:
    vmovdqu     xmm0, XMMWORD [rsi+0*SIZEOF_YMMWORD]
    vpxor       ymm1, ymm1, ymm1
    mov         rcx, SIZEOF_YMMWORD
    jmp         short .downsample
.columnloop:
    vmovdqu     ymm0, YMMWORD [rsi+0*SIZEOF_YMMWORD]
    vmovdqu     ymm1, YMMWORD [rsi+1*SIZEOF_YMMWORD]
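
    ; Split each word of ymm0/ymm1 into its even-column byte (vpand with ymm6)
    ; and odd-column byte (vpsrlw by BYTE_BIT), add the two, add the 0/1 bias
    ; from ymm7, then shift right by 1 to average each horizontal pixel pair.
    ; vpackuswb narrows the words back to bytes, and vpermq with 0xd8 swaps
    ; the two middle quadwords to undo the per-lane interleave of the pack.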
.downsample:
    vpsrlw      ymm2, ymm0, BYTE_BIT
    vpand       ymm0, ymm0, ymm6
    vpsrlw      ymm3, ymm1, BYTE_BIT
    vpand       ymm1, ymm1, ymm6

    vpaddw      ymm0, ymm0, ymm2
    vpaddw      ymm1, ymm1, ymm3
    vpaddw      ymm0, ymm0, ymm7
    vpaddw      ymm1, ymm1, ymm7
    vpsrlw      ymm0, ymm0, 1
    vpsrlw      ymm1, ymm1, 1

    vpackuswb   ymm0, ymm0, ymm1
    vpermq      ymm0, ymm0, 0xd8

    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymm0

    sub         rcx, byte SIZEOF_YMMWORD    ; outcol
    add         rsi, byte 2*SIZEOF_YMMWORD  ; inptr
    add         rdi, byte 1*SIZEOF_YMMWORD  ; outptr
    cmp         rcx, byte SIZEOF_YMMWORD
    jae         short .columnloop
    test        rcx, rcx
    jnz         near .columnloop_r24

    pop         rsi
    pop         rdi
    pop         rcx

    add         rsi, byte SIZEOF_JSAMPROW   ; input_data
    add         rdi, byte SIZEOF_JSAMPROW   ; output_data
    dec         rax                         ; rowctr
    jg          near .rowloop

.return:
    vzeroupper
    uncollect_args 6
    pop         rbp
    ret

; --------------------------------------------------------------------------
;
; Downsample pixel values of a single component.
; This version handles the standard case of 2:1 horizontal and 2:1 vertical,
; without smoothing.
;
; GLOBAL(void)
; jsimd_h2v2_downsample_avx2(JDIMENSION image_width, int max_v_samp_factor,
;                            JDIMENSION v_samp_factor,
;                            JDIMENSION width_in_blocks, JSAMPARRAY input_data,
;                            JSAMPARRAY output_data);
;
; r10d = JDIMENSION image_width
; r11 = int max_v_samp_factor
; r12d = JDIMENSION v_samp_factor
; r13d = JDIMENSION width_in_blocks
; r14 = JSAMPARRAY input_data
; r15 = JSAMPARRAY output_data
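;
; For reference, a scalar sketch of the computation (roughly the
; h2v2_downsample logic in libjpeg's jcsample.c; variable names here are
; illustrative).  Each output sample is the average of a 2x2 block taken from
; the two input rows inptr0 and inptr1:
;
;   /* Average each 2x2 block, using an alternating 1/2 bias so that rounding
;      does not drift in one direction. */
;   for (col = 0; col < output_cols; col++) {
;     int bias = 1 + (col & 1);                         /* 1, 2, 1, 2, ... */
;     outptr[col] = (JSAMPLE)((inptr0[2 * col] + inptr0[2 * col + 1] +
;                              inptr1[2 * col] + inptr1[2 * col + 1] +
;                              bias) >> 2);
;   }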

    align       32
    GLOBAL_FUNCTION(jsimd_h2v2_downsample_avx2)

EXTN(jsimd_h2v2_downsample_avx2):
    push        rbp
    mov         rax, rsp
    mov         rbp, rsp
    collect_args 6

    mov         ecx, r13d
    shl         rcx, 3                  ; imul rcx, DCTSIZE (rcx = output_cols)
    jz          near .return

    mov         edx, r10d

    ; -- expand_right_edge
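    ; Same edge expansion as in jsimd_h2v1_downsample_avx2 above: replicate
    ; the last valid sample of each row out to 2 * output_cols columns so the
    ; column loop can safely load whole YMMWORDs.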
    push        rcx
    shl         rcx, 1                  ; output_cols * 2
    sub         rcx, rdx
    jle         short .expand_end

    mov         rax, r11
    test        rax, rax
    jle         short .expand_end

    cld
    mov         rsi, r14                ; input_data
.expandloop:
    push        rax
    push        rcx

    mov         rdi, JSAMPROW [rsi]
    add         rdi, rdx
    mov         al, JSAMPLE [rdi-1]

    rep stosb

    pop         rcx
    pop         rax

    add         rsi, byte SIZEOF_JSAMPROW
    dec         rax
    jg          short .expandloop

.expand_end:
    pop         rcx                     ; output_cols

    ; -- h2v2_downsample

    mov         eax, r12d               ; rowctr
    test        rax, rax
    jle         near .return
    mov         rdx, 0x00020001         ; bias pattern
    vmovd       xmm7, edx
    vpcmpeqw    ymm6, ymm6, ymm6
    vpshufd     xmm7, xmm7, 0x00        ; xmm7={1, 2, 1, 2, 1, 2, 1, 2}
    vperm2i128  ymm7, ymm7, ymm7, 0     ; ymm7={xmm7, xmm7}
    vpsrlw      ymm6, ymm6, BYTE_BIT    ; ymm6={0xFF 0x00 0xFF 0x00 ..}
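    ; As in the h2v1 routine, ymm6 and the BYTE_BIT shift separate the even-
    ; and odd-column bytes of each word; ymm7 now supplies the alternating
    ; 1/2 bias used when rounding the four-sample sums before the shift
    ; right by 2.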

    mov         rsi, r14                ; input_data
    mov         rdi, r15                ; output_data
.rowloop:
    push        rcx
    push        rdi
    push        rsi

    mov         rdx, JSAMPROW [rsi+0*SIZEOF_JSAMPROW]  ; inptr0
    mov         rsi, JSAMPROW [rsi+1*SIZEOF_JSAMPROW]  ; inptr1
    mov         rdi, JSAMPROW [rdi]                    ; outptr

    cmp         rcx, byte SIZEOF_YMMWORD
    jae         short .columnloop
.columnloop_r24:
    cmp         rcx, 24
    jne         .columnloop_r16
    vmovdqu     ymm0, YMMWORD [rdx+0*SIZEOF_YMMWORD]
    vmovdqu     ymm1, YMMWORD [rsi+0*SIZEOF_YMMWORD]
    vmovdqu     xmm2, XMMWORD [rdx+1*SIZEOF_YMMWORD]
    vmovdqu     xmm3, XMMWORD [rsi+1*SIZEOF_YMMWORD]
    mov         rcx, SIZEOF_YMMWORD
    jmp         short .downsample
.columnloop_r16:
    cmp         rcx, 16
    jne         .columnloop_r8
    vmovdqu     ymm0, YMMWORD [rdx+0*SIZEOF_YMMWORD]
    vmovdqu     ymm1, YMMWORD [rsi+0*SIZEOF_YMMWORD]
    vpxor       ymm2, ymm2, ymm2
    vpxor       ymm3, ymm3, ymm3
    mov         rcx, SIZEOF_YMMWORD
    jmp         short .downsample
.columnloop_r8:
    vmovdqu     xmm0, XMMWORD [rdx+0*SIZEOF_XMMWORD]
    vmovdqu     xmm1, XMMWORD [rsi+0*SIZEOF_XMMWORD]
    vpxor       ymm2, ymm2, ymm2
    vpxor       ymm3, ymm3, ymm3
    mov         rcx, SIZEOF_YMMWORD
    jmp         short .downsample
.columnloop:
    vmovdqu     ymm0, YMMWORD [rdx+0*SIZEOF_YMMWORD]
    vmovdqu     ymm1, YMMWORD [rsi+0*SIZEOF_YMMWORD]
    vmovdqu     ymm2, YMMWORD [rdx+1*SIZEOF_YMMWORD]
    vmovdqu     ymm3, YMMWORD [rsi+1*SIZEOF_YMMWORD]
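
    ; For each word: sum the even- and odd-column bytes of both input rows
    ; (four samples per output pixel), add the 1/2 bias from ymm7, and shift
    ; right by 2 to average each 2x2 block.  vpackuswb narrows the words back
    ; to bytes, and vpermq with 0xd8 restores sequential byte order across
    ; the 128-bit lanes.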
.downsample:
    vpand       ymm4, ymm0, ymm6
    vpsrlw      ymm0, ymm0, BYTE_BIT
    vpand       ymm5, ymm1, ymm6
    vpsrlw      ymm1, ymm1, BYTE_BIT
    vpaddw      ymm0, ymm0, ymm4
    vpaddw      ymm1, ymm1, ymm5

    vpand       ymm4, ymm2, ymm6
    vpsrlw      ymm2, ymm2, BYTE_BIT
    vpand       ymm5, ymm3, ymm6
    vpsrlw      ymm3, ymm3, BYTE_BIT
    vpaddw      ymm2, ymm2, ymm4
    vpaddw      ymm3, ymm3, ymm5

    vpaddw      ymm0, ymm0, ymm1
    vpaddw      ymm2, ymm2, ymm3
    vpaddw      ymm0, ymm0, ymm7
    vpaddw      ymm2, ymm2, ymm7
    vpsrlw      ymm0, ymm0, 2
    vpsrlw      ymm2, ymm2, 2

    vpackuswb   ymm0, ymm0, ymm2
    vpermq      ymm0, ymm0, 0xd8

    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymm0

    sub         rcx, byte SIZEOF_YMMWORD    ; outcol
    add         rdx, byte 2*SIZEOF_YMMWORD  ; inptr0
    add         rsi, byte 2*SIZEOF_YMMWORD  ; inptr1
    add         rdi, byte 1*SIZEOF_YMMWORD  ; outptr
    cmp         rcx, byte SIZEOF_YMMWORD
    jae         near .columnloop
    test        rcx, rcx
    jnz         near .columnloop_r24

    pop         rsi
    pop         rdi
    pop         rcx

    add         rsi, byte 2*SIZEOF_JSAMPROW ; input_data
    add         rdi, byte 1*SIZEOF_JSAMPROW ; output_data
    dec         rax                         ; rowctr
    jg          near .rowloop

.return:
    vzeroupper
    uncollect_args 6
    pop         rbp
    ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
    align       32