src/os_cpu/bsd_x86/vm/bsd_x86_32.s
rev 2698 : new bsd files

*** 20,82 ****
  # or visit www.oracle.com if you need additional information or have any
  # questions.
  #
  # NOTE WELL!  The _Copy functions are called directly
  # from server-compiler-generated code via CallLeafNoFP,
  # which means that they *must* either not use floating
  # point or use it in the same manner as does the server
  # compiler.
  
!         .globl _Copy_conjoint_bytes
!         .globl _Copy_arrayof_conjoint_bytes
!         .globl _Copy_conjoint_jshorts_atomic
!         .globl _Copy_arrayof_conjoint_jshorts
!         .globl _Copy_conjoint_jints_atomic
!         .globl _Copy_arrayof_conjoint_jints
!         .globl _Copy_conjoint_jlongs_atomic
!         .globl _mmx_Copy_arrayof_conjoint_jshorts
!         .globl _Atomic_cmpxchg_long
!         .globl _Atomic_move_long
  
          .text
  
!         .globl SafeFetch32, Fetch32PFI, Fetch32Resume
!         .globl SafeFetchN
  ## TODO: avoid exposing Fetch32PFI and Fetch32Resume.
  ## Instead, the signal handler would call a new SafeFetchTriage(FaultingEIP)
  ## routine to vet the address.  If the address is the faulting LD then
  ## SafeFetchTriage() would return the resume-at EIP, otherwise null.
!         .type    SafeFetch32,@function
          .p2align 4,,15
! SafeFetch32:
! SafeFetchN:
          movl     0x8(%esp), %eax
          movl     0x4(%esp), %ecx
! Fetch32PFI:
          movl     (%ecx), %eax
! Fetch32Resume:
          ret
  
!         .globl SpinPause
!         .type    SpinPause,@function
          .p2align 4,,15
! SpinPause:
          rep
          nop
          movl     $1, %eax
          ret
  
  # Support for void Copy::conjoint_bytes(void* from,
  #                                       void* to,
  #                                       size_t count)
          .p2align 4,,15
!         .type    _Copy_conjoint_bytes,@function
! _Copy_conjoint_bytes:
          pushl    %esi
          movl     4+12(%esp),%ecx      # count
          pushl    %edi
          movl     8+ 4(%esp),%esi      # from
          movl     8+ 8(%esp),%edi      # to
--- 20,113 ----
  # or visit www.oracle.com if you need additional information or have any
  # questions.
  #
+ #ifdef __APPLE__
+ # Darwin uses _ prefixed global symbols
+ #define SYMBOL(s) _ ## s
+ #define ELF_TYPE(name, description)
+ #else
+ #define SYMBOL(s) s
+ #define ELF_TYPE(name, description) .type name,description
+ #endif
+ 
+         .globl SYMBOL(fixcw)
+ 
  # NOTE WELL!  The _Copy functions are called directly
  # from server-compiler-generated code via CallLeafNoFP,
  # which means that they *must* either not use floating
  # point or use it in the same manner as does the server
  # compiler.
  
!         .globl SYMBOL(_Copy_conjoint_bytes)
!         .globl SYMBOL(_Copy_arrayof_conjoint_bytes)
!         .globl SYMBOL(_Copy_conjoint_jshorts_atomic)
!         .globl SYMBOL(_Copy_arrayof_conjoint_jshorts)
!         .globl SYMBOL(_Copy_conjoint_jints_atomic)
!         .globl SYMBOL(_Copy_arrayof_conjoint_jints)
!         .globl SYMBOL(_Copy_conjoint_jlongs_atomic)
!         .globl SYMBOL(_mmx_Copy_arrayof_conjoint_jshorts)
!         .globl SYMBOL(_Atomic_cmpxchg_long)
!         .globl SYMBOL(_Atomic_move_long)
  
          .text
  
! # Support for void os::Solaris::init_thread_fpu_state() in os_solaris_i486.cpp
! # Set fpu to 53 bit precision.  This happens too early to use a stub.
! # ported from solaris_x86_32.s
! #ifdef __APPLE__
!         .align 4
! #else
!         .align 16
! #endif
! SYMBOL(fixcw):
!         pushl    $0x27f
!         fldcw    0(%esp)
!         popl     %eax
!         ret
! 
! #ifdef __APPLE__
!         .align 4
! #else
!         .align 16
! #endif
! 
!         .globl SYMBOL(SafeFetch32), SYMBOL(Fetch32PFI), SYMBOL(Fetch32Resume)
!         .globl SYMBOL(SafeFetchN)
  ## TODO: avoid exposing Fetch32PFI and Fetch32Resume.
  ## Instead, the signal handler would call a new SafeFetchTriage(FaultingEIP)
  ## routine to vet the address.  If the address is the faulting LD then
  ## SafeFetchTriage() would return the resume-at EIP, otherwise null.
!         ELF_TYPE(SafeFetch32,@function)
          .p2align 4,,15
! SYMBOL(SafeFetch32):
! SYMBOL(SafeFetchN):
          movl     0x8(%esp), %eax
          movl     0x4(%esp), %ecx
! SYMBOL(Fetch32PFI):
          movl     (%ecx), %eax
! SYMBOL(Fetch32Resume):
          ret
  
!         .globl SYMBOL(SpinPause)
!         ELF_TYPE(SpinPause,@function)
          .p2align 4,,15
! SYMBOL(SpinPause):
          rep
          nop
          movl     $1, %eax
          ret
  
  # Support for void Copy::conjoint_bytes(void* from,
  #                                       void* to,
  #                                       size_t count)
          .p2align 4,,15
!         ELF_TYPE(_Copy_conjoint_bytes,@function)
! SYMBOL(_Copy_conjoint_bytes):
          pushl    %esi
          movl     4+12(%esp),%ecx      # count
          pushl    %edi
          movl     8+ 4(%esp),%esi      # from
          movl     8+ 8(%esp),%edi      # to
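
Note: the new SYMBOL()/ELF_TYPE() macros exist because Mach-O prepends an
underscore to every C-visible name, while the ELF BSDs do not, and because
.type is an ELF-only directive. A minimal C sketch of how the runtime side
is assumed to see two of these stubs (the declarations are illustrative,
not part of this change):

    #include <stddef.h>

    /* On Darwin the compiler emits a reference to "_SafeFetch32" for the
     * C name "SafeFetch32", which is exactly what SYMBOL(SafeFetch32)
     * expands to; on the ELF BSDs the label is the bare name. */
    extern int SpinPause(void);
    extern int SafeFetch32(int* adr, int errValue);

    int probe(int* maybe_unmapped) {
        /* Stub contract, per the TODO comment in the hunk: %eax is
         * preloaded with errValue, so if the load at Fetch32PFI faults,
         * the SIGSEGV handler only has to resume execution at
         * Fetch32Resume and errValue comes back unchanged. */
        return SafeFetch32(maybe_unmapped, -1);
    }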
*** 179,190 ****
  #                                               void* to,
  #                                               size_t count)
  #
  # Same as _Copy_conjoint_bytes, except no source alignment check.
          .p2align 4,,15
!         .type    _Copy_arrayof_conjoint_bytes,@function
! _Copy_arrayof_conjoint_bytes:
          pushl    %esi
          movl     4+12(%esp),%ecx      # count
          pushl    %edi
          movl     8+ 4(%esp),%esi      # from
          movl     8+ 8(%esp),%edi      # to
--- 210,221 ----
  #                                               void* to,
  #                                               size_t count)
  #
  # Same as _Copy_conjoint_bytes, except no source alignment check.
          .p2align 4,,15
!         ELF_TYPE(_Copy_arrayof_conjoint_bytes,@function)
! SYMBOL(_Copy_arrayof_conjoint_bytes):
          pushl    %esi
          movl     4+12(%esp),%ecx      # count
          pushl    %edi
          movl     8+ 4(%esp),%esi      # from
          movl     8+ 8(%esp),%edi      # to
*** 267,278 ****
  
  # Support for void Copy::conjoint_jshorts_atomic(void* from,
  #                                                void* to,
  #                                                size_t count)
          .p2align 4,,15
!         .type    _Copy_conjoint_jshorts_atomic,@function
! _Copy_conjoint_jshorts_atomic:
          pushl    %esi
          movl     4+12(%esp),%ecx      # count
          pushl    %edi
          movl     8+ 4(%esp),%esi      # from
          movl     8+ 8(%esp),%edi      # to
--- 298,309 ----
  
  # Support for void Copy::conjoint_jshorts_atomic(void* from,
  #                                                void* to,
  #                                                size_t count)
          .p2align 4,,15
!         ELF_TYPE(_Copy_conjoint_jshorts_atomic,@function)
! SYMBOL(_Copy_conjoint_jshorts_atomic):
          pushl    %esi
          movl     4+12(%esp),%ecx      # count
          pushl    %edi
          movl     8+ 4(%esp),%esi      # from
          movl     8+ 8(%esp),%edi      # to
*** 354,365 ****
  
  # Support for void Copy::arrayof_conjoint_jshorts(void* from,
  #                                                 void* to,
  #                                                 size_t count)
          .p2align 4,,15
!         .type    _Copy_arrayof_conjoint_jshorts,@function
! _Copy_arrayof_conjoint_jshorts:
          pushl    %esi
          movl     4+12(%esp),%ecx      # count
          pushl    %edi
          movl     8+ 4(%esp),%esi      # from
          movl     8+ 8(%esp),%edi      # to
--- 385,396 ----
  
  # Support for void Copy::arrayof_conjoint_jshorts(void* from,
  #                                                 void* to,
  #                                                 size_t count)
          .p2align 4,,15
!         ELF_TYPE(_Copy_arrayof_conjoint_jshorts,@function)
! SYMBOL(_Copy_arrayof_conjoint_jshorts):
          pushl    %esi
          movl     4+12(%esp),%ecx      # count
          pushl    %edi
          movl     8+ 4(%esp),%esi      # from
          movl     8+ 8(%esp),%edi      # to
*** 431,444 ****
  #                                            void* to,
  #                                            size_t count)
  # Equivalent to
  #   arrayof_conjoint_jints
          .p2align 4,,15
!         .type    _Copy_conjoint_jints_atomic,@function
!         .type    _Copy_arrayof_conjoint_jints,@function
! _Copy_conjoint_jints_atomic:
! _Copy_arrayof_conjoint_jints:
          pushl    %esi
          movl     4+12(%esp),%ecx      # count
          pushl    %edi
          movl     8+ 4(%esp),%esi      # from
          movl     8+ 8(%esp),%edi      # to
--- 462,475 ----
  #                                            void* to,
  #                                            size_t count)
  # Equivalent to
  #   arrayof_conjoint_jints
          .p2align 4,,15
!         ELF_TYPE(_Copy_conjoint_jints_atomic,@function)
!         ELF_TYPE(_Copy_arrayof_conjoint_jints,@function)
! SYMBOL(_Copy_conjoint_jints_atomic):
! SYMBOL(_Copy_arrayof_conjoint_jints):
          pushl    %esi
          movl     4+12(%esp),%ecx      # count
          pushl    %edi
          movl     8+ 4(%esp),%esi      # from
          movl     8+ 8(%esp),%edi      # to
*** 496,517 ****
  #
  # 32-bit
  #
  # count treated as signed
  #
! # if (from > to) {
  #   while (--count >= 0) {
  #     *to++ = *from++;
  #   }
  # } else {
  #   while (--count >= 0) {
  #     to[count] = from[count];
  #   }
  # }
          .p2align 4,,15
!         .type    _Copy_conjoint_jlongs_atomic,@function
! _Copy_conjoint_jlongs_atomic:
          movl     4+8(%esp),%ecx       # count
          movl     4+0(%esp),%eax       # from
          movl     4+4(%esp),%edx       # to
          cmpl     %eax,%edx
          jae      cla_CopyLeft
--- 527,548 ----
  #
  # 32-bit
  #
  # count treated as signed
  #
! # // if (from > to) {
  #   while (--count >= 0) {
  #     *to++ = *from++;
  #   }
  # } else {
  #   while (--count >= 0) {
  #     to[count] = from[count];
  #   }
  # }
          .p2align 4,,15
!         ELF_TYPE(_Copy_conjoint_jlongs_atomic,@function)
! SYMBOL(_Copy_conjoint_jlongs_atomic):
          movl     4+8(%esp),%ecx       # count
          movl     4+0(%esp),%eax       # from
          movl     4+4(%esp),%edx       # to
          cmpl     %eax,%edx
          jae      cla_CopyLeft
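
For reference, the commented algorithm in this hunk rendered as C (a sketch
only: the real stub moves each jlong with 64-bit FPU loads and stores so the
element transfer is atomic, which a plain C assignment does not guarantee):

    #include <stddef.h>
    #include <stdint.h>

    typedef int64_t jlong;

    /* Direction choice for possibly overlapping regions: copy forward when
     * the source lies above the destination, otherwise from the top down
     * ("CopyLeft"), matching the cmpl/jae pair at the end of the hunk. */
    static void conjoint_jlongs(jlong* from, jlong* to, ptrdiff_t count) {
        if (from > to) {
            while (--count >= 0)
                *to++ = *from++;
        } else {
            while (--count >= 0)
                to[count] = from[count];
        }
    }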
*** 535,546 ****
  
  # Support for void Copy::arrayof_conjoint_jshorts(void* from,
  #                                                 void* to,
  #                                                 size_t count)
          .p2align 4,,15
!         .type    _mmx_Copy_arrayof_conjoint_jshorts,@function
! _mmx_Copy_arrayof_conjoint_jshorts:
          pushl    %esi
          movl     4+12(%esp),%ecx
          pushl    %edi
          movl     8+ 4(%esp),%esi
          movl     8+ 8(%esp),%edi
--- 566,577 ----
  
  # Support for void Copy::arrayof_conjoint_jshorts(void* from,
  #                                                 void* to,
  #                                                 size_t count)
          .p2align 4,,15
!         ELF_TYPE(_mmx_Copy_arrayof_conjoint_jshorts,@function)
! SYMBOL(_mmx_Copy_arrayof_conjoint_jshorts):
          pushl    %esi
          movl     4+12(%esp),%ecx
          pushl    %edi
          movl     8+ 4(%esp),%esi
          movl     8+ 8(%esp),%edi
*** 634,645 ****
  #                                       volatile jlong* dest,
  #                                       jlong compare_value,
  #                                       bool is_MP)
  #
          .p2align 4,,15
!         .type    _Atomic_cmpxchg_long,@function
! _Atomic_cmpxchg_long:
                                        #  8(%esp) : return PC
          pushl    %ebx                 #  4(%esp) : old %ebx
          pushl    %edi                 #  0(%esp) : old %edi
          movl     12(%esp), %ebx       # 12(%esp) : exchange_value (low)
          movl     16(%esp), %ecx       # 16(%esp) : exchange_value (high)
--- 665,676 ----
  #                                       volatile jlong* dest,
  #                                       jlong compare_value,
  #                                       bool is_MP)
  #
          .p2align 4,,15
!         ELF_TYPE(_Atomic_cmpxchg_long,@function)
! SYMBOL(_Atomic_cmpxchg_long):
                                        #  8(%esp) : return PC
          pushl    %ebx                 #  4(%esp) : old %ebx
          pushl    %edi                 #  0(%esp) : old %edi
          movl     12(%esp), %ebx       # 12(%esp) : exchange_value (low)
          movl     16(%esp), %ecx       # 16(%esp) : exchange_value (high)
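
The body of this stub (outside the hunk) does the swap with cmpxchg8b,
presumably lock-prefixed as on the other x86-32 ports; semantically it
computes the following, minus the atomicity (signature taken from the
comment above; is_MP only decides whether the lock prefix is needed, so it
is dropped in this sketch):

    #include <stdint.h>

    typedef int64_t jlong;

    /* Returns the value previously at *dest; the swap happens only when
     * that value equals compare_value, and the caller checks the return
     * value against compare_value for success. */
    static jlong atomic_cmpxchg_long(jlong exchange_value,
                                     volatile jlong* dest,
                                     jlong compare_value) {
        jlong old = *dest;
        if (old == compare_value)
            *dest = exchange_value;
        return old;
    }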
*** 656,667 ****
  
  # Support for jlong Atomic::load and Atomic::store.
  # void _Atomic_move_long(volatile jlong* src, volatile jlong* dst)
          .p2align 4,,15
!         .type    _Atomic_move_long,@function
! _Atomic_move_long:
          movl     4(%esp), %eax   # src
          fildll   (%eax)
          movl     8(%esp), %eax   # dest
          fistpll  (%eax)
          ret
--- 687,698 ----
  
  # Support for jlong Atomic::load and Atomic::store.
  # void _Atomic_move_long(volatile jlong* src, volatile jlong* dst)
          .p2align 4,,15
!         ELF_TYPE(_Atomic_move_long,@function)
! SYMBOL(_Atomic_move_long):
          movl     4(%esp), %eax   # src
          fildll   (%eax)
          movl     8(%esp), %eax   # dest
          fistpll  (%eax)
          ret
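
fildll/fistpll move the full 64 bits in one memory access, which is what
makes a jlong load or store atomic on 32-bit x86 (two 32-bit movl
instructions could be torn by a concurrent writer). An assumed usage from
the C++ side, mirroring the other x86-32 ports (the wrapper names below
are hypothetical):

    #include <stdint.h>

    typedef int64_t jlong;

    extern void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);

    /* Atomic 64-bit load: bounce the value through a local via the stub. */
    static jlong atomic_load_jlong(volatile jlong* src) {
        volatile jlong dest;
        _Atomic_move_long(src, &dest);
        return dest;
    }

    /* Atomic 64-bit store. */
    static void atomic_store_jlong(jlong value, volatile jlong* dest) {
        _Atomic_move_long(&value, dest);
    }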