< prev index next >

src/os_cpu/solaris_x86/vm/solaris_x86_64.il

Print this page
rev 13267 : [mq]: Atomic_polishing

*** 1,7 **** // ! // Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it // under the terms of the GNU General Public License version 2 only, as // published by the Free Software Foundation. --- 1,7 ---- // ! // Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it // under the terms of the GNU General Public License version 2 only, as // published by the Free Software Foundation.
*** 47,105 **** rdtsc salq $32, %rdx orq %rdx, %rax .end ! // Support for jint Atomic::add(jint add_value, volatile jint* dest) .inline _Atomic_add,2 movl %edi, %eax // save add_value for return lock xaddl %edi, (%rsi) addl %edi, %eax .end ! // Support for jlong Atomic::add(jlong add_value, volatile jlong* dest) .inline _Atomic_add_long,2 movq %rdi, %rax // save add_value for return lock xaddq %rdi, (%rsi) addq %rdi, %rax .end ! // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest). .inline _Atomic_xchg,2 xchgl (%rsi), %edi movl %edi, %eax .end ! // Support for jlong Atomic::xchg(jlong exchange_value, volatile jlong* dest). .inline _Atomic_xchg_long,2 xchgq (%rsi), %rdi movq %rdi, %rax .end ! // Support for jbyte Atomic::cmpxchg(jbyte exchange_value, ! // volatile jbyte *dest, ! // jbyte compare_value) .inline _Atomic_cmpxchg_byte,3 movb %dl, %al // compare_value lock cmpxchgb %dil, (%rsi) .end ! // Support for jint Atomic::cmpxchg(jint exchange_value, ! // volatile jint *dest, ! // jint compare_value) .inline _Atomic_cmpxchg,3 movl %edx, %eax // compare_value lock cmpxchgl %edi, (%rsi) .end ! // Support for jlong Atomic::cmpxchg(jlong exchange_value, ! // volatile jlong* dest, ! // jlong compare_value) .inline _Atomic_cmpxchg_long,3 movq %rdx, %rax // compare_value lock cmpxchgq %rdi, (%rsi) .end --- 47,105 ---- rdtsc salq $32, %rdx orq %rdx, %rax .end ! // Support for int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) .inline _Atomic_add,2 movl %edi, %eax // save add_value for return lock xaddl %edi, (%rsi) addl %edi, %eax .end ! // Support for int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) .inline _Atomic_add_long,2 movq %rdi, %rax // save add_value for return lock xaddq %rdi, (%rsi) addq %rdi, %rax .end ! // Support for int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest). .inline _Atomic_xchg,2 xchgl (%rsi), %edi movl %edi, %eax .end ! 
// Support for int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest). .inline _Atomic_xchg_long,2 xchgq (%rsi), %rdi movq %rdi, %rax .end ! // Support for int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, ! // volatile int8_t *dest, ! // int8_t compare_value) .inline _Atomic_cmpxchg_byte,3 movb %dl, %al // compare_value lock cmpxchgb %dil, (%rsi) .end ! // Support for int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, ! // volatile int32_t *dest, ! // int32_t compare_value) .inline _Atomic_cmpxchg,3 movl %edx, %eax // compare_value lock cmpxchgl %edi, (%rsi) .end ! // Support for int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, ! // volatile int64_t* dest, ! // int64_t compare_value) .inline _Atomic_cmpxchg_long,3 movq %rdx, %rax // compare_value lock cmpxchgq %rdi, (%rsi) .end
< prev index next >