// NOTE(review): reconstructed from a garbled paste — the original carried
// fused-in line numbers and a second, partial duplicate of this chunk after
// a '|' separator; both extraction artifacts have been removed.
//
// Sun/Oracle Studio inline-template file (.il): each ".inline name,argcount
// ... .end" pair is expanded by the compiler at call sites, with arguments
// arriving in the SysV AMD64 registers (%rdi, %rsi, %rdx, ...).

// Tail of a byte compare-and-exchange template whose ".inline" header lies
// above this chunk: %dil = exchange_value, %rsi = dest; cmpxchgb leaves the
// previous byte at *dest in %al — TODO confirm against the preceding lines.
      lock
      cmpxchgb %dil, (%rsi)
      .end

// Support for jint Atomic::cmpxchg(jint exchange_value,
//                                  volatile jint *dest,
//                                  jint compare_value)
// In:  %edi = exchange_value, %rsi = dest, %edx = compare_value
// Out: %eax = value at *dest before the operation (cmpxchg convention)
      .inline _Atomic_cmpxchg,3
      movl     %edx, %eax            // compare_value -> %eax; cmpxchg compares against %eax
      lock
      cmpxchgl %edi, (%rsi)          // if (*dest == %eax) *dest = %edi; old *dest -> %eax
      .end

// Support for jlong Atomic::cmpxchg(jlong exchange_value,
//                                   volatile jlong* dest,
//                                   jlong compare_value)
// In:  %rdi = exchange_value, %rsi = dest, %rdx = compare_value
// Out: %rax = value at *dest before the operation
      .inline _Atomic_cmpxchg_long,3
      movq     %rdx, %rax            // compare_value -> %rax; cmpxchgq compares against %rax
      lock
      cmpxchgq %rdi, (%rsi)          // if (*dest == %rax) *dest = %rdi; old *dest -> %rax
      .end

// Support for OrderAccess::acquire()
// A dummy load from the top of stack; presumably relies on x86-TSO load
// ordering, so the load itself provides the acquire barrier — the template
// only has to make the load visible to the compiler. Result in %eax is
// discarded by the caller.
      .inline _OrderAccess_acquire,0
      movl     0(%rsp), %eax
      .end

// Support for OrderAccess::fence()
// "lock addl $0, (%rsp)" is the classic full-fence idiom: a locked no-op
// read-modify-write of the stack top serializes prior loads and stores
// without needing mfence.
      .inline _OrderAccess_fence,0
      lock
      addl     $0, (%rsp)
      .end

// Support for u2 Bytes::swap_u2(u2 x)
// In: %di = x; Out: %ax = x with its two bytes exchanged (rotate by 8).
      .inline _raw_swap_u2,1
      movw     %di, %ax
      rorw     $8, %ax
      .end

// Support for u4 Bytes::swap_u4(u4 x)
// In: %edi = x; Out: %eax = x with byte order reversed.
      .inline _raw_swap_u4,1
      movl     %edi, %eax
      bswapl   %eax
      .end

// Support for u8 Bytes::swap_u8(u8 x)
// In: %rdi = x; Out: %rax = x with byte order reversed.
      .inline _raw_swap_u8,1
      movq     %rdi, %rax
      bswapq   %rax
      .end