//
// Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

  // The argument size of each inline directive is ignored by the compiler
  // and is set to the number of arguments as documentation.

  // Get the raw thread ID from %fs:0
      .inline _raw_thread_id,0
      movq     %fs:0, %rax
      .end
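
  // A minimal sketch (assumed, not copied from the HotSpot sources) of how
  // the C++ side can bind to this template; the return type is a guess:
  //
  //   extern "C" intptr_t _raw_thread_id();
  //   intptr_t tid = _raw_thread_id();  // value read from %fs:0, in %rax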

  // Get current sp
      .inline _get_current_sp,0
      .volatile
      movq     %rsp, %rax
      .end

  // Get current fp
      .inline _get_current_fp,0
      .volatile
      movq     %rbp, %rax
      .end
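
  // The .volatile marker keeps the optimizer from caching or reordering
  // these reads. A hedged sketch of matching C++ declarations (assumed):
  //
  //   extern "C" intptr_t* _get_current_sp();
  //   extern "C" intptr_t* _get_current_fp();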

  // Support for os::rdtsc()
      .inline _raw_rdtsc,0
      rdtsc
      salq     $32, %rdx
      orq      %rdx, %rax
      .end
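
  // rdtsc leaves the low 32 bits of the timestamp in %eax and the high 32
  // bits in %edx; the shift/or above folds them into one 64-bit value in
  // %rax. A plausible C++ binding (assumed, for illustration):
  //
  //   extern "C" jlong _raw_rdtsc();
  //   inline jlong os::rdtsc() { return _raw_rdtsc(); }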

  // Implementation of jint _Atomic_add(jint add_value, volatile jint* dest)
  // used by Atomic::add(volatile jint* dest, jint add_value)
      .inline _Atomic_add,2
      movl     %edi, %eax      // save add_value for return
      lock
      xaddl    %edi, (%rsi)
      addl     %edi, %eax
      .end
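
  // xaddl leaves the old value of *dest in %edi, so the final addl makes
  // the template return the updated value, as Atomic::add expects. Usage
  // sketch (declaration assumed):
  //
  //   extern "C" jint _Atomic_add(jint add_value, volatile jint* dest);
  //   jint new_value = _Atomic_add(1, &counter);  // atomic increment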

  // Implementation of jlong _Atomic_add(jlong add_value, volatile jlong* dest)
  // used by Atomic::add(volatile jlong* dest, jlong add_value)
      .inline _Atomic_add_long,2
      movq     %rdi, %rax      // save add_value for return
      lock
      xaddq    %rdi, (%rsi)
      addq     %rdi, %rax
      .end
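
  // Same pattern as _Atomic_add, widened to 64 bits: %rax ends up holding
  // the updated value of *dest. Assumed declaration, for illustration:
  //
  //   extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);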

  // Implementation of jint _Atomic_xchg(jint exchange_value, volatile jint* dest)
  // used by Atomic::xchg(volatile jint* dest, jint exchange_value)
      .inline _Atomic_xchg,2
      xchgl    (%rsi), %edi
      movl     %edi, %eax
      .end

  // Implementation of jlong _Atomic_xchg(jlong exchange_value, volatile jlong* dest)
  // used by Atomic::xchg(volatile jlong* dest, jlong exchange_value)
      .inline _Atomic_xchg_long,2
      xchgq    (%rsi), %rdi
      movq     %rdi, %rax
      .end
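
  // xchg with a memory operand is implicitly locked, so no lock prefix is
  // needed; the previous value of *dest lands in %edi/%rdi and is moved to
  // %eax/%rax as the return value. Assumed declarations:
  //
  //   extern "C" jint  _Atomic_xchg(jint exchange_value, volatile jint* dest);
  //   extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);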

  // Support for jbyte Atomic::cmpxchg(volatile jbyte *dest,
  //                                   jbyte compare_value,
  //                                   jbyte exchange_value)
      .inline _Atomic_cmpxchg_byte,3
      movb     %dl, %al      // compare_value
      lock
      cmpxchgb %dil, (%rsi)
      .end
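
  // cmpxchgb compares %al with *dest: on a match it stores %dil, otherwise
  // it loads *dest into %al. Either way %al holds the value observed at
  // dest, which is the return value cmpxchg requires. Note that the
  // template's own argument order is (exchange_value, dest, compare_value),
  // per the registers above. Assumed declaration:
  //
  //   extern "C" jbyte _Atomic_cmpxchg_byte(jbyte exchange_value,
  //                                         volatile jbyte* dest,
  //                                         jbyte compare_value);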

  // Support for jint Atomic::cmpxchg(volatile jint *dest,
  //                                  jint compare_value,
  //                                  jint exchange_value)
      .inline _Atomic_cmpxchg,3
      movl     %edx, %eax      // compare_value
      lock
      cmpxchgl %edi, (%rsi)
      .end

  // Support for jlong Atomic::cmpxchg(volatile jlong* dest,
  //                                   jlong compare_value,
  //                                   jlong exchange_value)
      .inline _Atomic_cmpxchg_long,3
      movq     %rdx, %rax      // compare_value
      lock
      cmpxchgq %rdi, (%rsi)
      .end
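
  // The 32- and 64-bit forms follow the same shape. A hedged sketch of a
  // retry loop built on the 32-bit template (declaration assumed):
  //
  //   extern "C" jint _Atomic_cmpxchg(jint exchange_value,
  //                                   volatile jint* dest,
  //                                   jint compare_value);
  //   jint old;
  //   do {
  //     old = *dest;
  //   } while (_Atomic_cmpxchg(old + 1, dest, old) != old);  // atomic ++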

  // Support for u2 Bytes::swap_u2(u2 x)
      .inline _raw_swap_u2,1
      movw     %di, %ax
      rorw     $8, %ax
      .end

  // Support for u4 Bytes::swap_u4(u4 x)
      .inline _raw_swap_u4,1
      movl     %edi, %eax
      bswapl   %eax
      .end

  // Support for u8 Bytes::swap_u8(u8 x)
      .inline _raw_swap_u8,1
      movq     %rdi, %rax
      bswapq   %rax
      .end
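
  // Worked examples of the byte swaps (input values chosen for illustration):
  //   _raw_swap_u2(0x1122)             -> 0x2211  (rorw $8 swaps the two bytes)
  //   _raw_swap_u4(0x11223344)         -> 0x44332211
  //   _raw_swap_u8(0x1122334455667788) -> 0x8877665544332211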

  // Support for void Prefetch::read
      .inline _Prefetch_read,2
      prefetcht0 (%rdi, %rsi, 1)
      .end

  // Support for void Prefetch::write
  // We use prefetcht0 because EM64T doesn't support prefetchw;
  // prefetchw is a 3DNow! instruction.
      .inline _Prefetch_write,2
      prefetcht0 (%rdi, %rsi, 1)
      .end
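
  // Both templates take a base address in %rdi and a byte offset in %rsi,
  // prefetching base + interval into all cache levels (the t0 hint).
  // Hedged sketch of the C++ side (signatures assumed):
  //
  //   extern "C" void _Prefetch_read (const void* loc, intx interval);
  //   extern "C" void _Prefetch_write(const void* loc, intx interval);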