//
// Copyright 2004-2007 Sun Microsystems, Inc.  All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
// CA 95054 USA or visit www.sun.com if you need additional information or
// have any questions.
//
//

  // The argument size of each inline directive is ignored by the compiler
  // and is set to the number of arguments as documentation.

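  // Note: these templates are expanded inline at their call sites.  Under the
  // Solaris x86_64 (System V AMD64) calling convention the integer and pointer
  // arguments arrive in %rdi, %rsi, %rdx and %rcx (in that order) and results
  // are returned in %rax.
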
  // Get the raw thread ID from %fs:0
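  // (64-bit Solaris keeps the thread pointer in %fs; the 32-bit port reads
  // %gs:0.  The word at offset 0 is returned in %rax.)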
      .inline _raw_thread_id,0
      movq     %fs:0, %rax
      .end

  // Get the frame pointer from the previous frame.
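  // Because the template is expanded inline at its call site, %rbp here is the
  // frame pointer of the function that invoked _get_previous_fp().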
      .inline _get_previous_fp,0
      movq     %rbp, %rax
      movq     %rax, %rax
      .end

  // Support for jint Atomic::add(jint add_value, volatile jint* dest)
  // An additional bool (os::is_MP()) is passed as the last argument.
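  // %edi = add_value, %rsi = dest, %edx = is_MP; on a uniprocessor the branch
  // skips the lock prefix.  xadd leaves the old value of *dest in %edi, so the
  // final add yields the updated value, which is returned in %eax.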
      .inline _Atomic_add,3
      movl     %edi, %eax      // save add_value for return
      testl    %edx, %edx      // MP test
      je       1f
      lock
1:    xaddl    %edi, (%rsi)
      addl     %edi, %eax
      .end

  // Support for jlong Atomic::add(jlong add_value, volatile jlong* dest)
  // An additional bool (os::is_MP()) is passed as the last argument.
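  // %rdi = add_value, %rsi = dest, %rdx = is_MP; as above, the updated value
  // of *dest is returned in %rax.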
      .inline _Atomic_add_long,3
      movq     %rdi, %rax      // save add_value for return
      testq    %rdx, %rdx      // MP test
      je       1f
      lock
1:    xaddq    %rdi, (%rsi)
      addq     %rdi, %rax
      .end

  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
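  // %edi = exchange_value, %rsi = dest.  xchg with a memory operand is
  // implicitly locked, so no lock prefix or MP test is needed; the old value
  // of *dest is returned in %eax.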
      .inline _Atomic_xchg,2
      xchgl    (%rsi), %edi
      movl     %edi, %eax
      .end

  // Support for jlong Atomic::xchg(jlong exchange_value, volatile jlong* dest).
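  // %rdi = exchange_value, %rsi = dest; same scheme as _Atomic_xchg, with the
  // old value of *dest returned in %rax.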
      .inline _Atomic_xchg_long,2
      xchgq    (%rsi), %rdi
      movq     %rdi, %rax
      .end

  // Support for jint Atomic::cmpxchg(jint exchange_value,
  //                                  volatile jint* dest,
  //                                  jint compare_value)
  // An additional bool (os::is_MP()) is passed as the last argument.
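  // %edi = exchange_value, %rsi = dest, %edx = compare_value, %ecx = is_MP.
  // cmpxchg compares %eax with *dest and leaves the value read from *dest in
  // %eax, which is the return value.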
      .inline _Atomic_cmpxchg,4
      movl     %edx, %eax      // compare_value
      testl    %ecx, %ecx      // MP test
      je       1f
      lock
1:    cmpxchgl %edi, (%rsi)
      .end

  // Support for jlong Atomic::cmpxchg(jlong exchange_value,
  //                                   volatile jlong* dest,
  //                                   jlong compare_value)
  // An additional bool (os::is_MP()) is passed as the last argument.
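  // %rdi = exchange_value, %rsi = dest, %rdx = compare_value, %rcx = is_MP;
  // the value read from *dest is returned in %rax.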
      .inline _Atomic_cmpxchg_long,4
      movq     %rdx, %rax      // compare_value
      testq    %rcx, %rcx      // MP test
      je       1f
      lock
1:    cmpxchgq %rdi, (%rsi)
      .end

  // Support for OrderAccess::acquire()
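  // A dummy load from the top of the stack; on x86 every load already has
  // acquire semantics, so an ordinary read is sufficient.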
      .inline _OrderAccess_acquire,0
      movl     0(%rsp), %eax
      .end

  // Support for OrderAccess::fence()
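  // A locked add of zero to the top of the stack acts as a full memory
  // barrier (including StoreLoad) on x86.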
      .inline _OrderAccess_fence,0
      lock
      addl     $0, (%rsp)
      .end

  // Support for u2 Bytes::swap_u2(u2 x)
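  // %di = x; rotating the 16-bit value by 8 bits swaps its two bytes, and the
  // result is returned in %ax.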
      .inline _raw_swap_u2,1
      movw     %di, %ax
      rorw     $8, %ax
      .end

  // Support for u4 Bytes::swap_u4(u4 x)
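  // %edi = x; bswap reverses the byte order of the 32-bit value, and the
  // result is returned in %eax.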
      .inline _raw_swap_u4,1
      movl     %edi, %eax
      bswapl   %eax
      .end

  // Support for u8 Bytes::swap_u8(u8 x)
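  // %rdi = x; bswap reverses the byte order of the 64-bit value, and the
  // result is returned in %rax.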
      .inline _raw_swap_u8,1
      movq     %rdi, %rax
      bswapq   %rax
      .end

  // Support for void Prefetch::read
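  // %rdi = address, %rsi = byte offset; prefetcht0 fetches the cache line
  // containing (address + offset) into all levels of the cache hierarchy.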
      .inline _Prefetch_read,2
      prefetcht0 (%rdi, %rsi, 1)
      .end

  // Support for void Prefetch::write
  // prefetcht0 is used because EM64T processors do not support prefetchw;
  // prefetchw is a 3DNow! instruction.
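  // Register usage is the same as _Prefetch_read above.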
      .inline _Prefetch_write,2
      prefetcht0 (%rdi, %rsi, 1)
      .end