
src/cpu/x86/vm/stubGenerator_x86_64.cpp

rev 13267 : [mq]: Atomic_polishing
   1 /*
   2  * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 530     // make sure exception is set
 531     {
 532       Label L;
 533       __ testptr(rax, rax);
 534       __ jcc(Assembler::notEqual, L);
 535       __ stop("StubRoutines::forward exception: no pending exception (2)");
 536       __ bind(L);
 537     }
 538 #endif
 539 
 540     // continue at exception handler (return address removed)
 541     // rax: exception
 542     // rbx: exception handler
 543     // rdx: throwing pc
 544     __ verify_oop(rax);
 545     __ jmp(rbx);
 546 
 547     return start;
 548   }
 549 
 550   // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
 551   //
 552   // Arguments :
 553   //    c_rarg0: exchange_value
 554   //    c_rarg1: dest
 555   //
 556   // Result:
 557   //    *dest <- ex, return (orig *dest)
 558   address generate_atomic_xchg() {
 559     StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
 560     address start = __ pc();
 561 
 562     __ movl(rax, c_rarg0); // Copy to eax; we need a return value anyhow
 563     __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
 564     __ ret(0);
 565 
 566     return start;
 567   }
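
The stub above is a single xchgl: on x86, an xchg with a memory operand is implicitly LOCKed, so no explicit lock prefix is needed, and the old contents of *dest are left in rax as the return value. As a reading aid, here is a minimal C++ sketch of the same contract using GCC/Clang __atomic builtins; the name atomic_xchg_equivalent is illustrative only and not part of HotSpot.

    #include <cstdint>

    // Illustrative sketch only (not HotSpot code): the semantics the
    // atomic_xchg stub implements with a single implicitly-LOCKed xchgl.
    int32_t atomic_xchg_equivalent(int32_t exchange_value, volatile int32_t* dest) {
      // Atomically store exchange_value into *dest and return the previous value.
      return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
    }

The movl into rax before the xchgl is what turns the old value of *dest into the function's return value under the System V calling convention.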
 568 
 569   // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
 570   //
 571   // Arguments :
 572   //    c_rarg0: exchange_value
 573   //    c_rarg1: dest
 574   //
 575   // Result:
 576   //    *dest <- ex, return (orig *dest)
 577   address generate_atomic_xchg_ptr() {
 578     StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
 579     address start = __ pc();
 580 
 581     __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
 582     __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
 583     __ ret(0);
 584 
 585     return start;
 586   }
 587 
 588   // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
 589   //                                         jint compare_value)
 590   //
 591   // Arguments :
 592   //    c_rarg0: exchange_value
 593   //    c_rarg1: dest
 594   //    c_rarg2: compare_value
 595   //
 596   // Result:
 597   //    if (compare_value == *dest) {
 598   //       *dest = exchange_value;
 599   //       return compare_value;
 600   //    } else
 601   //       return *dest;
 602   address generate_atomic_cmpxchg() {
 603     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
 604     address start = __ pc();
 605 
 606     __ movl(rax, c_rarg2);
 607     if (os::is_MP()) __ lock();
 608     __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
 609     __ ret(0);
 610 
 611     return start;
 612   }
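
The cmpxchgl above uses rax as the implicit comparand: compare_value is moved into rax, and after the (LOCKed on MP systems) compare-and-exchange, rax holds the old value of *dest, which equals compare_value on success and the current contents of *dest on failure, exactly what the stub returns. A minimal C++ sketch of that contract, assuming GCC/Clang __atomic builtins (the function name is illustrative, not HotSpot's):

    #include <cstdint>

    // Illustrative sketch only (not HotSpot code): what `lock cmpxchgl`
    // computes.  `expected` plays the role of rax.
    int32_t atomic_cmpxchg_equivalent(int32_t exchange_value,
                                      volatile int32_t* dest,
                                      int32_t compare_value) {
      int32_t expected = compare_value;               // movl rax, c_rarg2
      __atomic_compare_exchange_n(dest, &expected, exchange_value,
                                  /*weak=*/false,
                                  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
      return expected;                                // rax after cmpxchgl
    }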
 613 
 614   // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
 615   //                                          jbyte compare_value)
 616   //
 617   // Arguments :
 618   //    c_rarg0: exchange_value
 619   //    c_rarg1: dest
 620   //    c_rarg2: compare_value
 621   //
 622   // Result:
 623   //    if (compare_value == *dest) {
 624   //       *dest = exchange_value;
 625   //       return compare_value;
 626   //    } else
 627   //       return *dest;
 628   address generate_atomic_cmpxchg_byte() {
 629     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
 630     address start = __ pc();
 631 
 632     __ movsbq(rax, c_rarg2);
 633     if (os::is_MP()) __ lock();
 634     __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
 635     __ ret(0);
 636 
 637     return start;
 638   }
 639 
 640   // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
 641   //                                          volatile jlong* dest,
 642   //                                          jlong compare_value)
 643   // Arguments :
 644   //    c_rarg0: exchange_value
 645   //    c_rarg1: dest
 646   //    c_rarg2: compare_value
 647   //
 648   // Result:
 649   //    if (compare_value == *dest) {
 650   //       *dest = exchange_value;
 651   //       return compare_value;
 652   //    } else
 653   //       return *dest;
 654   address generate_atomic_cmpxchg_long() {
 655     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
 656     address start = __ pc();
 657 
 658     __ movq(rax, c_rarg2);
 659     if (os::is_MP()) __ lock();
 660     __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
 661     __ ret(0);
 662 
 663     return start;
 664   }
 665 
 666   // Support for jint atomic::add(jint add_value, volatile jint* dest)
 667   //
 668   // Arguments :
 669   //    c_rarg0: add_value
 670   //    c_rarg1: dest
 671   //
 672   // Result:
 673   //    *dest += add_value
 674   //    return *dest;
 675   address generate_atomic_add() {
 676     StubCodeMark mark(this, "StubRoutines", "atomic_add");
 677     address start = __ pc();
 678 
 679     __ movl(rax, c_rarg0);
 680     if (os::is_MP()) __ lock();
 681     __ xaddl(Address(c_rarg1, 0), c_rarg0);
 682     __ addl(rax, c_rarg0);
 683     __ ret(0);
 684 
 685     return start;
 686   }
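
Note the post-processing after the xaddl: lock xadd stores old + add_value into *dest but writes the old value back into the source register, so the stub then adds the old value (left in c_rarg0 by xadd) to the copy of add_value saved in rax, producing the updated *dest that Atomic::add is documented to return. A sketch of the same arithmetic with a builtin (illustrative name, not HotSpot code):

    #include <cstdint>

    // Illustrative sketch only (not HotSpot code): `lock xadd` returns the
    // old value, so add_value is added back in to return the updated *dest.
    int32_t atomic_add_equivalent(int32_t add_value, volatile int32_t* dest) {
      int32_t old = __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST); // lock xaddl
      return old + add_value;                                              // addl rax, c_rarg0
    }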
 687 
 688   // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
 689   //
 690   // Arguments :
 691   //    c_rarg0: add_value
 692   //    c_rarg1: dest
 693   //
 694   // Result:
 695   //    *dest += add_value
 696   //    return *dest;
 697   address generate_atomic_add_ptr() {
 698     StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
 699     address start = __ pc();
 700 
 701     __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
 702     if (os::is_MP()) __ lock();
 703     __ xaddptr(Address(c_rarg1, 0), c_rarg0);
 704     __ addptr(rax, c_rarg0);
 705     __ ret(0);
 706 
 707     return start;
 708   }


   1 /*
   2  * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 530     // make sure exception is set
 531     {
 532       Label L;
 533       __ testptr(rax, rax);
 534       __ jcc(Assembler::notEqual, L);
 535       __ stop("StubRoutines::forward exception: no pending exception (2)");
 536       __ bind(L);
 537     }
 538 #endif
 539 
 540     // continue at exception handler (return address removed)
 541     // rax: exception
 542     // rbx: exception handler
 543     // rdx: throwing pc
 544     __ verify_oop(rax);
 545     __ jmp(rbx);
 546 
 547     return start;
 548   }
 549 
 550   // Support for int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest)
 551   //
 552   // Arguments :
 553   //    c_rarg0: exchange_value
 554   //    c_rarg1: dest
 555   //
 556   // Result:
 557   //    *dest <- ex, return (orig *dest)
 558   address generate_atomic_xchg() {
 559     StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
 560     address start = __ pc();
 561 
 562     __ movl(rax, c_rarg0); // Copy to eax; we need a return value anyhow
 563     __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
 564     __ ret(0);
 565 
 566     return start;
 567   }
 568 
 569   // Support for intptr_t Atomic::specialized_xchg(intptr_t exchange_value, volatile intptr_t* dest)
 570   //
 571   // Arguments :
 572   //    c_rarg0: exchange_value
 573   //    c_rarg1: dest
 574   //
 575   // Result:
 576   //    *dest <- ex, return (orig *dest)
 577   address generate_atomic_xchg_ptr() {
 578     StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
 579     address start = __ pc();
 580 
 581     __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
 582     __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
 583     __ ret(0);
 584 
 585     return start;
 586   }
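
The pointer-width variant follows the same pattern at 64 bits: on x86_64, intptr_t is 8 bytes, so xchgptr emits a 64-bit xchg, which is implicitly LOCKed just like the 32-bit form. A short sketch of the same contract (illustrative name, GCC/Clang builtins assumed):

    #include <cstdint>

    // Illustrative sketch only (not HotSpot code): 64-bit exchange with the
    // same implicit LOCK semantics as the 32-bit stub.
    intptr_t atomic_xchg_ptr_equivalent(intptr_t exchange_value, volatile intptr_t* dest) {
      return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
    }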
 587 
 588   // Support for int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest,
 589   //                                                 int32_t compare_value)
 590   //
 591   // Arguments :
 592   //    c_rarg0: exchange_value
 593   //    c_rarg1: dest
 594   //    c_rarg2: compare_value
 595   //
 596   // Result:
 597   //    if (compare_value == *dest) {
 598   //       *dest = exchange_value;
 599   //       return compare_value;
 600   //    } else
 601   //       return *dest;
 602   address generate_atomic_cmpxchg() {
 603     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
 604     address start = __ pc();
 605 
 606     __ movl(rax, c_rarg2);
 607     if (os::is_MP()) __ lock();
 608     __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
 609     __ ret(0);
 610 
 611     return start;
 612   }
 613 
 614   // Support for int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
 615   //                                                int8_t compare_value)
 616   //
 617   // Arguments :
 618   //    c_rarg0: exchange_value
 619   //    c_rarg1: dest
 620   //    c_rarg2: compare_value
 621   //
 622   // Result:
 623   //    if (compare_value == *dest) {
 624   //       *dest = exchange_value;
 625   //       return compare_value;
 626   //    } else
 627   //       return *dest;
 628   address generate_atomic_cmpxchg_byte() {
 629     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
 630     address start = __ pc();
 631 
 632     __ movsbq(rax, c_rarg2);
 633     if (os::is_MP()) __ lock();
 634     __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
 635     __ ret(0);
 636 
 637     return start;
 638   }
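
The byte variant differs only in operand width: movsbq sign-extends the 8-bit compare_value into rax so that al holds the comparand for cmpxchgb, and the old byte at (c_rarg1) comes back in al as the return value. A hedged sketch of the contract (illustrative name, GCC/Clang builtins assumed):

    #include <cstdint>

    // Illustrative sketch only (not HotSpot code): byte-wide CAS with the
    // same return convention as the 32- and 64-bit stubs.
    int8_t atomic_cmpxchg_byte_equivalent(int8_t exchange_value,
                                          volatile int8_t* dest,
                                          int8_t compare_value) {
      int8_t expected = compare_value;            // movsbq rax, c_rarg2 (al = comparand)
      __atomic_compare_exchange_n(dest, &expected, exchange_value,
                                  /*weak=*/false,
                                  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
      return expected;                            // compare_value on success, current *dest on failure
    }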
 639 
 640   // Support for int64_t Atomic::specialized_cmpxchg(int64_t exchange_value,
 641   //                                                 volatile int64_t* dest,
 642   //                                                 int64_t compare_value)
 643   // Arguments :
 644   //    c_rarg0: exchange_value
 645   //    c_rarg1: dest
 646   //    c_rarg2: compare_value
 647   //
 648   // Result:
 649   //    if (compare_value == *dest) {
 650   //       *dest = exchange_value;
 651   //       return compare_value;
 652   //    } else
 653   //       return *dest;
 654   address generate_atomic_cmpxchg_long() {
 655     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
 656     address start = __ pc();
 657 
 658     __ movq(rax, c_rarg2);
 659     if (os::is_MP()) __ lock();
 660     __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
 661     __ ret(0);
 662 
 663     return start;
 664   }
 665 
 666   // Support for int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest)
 667   //
 668   // Arguments :
 669   //    c_rarg0: add_value
 670   //    c_rarg1: dest
 671   //
 672   // Result:
 673   //    *dest += add_value
 674   //    return *dest;
 675   address generate_atomic_add() {
 676     StubCodeMark mark(this, "StubRoutines", "atomic_add");
 677     address start = __ pc();
 678 
 679     __ movl(rax, c_rarg0);
 680     if (os::is_MP()) __ lock();
 681     __ xaddl(Address(c_rarg1, 0), c_rarg0);
 682     __ addl(rax, c_rarg0);
 683     __ ret(0);
 684 
 685     return start;
 686   }
 687 
 688   // Support for intptr_t Atomic::specialized_add(intptr_t add_value, volatile intptr_t* dest)
 689   //
 690   // Arguments :
 691   //    c_rarg0: add_value
 692   //    c_rarg1: dest
 693   //
 694   // Result:
 695   //    *dest += add_value
 696   //    return *dest;
 697   address generate_atomic_add_ptr() {
 698     StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
 699     address start = __ pc();
 700 
 701     __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
 702     if (os::is_MP()) __ lock();
 703     __ xaddptr(Address(c_rarg1, 0), c_rarg0);
 704     __ addptr(rax, c_rarg0);
 705     __ ret(0);
 706 
 707     return start;
 708   }
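
Each of these generators returns the start address of the emitted code; elsewhere in the stub generator those addresses are recorded as StubRoutines entry points and invoked through function pointers with the signatures documented in the comments above. The snippet below is only a hypothetical illustration of that calling pattern; atomic_add_stub_t, call_atomic_add, and stub_entry are invented names, not HotSpot identifiers.

    #include <cstdint>

    // Hypothetical illustration: a generated stub address is cast to a
    // C-compatible function pointer matching the documented signature and
    // called directly.
    typedef int32_t (*atomic_add_stub_t)(int32_t add_value, volatile int32_t* dest);

    int32_t call_atomic_add(void* stub_entry, volatile int32_t* counter) {
      atomic_add_stub_t add = reinterpret_cast<atomic_add_stub_t>(stub_entry);
      return add(1, counter);   // returns the updated value of *counter
    }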

