< prev index next >

src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp

Print this page
rev 12906 : [mq]: gc_interface


   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"

  27 #include "interpreter/bytecodeHistogram.hpp"
  28 #include "interpreter/interp_masm.hpp"
  29 #include "interpreter/interpreter.hpp"
  30 #include "interpreter/interpreterRuntime.hpp"
  31 #include "interpreter/templateInterpreterGenerator.hpp"
  32 #include "interpreter/templateTable.hpp"
  33 #include "oops/arrayOop.hpp"
  34 #include "oops/methodData.hpp"
  35 #include "oops/method.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "prims/jvmtiExport.hpp"
  38 #include "prims/jvmtiThreadState.hpp"
  39 #include "runtime/arguments.hpp"
  40 #include "runtime/deoptimization.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "runtime/synchronizer.hpp"
  45 #include "runtime/timer.hpp"
  46 #include "runtime/vframeArray.hpp"


 675   }
 676 
 677   __ movptr(rdx, Address(rbx, Method::const_offset()));
 678   __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
 679   __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
 680   __ push(rdx); // set constant pool cache
 681   __ push(rlocals); // set locals pointer
 682   if (native_call) {
 683     __ push(0); // no bcp
 684   } else {
 685     __ push(rbcp); // set bcp
 686   }
 687   __ push(0); // reserve word for pointer to expression stack bottom
 688   __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
 689 }
 690 
 691 // End of helpers
 692 
 693 // Method entry for java.lang.ref.Reference.get.
 694 address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
 695 #if INCLUDE_ALL_GCS
 696   // Code: _aload_0, _getfield, _areturn
 697   // parameter size = 1
 698   //
 699   // The code that gets generated by this routine is split into 2 parts:
 700   //    1. The "intrinsified" code for G1 (or any SATB based GC),
 701   //    2. The slow path - which is an expansion of the regular method entry.
 702   //
 703   // Notes:-
 704   // * In the G1 code we do not check whether we need to block for
 705   //   a safepoint. If G1 is enabled then we must execute the specialized
 706   //   code for Reference.get (except when the Reference object is null)
 707   //   so that we can log the value in the referent field with an SATB
 708   //   update buffer.
 709   //   If the code for the getfield template is modified so that the
 710   //   G1 pre-barrier code is executed when the current method is
 711   //   Reference.get() then going through the normal method entry
 712   //   will be fine.
 713   // * The G1 code can, however, check the receiver object (the instance
 714   //   of java.lang.Reference) and jump to the slow path if null. If the
 715   //   Reference object is null then we obviously cannot fetch the referent
 716   //   and so we don't need to call the G1 pre-barrier. Thus we can use the
 717   //   regular method entry code to generate the NPE.
 718   //
 719   // rbx: Method*
 720 
 721   // r13: senderSP must preserve for slow path, set SP to it on fast path
 722 
 723   address entry = __ pc();
 724 
 725   const int referent_offset = java_lang_ref_Reference::referent_offset;
      // The offset is computed during VM startup; a zero value would mean we
      // are generating this stub before java.lang.ref.Reference was parsed.
 726   guarantee(referent_offset > 0, "referent offset not initialized");
 727 
 728   if (UseG1GC) {
 729     Label slow_path;
 730     // rbx: method
 731 
 732     // Check if local 0 != NULL
 733     // If the receiver is null then it is OK to jump to the slow path.
 734     __ movptr(rax, Address(rsp, wordSize));
 735 
 736     __ testptr(rax, rax);
 737     __ jcc(Assembler::zero, slow_path);
 738 
 739     // rax: local 0
 740     // rbx: method (but can be used as scratch now)
 741     // rdx: scratch
 742     // rdi: scratch
 743 
 744     // Preserve the sender sp in case the pre-barrier
 745     // calls the runtime
      // (fast path only: the slow path branched off before this push, so the
      // stack stays balanced on both paths)
 746     NOT_LP64(__ push(rsi));
 747 
 748     // Generate the G1 pre-barrier code to log the value of
 749     // the referent field in an SATB buffer.
 750 
 751     // Load the value of the referent field.
 752     const Address field_address(rax, referent_offset);
 753     __ load_heap_oop(rax, field_address);
 754 
 755     const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
 756     const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
 757     NOT_LP64(__ get_thread(thread));
 758 
 759     // Generate the G1 pre-barrier code to log the value of
 760     // the referent field in an SATB buffer.
      // Note: clobbers rbx (passed as tmp); rax must stay live across the
      // barrier since it carries the return value (tosca_live == true).
 761     __ g1_write_barrier_pre(noreg /* obj */,
 762                             rax /* pre_val */,
 763                             thread /* thread */,
 764                             rbx /* tmp */,
 765                             true /* tosca_live */,
 766                             true /* expand_call */);
 767 
 768     // _areturn

 769     NOT_LP64(__ pop(rsi));      // get sender sp
 770     __ pop(rdi);                // get return address
 771     __ mov(rsp, sender_sp);     // set sp to sender sp
 772     __ jmp(rdi);
      // NOTE(review): never reached — the jmp above is unconditional; this ret
      // appears to be dead generated code.
 773     __ ret(0);
 774 
 775     // generate a vanilla interpreter entry as the slow path
 776     __ bind(slow_path);
 777     __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
 778     return entry;
 779   }
 780 #endif // INCLUDE_ALL_GCS
 781 
 782   // If G1 is not enabled then attempt to go through the accessor entry point
 783   // Reference.get is an accessor
      // Returning NULL tells the caller no specialized entry was generated.
 784   return NULL;
 785 }
 786 
 787 void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
 788   // Quick & dirty stack overflow checking: bang the stack & handle trap.
 789   // Note that we do the banging after the frame is setup, since the exception
 790   // handling code expects to find a valid interpreter frame on the stack.
 791   // Doing the banging earlier fails if the caller frame is not an interpreter
 792   // frame.
 793   // (Also, the exception throwing code expects to unlock any synchronized
  794  // method receiver, so do the banging after locking the receiver.)
 795 
 796   // Bang each page in the shadow zone. We can't assume it's been done for
 797   // an interpreter frame with greater than a page of locals, so each page
 798   // needs to be checked.  Only true for non-native.
 799   if (UseStackBanging) {
 800     const int page_size = os::vm_page_size();
 801     const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
 802     const int start_page = native_call ? n_shadow_pages : 1;
 803     for (int pages = start_page; pages <= n_shadow_pages; pages++) {
 804       __ bang_stack_with_offset(pages*page_size);




   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "gc/shared/barrierSetCodeGen.hpp"
  28 #include "interpreter/bytecodeHistogram.hpp"
  29 #include "interpreter/interp_masm.hpp"
  30 #include "interpreter/interpreter.hpp"
  31 #include "interpreter/interpreterRuntime.hpp"
  32 #include "interpreter/templateInterpreterGenerator.hpp"
  33 #include "interpreter/templateTable.hpp"
  34 #include "oops/arrayOop.hpp"
  35 #include "oops/methodData.hpp"
  36 #include "oops/method.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "prims/jvmtiExport.hpp"
  39 #include "prims/jvmtiThreadState.hpp"
  40 #include "runtime/arguments.hpp"
  41 #include "runtime/deoptimization.hpp"
  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/stubRoutines.hpp"
  45 #include "runtime/synchronizer.hpp"
  46 #include "runtime/timer.hpp"
  47 #include "runtime/vframeArray.hpp"


 676   }
 677 
 678   __ movptr(rdx, Address(rbx, Method::const_offset()));
 679   __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
 680   __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
 681   __ push(rdx); // set constant pool cache
 682   __ push(rlocals); // set locals pointer
 683   if (native_call) {
 684     __ push(0); // no bcp
 685   } else {
 686     __ push(rbcp); // set bcp
 687   }
 688   __ push(0); // reserve word for pointer to expression stack bottom
 689   __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
 690 }
 691 
 692 // End of helpers
 693 
 694 // Method entry for java.lang.ref.Reference.get.
 695 address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {

 696   // Code: _aload_0, _getfield, _areturn
 697   // parameter size = 1
 698   //
 699   // The code that gets generated by this routine is split into 2 parts:
 700   //    1. The "intrinsified" code for G1 (or any SATB based GC),
 701   //    2. The slow path - which is an expansion of the regular method entry.
 702   //
 703   // Notes:-
 704   // * In the G1 code we do not check whether we need to block for
 705   //   a safepoint. If G1 is enabled then we must execute the specialized
 706   //   code for Reference.get (except when the Reference object is null)
 707   //   so that we can log the value in the referent field with an SATB
 708   //   update buffer.
 709   //   If the code for the getfield template is modified so that the
 710   //   G1 pre-barrier code is executed when the current method is
 711   //   Reference.get() then going through the normal method entry
 712   //   will be fine.
 713   // * The G1 code can, however, check the receiver object (the instance
 714   //   of java.lang.Reference) and jump to the slow path if null. If the
 715   //   Reference object is null then we obviously cannot fetch the referent
 716   //   and so we don't need to call the G1 pre-barrier. Thus we can use the
 717   //   regular method entry code to generate the NPE.
 718   //
 719   // rbx: Method*
 720 
 721   // r13: senderSP must preserve for slow path, set SP to it on fast path
 722 
 723   address entry = __ pc();
 724 
 725   const int referent_offset = java_lang_ref_Reference::referent_offset;
      // The offset is computed during VM startup; a zero value would mean we
      // are generating this stub before java.lang.ref.Reference was parsed.
 726   guarantee(referent_offset > 0, "referent offset not initialized");
 727 

      // This entry is now emitted unconditionally; GC-specific barrier code is
      // delegated to BarrierSetCodeGen below rather than guarded by UseG1GC.
 728   Label slow_path;
 729   // rbx: method
 730 
 731   // Check if local 0 != NULL
 732   // If the receiver is null then it is OK to jump to the slow path.
 733   __ movptr(rax, Address(rsp, wordSize));
 734 
 735   __ testptr(rax, rax);
 736   __ jcc(Assembler::zero, slow_path);
 737 
 738   // rax: local 0
 739   // rbx: method (but can be used as scratch now)
 740   // rdx: scratch
 741   // rdi: scratch
 742 
 743   // Preserve the sender sp in case the pre-barrier
 744   // calls the runtime
      // (fast path only: the slow path branched off before this push, so the
      // stack stays balanced on both paths)
 745   NOT_LP64(__ push(rsi));
 746 



 747   // Load the value of the referent field.
 748   const Address field_address(rax, referent_offset);
      // Fetch the referent through the GC interface: the active heap's
      // BarrierSetCodeGen emits whatever load barrier the collector requires
      // (e.g. an SATB pre-barrier for G1). GC_ACCESS_ON_WEAK marks this as a
      // read of a weak-reference field. Clobbers rbx and rdx as temps; rax
      // receives the loaded referent.
 749   BarrierSetCodeGen *code_gen = Universe::heap()->barrier_set()->code_gen();
 750   code_gen->load_at(_masm, ACCESS_ON_HEAP | GC_ACCESS_ON_WEAK, T_OBJECT, rax, field_address, /*tmp1*/ rbx, /*tmp2*/ rdx);












 751 
 752   // _areturn
 753   const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
 754   NOT_LP64(__ pop(rsi));      // get sender sp
 755   __ pop(rdi);                // get return address
 756   __ mov(rsp, sender_sp);     // set sp to sender sp
 757   __ jmp(rdi);
      // NOTE(review): never reached — the jmp above is unconditional; this ret
      // appears to be dead generated code.
 758   __ ret(0);
 759 
 760   // generate a vanilla interpreter entry as the slow path
 761   __ bind(slow_path);
 762   __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
 763   return entry;






 764 }
 765 
 766 void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
 767   // Quick & dirty stack overflow checking: bang the stack & handle trap.
 768   // Note that we do the banging after the frame is setup, since the exception
 769   // handling code expects to find a valid interpreter frame on the stack.
 770   // Doing the banging earlier fails if the caller frame is not an interpreter
 771   // frame.
 772   // (Also, the exception throwing code expects to unlock any synchronized
  773  // method receiver, so do the banging after locking the receiver.)
 774 
 775   // Bang each page in the shadow zone. We can't assume it's been done for
 776   // an interpreter frame with greater than a page of locals, so each page
 777   // needs to be checked.  Only true for non-native.
 778   if (UseStackBanging) {
 779     const int page_size = os::vm_page_size();
 780     const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
 781     const int start_page = native_call ? n_shadow_pages : 1;
 782     for (int pages = start_page; pages <= n_shadow_pages; pages++) {
 783       __ bang_stack_with_offset(pages*page_size);


< prev index next >