
src/cpu/ppc/vm/templateInterpreter_ppc.cpp

rev 8109 : 8077838: Recent developments for ppc.
Reviewed-by: kvn
   1 /*
   2  * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright 2013, 2014 SAP AG. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *


 247 //       so we have a 'sticky' overflow test.
 248 //
 249 void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
 250   // Note: In tiered compilation we increment the counters either in the method or in the MDO, depending on whether we're profiling.
 251   Register Rscratch1   = R11_scratch1;
 252   Register Rscratch2   = R12_scratch2;
 253   Register R3_counters = R3_ARG1;
 254   Label done;
 255 
 256   if (TieredCompilation) {
 257     const int increment = InvocationCounter::count_increment;
 258     const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
 259     Label no_mdo;
 260     if (ProfileInterpreter) {
 261       const Register Rmdo = Rscratch1;
 262       // If no method data exists, go to profile_continue.
 263       __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
 264       __ cmpdi(CCR0, Rmdo, 0);
 265       __ beq(CCR0, no_mdo);
 266 
 267       // Increment invocation counter in the MDO.
 268       const int mdo_ic_offs = in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
 269       __ lwz(Rscratch2, mdo_ic_offs, Rmdo);
 270       __ addi(Rscratch2, Rscratch2, increment);
 271       __ stw(Rscratch2, mdo_ic_offs, Rmdo);
 272       __ load_const_optimized(Rscratch1, mask, R0);
 273       __ and_(Rscratch1, Rscratch2, Rscratch1);
 274       __ bne(CCR0, done);
 275       __ b(*overflow);
 276     }
 277 
 278     // Increment counter in MethodCounters*.
 279     const int mo_ic_offs = in_bytes(MethodCounters::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
 280     __ bind(no_mdo);
 281     __ get_method_counters(R19_method, R3_counters, done);
 282     __ lwz(Rscratch2, mo_ic_offs, R3_counters);
 283     __ addi(Rscratch2, Rscratch2, increment);
 284     __ stw(Rscratch2, mo_ic_offs, R3_counters);
 285     __ load_const_optimized(Rscratch1, mask, R0);
 286     __ and_(Rscratch1, Rscratch2, Rscratch1);
 287     __ beq(CCR0, *overflow);
 288 
 289     __ bind(done);
 290 
 291   } else {
 292 
 293     // Update standard invocation counters.
 294     Register Rsum_ivc_bec = R4_ARG2;
 295     __ get_method_counters(R19_method, R3_counters, done);
 296     __ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
 297     // Increment interpreter invocation counter.
 298     if (ProfileInterpreter) {  // %%% Merge this into methodDataOop.
 299       __ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
 300       __ addi(R12_scratch2, R12_scratch2, 1);
 301       __ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
 302     }
 303     // Check if we must create a method data obj.
 304     if (ProfileInterpreter && profile_method != NULL) {
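
The masked test emitted above (load_const_optimized of the notification mask, and_, then a conditional branch) takes the overflow path only when the incremented counter's notification bits wrap to zero, i.e. roughly once every 2^Tier0InvokeNotifyFreqLog invocations. A standalone sketch of that arithmetic, using illustrative constants rather than the real InvocationCounter values:

#include <cstdio>

int main() {
  // Illustrative stand-ins; the real values come from InvocationCounter and
  // the Tier0InvokeNotifyFreqLog flag.
  const int count_shift = 3;                      // low bits reserved for state/carry
  const int increment   = 1 << count_shift;       // analogue of InvocationCounter::count_increment
  const int notify_log  = 7;                      // analogue of Tier0InvokeNotifyFreqLog
  const int mask        = ((1 << notify_log) - 1) << count_shift;

  int counter = 0;
  for (int i = 1; i <= 1000; i++) {
    counter += increment;                         // the addi above
    if ((counter & mask) == 0) {                  // the and_/branch above
      printf("overflow/notify path taken at invocation %d\n", i);  // every 128 invocations here
    }
  }
  return 0;
}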


 594   __ std(R11_scratch1, _ijava_state_neg(ijava_reserved), R1_SP);
 595   __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
 596   __ std(R12_scratch2, _ijava_state_neg(lresult), R1_SP);
 597   __ std(R12_scratch2, _ijava_state_neg(fresult), R1_SP);
 598 #endif
 599   __ subf(R12_scratch2, top_frame_size, R1_SP);
 600   __ std(R0, _ijava_state_neg(oop_tmp), R1_SP);
 601   __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);
 602 
 603   // Push top frame.
 604   __ push_frame(top_frame_size, R11_scratch1);
 605 }
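
The subf/std pair above records in the caller's ijava state area where the stack pointer will sit once the new top frame has been pushed. A minimal sketch of that bookkeeping, under the assumption that push_frame simply lowers the stack pointer by top_frame_size (the stack grows downwards):

#include <cstdint>

// Hypothetical helper mirroring the sequence above; not the real frame code.
uintptr_t record_and_push_top_frame(uintptr_t sp, uintptr_t top_frame_size,
                                    uintptr_t* top_frame_sp_slot) {
  *top_frame_sp_slot = sp - top_frame_size;  // subf R12_scratch2, top_frame_size, R1_SP; std ...
  return sp - top_frame_size;                // new R1_SP after push_frame
}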
 606 
 607 // End of helpers
 608 
 609 
 610 // Support abs and sqrt like in the compiler.
 611 // For others we can use a normal (native) entry.
 612 
 613 inline bool math_entry_available(AbstractInterpreter::MethodKind kind) {
 614   // Provide math entry with debugging on demand.
 615   // Note: Debugging changes which code gets executed:
 616   // Debugging or disabled InlineIntrinsics: the Java method is interpreted and performs a native call.
 617   // Not debugging and InlineIntrinsics enabled: the processor instruction is used directly.
 618   // The result might differ slightly due to rounding etc.
 619   if (!InlineIntrinsics && (!FLAG_IS_ERGO(InlineIntrinsics))) return false; // Generate a vanilla entry.
 620 
 621   return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
 622           (kind==Interpreter::java_lang_math_abs));
 623 }
 624 
 625 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
 626   if (!math_entry_available(kind)) {
 627     NOT_PRODUCT(__ should_not_reach_here();)
 628     return Interpreter::entry_for_kind(Interpreter::zerolocals);
 629   }
 630 
 631   Label Lslow_path;
 632   const Register Rjvmti_mode = R11_scratch1;
 633   address entry = __ pc();
 634 
 635   // Provide math entry with debugging on demand.
 636   __ lwz(Rjvmti_mode, thread_(interp_only_mode));
 637   __ cmpwi(CCR0, Rjvmti_mode, 0);
 638   __ bne(CCR0, Lslow_path); // jvmti_mode!=0
 639 
 640   __ lfd(F1_RET, Interpreter::stackElementSize, R15_esp);
 641 
 642   // Pop c2i arguments (if any) off when we return.
 643 #ifdef ASSERT
 644   __ ld(R9_ARG7, 0, R1_SP);
 645   __ ld(R10_ARG8, 0, R21_sender_SP);
 646   __ cmpd(CCR0, R9_ARG7, R10_ARG8);
 647   __ asm_assert_eq("backlink", 0x545);
 648 #endif // ASSERT
 649   __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
 650 
 651   if (kind == Interpreter::java_lang_math_sqrt) {
 652     __ fsqrt(F1_RET, F1_RET);
 653   } else if (kind == Interpreter::java_lang_math_abs) {
 654     __ fabs(F1_RET, F1_RET);
 655   } else {
 656     ShouldNotReachHere();
 657   }
 658 
 659   // And we're done.
 660   __ blr();
 661 
 662   // Provide slow path for JVMTI case.
 663   __ bind(Lslow_path);
 664   __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R12_scratch2);
 665   __ flush();
 666 
 667   return entry;
 668 }
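
Apart from the JVMTI slow path, the effect of the generated stub can be summarized in plain C++. This is only an analogue of what the stub computes; the real code reads its argument from the interpreter expression stack at R15_esp, cuts the stack back to R21_sender_SP and returns the result in F1_RET:

#include <cmath>

// 'is_sqrt' stands in for kind == Interpreter::java_lang_math_sqrt.
double math_entry_result(bool is_sqrt, double arg) {
  return is_sqrt ? std::sqrt(arg) : std::fabs(arg);
}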
 669 
 670 // Interpreter stub for calling a native method. (asm interpreter)
 671 // This sets up a somewhat different looking stack for calling the
 672 // native method than the typical interpreter frame setup.
 673 //
 674 // On entry:
 675 //   R19_method    - method
 676 //   R16_thread    - JavaThread*
 677 //   R15_esp       - intptr_t* sender tos
 678 //
 679 //   abstract stack (grows up)
 680 //     [  IJava (caller of JNI callee)  ]  <-- ASP
 681 //        ...
 682 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
 683 
 684   address entry = __ pc();


   1 /*
   2  * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright 2013, 2015 SAP AG. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *


 247 //       so we have a 'sticky' overflow test.
 248 //
 249 void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
 250   // Note: In tiered compilation we increment the counters either in the method or in the MDO, depending on whether we're profiling.
 251   Register Rscratch1   = R11_scratch1;
 252   Register Rscratch2   = R12_scratch2;
 253   Register R3_counters = R3_ARG1;
 254   Label done;
 255 
 256   if (TieredCompilation) {
 257     const int increment = InvocationCounter::count_increment;
 258     const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
 259     Label no_mdo;
 260     if (ProfileInterpreter) {
 261       const Register Rmdo = Rscratch1;
 262       // If no method data exists, go to profile_continue.
 263       __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
 264       __ cmpdi(CCR0, Rmdo, 0);
 265       __ beq(CCR0, no_mdo);
 266 
 267       // Increment backedge counter in the MDO.
 268       const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
 269       __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
 270       __ addi(Rscratch2, Rscratch2, increment);
 271       __ stw(Rscratch2, mdo_bc_offs, Rmdo);
 272       __ load_const_optimized(Rscratch1, mask, R0);
 273       __ and_(Rscratch1, Rscratch2, Rscratch1);
 274       __ bne(CCR0, done);
 275       __ b(*overflow);
 276     }
 277 
 278     // Increment counter in MethodCounters*.
 279     const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
 280     __ bind(no_mdo);
 281     __ get_method_counters(R19_method, R3_counters, done);
 282     __ lwz(Rscratch2, mo_bc_offs, R3_counters);
 283     __ addi(Rscratch2, Rscratch2, increment);
 284     __ stw(Rscratch2, mo_bc_offs, R3_counters);
 285     __ load_const_optimized(Rscratch1, mask, R0);
 286     __ and_(Rscratch1, Rscratch2, Rscratch1);
 287     __ beq(CCR0, *overflow);
 288 
 289     __ bind(done);
 290 
 291   } else {
 292 
 293     // Update standard invocation counters.
 294     Register Rsum_ivc_bec = R4_ARG2;
 295     __ get_method_counters(R19_method, R3_counters, done);
 296     __ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
 297     // Increment interpreter invocation counter.
 298     if (ProfileInterpreter) {  // %%% Merge this into methodDataOop.
 299       __ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
 300       __ addi(R12_scratch2, R12_scratch2, 1);
 301       __ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
 302     }
 303     // Check if we must create a method data obj.
 304     if (ProfileInterpreter && profile_method != NULL) {
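
get_method_counters, used above, is expected to obtain the method's MethodCounters lazily and to branch to 'done' when none can be allocated, so counting is simply skipped in that case. A hedged C++ sketch of that pattern, with illustrative types rather than the real MacroAssembler/InterpreterRuntime interfaces:

#include <new>

struct MethodCounters { int invocation_counter; int backedge_counter; };
struct Method         { MethodCounters* counters; };

// Returns null on allocation failure, in which case the caller skips counting
// (the assembly branches to 'done').
MethodCounters* get_or_build_method_counters(Method* m) {
  if (m->counters == nullptr) {
    m->counters = new (std::nothrow) MethodCounters{0, 0};
  }
  return m->counters;
}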


 594   __ std(R11_scratch1, _ijava_state_neg(ijava_reserved), R1_SP);
 595   __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
 596   __ std(R12_scratch2, _ijava_state_neg(lresult), R1_SP);
 597   __ std(R12_scratch2, _ijava_state_neg(fresult), R1_SP);
 598 #endif
 599   __ subf(R12_scratch2, top_frame_size, R1_SP);
 600   __ std(R0, _ijava_state_neg(oop_tmp), R1_SP);
 601   __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);
 602 
 603   // Push top frame.
 604   __ push_frame(top_frame_size, R11_scratch1);
 605 }
 606 
 607 // End of helpers
 608 
 609 
 610 // Support abs and sqrt like in the compiler.
 611 // For others we can use a normal (native) entry.
 612 
 613 inline bool math_entry_available(AbstractInterpreter::MethodKind kind) {
 614   if (!InlineIntrinsics) return false;





 615 
 616   return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
 617           (kind==Interpreter::java_lang_math_abs));
 618 }
 619 
 620 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
 621   if (!math_entry_available(kind)) {
 622     NOT_PRODUCT(__ should_not_reach_here();)
 623     return Interpreter::entry_for_kind(Interpreter::zerolocals);
 624   }
 625 


 626   address entry = __ pc();
 627 





 628   __ lfd(F1_RET, Interpreter::stackElementSize, R15_esp);
 629 
 630   // Pop c2i arguments (if any) off when we return.
 631 #ifdef ASSERT
 632   __ ld(R9_ARG7, 0, R1_SP);
 633   __ ld(R10_ARG8, 0, R21_sender_SP);
 634   __ cmpd(CCR0, R9_ARG7, R10_ARG8);
 635   __ asm_assert_eq("backlink", 0x545);
 636 #endif // ASSERT
 637   __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
 638 
 639   if (kind == Interpreter::java_lang_math_sqrt) {
 640     __ fsqrt(F1_RET, F1_RET);
 641   } else if (kind == Interpreter::java_lang_math_abs) {
 642     __ fabs(F1_RET, F1_RET);
 643   } else {
 644     ShouldNotReachHere();
 645   }
 646 
 647   // And we're done.
 648   __ blr();
 649 



 650   __ flush();
 651 
 652   return entry;
 653 }
 654 
 655 // Interpreter stub for calling a native method. (asm interpreter)
 656 // This sets up a somewhat different looking stack for calling the
 657 // native method than the typical interpreter frame setup.
 658 //
 659 // On entry:
 660 //   R19_method    - method
 661 //   R16_thread    - JavaThread*
 662 //   R15_esp       - intptr_t* sender tos
 663 //
 664 //   abstract stack (grows up)
 665 //     [  IJava (caller of JNI callee)  ]  <-- ASP
 666 //        ...
 667 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
 668 
 669   address entry = __ pc();

