
src/hotspot/cpu/ppc/stubGenerator_ppc.cpp

rev 50788 : 8205609: [PPC64] Fix PPC64 part of 8010319 and TLH without UseSIGTRAP on AIX
Reviewed-by: dholmes

--- old/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
 708     }
 709     ++ StubRoutines::_verify_oop_count;
 710   }
 711 #endif
 712 
 713   // Return address of code to be called from code generated by
 714   // MacroAssembler::verify_oop.
 715   //
 716   // Don't generate a stub; use C++ code instead.
 717   address generate_verify_oop() {
 718     // this is actually a `FunctionDescriptor*'.
 719     address start = 0;
 720 
 721 #if !defined(PRODUCT)
 722     start = CAST_FROM_FN_PTR(address, verify_oop_helper);
 723 #endif
 724 
 725     return start;
 726   }
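
For context: the truncated lines 708-710 at the top of this hunk are the tail of verify_oop_helper, the C++ function whose address CAST_FROM_FN_PTR converts above. Assuming the usual HotSpot idioms, the helper has roughly this shape (a sketch, not part of the diff):

    // Called from code emitted by MacroAssembler::verify_oop.
    static void verify_oop_helper(const char* message, oop o) {
      if (!oopDesc::is_oop_or_null(o)) {
        fatal("%s. oop: " INTPTR_FORMAT, message, p2i(o));  // bad oop: print the message and abort
      }
      ++ StubRoutines::_verify_oop_count;                   // the counter incremented in lines 708-710
    }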
 727 
 728   // Fairer handling of safepoints for native methods.
 729   //
 730   // Generate code which reads from the polling page. This special handling is needed because the
 731   // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64-bit mode
 732   // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try
 733   // to read from the safepoint polling page.
 734   address generate_load_from_poll() {
 735     StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
 736     address start = __ function_entry();
 737     __ unimplemented("StubRoutines::verify_oop", 95);  // TODO PPC port
 738     return start;
 739   }
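
The body above is a placeholder: the stub is unimplemented, and the message string passed to unimplemented() is apparently copied from the verify_oop stub. For illustration only, a minimal sketch of what a load-from-poll stub could emit on PPC64, assuming the polling-page address is passed in R3_ARG1:

    __ ld(R0, 0, R3_ARG1);   // touch the polling page; this load faults when a safepoint is pending
    __ blr();                // return to the native wrapper

Routing the poll through a stub puts the faulting load at a known pc, so the signal handler can recognize a safepoint poll even when the pre-2.6.6 kernel leaves si_addr unset.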
 740 
 741   // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
 742   //
 743   // The code is implemented (ported from SPARC) in the belief that it benefits JVM98; however,
 744   // tracing (-XX:+TraceOptimizeFill) shows that the intrinsic replacement doesn't happen at all!
 745   //
 746   // The source of is_range_check_if() shows that OptimizeFill relaxes the condition for
 747   // turning on loop predication, which can influence the "array range check" and
 748   // "loop invariant check" optimizations and may be what boosted JVM98.
 749   //
 750   // Generate stub for disjoint short fill. If "aligned" is true, the
 751   // "to" address is assumed to be heapword aligned.
 752   //
 753   // Arguments for generated stub:
 754   //   to:    R3_ARG1
 755   //   value: R4_ARG2
 756   //   count: R5_ARG3 treated as signed
 757   //
 758   address generate_fill(BasicType t, bool aligned, const char* name) {
 759     StubCodeMark mark(this, "StubRoutines", name);

+++ new/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
 708     }
 709     ++ StubRoutines::_verify_oop_count;
 710   }
 711 #endif
 712 
 713   // Return address of code to be called from code generated by
 714   // MacroAssembler::verify_oop.
 715   //
 716   // Don't generate a stub; use C++ code instead.
 717   address generate_verify_oop() {
 718     // this is actually a `FunctionDescriptor*'.
 719     address start = 0;
 720 
 721 #if !defined(PRODUCT)
 722     start = CAST_FROM_FN_PTR(address, verify_oop_helper);
 723 #endif
 724 
 725     return start;
 726   }
 727 
 728 
 729   // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
 730   //
 731   // The code is implemented (ported from SPARC) in the belief that it benefits JVM98; however,
 732   // tracing (-XX:+TraceOptimizeFill) shows that the intrinsic replacement doesn't happen at all!
 733   //
 734   // The source of is_range_check_if() shows that OptimizeFill relaxes the condition for
 735   // turning on loop predication, which can influence the "array range check" and
 736   // "loop invariant check" optimizations and may be what boosted JVM98.
 737   //
 738   // Generate stub for disjoint short fill. If "aligned" is true, the
 739   // "to" address is assumed to be heapword aligned.
 740   //
 741   // Arguments for generated stub:
 742   //   to:    R3_ARG1
 743   //   value: R4_ARG2
 744   //   count: R5_ARG3 treated as signed
 745   //
 746   address generate_fill(BasicType t, bool aligned, const char* name) {
 747     StubCodeMark mark(this, "StubRoutines", name);
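
For reference, a hedged C++ model of the contract this stub implements for t == T_SHORT (the real code emits PPC assembly, with wider stores when "aligned" is true; fill_short is a hypothetical name, not part of this file):

    #include <cstdint>

    // Store the low 16 bits of 'value' into each of 'count' jshorts at 'to'.
    static void fill_short(int16_t* to, int32_t value, int64_t count) {
      const int16_t v = static_cast<int16_t>(value);  // value arrives in R4_ARG2
      for (int64_t i = 0; i < count; i++) {           // count (R5_ARG3) is treated as signed
        to[i] = v;                                    // to arrives in R3_ARG1
      }
    }

This is the loop shape that -XX:+OptimizeFill aims to recognize and replace with a call to the generated stub.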

