< prev index next >

src/share/vm/c1/c1_Runtime1.cpp

Print this page




  43 #include "gc/shared/collectedHeap.hpp"
  44 #include "interpreter/bytecode.hpp"
  45 #include "interpreter/interpreter.hpp"
  46 #include "memory/allocation.inline.hpp"
  47 #include "memory/oopFactory.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "oops/objArrayKlass.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "runtime/atomic.inline.hpp"
  52 #include "runtime/biasedLocking.hpp"
  53 #include "runtime/compilationPolicy.hpp"
  54 #include "runtime/interfaceSupport.hpp"
  55 #include "runtime/javaCalls.hpp"
  56 #include "runtime/sharedRuntime.hpp"
  57 #include "runtime/threadCritical.hpp"
  58 #include "runtime/vframe.hpp"
  59 #include "runtime/vframeArray.hpp"
  60 #include "runtime/vm_version.hpp"
  61 #include "utilities/copy.hpp"
  62 #include "utilities/events.hpp"

  63 
  64 
  65 // Implementation of StubAssembler
  66 
  67 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  68   _name = name;
  69   _must_gc_arguments = false;
  70   _frame_size = no_frame_size;
  71   _num_rt_args = 0;
  72   _stub_id = stub_id;
  73 }
  74 
  75 
  76 void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  77   _name = name;
  78   _must_gc_arguments = must_gc_arguments;
  79 }
  80 
  81 
  82 void StubAssembler::set_frame_size(int size) {


 187   OopMapSet* oop_maps;
 188   int frame_size;
 189   bool must_gc_arguments;
 190 
 191   if (!CodeCacheExtensions::skip_compiler_support()) {
 192     // bypass useless code generation
 193     Compilation::setup_code_buffer(&code, 0);
 194 
 195     // create assembler for code generation
 196     StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
 197     // generate code for runtime stub
 198     oop_maps = generate_code_for(id, sasm);
 199     assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
 200            "if stub has an oop map it must have a valid frame size");
 201 
 202 #ifdef ASSERT
 203     // Make sure that stubs that need oopmaps have them
 204     switch (id) {
 205       // These stubs don't need to have an oopmap
 206     case dtrace_object_alloc_id:

 207     case g1_pre_barrier_slow_id:
 208     case g1_post_barrier_slow_id:
 209     case slow_subtype_check_id:
 210     case fpu2long_stub_id:
 211     case unwind_exception_id:
 212     case counter_overflow_id:
 213 #if defined(SPARC) || defined(PPC)
 214     case handle_exception_nofpu_id:  // Unused on sparc
 215 #endif
 216       break;
 217 
 218       // All other stubs should have oopmaps
 219     default:
 220       assert(oop_maps != NULL, "must have an oopmap");
 221     }
 222 #endif
 223 
 224     // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
 225     sasm->align(BytesPerWord);
 226     // make sure all code is in code buffer


 300   FUNCTION_CASE(entry, SharedRuntime::drem);
 301   FUNCTION_CASE(entry, SharedRuntime::dsin);
 302   FUNCTION_CASE(entry, SharedRuntime::dtan);
 303   FUNCTION_CASE(entry, SharedRuntime::f2i);
 304   FUNCTION_CASE(entry, SharedRuntime::f2l);
 305   FUNCTION_CASE(entry, SharedRuntime::frem);
 306   FUNCTION_CASE(entry, SharedRuntime::l2d);
 307   FUNCTION_CASE(entry, SharedRuntime::l2f);
 308   FUNCTION_CASE(entry, SharedRuntime::ldiv);
 309   FUNCTION_CASE(entry, SharedRuntime::lmul);
 310   FUNCTION_CASE(entry, SharedRuntime::lrem);
 311   FUNCTION_CASE(entry, SharedRuntime::lrem);
 312   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
 313   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
 314   FUNCTION_CASE(entry, is_instance_of);
 315   FUNCTION_CASE(entry, trace_block_entry);
 316 #ifdef TRACE_HAVE_INTRINSICS
 317   FUNCTION_CASE(entry, TRACE_TIME_METHOD);
 318 #endif
 319   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());

 320 
 321 #undef FUNCTION_CASE
 322 
 323   // Soft float adds more runtime names.
 324   return pd_name_for_address(entry);
 325 }
 326 
 327 
// Slow-path allocation entry for compiled C1 code: verifies that 'klass'
// may be instantiated, ensures it is initialized, allocates the instance,
// and hands it back to compiled code through the thread-local vm_result
// slot.  Each CHECK returns to the caller immediately if an exception is
// pending (e.g. from class initialization or a failed allocation).
 328 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, Klass* klass))
 329   NOT_PRODUCT(_new_instance_slowcase_cnt++;)
 330 
 331   assert(klass->is_klass(), "not a class");
     // Handle-ize the klass: the calls below can trigger safepoints/GC.
 332   instanceKlassHandle h(thread, klass);
     // Throws if the klass cannot be instantiated (abstract, interface, ...).
 333   h->check_valid_for_instantiation(true, CHECK);
 334   // make sure klass is initialized
 335   h->initialize(CHECK);
 336   // allocate instance and return via TLS
 337   oop obj = h->allocate_instance(CHECK);
 338   thread->set_vm_result(obj);
 339 JRT_END


 654   ResourceMark rm(thread);
 655   char* message = SharedRuntime::generate_class_cast_message(
 656     thread, object->klass()->external_name());
 657   SharedRuntime::throw_and_post_jvmti_exception(
 658     thread, vmSymbols::java_lang_ClassCastException(), message);
 659 JRT_END
 660 
 661 
// Runtime entry used by compiled code to raise (and post to JVMTI) an
// IncompatibleClassChangeError.  Bumps a diagnostic counter in
// non-product builds.
 662 JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* thread))
 663   NOT_PRODUCT(_throw_incompatible_class_change_error_count++;)
     // ResourceMark scopes any resource-area allocation done while the
     // exception is constructed and posted.
 664   ResourceMark rm(thread);
 665   SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError());
 666 JRT_END
 667 
 668 
// Slow-path monitor enter for compiled code.  'obj' is the object being
// locked; 'lock' is the BasicObjectLock slot in the caller's frame.
// NOTE: JRT_ENTRY_NO_ASYNC — presumably this suppresses async exception
// installation on entry; confirm against interfaceSupport.hpp.
 669 JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock))
 670   NOT_PRODUCT(_monitorenter_slowcase_cnt++;)
 671   if (PrintBiasedLockingStatistics) {
 672     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
 673   }
     // Handle-ize the object: the locking paths below may safepoint.
 674   Handle h_obj(thread, obj);
 675   assert(h_obj()->is_oop(), "must be NULL or an object");
 676   if (UseBiasedLocking) {
 677     // Retry fast entry if bias is revoked to avoid unnecessary inflation
 678     ObjectSynchronizer::fast_enter(h_obj, lock->lock(), true, CHECK);
 679   } else {
 680     if (UseFastLocking) {
 681       // When using fast locking, the compiled code has already tried the fast case
 682       assert(obj == lock->obj(), "must match");
 683       ObjectSynchronizer::slow_enter(h_obj, lock->lock(), THREAD);
 684     } else {
       // Without fast locking the compiled code did not store the object
       // into the lock slot, so record it here before entering.
 685       lock->set_obj(obj);
 686       ObjectSynchronizer::fast_enter(h_obj, lock->lock(), false, THREAD);
 687     }
 688   }
 689 JRT_END
 690 
 691 
// Slow-path monitor exit for compiled code.  Declared JRT_LEAF: it must
// not block or safepoint, and no exception may escape (EXCEPTION_MARK
// below enforces that any pending exception is a fatal error).
 692 JRT_LEAF(void, Runtime1::monitorexit(JavaThread* thread, BasicObjectLock* lock))
 693   NOT_PRODUCT(_monitorexit_slowcase_cnt++;)
 694   assert(thread == JavaThread::current(), "threads must correspond");
 695   assert(thread->last_Java_sp(), "last_Java_sp must be set");
 696   // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
 697   EXCEPTION_MARK;
 698 
 699   oop obj = lock->obj();
 700   assert(obj->is_oop(), "must be NULL or an object");
 701   if (UseFastLocking) {
 702     // When using fast locking, the compiled code has already tried the fast case
 703     ObjectSynchronizer::slow_exit(obj, lock->lock(), THREAD);
 704   } else {
 705     ObjectSynchronizer::fast_exit(obj, lock->lock(), THREAD);
 706   }
 707 JRT_END
 708 
 709 // Cf. OptoRuntime::deoptimize_caller_frame
 710 JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread, jint trap_request))
 711   // Called from within the owner thread, so no need for safepoint
 712   RegisterMap reg_map(thread, false);
 713   frame stub_frame = thread->last_frame();
 714   assert(stub_frame.is_runtime_frame(), "Sanity check");
 715   frame caller_frame = stub_frame.sender(&reg_map);
 716   nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
 717   assert(nm != NULL, "Sanity check");
 718   methodHandle method(thread, nm->method());
 719   assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");


1389       bs->write_ref_array((HeapWord*)dst_addr, length);
1390       return ac_ok;
1391     }
1392   }
1393   return ac_failed;
1394 }
1395 
1396 // fast and direct copy of arrays; returning -1, means that an exception may be thrown
1397 // and we did not copy anything
1398 JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
1399 #ifndef PRODUCT
1400   _generic_arraycopy_cnt++;        // Slow-path oop array copy
1401 #endif
1402 
1403   if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
1404   if (!dst->is_array() || !src->is_array()) return ac_failed;
1405   if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
1406   if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;
1407 
1408   if (length == 0) return ac_ok;




1409   if (src->is_typeArray()) {
1410     Klass* klass_oop = src->klass();
1411     if (klass_oop != dst->klass()) return ac_failed;
1412     TypeArrayKlass* klass = TypeArrayKlass::cast(klass_oop);
1413     const int l2es = klass->log2_element_size();
1414     const int ihs = klass->array_header_in_bytes() / wordSize;
1415     char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es);
1416     char* dst_addr = (char*) ((oopDesc**)dst + ihs) + (dst_pos << l2es);
1417     // Potential problem: memmove is not guaranteed to be word atomic
1418     // Revisit in Merlin
1419     memmove(dst_addr, src_addr, length << l2es);
1420     return ac_ok;
1421   } else if (src->is_objArray() && dst->is_objArray()) {
1422     if (UseCompressedOops) {
1423       narrowOop *src_addr  = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
1424       narrowOop *dst_addr  = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
1425       return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
1426     } else {
1427       oop *src_addr  = objArrayOop(src)->obj_at_addr<oop>(src_pos);
1428       oop *dst_addr  = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);




  43 #include "gc/shared/collectedHeap.hpp"
  44 #include "interpreter/bytecode.hpp"
  45 #include "interpreter/interpreter.hpp"
  46 #include "memory/allocation.inline.hpp"
  47 #include "memory/oopFactory.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "oops/objArrayKlass.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "runtime/atomic.inline.hpp"
  52 #include "runtime/biasedLocking.hpp"
  53 #include "runtime/compilationPolicy.hpp"
  54 #include "runtime/interfaceSupport.hpp"
  55 #include "runtime/javaCalls.hpp"
  56 #include "runtime/sharedRuntime.hpp"
  57 #include "runtime/threadCritical.hpp"
  58 #include "runtime/vframe.hpp"
  59 #include "runtime/vframeArray.hpp"
  60 #include "runtime/vm_version.hpp"
  61 #include "utilities/copy.hpp"
  62 #include "utilities/events.hpp"
  63 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  64 
  65 
  66 // Implementation of StubAssembler
  67 
  68 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  69   _name = name;
  70   _must_gc_arguments = false;
  71   _frame_size = no_frame_size;
  72   _num_rt_args = 0;
  73   _stub_id = stub_id;
  74 }
  75 
  76 
  77 void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  78   _name = name;
  79   _must_gc_arguments = must_gc_arguments;
  80 }
  81 
  82 
  83 void StubAssembler::set_frame_size(int size) {


 188   OopMapSet* oop_maps;
 189   int frame_size;
 190   bool must_gc_arguments;
 191 
 192   if (!CodeCacheExtensions::skip_compiler_support()) {
 193     // bypass useless code generation
 194     Compilation::setup_code_buffer(&code, 0);
 195 
 196     // create assembler for code generation
 197     StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
 198     // generate code for runtime stub
 199     oop_maps = generate_code_for(id, sasm);
 200     assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
 201            "if stub has an oop map it must have a valid frame size");
 202 
 203 #ifdef ASSERT
 204     // Make sure that stubs that need oopmaps have them
 205     switch (id) {
 206       // These stubs don't need to have an oopmap
 207     case dtrace_object_alloc_id:
 208     case shenandoah_write_barrier_slow_id:
 209     case g1_pre_barrier_slow_id:
 210     case g1_post_barrier_slow_id:
 211     case slow_subtype_check_id:
 212     case fpu2long_stub_id:
 213     case unwind_exception_id:
 214     case counter_overflow_id:
 215 #if defined(SPARC) || defined(PPC)
 216     case handle_exception_nofpu_id:  // Unused on sparc
 217 #endif
 218       break;
 219 
 220       // All other stubs should have oopmaps
 221     default:
 222       assert(oop_maps != NULL, "must have an oopmap");
 223     }
 224 #endif
 225 
 226     // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
 227     sasm->align(BytesPerWord);
 228     // make sure all code is in code buffer


 302   FUNCTION_CASE(entry, SharedRuntime::drem);
 303   FUNCTION_CASE(entry, SharedRuntime::dsin);
 304   FUNCTION_CASE(entry, SharedRuntime::dtan);
 305   FUNCTION_CASE(entry, SharedRuntime::f2i);
 306   FUNCTION_CASE(entry, SharedRuntime::f2l);
 307   FUNCTION_CASE(entry, SharedRuntime::frem);
 308   FUNCTION_CASE(entry, SharedRuntime::l2d);
 309   FUNCTION_CASE(entry, SharedRuntime::l2f);
 310   FUNCTION_CASE(entry, SharedRuntime::ldiv);
 311   FUNCTION_CASE(entry, SharedRuntime::lmul);
 312   FUNCTION_CASE(entry, SharedRuntime::lrem);
 313   FUNCTION_CASE(entry, SharedRuntime::lrem);
 314   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
 315   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
 316   FUNCTION_CASE(entry, is_instance_of);
 317   FUNCTION_CASE(entry, trace_block_entry);
 318 #ifdef TRACE_HAVE_INTRINSICS
 319   FUNCTION_CASE(entry, TRACE_TIME_METHOD);
 320 #endif
 321   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
 322   FUNCTION_CASE(entry, ShenandoahBarrierSet::resolve_and_maybe_copy_oop_c1);
 323 
 324 #undef FUNCTION_CASE
 325 
 326   // Soft float adds more runtime names.
 327   return pd_name_for_address(entry);
 328 }
 329 
 330 
// Slow-path allocation entry for compiled C1 code: verifies that 'klass'
// may be instantiated, ensures it is initialized, allocates the instance,
// and hands it back to compiled code through the thread-local vm_result
// slot.  Each CHECK returns to the caller immediately if an exception is
// pending (e.g. from class initialization or a failed allocation).
 331 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, Klass* klass))
 332   NOT_PRODUCT(_new_instance_slowcase_cnt++;)
 333 
 334   assert(klass->is_klass(), "not a class");
     // Handle-ize the klass: the calls below can trigger safepoints/GC.
 335   instanceKlassHandle h(thread, klass);
     // Throws if the klass cannot be instantiated (abstract, interface, ...).
 336   h->check_valid_for_instantiation(true, CHECK);
 337   // make sure klass is initialized
 338   h->initialize(CHECK);
 339   // allocate instance and return via TLS
 340   oop obj = h->allocate_instance(CHECK);
 341   thread->set_vm_result(obj);
 342 JRT_END


 657   ResourceMark rm(thread);
 658   char* message = SharedRuntime::generate_class_cast_message(
 659     thread, object->klass()->external_name());
 660   SharedRuntime::throw_and_post_jvmti_exception(
 661     thread, vmSymbols::java_lang_ClassCastException(), message);
 662 JRT_END
 663 
 664 
// Runtime entry used by compiled code to raise (and post to JVMTI) an
// IncompatibleClassChangeError.  Bumps a diagnostic counter in
// non-product builds.
 665 JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* thread))
 666   NOT_PRODUCT(_throw_incompatible_class_change_error_count++;)
     // ResourceMark scopes any resource-area allocation done while the
     // exception is constructed and posted.
 667   ResourceMark rm(thread);
 668   SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError());
 669 JRT_END
 670 
 671 
 672 JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock))
 673   NOT_PRODUCT(_monitorenter_slowcase_cnt++;)
 674   if (PrintBiasedLockingStatistics) {
 675     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
 676   }
 677 Handle h_obj(thread, oopDesc::bs()->write_barrier(obj));
 678   assert(h_obj()->is_oop(), "must be NULL or an object");
 679   if (UseBiasedLocking) {
 680     // Retry fast entry if bias is revoked to avoid unnecessary inflation
 681     ObjectSynchronizer::fast_enter(h_obj, lock->lock(), true, CHECK);
 682   } else {
 683     if (UseFastLocking) {
 684       // When using fast locking, the compiled code has already tried the fast case
 685       assert(obj == lock->obj(), "must match");
 686       ObjectSynchronizer::slow_enter(h_obj, lock->lock(), THREAD);
 687     } else {
 688       lock->set_obj(obj);
 689       ObjectSynchronizer::fast_enter(h_obj, lock->lock(), false, THREAD);
 690     }
 691   }
 692 JRT_END
 693 
 694 
// Slow-path monitor exit for compiled code.  Declared JRT_LEAF: it must
// not block or safepoint, and no exception may escape (EXCEPTION_MARK
// below enforces that any pending exception is a fatal error).
 695 JRT_LEAF(void, Runtime1::monitorexit(JavaThread* thread, BasicObjectLock* lock))
 696   NOT_PRODUCT(_monitorexit_slowcase_cnt++;)
 697   assert(thread == JavaThread::current(), "threads must correspond");
 698   assert(thread->last_Java_sp(), "last_Java_sp must be set");
 699   // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
 700   EXCEPTION_MARK;
 701 
     // NOTE(review): resolves the stored object through the GC write barrier
     // before unlocking — presumably so the header update hits the canonical
     // copy; confirm the read-vs-write barrier choice against the barrier set.
 702   oop obj = oopDesc::bs()->write_barrier(lock->obj());
 703   assert(obj->is_oop(), "must be NULL or an object");
 704   if (UseFastLocking) {
 705     // When using fast locking, the compiled code has already tried the fast case
 706     ObjectSynchronizer::slow_exit(obj, lock->lock(), THREAD);
 707   } else {
 708     ObjectSynchronizer::fast_exit(obj, lock->lock(), THREAD);
 709   }
 710 JRT_END
 711 
 712 // Cf. OptoRuntime::deoptimize_caller_frame
 713 JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread, jint trap_request))
 714   // Called from within the owner thread, so no need for safepoint
 715   RegisterMap reg_map(thread, false);
 716   frame stub_frame = thread->last_frame();
 717   assert(stub_frame.is_runtime_frame(), "Sanity check");
 718   frame caller_frame = stub_frame.sender(&reg_map);
 719   nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
 720   assert(nm != NULL, "Sanity check");
 721   methodHandle method(thread, nm->method());
 722   assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");


1392       bs->write_ref_array((HeapWord*)dst_addr, length);
1393       return ac_ok;
1394     }
1395   }
1396   return ac_failed;
1397 }
1398 
1399 // fast and direct copy of arrays; returning -1, means that an exception may be thrown
1400 // and we did not copy anything
1401 JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
1402 #ifndef PRODUCT
1403   _generic_arraycopy_cnt++;        // Slow-path oop array copy
1404 #endif
1405 
1406   if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
1407   if (!dst->is_array() || !src->is_array()) return ac_failed;
1408   if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
1409   if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;
1410 
1411   if (length == 0) return ac_ok;
1412 
1413   oopDesc::bs()->read_barrier(src);
1414   oopDesc::bs()->write_barrier(dst);
1415 
1416   if (src->is_typeArray()) {
1417     Klass* klass_oop = src->klass();
1418     if (klass_oop != dst->klass()) return ac_failed;
1419     TypeArrayKlass* klass = TypeArrayKlass::cast(klass_oop);
1420     const int l2es = klass->log2_element_size();
1421     const int ihs = klass->array_header_in_bytes() / wordSize;
1422     char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es);
1423     char* dst_addr = (char*) ((oopDesc**)dst + ihs) + (dst_pos << l2es);
1424     // Potential problem: memmove is not guaranteed to be word atomic
1425     // Revisit in Merlin
1426     memmove(dst_addr, src_addr, length << l2es);
1427     return ac_ok;
1428   } else if (src->is_objArray() && dst->is_objArray()) {
1429     if (UseCompressedOops) {
1430       narrowOop *src_addr  = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
1431       narrowOop *dst_addr  = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
1432       return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
1433     } else {
1434       oop *src_addr  = objArrayOop(src)->obj_at_addr<oop>(src_pos);
1435       oop *dst_addr  = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);


< prev index next >