src/hotspot/share/code/compiledMethod.cpp

@@ -96,41 +96,41 @@
   case zombie:
     return "zombie";
   case unloaded:
     return "unloaded";
   default:
     fatal("unexpected method state: %d", state);
     return NULL;
   }
 }
 
 //-----------------------------------------------------------------------------
 void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) {
   MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
                  Mutex::_no_safepoint_check_flag);
   _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
 }
 
 //-----------------------------------------------------------------------------
 
 ExceptionCache* CompiledMethod::exception_cache_acquire() const {
-  return OrderAccess::load_acquire(&_exception_cache);
+  return Atomic::load_acquire(&_exception_cache);
 }
 
 void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
   assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
   assert(new_entry != NULL,"Must be non null");
   assert(new_entry->next() == NULL, "Must be null");
 
   for (;;) {
     ExceptionCache *ec = exception_cache();
     if (ec != NULL) {
       Klass* ex_klass = ec->exception_type();
       if (!ex_klass->is_loader_alive()) {
         // We must guarantee that entries are not inserted with new next pointer
         // edges to ExceptionCache entries with dead klasses, due to bad interactions
         // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
         // the head pointer forward to the first live ExceptionCache, so that the new
         // next pointers always point at live ExceptionCaches, that are not removed due
         // to concurrent ExceptionCache cleanup.
         ExceptionCache* next = ec->next();
         if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {

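The only functional content of this hunk is the rename on line 116: the acquire load of the exception cache head moves from the OrderAccess class to Atomic, with unchanged semantics. An acquire load on the reader side is typically paired with a release-ordered publication on the writer side, so a reader that observes a newly linked head also observes that entry's initialized fields. A minimal standalone sketch of that pairing, using plain std::atomic rather than HotSpot's Atomic API (Entry, head, publish, and observe are illustrative names, not from the patch):

#include <atomic>

struct Entry {
  Entry* next;
};

std::atomic<Entry*> head{nullptr};

// Writer: initialize the entry fully, then publish it with release
// semantics so its fields are visible to acquire readers.
void publish(Entry* e, Entry* current_head) {
  e->next = current_head;                    // initialize before publishing
  head.store(e, std::memory_order_release);  // release: publish the entry
}

// Reader: the acquire load pairs with the release store above, so
// dereferencing the returned pointer observes the initialized fields.
Entry* observe() {
  return head.load(std::memory_order_acquire);
}
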
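The comment block in add_exception_cache_entry explains why insertion first rolls the head past entries whose Klass is dead: a newly linked entry must never gain a next edge to a node that concurrent cleanup may unlink. A rough sketch of that head-rolling CAS loop under assumed names (Node, alive, list_head, and insert are illustrative; HotSpot's Atomic::cmpxchg(new_value, dest, compare_value), as called at line 136 above, is modeled here with std::atomic::compare_exchange_strong):

#include <atomic>

struct Node {
  std::atomic<Node*> next{nullptr};
  bool alive = true;  // stand-in for ex_klass->is_loader_alive()
};

std::atomic<Node*> list_head{nullptr};

void insert(Node* fresh) {
  for (;;) {
    Node* head = list_head.load(std::memory_order_acquire);
    if (head != nullptr && !head->alive) {
      // Roll the head past a dead entry before linking, so the new node
      // never points at a node that concurrent cleanup could remove. If
      // the CAS fails, another thread moved the head first; either way,
      // re-read the head and retry.
      Node* next = head->next.load(std::memory_order_relaxed);
      list_head.compare_exchange_strong(head, next);
      continue;
    }
    fresh->next.store(head, std::memory_order_relaxed);
    // A release CAS publishes the fully initialized node as the new head;
    // on failure the head changed concurrently, so loop and try again.
    if (list_head.compare_exchange_strong(head, fresh,
                                          std::memory_order_release,
                                          std::memory_order_relaxed)) {
      return;
    }
  }
}
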

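One more idiom worth noting from the hunk: mark_for_deoptimization takes CompiledMethod_lock only when the current thread does not already own it, by passing NULL to MutexLocker to make the guard a no-op. A reentrancy-tolerant guard in the same spirit could be sketched as follows (MaybeLocker is an illustrative stand-in, not HotSpot's MutexLocker):

#include <mutex>

// Scoped guard that accepts nullptr to mean "the lock is already held,
// do nothing", mirroring the owned_by_self() ? NULL : lock idiom above.
class MaybeLocker {
  std::mutex* _m;
public:
  explicit MaybeLocker(std::mutex* m) : _m(m) {
    if (_m != nullptr) _m->lock();
  }
  ~MaybeLocker() {
    if (_m != nullptr) _m->unlock();
  }
  MaybeLocker(const MaybeLocker&) = delete;
  MaybeLocker& operator=(const MaybeLocker&) = delete;
};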