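// Returns a printable name for an nmethod lifecycle state: a method starts
// as not_installed while being constructed, runs as in_use (or is parked as
// not_used), is made not_entrant when marked for deoptimization, and ends
// up zombie or unloaded once it can be reclaimed.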
const char* CompiledMethod::state(int state) {
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------
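// Flags this method for deoptimization; inc_recompile_counts selects whether
// recompile counters should be updated when it is invalidated. Passing NULL
// to the MutexLocker when the current thread already owns CompiledMethod_lock
// turns the locker into a no-op, avoiding self-deadlock on this non-reentrant
// lock; _no_safepoint_check_flag acquires it without a safepoint check.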
void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) {
  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
                 Mutex::_no_safepoint_check_flag);
  _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
}

//-----------------------------------------------------------------------------

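// Reads the head of the exception cache list with acquire semantics. This
// pairs with the release-store performed when a new entry is published, so
// a reader that observes a new head also observes that entry's initialized
// fields and next pointer.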
ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return OrderAccess::load_acquire(&_exception_cache);
}
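// Prepends new_entry to the singly-linked exception cache list. Insertions
// are serialized by ExceptionCache_lock, but entries with dead klasses may
// be unlinked concurrently by the GC, so the loop below re-reads the head
// until the new entry can be published with a live next pointer.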
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches, that are not removed due
        // to concurrent ExceptionCache cleanup.