71 case not_installed:
72 return "not installed";
73 case in_use:
74 return "in use";
75 case not_used:
76 return "not_used";
77 case not_entrant:
78 return "not_entrant";
79 case zombie:
80 return "zombie";
81 case unloaded:
82 return "unloaded";
83 default:
84 fatal("unexpected method state: %d", state);
85 return NULL;
86 }
87 }
88
89 //-----------------------------------------------------------------------------
90
91 void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
92 assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
93 assert(new_entry != NULL,"Must be non null");
94 assert(new_entry->next() == NULL, "Must be null");
95
96 ExceptionCache *ec = exception_cache();
97 if (ec != NULL) {
98 new_entry->set_next(ec);
99 }
100 release_set_exception_cache(new_entry);
101 }
102
103 void CompiledMethod::clean_exception_cache() {
104 ExceptionCache* prev = NULL;
105 ExceptionCache* curr = exception_cache();
106
107 while (curr != NULL) {
108 ExceptionCache* next = curr->next();
109
110 Klass* ex_klass = curr->exception_type();
111 if (ex_klass != NULL && !ex_klass->is_loader_alive()) {
112 if (prev == NULL) {
113 set_exception_cache(next);
114 } else {
115 prev->set_next(next);
116 }
117 delete curr;
118 // prev stays the same.
119 } else {
120 prev = curr;
121 }
122
123 curr = next;
124 }
125 }
126
127 // public method for accessing the exception cache
128 // These are the public access methods.
129 address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
130 // We never grab a lock to read the exception cache, so we may
131 // have false negatives. This is okay, as it can only happen during
132 // the first few exception lookups for a given nmethod.
133 ExceptionCache* ec = exception_cache();
134 while (ec != NULL) {
135 address ret_val;
136 if ((ret_val = ec->match(exception,pc)) != NULL) {
137 return ret_val;
138 }
139 ec = ec->next();
140 }
141 return NULL;
142 }
143
144 void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
145 // There are potential race conditions during exception cache updates, so we
146 // must own the ExceptionCache_lock before doing ANY modifications. Because
147 // we don't lock during reads, it is possible to have several threads attempt
148 // to update the cache with the same data. We need to check for already inserted
149 // copies of the current data before adding it.
150
151 MutexLocker ml(ExceptionCache_lock);
152 ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
153
154 if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
155 target_entry = new ExceptionCache(exception,pc,handler);
156 add_exception_cache_entry(target_entry);
157 }
158 }
159
160 //-------------end of code for ExceptionCache--------------
161
162 // private method for handling exception cache
163 // These methods are private, and used to manipulate the exception cache
164 // directly.
165 ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
166 ExceptionCache* ec = exception_cache();
167 while (ec != NULL) {
168 if (ec->match_exception_with_space(exception)) {
169 return ec;
170 }
171 ec = ec->next();
172 }
173 return NULL;
174 }
175
176 bool CompiledMethod::is_at_poll_return(address pc) {
177 RelocIterator iter(this, pc, pc+1);
178 while (iter.next()) {
179 if (iter.type() == relocInfo::poll_return_type)
180 return true;
181 }
182 return false;
183 }
184
185
186 bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
187 RelocIterator iter(this, pc, pc+1);
188 while (iter.next()) {
189 relocInfo::relocType t = iter.type();
190 if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
191 return true;
192 }
193 return false;
|
71 case not_installed:
72 return "not installed";
73 case in_use:
74 return "in use";
75 case not_used:
76 return "not_used";
77 case not_entrant:
78 return "not_entrant";
79 case zombie:
80 return "zombie";
81 case unloaded:
82 return "unloaded";
83 default:
84 fatal("unexpected method state: %d", state);
85 return NULL;
86 }
87 }
88
89 //-----------------------------------------------------------------------------
90
91 ExceptionCache* CompiledMethod::exception_cache_acquire() const {
92 return OrderAccess::load_acquire(&_exception_cache);
93 }
94
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  // Insert new_entry at the head of the exception cache list. Insertions are
  // serialized by ExceptionCache_lock, but they race with the lock-free head
  // updates performed by clean_exception_cache(), hence the CAS retry loop.
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches, that are not removed due
        // to concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
          // We won the race to unlink the stale head; hand it to the code
          // cache for disposal.
          CodeCache::release_exception_cache(ec);
        }
        // Whether or not the CAS succeeded, re-examine the (possibly new) head.
        continue;
      }
      // Head is live: re-read it and link new_entry in front of it.
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    // Publish new_entry as the new head; retry from scratch if concurrent
    // cleanup changed the head underneath us.
    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
      return;
    }
  }
}
127
void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is presumably also when the stale entries handed to
  // CodeCache::release_exception_cache() below get deleted (see CodeCache).
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean head; this is contended by concurrent inserts, that
        // both lazily clean the head, and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // It is impossible to during cleanup connect the next pointer to
        // an ExceptionCache that has not been published before a safepoint
        // prior to the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}
172
173 // public method for accessing the exception cache
174 // These are the public access methods.
175 address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
176 // We never grab a lock to read the exception cache, so we may
177 // have false negatives. This is okay, as it can only happen during
178 // the first few exception lookups for a given nmethod.
179 ExceptionCache* ec = exception_cache_acquire();
180 while (ec != NULL) {
181 address ret_val;
182 if ((ret_val = ec->match(exception,pc)) != NULL) {
183 return ret_val;
184 }
185 ec = ec->next();
186 }
187 return NULL;
188 }
189
190 void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
191 // There are potential race conditions during exception cache updates, so we
192 // must own the ExceptionCache_lock before doing ANY modifications. Because
193 // we don't lock during reads, it is possible to have several threads attempt
194 // to update the cache with the same data. We need to check for already inserted
195 // copies of the current data before adding it.
196
197 MutexLocker ml(ExceptionCache_lock);
198 ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
199
200 if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
201 target_entry = new ExceptionCache(exception,pc,handler);
202 add_exception_cache_entry(target_entry);
203 }
204 }
205
206 // private method for handling exception cache
207 // These methods are private, and used to manipulate the exception cache
208 // directly.
209 ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
210 ExceptionCache* ec = exception_cache_acquire();
211 while (ec != NULL) {
212 if (ec->match_exception_with_space(exception)) {
213 return ec;
214 }
215 ec = ec->next();
216 }
217 return NULL;
218 }
219
220 //-------------end of code for ExceptionCache--------------
221
222 bool CompiledMethod::is_at_poll_return(address pc) {
223 RelocIterator iter(this, pc, pc+1);
224 while (iter.next()) {
225 if (iter.type() == relocInfo::poll_return_type)
226 return true;
227 }
228 return false;
229 }
230
231
232 bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
233 RelocIterator iter(this, pc, pc+1);
234 while (iter.next()) {
235 relocInfo::relocType t = iter.type();
236 if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
237 return true;
238 }
239 return false;
|