137 *entry = (Metadata*)meta; // Should be atomic on x64
138 return (Metadata*)k;
139 } else { // Method
140 // Quick check the current method's name.
141 Method* m = _method;
142 int signature_len = build_u2_from((address)klass_name + klass_len + 2 + method_name_len);
143 int full_len = 2 + klass_len + 2 + method_name_len + 2 + signature_len;
144 if (!klass_matched || memcmp(_name, meta_name, full_len) != 0) { // Does not match?
145 Thread* thread = Thread::current();
146 const char* method_name = klass_name + klass_len;
147 m = AOTCodeHeap::find_method(k, thread, method_name);
148 }
149 meta = ((intptr_t)m) | 1;
150 *entry = (Metadata*)meta; // Should be atomic on x64
151 return (Metadata*)m;
152 }
153 }
154 ShouldNotReachHere(); return NULL;
155 }
156
157 bool AOTCompiledMethod::make_not_entrant_helper(int new_state) {
158 // Make sure the method is not flushed in case of a safepoint in code below.
159 methodHandle the_method(method());
160 NoSafepointVerifier nsv;
161
162 {
163 // Enter critical section. Does not block for safepoint.
164 MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
165
166 if (*_state_adr == new_state) {
167 // another thread already performed this transition so nothing
168 // to do, but return false to indicate this.
169 return false;
170 }
171
172 // Change state
173 OrderAccess::storestore();
174 *_state_adr = new_state;
175
|
137 *entry = (Metadata*)meta; // Should be atomic on x64
138 return (Metadata*)k;
139 } else { // Method
140 // Quick check the current method's name.
141 Method* m = _method;
142 int signature_len = build_u2_from((address)klass_name + klass_len + 2 + method_name_len);
143 int full_len = 2 + klass_len + 2 + method_name_len + 2 + signature_len;
144 if (!klass_matched || memcmp(_name, meta_name, full_len) != 0) { // Does not match?
145 Thread* thread = Thread::current();
146 const char* method_name = klass_name + klass_len;
147 m = AOTCodeHeap::find_method(k, thread, method_name);
148 }
149 meta = ((intptr_t)m) | 1;
150 *entry = (Metadata*)meta; // Should be atomic on x64
151 return (Metadata*)m;
152 }
153 }
154 ShouldNotReachHere(); return NULL;
155 }
156
// Unloading hook: delegates to the shared CompiledMethod cache-cleanup
// helper. `unloading_occurred` is forwarded unchanged so the helper can
// decide how aggressively to scrub this AOT method's nmethod caches.
// NOTE(review): exact cleanup semantics live in unload_nmethod_caches(),
// which is defined outside this view — confirm against CompiledMethod.
157 void AOTCompiledMethod::do_unloading(bool unloading_occurred) {
158   unload_nmethod_caches(unloading_occurred);
159 }
160
161 bool AOTCompiledMethod::make_not_entrant_helper(int new_state) {
162 // Make sure the method is not flushed in case of a safepoint in code below.
163 methodHandle the_method(method());
164 NoSafepointVerifier nsv;
165
166 {
167 // Enter critical section. Does not block for safepoint.
168 MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
169
170 if (*_state_adr == new_state) {
171 // another thread already performed this transition so nothing
172 // to do, but return false to indicate this.
173 return false;
174 }
175
176 // Change state
177 OrderAccess::storestore();
178 *_state_adr = new_state;
179
|