--- old/src/share/vm/code/compiledIC.cpp
+++ new/src/share/vm/code/compiledIC.cpp
1 1 /*
2 - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
2 + * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 #include "incls/_precompiled.incl"
26 26 #include "incls/_compiledIC.cpp.incl"
27 27
28 28
29 29 // Every time a compiled IC is changed or its type is being accessed,
30 30 // either the CompiledIC_lock must be held or we must be at a safepoint.
31 31
32 32 //-----------------------------------------------------------------------------
33 33 // Low-level access to an inline cache. Private, since they might not be
34 34 // MT-safe to use.
35 35
36 36 void CompiledIC::set_cached_oop(oop cache) {
37 37 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
38 38 assert (!is_optimized(), "an optimized virtual call does not have a cached oop");
39 39 assert (cache == NULL || cache != badOop, "invalid oop");
40 40
41 41 if (TraceCompiledIC) {
42 42 tty->print(" ");
43 43 print_compiled_ic();
44 44 tty->print_cr(" changing oop to " INTPTR_FORMAT, (address)cache);
45 45 }
46 46
47 47 if (cache == NULL) cache = (oop)Universe::non_oop_word();
48 48
49 49 *_oop_addr = cache;
50 50 // fix up the relocations
51 51 RelocIterator iter = _oops;
52 52 while (iter.next()) {
53 53 if (iter.type() == relocInfo::oop_type) {
54 54 oop_Relocation* r = iter.oop_reloc();
55 55 if (r->oop_addr() == _oop_addr)
56 56 r->fix_oop_relocation();
57 57 }
58 58 }
59 59 return;
60 60 }
61 61
62 62
63 63 oop CompiledIC::cached_oop() const {
64 64 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
65 65 assert (!is_optimized(), "an optimized virtual call does not have a cached oop");
66 66
67 67 if (!is_in_transition_state()) {
68 68 oop data = *_oop_addr;
69 69 // If we let the oop value here be initialized to zero...
70 70 assert(data != NULL || Universe::non_oop_word() == NULL,
71 71 "no raw nulls in CompiledIC oops, because of patching races");
72 72 return (data == (oop)Universe::non_oop_word()) ? (oop)NULL : data;
73 73 } else {
74 74 return InlineCacheBuffer::cached_oop_for((CompiledIC *)this);
75 75 }
76 76 }
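
Note: cached_oop()/set_cached_oop() above implement a sentinel pattern: a raw
NULL is never stored in the IC slot; Universe::non_oop_word() stands in for
"empty" so a reader racing with a patch never observes a half-cleared slot.
A minimal standalone sketch of the idea -- store_slot/load_slot and SENTINEL
are illustrative stand-ins, not HotSpot names:

    #include <cassert>

    typedef void* slot_t;                      // stand-in for HotSpot's oop
    static void* const SENTINEL = (void*)0x1;  // stand-in for Universe::non_oop_word()

    // Never store a raw NULL: encode "empty" as the sentinel instead.
    static inline void store_slot(slot_t* slot, slot_t v) {
      *slot = (v == 0) ? (slot_t)SENTINEL : v;
    }

    // Decode the sentinel back to NULL on the read side.
    static inline slot_t load_slot(const slot_t* slot) {
      slot_t v = *slot;
      assert(v != 0 && "no raw nulls, because of patching races");
      return (v == (slot_t)SENTINEL) ? 0 : v;
    }
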
77 77
78 78
79 79 void CompiledIC::set_ic_destination(address entry_point) {
80 80 assert(entry_point != NULL, "must set legal entry point");
81 81 assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
82 82 if (TraceCompiledIC) {
83 83 tty->print(" ");
84 84 print_compiled_ic();
85 85 tty->print_cr(" changing destination to " INTPTR_FORMAT, entry_point);
86 86 }
87 87 MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
88 88 #ifdef ASSERT
89 89 CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
90 90 assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
91 91 #endif
92 92 _ic_call->set_destination_mt_safe(entry_point);
93 93 }
94 94
95 95
96 96 address CompiledIC::ic_destination() const {
97 97 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
98 98 if (!is_in_transition_state()) {
99 99 return _ic_call->destination();
100 100 } else {
101 101 return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
102 102 }
103 103 }
104 104
105 105
106 106 bool CompiledIC::is_in_transition_state() const {
107 107 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
108 108 return InlineCacheBuffer::contains(_ic_call->destination());
109 109 }
110 110
111 111
112 112 // Returns the native address of the 'call' instruction in the inline cache. Used by
113 113 // the InlineCacheBuffer when it needs to find the stub.
114 114 address CompiledIC::stub_address() const {
115 115 assert(is_in_transition_state(), "should only be called when we are in a transition state");
116 116 return _ic_call->destination();
117 117 }
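
Note: the transition-state machinery above gives every IC read two possible
sources of truth: the call site itself, or a pending ICStub when the call's
destination currently points into the InlineCacheBuffer. A condensed
illustration of the reader side -- ICSite and its 'pending' field are
hypothetical stand-ins; the real test is InlineCacheBuffer::contains():

    struct PendingStub { void* cached_value; void* destination; };

    struct ICSite {
      void*        call_destination;  // where the call instruction points right now
      PendingStub* pending;           // non-NULL while a transition stub is in flight

      // Mirrors cached_oop()/ic_destination(): while in transition, the
      // authoritative values live in the stub, not at the call site.
      void* destination() const {
        return pending ? pending->destination : call_destination;
      }
    };
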
118 118
119 119
120 120 //-----------------------------------------------------------------------------
121 121 // High-level access to an inline cache. Guaranteed to be MT-safe.
122 122
123 123
124 124 void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
125 125 methodHandle method = call_info->selected_method();
126 126 bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
127 127 assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
128 128 assert(method->is_oop(), "cannot be NULL and must be oop");
129 129 assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
130 130 assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
131 131
132 132 address entry;
133 133 if (is_invoke_interface) {
134 134 int index = klassItable::compute_itable_index(call_info->resolved_method()());
135 135 entry = VtableStubs::create_stub(false, index, method());
136 136 assert(entry != NULL, "entry not computed");
137 137 klassOop k = call_info->resolved_method()->method_holder();
138 138 assert(Klass::cast(k)->is_interface(), "sanity check");
139 139 InlineCacheBuffer::create_transition_stub(this, k, entry);
140 140 } else {
141 141 // Can be different from method->vtable_index(), due to package-private etc.
142 142 int vtable_index = call_info->vtable_index();
143 143 entry = VtableStubs::create_stub(true, vtable_index, method());
144 144 InlineCacheBuffer::create_transition_stub(this, method(), entry);
145 145 }
146 146
147 147 if (TraceICs) {
148 148 ResourceMark rm;
149 149 tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
150 150 instruction_address(), method->print_value_string(), entry);
151 151 }
152 152
153 153 Events::log("compiledIC " INTPTR_FORMAT " --> megamorphic " INTPTR_FORMAT, this, (address)method());
154 154 // We can't check this anymore. With lazy deopt we could have already
155 155 // cleaned this IC entry before we even return. This is possible if
156 156 // we ran out of space in the inline cache buffer trying to do the
157 157 // set_next and we safepointed to free up space. This is a benign
158 158 // race because the IC entry was complete when we safepointed so
159 159 // cleaning it immediately is harmless.
160 160 // assert(is_megamorphic(), "sanity check");
161 161 }
162 162
163 163
164 164 // true if the destination is a megamorphic stub
165 165 bool CompiledIC::is_megamorphic() const {
166 166 assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
167 167 assert(!is_optimized(), "an optimized call cannot be megamorphic");
168 168
169 169 // Cannot rely on cached_oop. It is either an interface or a method.
170 170 return VtableStubs::is_entry_point(ic_destination());
171 171 }
172 172
173 173 bool CompiledIC::is_call_to_compiled() const {
174 174 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
175 175
176 176 // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
177 177 // method is guaranteed to still exist, since we only remove methods after all inline caches
178 178 // have been cleaned up
179 179 CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
180 180 bool is_monomorphic = (cb != NULL && cb->is_nmethod());
181 181 // Check that the cached_oop is a klass for non-optimized monomorphic calls
182 182 // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
183 183 // for calling directly to the verified entry point (vep) without using the inline cache (i.e., cached_oop == NULL)
184 184 #ifdef ASSERT
185 185 #ifdef TIERED
186 186 CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
187 187 bool is_c1_method = caller->is_compiled_by_c1();
188 188 #else
189 189 #ifdef COMPILER1
190 190 bool is_c1_method = true;
191 191 #else
192 192 bool is_c1_method = false;
193 193 #endif // COMPILER1
194 194 #endif // TIERED
195 195 assert( is_c1_method ||
196 196 !is_monomorphic ||
197 197 is_optimized() ||
198 198 (cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
199 199 #endif // ASSERT
200 200 return is_monomorphic;
201 201 }
202 202
203 203
204 204 bool CompiledIC::is_call_to_interpreted() const {
205 205 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
206 206 // It is a call to the interpreter if the destination is either a call to a stub (if it
207 207 // is optimized) or a call to an I2C blob
208 208 bool is_call_to_interpreted = false;
209 209 if (!is_optimized()) {
210 210 // must use unsafe because the destination can be a zombie (and we're cleaning)
211 211 // and the print_compiled_ic code wants to know if the site (in the non-zombie)
212 212 // is a call to the interpreter.
213 213 CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
214 214 is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
215 215 assert(!is_call_to_interpreted || (cached_oop() != NULL && cached_oop()->is_compiledICHolder()), "sanity check");
216 216 } else {
217 217 // Check if we are calling into our own codeblob (i.e., to a stub)
218 218 CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
219 219 address dest = ic_destination();
220 220 #ifdef ASSERT
221 221 {
222 222 CodeBlob* db = CodeCache::find_blob_unsafe(dest);
223 223 assert(!db->is_adapter_blob(), "must use stub!");
224 224 }
225 225 #endif /* ASSERT */
226 226 is_call_to_interpreted = cb->contains(dest);
227 227 }
228 228 return is_call_to_interpreted;
229 229 }
230 230
231 231
232 232 void CompiledIC::set_to_clean() {
233 233 assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked() , "MT-unsafe call");
234 234 if (TraceInlineCacheClearing || TraceICs) {
235 235 tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", instruction_address());
236 236 print();
237 237 }
238 238
239 239 address entry;
240 240 if (is_optimized()) {
241 241 entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
242 242 } else {
243 243 entry = SharedRuntime::get_resolve_virtual_call_stub();
244 244 }
245 245
246 246 // A zombie transition will always be safe, since the oop has already been set to NULL, so
247 247 // we only need to patch the destination
248 248 bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();
249 249
250 250 if (safe_transition) {
251 251 if (!is_optimized()) set_cached_oop(NULL);
252 252 // Kill any leftover stub we might have too
253 253 if (is_in_transition_state()) {
254 254 ICStub* old_stub = ICStub_from_destination_address(stub_address());
255 255 old_stub->clear();
256 256 }
257 257 set_ic_destination(entry);
258 258 } else {
259 259 // Unsafe transition - create stub.
260 260 InlineCacheBuffer::create_transition_stub(this, NULL, entry);
261 261 }
262 262 // We can't check this anymore. With lazy deopt we could have already
263 263 // cleaned this IC entry before we even return. This is possible if
264 264 // we ran out of space in the inline cache buffer trying to do the
265 265 // set_next and we safepointed to free up space. This is a benign
266 266 // race because the IC entry was complete when we safepointed so
267 267 // cleaning it immediately is harmless.
268 268 // assert(is_clean(), "sanity check");
269 269 }
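
Note: the safe_transition test above reduces to a simple rule -- in-place
patching is allowed only when the cached-oop word and the destination word
cannot be observed out of sync: either all threads are stopped (safepoint),
or there is only one word to patch (optimized calls have no cached oop).
A sketch of that predicate, not HotSpot code:

    // Safe iff compiled code can never see the (cached oop, destination)
    // pair half-updated.
    static inline bool safe_to_patch_in_place(bool is_optimized, bool at_safepoint) {
      return is_optimized      // only the destination word exists
          || at_safepoint;     // no compiled code is running concurrently
    }
    // Otherwise both words are staged in an ICStub via
    // InlineCacheBuffer::create_transition_stub() and published with one patch.
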
270 270
271 271
272 272 bool CompiledIC::is_clean() const {
273 273 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
274 274 bool is_clean = false;
275 275 address dest = ic_destination();
276 276 is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
277 277 dest == SharedRuntime::get_resolve_virtual_call_stub();
278 278 assert(!is_clean || is_optimized() || cached_oop() == NULL, "sanity check");
279 279 return is_clean;
280 280 }
281 281
282 282
283 283 void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
284 284 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
285 285 // Updating a cache to the wrong entry can cause bugs that are very hard
286 286 // to track down - if a cache entry becomes invalid, we just clean it. In
287 287 // this way it is always the same code path that is responsible for
288 288 // updating and resolving an inline cache.
289 289 //
290 290 // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
291 291 // callsites. In addition, ic_miss code will update a site to monomorphic if it determines
292 292 // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
293 293 //
294 294 // In both of these cases the only thing being modified is the jump/call target and these
295 295 // transitions are mt_safe.
296 296
297 297 Thread *thread = Thread::current();
298 298 if (info._to_interpreter) {
299 299 // Call to interpreter
300 300 if (info.is_optimized() && is_optimized()) {
301 301 assert(is_clean(), "unsafe IC path");
302 302 MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
303 303 // the call analysis (callee structure) specifies that the call is optimized
304 304 // (either because of CHA or the static target is final)
305 305 // At code generation time, this call has been emitted as static call
306 306 // Call via stub
307 307 assert(info.cached_oop().not_null() && info.cached_oop()->is_method(), "sanity check");
308 308 CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
309 309 methodHandle method (thread, (methodOop)info.cached_oop()());
310 310 csc->set_to_interpreted(method, info.entry());
311 311 if (TraceICs) {
312 312 ResourceMark rm(thread);
313 313 tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
314 314 instruction_address(),
315 315 method->print_value_string());
316 316 }
317 317 } else {
318 318 // Call via method-klass-holder
319 319 assert(info.cached_oop().not_null(), "must be set");
320 320 InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
321 321
322 322 if (TraceICs) {
323 323 ResourceMark rm(thread);
324 324 tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via mkh", instruction_address());
325 325 }
326 326 }
327 327 } else {
328 328 // Call to compiled code
329 329 bool static_bound = info.is_optimized() || (info.cached_oop().is_null());
330 330 #ifdef ASSERT
331 331 CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
332 332 assert (cb->is_nmethod(), "must be compiled!");
333 333 #endif /* ASSERT */
334 334
335 335 // This is MT safe if we come from a clean-cache and go through a
336 336 // non-verified entry point
337 337 bool safe = SafepointSynchronize::is_at_safepoint() ||
338 338 (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));
339 339
340 340 if (!safe) {
341 341 InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
342 342 } else {
343 343 set_ic_destination(info.entry());
344 344 if (!is_optimized()) set_cached_oop(info.cached_oop()());
345 345 }
346 346
347 347 if (TraceICs) {
348 348 ResourceMark rm(thread);
349 349 assert(info.cached_oop() == NULL || info.cached_oop()()->is_klass(), "must be");
350 350 tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
351 351 instruction_address(),
352 352 ((klassOop)info.cached_oop()())->print_value_string(),
353 353 (safe) ? "" : "via stub");
354 354 }
355 355 }
356 356 // We can't check this anymore. With lazy deopt we could have already
357 357 // cleaned this IC entry before we even return. This is possible if
358 358 // we ran out of space in the inline cache buffer trying to do the
359 359 // set_next and we safepointed to free up space. This is a benign
360 360 // race because the IC entry was complete when we safepointed so
361 361 // cleaning it immediately is harmless.
362 362 // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
363 363 }
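
Note: pulling together the checks this file performs, the states of an inline
cache and how each is recognized are roughly as follows (a summary assembled
from the asserts and predicates above, not an exhaustive specification):

    state           ic_destination() points at                cached oop
    -----           --------------------------                ----------
    clean           resolve_{opt_}virtual_call stub           NULL (stored as non_oop_word)
    monomorphic     nmethod entry, or c2i adapter/stub        receiver klass (compiled target),
                                                              compiledICHolder (interpreted),
                                                              none if the call is optimized
    megamorphic     a VtableStubs entry point                 unreliable: interface or method
    in transition   an address inside the InlineCacheBuffer   the pending ICStub's values
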
364 364
365 365
366 366 // is_optimized:  The compiler has generated an optimized call (i.e., no inline cache).
367 367 // static_bound:  The call can be statically bound (i.e., no need to use an
368 368 //                inline cache).
369 369 void CompiledIC::compute_monomorphic_entry(methodHandle method,
370 370 KlassHandle receiver_klass,
371 371 bool is_optimized,
372 372 bool static_bound,
373 373 CompiledICInfo& info,
374 374 TRAPS) {
375 375 info._is_optimized = is_optimized;
376 376
377 377 nmethod* method_code = method->code();
378 378 address entry = NULL;
379 379 if (method_code != NULL) {
380 380 // Call to compiled code
381 381 if (static_bound || is_optimized) {
382 382 entry = method_code->verified_entry_point();
383 383 } else {
384 384 entry = method_code->entry_point();
385 385 }
386 386 }
387 387 if (entry != NULL) {
388 388 // Call to compiled code
389 389 info._entry = entry;
390 390 if (static_bound || is_optimized) {
391 391 info._cached_oop = Handle(THREAD, (oop)NULL);
392 392 } else {
393 393 info._cached_oop = receiver_klass;
394 394 }
395 395 info._to_interpreter = false;
396 396 } else {
397 397 // Note: the following problem exists with Compiler1:
398 398 // - at compile time we may or may not know if the destination is final
399 399 // - if we know that the destination is final, we will emit an optimized
400 400 // virtual call (no inline cache), and need a methodOop to make a call
401 401 // to the interpreter
402 402 // - if we do not know if the destination is final, we emit a standard
403 403 // virtual call, and use CompiledICHolder to call interpreted code
404 404 // (no static call stub has been generated)
405 405 // However, in that case we will notice that it is static_bound
406 406 // and convert the call into what looks like an optimized
407 407 // virtual call. This causes problems when verifying the IC because
408 408 // it looks vanilla but is optimized. Code in is_call_to_interpreted
409 409 // is aware of this and weakens its asserts.
410 410
411 411 info._to_interpreter = true;
412 412 // static_bound should imply is_optimized -- otherwise we have a
413 413 // performance bug (a statically-bindable method is called via a
414 414 // dynamically-dispatched call). Note: the reverse implication isn't
415 415 // necessarily true -- the call may have been optimized based on compiler
416 416 // analysis (static_bound is only based on "final" etc.).
417 417 #ifdef COMPILER2
418 418 #ifdef TIERED
419 419 #if defined(ASSERT)
420 420 // can't check the assert because we don't have the CompiledIC with which to
421 421 // find the address of the call instruction.
422 422 //
423 423 // CodeBlob* cb = find_blob_unsafe(instruction_address());
424 424 // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
425 425 #endif // ASSERT
426 426 #else
427 427 assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
428 428 #endif // TIERED
429 429 #endif // COMPILER2
430 430 if (is_optimized) {
431 431 // Use stub entry
432 432 info._entry = method()->get_c2i_entry();
433 433 info._cached_oop = method;
434 434 } else {
435 435 // Use mkh entry
436 436 oop holder = oopFactory::new_compiledICHolder(method, receiver_klass, CHECK);
437 437 info._cached_oop = Handle(THREAD, holder);
438 438 info._entry = method()->get_c2i_unverified_entry();
439 439 }
440 440 }
441 441 }
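
Note: compute_monomorphic_entry() above is a four-way decision. A runnable
condensation -- choose_entry() is hypothetical, and the returned strings
merely name the real accessors used above:

    struct EntryChoice { const char* entry; const char* cached_oop; };

    static EntryChoice choose_entry(bool has_compiled_code,
                                    bool static_bound, bool is_optimized) {
      if (has_compiled_code) {
        return (static_bound || is_optimized)
            ? EntryChoice{"verified_entry_point()", "none"}
            : EntryChoice{"entry_point()",          "receiver klass"};
      }
      // Interpreted callee: enter through a c2i adapter.
      return is_optimized
          ? EntryChoice{"get_c2i_entry()",            "methodOop"}
          : EntryChoice{"get_c2i_unverified_entry()", "new compiledICHolder"};
    }
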
442 442
443 443
444 -inline static RelocIterator parse_ic(CodeBlob* code, address ic_call, oop* &_oop_addr, bool *is_optimized) {
444 +inline static RelocIterator parse_ic(nmethod* nm, address ic_call, oop* &_oop_addr, bool *is_optimized) {
445 445 address first_oop = NULL;
446 446 // Mergers, please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
447 - CodeBlob *code1 = code;
448 - return virtual_call_Relocation::parse_ic(code1, ic_call, first_oop, _oop_addr, is_optimized);
447 + nmethod* tmp_nm = nm;
448 + return virtual_call_Relocation::parse_ic(tmp_nm, ic_call, first_oop, _oop_addr, is_optimized);
449 449 }
450 450
451 451 CompiledIC::CompiledIC(NativeCall* ic_call)
452 452 : _ic_call(ic_call),
453 453 _oops(parse_ic(NULL, ic_call->instruction_address(), _oop_addr, &_is_optimized))
454 454 {
455 455 }
456 456
457 457
458 458 CompiledIC::CompiledIC(Relocation* ic_reloc)
459 459 : _ic_call(nativeCall_at(ic_reloc->addr())),
460 460 _oops(parse_ic(ic_reloc->code(), ic_reloc->addr(), _oop_addr, &_is_optimized))
461 461 {
462 462 assert(ic_reloc->type() == relocInfo::virtual_call_type ||
463 463 ic_reloc->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
464 464 }
465 465
466 466
467 467 // ----------------------------------------------------------------------------
468 468
469 469 void CompiledStaticCall::set_to_clean() {
470 470 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
471 471 // Reset call site
472 472 MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
473 473 #ifdef ASSERT
474 474 CodeBlob* cb = CodeCache::find_blob_unsafe(this);
475 475 assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
476 476 #endif
477 477 set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());
478 478
479 479 // Do not reset stub here: It is too expensive to call find_stub.
480 480 // Instead, rely on caller (nmethod::clear_inline_caches) to clear
481 481 // both the call and its stub.
482 482 }
483 483
484 484
485 485 bool CompiledStaticCall::is_clean() const {
486 486 return destination() == SharedRuntime::get_resolve_static_call_stub();
487 487 }
488 488
489 489 bool CompiledStaticCall::is_call_to_compiled() const {
490 490 return CodeCache::contains(destination());
491 491 }
492 492
493 493
494 494 bool CompiledStaticCall::is_call_to_interpreted() const {
495 495 // It is a call to interpreted code if it calls to a stub. Hence, the destination
496 496 // must be in the stub part of the nmethod that contains the call.
497 497 nmethod* nm = CodeCache::find_nmethod(instruction_address());
498 498 return nm->stub_contains(destination());
499 499 }
500 500
501 501
502 502 void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
503 503 address stub=find_stub();
504 504 assert(stub!=NULL, "stub not found");
505 505
506 506 if (TraceICs) {
507 507 ResourceMark rm;
508 508 tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
509 509 instruction_address(),
510 510 callee->name_and_sig_as_C_string());
511 511 }
512 512
513 513 NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object
514 514 NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
515 515
516 516 assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
517 517 assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache");
518 518
519 519 // Update stub
520 520 method_holder->set_data((intptr_t)callee());
521 521 jump->set_jump_destination(entry);
522 522
523 523 // Update jump to call
524 524 set_destination_mt_safe(stub);
525 525 }
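
Note: set_to_interpreted() relies on publish-after-initialize: the stub's
method-holder word and jump target are written first (idempotently -- the two
asserts above allow only "still clean" or "already these exact values"), and
only then does set_destination_mt_safe() repoint the call at the stub in one
atomic patch. A generic sketch of the pattern, with std::atomic standing in
for the MT-safe call patch and all names illustrative:

    #include <atomic>

    struct InterpStub { void* method_holder; void* jump_target; };

    static void publish_stub(std::atomic<void*>& call_target, InterpStub* stub,
                             void* callee, void* entry) {
      // 1. Fill in the stub while the call still points elsewhere (or rewrite
      //    it with identical values, which is harmless).
      stub->method_holder = callee;
      stub->jump_target   = entry;
      // 2. Publish with a single release store: a racing thread sees either
      //    the old target or a fully-initialized stub, never a torn one.
      call_target.store(stub, std::memory_order_release);
    }
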
526 526
527 527
528 528 void CompiledStaticCall::set(const StaticCallInfo& info) {
529 529 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
530 530 MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
531 531 // Updating a cache to the wrong entry can cause bugs that are very hard
532 532 // to track down - if a cache entry becomes invalid, we just clean it. In
533 533 // this way it is always the same code path that is responsible for
534 534 // updating and resolving an inline cache.
535 535 assert(is_clean(), "do not update a call entry - use clean");
536 536
537 537 if (info._to_interpreter) {
538 538 // Call to interpreted code
539 539 set_to_interpreted(info.callee(), info.entry());
540 540 } else {
541 541 if (TraceICs) {
542 542 ResourceMark rm;
543 543 tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
544 544 instruction_address(),
545 545 info.entry());
546 546 }
547 547 // Call to compiled code
548 548 assert (CodeCache::contains(info.entry()), "wrong entry point");
549 549 set_destination_mt_safe(info.entry());
550 550 }
551 551 }
552 552
553 553
554 554 // Compute settings for a CompiledStaticCall. Since we might have to set
555 555 // the stub when calling to the interpreter, we need to return arguments.
556 556 void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
557 557 nmethod* m_code = m->code();
558 558 info._callee = m;
559 559 if (m_code != NULL) {
560 560 info._to_interpreter = false;
561 561 info._entry = m_code->verified_entry_point();
562 562 } else {
563 563 // Callee is interpreted code. In any case, entering the interpreter
564 564 // puts a converter-frame on the stack to save arguments.
565 565 info._to_interpreter = true;
566 566 info._entry = m()->get_c2i_entry();
567 567 }
568 568 }
569 569
570 570
571 571 void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
572 572 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
573 573 // Reset stub
574 574 address stub = static_stub->addr();
575 575 assert(stub!=NULL, "stub not found");
576 576 NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object
577 577 NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
578 578 method_holder->set_data(0);
579 579 jump->set_jump_destination((address)-1);
580 580 }
581 581
582 582
583 583 address CompiledStaticCall::find_stub() {
584 584 // Find reloc. information containing this call-site
585 585 RelocIterator iter((nmethod*)NULL, instruction_address());
586 586 while (iter.next()) {
587 587 if (iter.addr() == instruction_address()) {
588 588 switch(iter.type()) {
589 589 case relocInfo::static_call_type:
590 590 return iter.static_call_reloc()->static_stub();
591 591 // We check here for opt_virtual_call_type, since we reuse the code
592 592 // from the CompiledIC implementation
593 593 case relocInfo::opt_virtual_call_type:
594 594 return iter.opt_virtual_call_reloc()->static_stub();
595 595 case relocInfo::poll_type:
596 596 case relocInfo::poll_return_type: // A safepoint can't overlap a call.
597 597 default:
598 598 ShouldNotReachHere();
599 599 }
600 600 }
601 601 }
602 602 return NULL;
603 603 }
604 604
605 605
606 606 //-----------------------------------------------------------------------------
607 607 // Non-product mode code
608 608 #ifndef PRODUCT
609 609
610 610 void CompiledIC::verify() {
611 611 // make sure code pattern is actually a call imm32 instruction
612 612 _ic_call->verify();
613 613 if (os::is_MP()) {
614 614 _ic_call->verify_alignment();
615 615 }
616 616 assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
617 617 || is_optimized() || is_megamorphic(), "sanity check");
618 618 }
619 619
620 620
621 621 void CompiledIC::print() {
622 622 print_compiled_ic();
623 623 tty->cr();
624 624 }
625 625
626 626
627 627 void CompiledIC::print_compiled_ic() {
628 628 tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT,
629 629 instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination());
630 630 }
631 631
632 632
633 633 void CompiledStaticCall::print() {
634 634 tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address());
635 635 if (is_clean()) {
636 636 tty->print("clean");
637 637 } else if (is_call_to_compiled()) {
638 638 tty->print("compiled");
639 639 } else if (is_call_to_interpreted()) {
640 640 tty->print("interpreted");
641 641 }
642 642 tty->cr();
643 643 }
644 644
645 645 void CompiledStaticCall::verify() {
646 646 // Verify call
647 647 NativeCall::verify();
648 648 if (os::is_MP()) {
649 649 verify_alignment();
650 650 }
651 651
652 652 // Verify stub
653 653 address stub = find_stub();
654 654 assert(stub != NULL, "no stub found for static call");
655 655 NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object
656 656 NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
657 657
658 658 // Verify state
659 659 assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
660 660 }
661 661
662 662 #endif