  if (nm->is_unloading()) {
    // We don't need to take the lock when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per-nmethod lock.
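    // Unlinking clears the Method's reference to this nmethod, so that
    // subsequent calls no longer dispatch into code that is about to be
    // unloaded.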
    nm->unlink_from_method();

    // We can end up calling nmethods that are unloading
    // since we clear compiled ICs lazily. Returning false
    // will re-resolve the call and update the compiled IC.
    return false;
  }

  // Heal oops and disarm
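  // Healing applies the load barrier to the oops embedded in this nmethod so
  // that they refer to their current, valid addresses. Disarming then marks
  // the nmethod as safe to enter, so threads skip this barrier until the GC
  // re-arms nmethods at the next phase change.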
  ZNMethodOopClosure cl;
  ZNMethod::nmethod_oops_do(nm, &cl);
  disarm(nm);

  return true;
}

int ZBarrierSetNMethod::disarmed_value() const {
  // We override the default BarrierSetNMethod::disarmed_value() since
  // this can be called by GC threads, which don't keep an up-to-date
  // address_bad_mask.
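  // Instead, read the value directly from the global ZAddressBadMask;
  // ZNMethodDisarmedOffset selects the 32-bit slice of the mask that the
  // nmethod entry barrier compares against.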
  const uintptr_t disarmed_addr = ((uintptr_t)&ZAddressBadMask) + ZNMethodDisarmedOffset;
  return *((int*)disarmed_addr);
}

ByteSize ZBarrierSetNMethod::thread_disarmed_offset() const {
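  // This is the offset, within a JavaThread's GC thread-local data, of the
  // value that compiled nmethod entry barriers compare against. A sketch of
  // the check emitted at an nmethod's verified entry point (the real check
  // is platform-specific machine code):
  //
  //   if (value_embedded_in_nmethod != *(int*)(thread + thread_disarmed_offset)) {
  //     // Call into the runtime, which runs the slow path shown above.
  //   }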
  return ZThreadLocalData::nmethod_disarmed_offset();
}
|
  if (nm->is_unloading()) {
    // We don't need to take the lock when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per-nmethod lock.
    nm->unlink_from_method();

    // We can end up calling nmethods that are unloading
    // since we clear compiled ICs lazily. Returning false
    // will re-resolve the call and update the compiled IC.
    return false;
  }

  // Heal oops and disarm
  ZNMethodOopClosure cl;
  ZNMethod::nmethod_oops_do(nm, &cl);
  disarm(nm);

  return true;
}

int* ZBarrierSetNMethod::disarmed_value_address() const {
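  // Return the address of the current disarmed value. It is derived from the
  // global ZAddressBadMask rather than from thread-local data, since this can
  // be called by GC threads, which don't keep an up-to-date address_bad_mask.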
  const uintptr_t mask_addr = reinterpret_cast<uintptr_t>(&ZAddressBadMask);
  const uintptr_t disarmed_addr = mask_addr + ZNMethodDisarmedOffset;
  return reinterpret_cast<int*>(disarmed_addr);
}

ByteSize ZBarrierSetNMethod::thread_disarmed_offset() const {
  return ZThreadLocalData::nmethod_disarmed_offset();
}