
src/hotspot/share/interpreter/bytecodeInterpreter.cpp

*** 703,723 ****
              // Try to revoke bias.
              markOop header = rcvr->klass()->prototype_header();
              if (hash != markOopDesc::no_hash) {
                header = header->copy_set_hash(hash);
              }
!             if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics)
                  (*BiasedLocking::revoked_lock_entry_count_addr())++;
              }
            } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
              // Try to rebias.
              markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
              if (hash != markOopDesc::no_hash) {
                new_header = new_header->copy_set_hash(hash);
              }
!             if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics) {
                  (* BiasedLocking::rebiased_lock_entry_count_addr())++;
                }
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
--- 703,723 ----
              // Try to revoke bias.
              markOop header = rcvr->klass()->prototype_header();
              if (hash != markOopDesc::no_hash) {
                header = header->copy_set_hash(hash);
              }
!             if (Atomic::cmpxchg(header, rcvr->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics)
                  (*BiasedLocking::revoked_lock_entry_count_addr())++;
              }
            } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
              // Try to rebias.
              markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
              if (hash != markOopDesc::no_hash) {
                new_header = new_header->copy_set_hash(hash);
              }
!             if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics) {
                  (* BiasedLocking::rebiased_lock_entry_count_addr())++;
                }
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
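
Every hunk in this change applies the same substitution: the old pointer-only Atomic::cmpxchg_ptr(exchange_value, dest, compare_value) becomes the templated Atomic::cmpxchg, which deduces its operand type from the arguments, so the (void*) casts on the exchange value can be dropped. The return value is the prior contents of *dest, and the swap succeeded exactly when that equals compare_value, which is what the "== mark" tests above check. A minimal stand-in built on std::atomic (an illustration of the contract, not HotSpot's implementation):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for the templated Atomic::cmpxchg: atomically replace *dest
    // with exchange_value iff it still holds compare_value, and return the
    // prior contents of *dest.
    template <typename T>
    T cmpxchg(T exchange_value, std::atomic<T>* dest, T compare_value) {
      T observed = compare_value;
      dest->compare_exchange_strong(observed, exchange_value);
      return observed;  // equals compare_value iff the swap happened
    }

    int main() {
      std::atomic<std::intptr_t> mark{0x05};   // pretend mark word
      std::intptr_t expected = 0x05, revoked = 0x01;
      if (cmpxchg(revoked, &mark, expected) == expected) {
        std::printf("revoke CAS succeeded: mark = %lx\n", (long)mark.load());
      }
      return 0;
    }
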
*** 732,742 ****
                header = header->copy_set_hash(hash);
              }
              markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
              // Debugging hint.
              DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
!             if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
                if (PrintBiasedLockingStatistics) {
                  (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
                }
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
--- 732,742 ----
                header = header->copy_set_hash(hash);
              }
              markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
              // Debugging hint.
              DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
!             if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), header) == header) {
                if (PrintBiasedLockingStatistics) {
                  (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
                }
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
*** 748,758 ****
          // Traditional lightweight locking.
          if (!success) {
            markOop displaced = rcvr->mark()->set_unlocked();
            mon->lock()->set_displaced_header(displaced);
            bool call_vm = UseHeavyMonitors;
!           if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
              // Is it simple recursive case?
              if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
                mon->lock()->set_displaced_header(NULL);
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
--- 748,758 ----
          // Traditional lightweight locking.
          if (!success) {
            markOop displaced = rcvr->mark()->set_unlocked();
            mon->lock()->set_displaced_header(displaced);
            bool call_vm = UseHeavyMonitors;
!           if (call_vm || Atomic::cmpxchg((markOop)mon, rcvr->mark_addr(), displaced) != displaced) {
              // Is it simple recursive case?
              if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
                mon->lock()->set_displaced_header(NULL);
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
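
This hunk shows the one place where the templated form gains a cast instead of losing one: mon is a BasicObjectLock*, the destination mark word holds a markOop, and with type deduction all three arguments must agree, so the stack-lock pointer is now written as (markOop)mon where cmpxchg_ptr's void* parameters hid the mismatch. A compilable sketch with stub stand-in types (not HotSpot's real declarations):

    #include <atomic>

    // Stub stand-ins, only to show the deduction conflict.
    struct markOopDesc;
    typedef markOopDesc* markOop;
    struct BasicObjectLock {};

    template <typename T>
    T cmpxchg(T exchange_value, std::atomic<T>* dest, T compare_value) {
      T observed = compare_value;
      dest->compare_exchange_strong(observed, exchange_value);
      return observed;
    }

    int main() {
      std::atomic<markOop> mark{nullptr};
      markOop displaced = nullptr;
      BasicObjectLock mon;
      // cmpxchg(&mon, &mark, displaced);        // ill-formed: T deduces to both
      //                                         // BasicObjectLock* and markOop
      cmpxchg((markOop)&mon, &mark, displaced);  // OK once the cast unifies T
      return 0;
    }
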
*** 901,922 ****
              // try revoke bias
              markOop header = lockee->klass()->prototype_header();
              if (hash != markOopDesc::no_hash) {
                header = header->copy_set_hash(hash);
              }
!             if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics) {
                  (*BiasedLocking::revoked_lock_entry_count_addr())++;
                }
              }
            } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
              // try rebias
              markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
              if (hash != markOopDesc::no_hash) {
                new_header = new_header->copy_set_hash(hash);
              }
!             if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics) {
                  (* BiasedLocking::rebiased_lock_entry_count_addr())++;
                }
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
--- 901,922 ----
              // try revoke bias
              markOop header = lockee->klass()->prototype_header();
              if (hash != markOopDesc::no_hash) {
                header = header->copy_set_hash(hash);
              }
!             if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics) {
                  (*BiasedLocking::revoked_lock_entry_count_addr())++;
                }
              }
            } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
              // try rebias
              markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
              if (hash != markOopDesc::no_hash) {
                new_header = new_header->copy_set_hash(hash);
              }
!             if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics) {
                  (* BiasedLocking::rebiased_lock_entry_count_addr())++;
                }
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
*** 930,940 ****
                header = header->copy_set_hash(hash);
              }
              markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
              // debugging hint
              DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
!             if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
                if (PrintBiasedLockingStatistics) {
                  (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
                }
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
--- 930,940 ----
                header = header->copy_set_hash(hash);
              }
              markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
              // debugging hint
              DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
!             if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) {
                if (PrintBiasedLockingStatistics) {
                  (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
                }
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
*** 946,956 ****
          // traditional lightweight locking
          if (!success) {
            markOop displaced = lockee->mark()->set_unlocked();
            entry->lock()->set_displaced_header(displaced);
            bool call_vm = UseHeavyMonitors;
!           if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
              // Is it simple recursive case?
              if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
                entry->lock()->set_displaced_header(NULL);
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
--- 946,956 ----
          // traditional lightweight locking
          if (!success) {
            markOop displaced = lockee->mark()->set_unlocked();
            entry->lock()->set_displaced_header(displaced);
            bool call_vm = UseHeavyMonitors;
!           if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) {
              // Is it simple recursive case?
              if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
                entry->lock()->set_displaced_header(NULL);
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
*** 1842,1863 ****
              // try revoke bias
              markOop header = lockee->klass()->prototype_header();
              if (hash != markOopDesc::no_hash) {
                header = header->copy_set_hash(hash);
              }
!             if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics)
                  (*BiasedLocking::revoked_lock_entry_count_addr())++;
              }
            } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
              // try rebias
              markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
              if (hash != markOopDesc::no_hash) {
                new_header = new_header->copy_set_hash(hash);
              }
!             if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics)
                  (* BiasedLocking::rebiased_lock_entry_count_addr())++;
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
--- 1842,1863 ----
              // try revoke bias
              markOop header = lockee->klass()->prototype_header();
              if (hash != markOopDesc::no_hash) {
                header = header->copy_set_hash(hash);
              }
!             if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics)
                  (*BiasedLocking::revoked_lock_entry_count_addr())++;
              }
            } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
              // try rebias
              markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
              if (hash != markOopDesc::no_hash) {
                new_header = new_header->copy_set_hash(hash);
              }
!             if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics)
                  (* BiasedLocking::rebiased_lock_entry_count_addr())++;
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
*** 1873,1883 ****
                header = header->copy_set_hash(hash);
              }
              markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
              // debugging hint
              DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
!             if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
                if (PrintBiasedLockingStatistics)
                  (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
--- 1873,1883 ----
                header = header->copy_set_hash(hash);
              }
              markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
              // debugging hint
              DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
!             if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) {
                if (PrintBiasedLockingStatistics)
                  (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
*** 1889,1899 ****
          // traditional lightweight locking
          if (!success) {
            markOop displaced = lockee->mark()->set_unlocked();
            entry->lock()->set_displaced_header(displaced);
            bool call_vm = UseHeavyMonitors;
!           if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
              // Is it simple recursive case?
              if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
                entry->lock()->set_displaced_header(NULL);
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
--- 1889,1899 ----
          // traditional lightweight locking
          if (!success) {
            markOop displaced = lockee->mark()->set_unlocked();
            entry->lock()->set_displaced_header(displaced);
            bool call_vm = UseHeavyMonitors;
!           if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) {
              // Is it simple recursive case?
              if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
                entry->lock()->set_displaced_header(NULL);
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
*** 1921,1931 ****
          most_recent->set_obj(NULL);
          if (!lockee->mark()->has_bias_pattern()) {
            bool call_vm = UseHeavyMonitors;
            // If it isn't recursive we either must swap old header or call the runtime
            if (header != NULL || call_vm) {
!             if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
                // restore object for the slow case
                most_recent->set_obj(lockee);
                CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
              }
            }
--- 1921,1931 ----
          most_recent->set_obj(NULL);
          if (!lockee->mark()->has_bias_pattern()) {
            bool call_vm = UseHeavyMonitors;
            // If it isn't recursive we either must swap old header or call the runtime
            if (header != NULL || call_vm) {
!             if (call_vm || Atomic::cmpxchg(header, lockee->mark_addr(), lock) != lock) {
                // restore object for the slow case
                most_recent->set_obj(lockee);
                CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
              }
            }
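
Here the CAS runs in the unlock direction: monitorexit swaps the displaced header back over the mark word only if the mark still points at this frame's BasicLock; if the CAS fails (for example because the monitor was inflated in the meantime), the object is restored and InterpreterRuntime::monitorexit finishes the job. A toy model under those assumptions (a bare uintptr_t standing in for the mark word):

    #include <atomic>
    #include <cstdint>

    // CAS the displaced header back over the pointer to our stack lock;
    // returns false when the mark no longer encodes lock_ptr, i.e. the
    // slow path must be taken.
    static bool fast_unlock(std::atomic<std::uintptr_t>& mark,
                            std::uintptr_t lock_ptr,
                            std::uintptr_t displaced_header) {
      std::uintptr_t expected = lock_ptr;
      return mark.compare_exchange_strong(expected, displaced_header);
    }

    int main() {
      std::atomic<std::uintptr_t> mark{0x1000};  // pretend the mark encodes &lock
      if (!fast_unlock(mark, 0x1000, 0x5)) {
        // slow path: call into the runtime
      }
      return 0;
    }
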
*** 2187,2197 ****
          // Try allocate in shared eden
        retry:
          HeapWord* compare_to = *Universe::heap()->top_addr();
          HeapWord* new_top = compare_to + obj_size;
          if (new_top <= *Universe::heap()->end_addr()) {
!           if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
              goto retry;
            }
            result = (oop) compare_to;
          }
        }
--- 2187,2197 ----
          // Try allocate in shared eden
        retry:
          HeapWord* compare_to = *Universe::heap()->top_addr();
          HeapWord* new_top = compare_to + obj_size;
          if (new_top <= *Universe::heap()->end_addr()) {
!           if (Atomic::cmpxchg(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
              goto retry;
            }
            result = (oop) compare_to;
          }
        }
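
The same primitive also backs the bump-pointer allocation fast path in the shared eden: read top, compute new_top, attempt the CAS, and on failure simply retry, since losing the race only means another thread advanced top first. A simplified, self-contained sketch (assumptions: a flat array stands in for the heap, and plain pointers replace Universe::heap()->top_addr()/end_addr()):

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    static char heap_space[1 << 20];            // stand-in for the shared eden
    static std::atomic<char*> top{heap_space};  // stand-in for *top_addr()
    static char* const heap_end = heap_space + sizeof(heap_space);

    // CAS-based bump-pointer allocation: the interpreter's retry loop above.
    char* allocate(std::size_t obj_size) {
      for (;;) {
        char* compare_to = top.load();
        char* new_top = compare_to + obj_size;
        if (new_top > heap_end) {
          return nullptr;                       // eden exhausted; slow path in the VM
        }
        if (top.compare_exchange_strong(compare_to, new_top)) {
          return compare_to;                    // we own [compare_to, new_top)
        }
        // CAS failed: another thread moved top; loop again (the "goto retry" above)
      }
    }

    int main() {
      char* obj = allocate(64);
      std::printf("allocated at %p\n", static_cast<void*>(obj));
      return 0;
    }
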
*** 2973,2983 ****
            end->set_obj(NULL);

            if (!lockee->mark()->has_bias_pattern()) {
              // If it isn't recursive we either must swap old header or call the runtime
              if (header != NULL) {
!               if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
                  // restore object for the slow case
                  end->set_obj(lockee);
                  {
                    // Prevent any HandleMarkCleaner from freeing our live handles
                    HandleMark __hm(THREAD);
--- 2973,2983 ----
            end->set_obj(NULL);

            if (!lockee->mark()->has_bias_pattern()) {
              // If it isn't recursive we either must swap old header or call the runtime
              if (header != NULL) {
!               if (Atomic::cmpxchg(header, lockee->mark_addr(), lock) != lock) {
                  // restore object for the slow case
                  end->set_obj(lockee);
                  {
                    // Prevent any HandleMarkCleaner from freeing our live handles
                    HandleMark __hm(THREAD);
*** 3048,3058 ****
          if (!rcvr->mark()->has_bias_pattern()) {
            base->set_obj(NULL);
            // If it isn't recursive we either must swap old header or call the runtime
            if (header != NULL) {
!             if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
                // restore object for the slow case
                base->set_obj(rcvr);
                {
                  // Prevent any HandleMarkCleaner from freeing our live handles
                  HandleMark __hm(THREAD);
--- 3048,3058 ----
          if (!rcvr->mark()->has_bias_pattern()) {
            base->set_obj(NULL);
            // If it isn't recursive we either must swap old header or call the runtime
            if (header != NULL) {
!             if (Atomic::cmpxchg(header, rcvr->mark_addr(), lock) != lock) {
                // restore object for the slow case
                base->set_obj(rcvr);
                {
                  // Prevent any HandleMarkCleaner from freeing our live handles
                  HandleMark __hm(THREAD);