diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 98e66731817..12f0f0fa5b5 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -1625,7 +1625,7 @@ jint G1CollectedHeap::initialize() {
     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
     return JNI_ENOMEM;
   }
-  _cmThread = _cm->cm_thread();
+  _cm_thread = _cm->cm_thread();
 
   // Now expand into the initial heap size.
   if (!expand(init_byte_size, _workers)) {
@@ -1714,7 +1714,7 @@ void G1CollectedHeap::stop() {
   // that are destroyed during shutdown.
   _cr->stop();
   _young_gen_sampling_thread->stop();
-  _cmThread->stop();
+  _cm_thread->stop();
   if (G1StringDedup::is_enabled()) {
     G1StringDedup::stop();
   }
@@ -1967,7 +1967,7 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
   // is set) so that if a waiter requests another System.gc() it doesn't
   // incorrectly see that a marking cycle is still in progress.
   if (concurrent) {
-    _cmThread->set_idle();
+    _cm_thread->set_idle();
   }
 
   // This notify_all() will ensure that a thread that called
@@ -2178,11 +2178,11 @@ bool G1CollectedHeap::supports_concurrent_phase_control() const {
 }
 
 const char* const* G1CollectedHeap::concurrent_phases() const {
-  return _cmThread->concurrent_phases();
+  return _cm_thread->concurrent_phases();
 }
 
 bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
-  return _cmThread->request_concurrent_phase(phase);
+  return _cm_thread->request_concurrent_phase(phase);
 }
 
 class PrintRegionClosure: public HeapRegionClosure {
@@ -2272,7 +2272,7 @@ void G1CollectedHeap::print_on_error(outputStream* st) const {
 
 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
   workers()->print_worker_threads_on(st);
-  _cmThread->print_on(st);
+  _cm_thread->print_on(st);
   st->cr();
   _cm->print_worker_threads_on(st);
   _cr->print_threads_on(st);
@@ -2284,7 +2284,7 @@ void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
 
 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
   workers()->threads_do(tc);
-  tc->do_thread(_cmThread);
+  tc->do_thread(_cm_thread);
   _cm->threads_do(tc);
   _cr->threads_do(tc);
   tc->do_thread(_young_gen_sampling_thread);
@@ -2455,8 +2455,8 @@ HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
 
 void G1CollectedHeap::do_concurrent_mark() {
   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
-  if (!_cmThread->in_progress()) {
-    _cmThread->set_started();
+  if (!_cm_thread->in_progress()) {
+    _cm_thread->set_started();
     CGC_lock->notify();
   }
 }
@@ -2752,7 +2752,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   _verifier->verify_dirty_young_regions();
 
   // We should not be doing initial mark unless the conc mark thread is running
-  if (!_cmThread->should_terminate()) {
+  if (!_cm_thread->should_terminate()) {
     // This call will decide whether this pause is an initial-mark
     // pause. If it is, in_initial_mark_gc() will return true
     // for the duration of this pause.
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index 9d8ac4239e8..5a0f1c3f5f3 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -766,7 +766,7 @@ private:
 
   // The concurrent marker (and the thread it runs in.)
   G1ConcurrentMark* _cm;
-  G1ConcurrentMarkThread* _cmThread;
+  G1ConcurrentMarkThread* _cm_thread;
 
   // The concurrent refiner.
   G1ConcurrentRefine* _cr;
@@ -1253,7 +1253,7 @@ public:
   // bitmap off to the side.
   void do_concurrent_mark();
 
-  bool isMarkedNext(oop obj) const;
+  bool is_marked_next(oop obj) const;
 
   // Determine if an object is dead, given the object and also
   // the region to which the object belongs. An object is dead
@@ -1271,7 +1271,7 @@ public:
   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
     return
       !hr->obj_allocated_since_next_marking(obj) &&
-      !isMarkedNext(obj) &&
+      !is_marked_next(obj) &&
       !hr->is_archive();
   }
 
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
index 913a253593f..ae0aef54cf5 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
@@ -120,7 +120,7 @@ inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
   return _task_queues->queue(i);
 }
 
-inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
+inline bool G1CollectedHeap::is_marked_next(oop obj) const {
   return _cm->next_mark_bitmap()->is_marked((HeapWord*)obj);
 }
 
@@ -242,7 +242,7 @@ inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
 }
 
 inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
-  return !isMarkedNext(obj) && !hr->is_archive();
+  return !is_marked_next(obj) && !hr->is_archive();
 }
 
 inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
diff --git a/src/hotspot/share/gc/g1/satbMarkQueue.cpp b/src/hotspot/share/gc/g1/satbMarkQueue.cpp
index 8777e9519cf..783014dcf0b 100644
--- a/src/hotspot/share/gc/g1/satbMarkQueue.cpp
+++ b/src/hotspot/share/gc/g1/satbMarkQueue.cpp
@@ -103,7 +103,7 @@ inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
 }
 
 inline bool retain_entry(const void* entry, G1CollectedHeap* heap) {
-  return requires_marking(entry, heap) && !heap->isMarkedNext((oop)entry);
+  return requires_marking(entry, heap) && !heap->is_marked_next((oop)entry);
 }
 
 // This method removes entries from a SATB buffer that will not be