--- old/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	2016-09-26 10:58:38.736950469 +0200
+++ new/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	2016-09-26 10:58:38.596950463 +0200
@@ -7829,11 +7829,11 @@
   assert(stack->isEmpty(), "Expected precondition");
   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
   size_t i = num;
-  oop cur = _overflow_list;
+  oopDesc* cur = _overflow_list;
   const markOop proto = markOopDesc::prototype();
   NOT_PRODUCT(ssize_t n = 0;)
-  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
-    next = oop(cur->mark());
+  for (oopDesc* next; i > 0 && cur != NULL; cur = next, i--) {
+    next = cur->mark();
     cur->set_mark(proto);   // until proven otherwise
     assert(cur->is_oop(), "Should be an oop");
     bool res = stack->push(cur);
@@ -7848,7 +7848,7 @@
   return !stack->isEmpty();
 }
 
-#define BUSY (cast_to_oop(0x1aff1aff))
+#define BUSY ((oopDesc*)(0x1aff1aff))
 // (MT-safe) Get a prefix of at most "num" from the list.
 // The overflow list is chained through the mark word of
 // each object in the list. We fetch the entire list,
@@ -7881,7 +7881,7 @@
     return false;
   }
   // Grab the entire list; we'll put back a suffix
-  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+  oopDesc* prefix = (oopDesc*)Atomic::xchg_ptr(BUSY, &_overflow_list);
   Thread* tid = Thread::current();
   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
   // set to ParallelGCThreads.
@@ -7896,7 +7896,7 @@
       return false;
     } else if (_overflow_list != BUSY) {
       // Try and grab the prefix
-      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+      prefix = (oopDesc*)Atomic::xchg_ptr(BUSY, &_overflow_list);
     }
   }
   // If the list was found to be empty, or we spun long
@@ -7915,9 +7915,9 @@
   }
   assert(prefix != NULL && prefix != BUSY, "Error");
   size_t i = num;
-  oop cur = prefix;
+  oopDesc* cur = prefix;
   // Walk down the first "num" objects, unless we reach the end.
-  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
+  for (; i > 1 && cur->mark() != NULL; cur = cast_from_oop(cur->mark()), i--);
   if (cur->mark() == NULL) {
     // We have "num" or fewer elements in the list, so there
     // is nothing to return to the global list.
@@ -7929,14 +7929,14 @@
   } else {
     // Chop off the suffix and return it to the global list.
     assert(cur->mark() != BUSY, "Error");
-    oop suffix_head = cur->mark(); // suffix will be put back on global list
+    oopDesc* suffix_head = cur->mark(); // suffix will be put back on global list
     cur->set_mark(NULL);           // break off suffix
     // It's possible that the list is still in the empty(busy) state
     // we left it in a short while ago; in that case we may be
     // able to place back the suffix without incurring the cost
     // of a walk down the list.
-    oop observed_overflow_list = _overflow_list;
-    oop cur_overflow_list = observed_overflow_list;
+    oopDesc* observed_overflow_list = _overflow_list;
+    oopDesc* cur_overflow_list = observed_overflow_list;
     bool attached = false;
     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
       observed_overflow_list =
@@ -7950,8 +7950,8 @@
       // Too bad, someone else sneaked in (at least) an element; we'll need
       // to do a splice. Find tail of suffix so we can prepend suffix to global
       // list.
-      for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
-      oop suffix_tail = cur;
+      for (cur = suffix_head; cur->mark() != NULL; cur = cast_from_oop(cur->mark()));
+      oopDesc* suffix_tail = cur;
       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
              "Tautology");
       observed_overflow_list = _overflow_list;
@@ -7965,7 +7965,7 @@
         }
         // ... and try to place spliced list back on overflow_list ...
         observed_overflow_list =
-          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
+          (oopDesc*) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
       } while (cur_overflow_list != observed_overflow_list);
                 // ... until we have succeeded in doing so.
     }
@@ -7974,10 +7974,10 @@
   // Push the prefix elements on work_q
   assert(prefix != NULL, "control point invariant");
   const markOop proto = markOopDesc::prototype();
-  oop next;
+  oopDesc* next;
   NOT_PRODUCT(ssize_t n = 0;)
   for (cur = prefix; cur != NULL; cur = next) {
-    next = oop(cur->mark());
+    next = cast_from_oop(cur->mark());
     cur->set_mark(proto); // until proven otherwise
     assert(cur->is_oop(), "Should be an oop");
     bool res = work_q->push(cur);
@@ -7997,7 +7997,7 @@
   assert(p->is_oop(), "Not an oop");
   preserve_mark_if_necessary(p);
   p->set_mark((markOop)_overflow_list);
-  _overflow_list = p;
+  _overflow_list = (oopDesc*)p;
 }
 
 // Multi-threaded; use CAS to prepend to overflow list
@@ -8005,8 +8005,8 @@
   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
   assert(p->is_oop(), "Not an oop");
   par_preserve_mark_if_necessary(p);
-  oop observed_overflow_list = _overflow_list;
-  oop cur_overflow_list;
+  oopDesc* observed_overflow_list = _overflow_list;
+  oopDesc* cur_overflow_list;
   do {
     cur_overflow_list = observed_overflow_list;
     if (cur_overflow_list != BUSY) {
@@ -8015,7 +8015,7 @@
       p->set_mark(NULL);
     }
     observed_overflow_list =
-      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
+      (oopDesc*) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
   } while (cur_overflow_list != observed_overflow_list);
 }
 #undef BUSY
--- old/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	2016-09-26 10:58:39.840950518 +0200
+++ new/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	2016-09-26 10:58:39.704950512 +0200
@@ -540,7 +540,7 @@
 
   // Overflow list of grey objects, threaded through mark-word
   // Manipulated with CAS in the parallel/multi-threaded case.
-  oop _overflow_list;
+  oopDesc* volatile _overflow_list;
   // The following array-pair keeps track of mark words
   // displaced for accommodating overflow list above.
   // This code will likely be revisited under RFE#4922830.
--- old/src/share/vm/gc/cms/parNewGeneration.cpp	2016-09-26 10:58:40.888950565 +0200
+++ new/src/share/vm/gc/cms/parNewGeneration.cpp	2016-09-26 10:58:40.740950558 +0200
@@ -1263,7 +1263,7 @@
 // (although some performance comparisons would be useful since
 // single global lists have their own performance disadvantages
 // as we were made painfully aware not long ago, see 6786503).
-#define BUSY (cast_to_oop(0x1aff1aff))
+#define BUSY ((oopDesc*)(0x1aff1aff))
 void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
   assert(is_in_reserved(from_space_obj), "Should be from this generation");
   if (ParGCUseLocalOverflow) {
@@ -1286,8 +1286,8 @@
       listhead->forward_to(from_space_obj);
       from_space_obj = listhead;
     }
-    oop observed_overflow_list = _overflow_list;
-    oop cur_overflow_list;
+    oopDesc* observed_overflow_list = _overflow_list;
+    oopDesc* cur_overflow_list;
     do {
       cur_overflow_list = observed_overflow_list;
       if (cur_overflow_list != BUSY) {
@@ -1296,7 +1296,7 @@
         from_space_obj->set_klass_to_list_ptr(NULL);
       }
       observed_overflow_list =
-        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
+        (oopDesc*)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
     } while (cur_overflow_list != observed_overflow_list);
   }
 }
@@ -1339,7 +1339,7 @@
   if (_overflow_list == NULL) return false;
 
   // Otherwise, there was something there; try claiming the list.
-  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+  oopDesc* prefix = (oopDesc*)Atomic::xchg_ptr(BUSY, &_overflow_list);
   // Trim off a prefix of at most objsFromOverflow items
   Thread* tid = Thread::current();
   size_t spin_count = ParallelGCThreads;
@@ -1353,7 +1353,7 @@
       return false;
     } else if (_overflow_list != BUSY) {
       // try and grab the prefix
-      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+      prefix = (oopDesc*)Atomic::xchg_ptr(BUSY, &_overflow_list);
     }
   }
   if (prefix == NULL || prefix == BUSY) {
@@ -1367,7 +1367,7 @@
   }
   assert(prefix != NULL && prefix != BUSY, "Error");
   size_t i = 1;
-  oop cur = prefix;
+  oopDesc* cur = prefix;
   while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
     i++; cur = cur->list_ptr_from_klass();
   }
@@ -1380,18 +1380,18 @@
       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
     }
   } else {
-    assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error");
-    oop suffix = cur->list_ptr_from_klass();       // suffix will be put back on global list
-    cur->set_klass_to_list_ptr(NULL);     // break off suffix
+    assert(cur->klass_or_null() != (Klass*)BUSY, "Error");
+    oopDesc* suffix = cur->list_ptr_from_klass();  // suffix will be put back on global list
+    cur->set_klass_to_list_ptr(NULL);     // break off suffix
     // It's possible that the list is still in the empty(busy) state
     // we left it in a short while ago; in that case we may be
     // able to place back the suffix.
-    oop observed_overflow_list = _overflow_list;
-    oop cur_overflow_list = observed_overflow_list;
+    oopDesc* observed_overflow_list = _overflow_list;
+    oopDesc* cur_overflow_list = observed_overflow_list;
     bool attached = false;
     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
       observed_overflow_list =
-        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
+        (oopDesc*) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
       if (cur_overflow_list == observed_overflow_list) {
         attached = true;
         break;
@@ -1400,7 +1400,7 @@
     if (!attached) {
       // Too bad, someone else got in in between; we'll need to do a splice.
       // Find the last item of suffix list
-      oop last = suffix;
+      oopDesc* last = suffix;
       while (last->klass_or_null() != NULL) {
         last = last->list_ptr_from_klass();
       }
@@ -1415,7 +1415,7 @@
           last->set_klass_to_list_ptr(NULL);
         }
         observed_overflow_list =
-          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
+          (oopDesc*)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
       } while (cur_overflow_list != observed_overflow_list);
     }
   }
@@ -1425,8 +1425,8 @@
   cur = prefix;
   ssize_t n = 0;
   while (cur != NULL) {
-    oop obj_to_push = cur->forwardee();
-    oop next = cur->list_ptr_from_klass();
+    oopDesc* obj_to_push = (oopDesc*)cur->forwardee();
+    oopDesc* next = cur->list_ptr_from_klass();
     cur->set_klass(obj_to_push->klass());
     // This may be an array object that is self-forwarded. In that case, the list pointer
     // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
@@ -1435,7 +1435,7 @@
       // with promotion failure.
      oopDesc* f = cur;
       FREE_C_HEAP_ARRAY(oopDesc, f);
-    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
+    } else if (par_scan_state->should_be_partially_scanned(oop(obj_to_push), oop(cur))) {
       assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
       obj_to_push = cur;
     }
--- old/src/share/vm/gc/cms/parNewGeneration.hpp	2016-09-26 10:58:41.720950602 +0200
+++ new/src/share/vm/gc/cms/parNewGeneration.hpp	2016-09-26 10:58:41.612950597 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -323,7 +323,7 @@
   // A list of from-space images of to-be-scanned objects, threaded through
   // klass-pointers (klass information already copied to the forwarded
   // image.) Manipulated with CAS.
-  oop _overflow_list;
+  oopDesc* volatile _overflow_list;
   NOT_PRODUCT(ssize_t _num_par_pushes;)
 
   // This closure is used by the reference processor to filter out
@@ -383,7 +383,7 @@
   NOT_PRODUCT(bool should_simulate_overflow();)
 
   // Accessor for overflow list
-  oop overflow_list() { return _overflow_list; }
+  oop overflow_list() { return oop(_overflow_list); }
 
   // Push the given (from-space) object on the global overflow list.
   void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);
--- old/src/share/vm/oops/oop.hpp	2016-09-26 10:58:42.836950651 +0200
+++ new/src/share/vm/oops/oop.hpp	2016-09-26 10:58:42.700950645 +0200
@@ -92,8 +92,8 @@
   inline int klass_gap() const;
   inline void set_klass_gap(int z);
   // For when the klass pointer is being used as a linked list "next" field.
-  inline void set_klass_to_list_ptr(oop k);
-  inline oop list_ptr_from_klass();
+  inline void set_klass_to_list_ptr(oopDesc* k);
+  inline oopDesc* list_ptr_from_klass();
 
   // size of object header, aligned to platform wordSize
   static int header_size() { return sizeof(oopDesc)/HeapWordSize; }
--- old/src/share/vm/oops/oop.inline.hpp	2016-09-26 10:58:43.732950691 +0200
+++ new/src/share/vm/oops/oop.inline.hpp	2016-09-26 10:58:43.592950685 +0200
@@ -150,23 +150,23 @@
   }
 }
 
-void oopDesc::set_klass_to_list_ptr(oop k) {
+void oopDesc::set_klass_to_list_ptr(oopDesc* k) {
   // This is only to be used during GC, for from-space objects, so no
   // barrier is needed.
   if (UseCompressedClassPointers) {
-    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
+    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(oop(k));  // may be null (parnew overflow handling)
   } else {
-    _metadata._klass = (Klass*)(address)k;
+    _metadata._klass = (Klass*)k;
   }
 }
 
-oop oopDesc::list_ptr_from_klass() {
+oopDesc* oopDesc::list_ptr_from_klass() {
   // This is only to be used during GC, for from-space objects.
   if (UseCompressedClassPointers) {
-    return decode_heap_oop((narrowOop)_metadata._compressed_klass);
+    return (oopDesc*)decode_heap_oop((narrowOop)_metadata._compressed_klass);
   } else {
     // Special case for GC
-    return (oop)(address)_metadata._klass;
+    return (oopDesc*)(address)_metadata._klass;
   }
 }
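
A note for readers following the review: the hunks above only retype the overflow list from oop to a volatile oopDesc*; the lock-free protocol itself is unchanged. Producers CAS-prepend objects onto _overflow_list (threading the chain through the mark word in CMS, or the klass word in ParNew), and a consumer claims the whole chain by exchanging in the BUSY sentinel before restoring the list. The sketch below is NOT HotSpot code and is not part of this patch; it is a minimal standalone illustration of that claim/restore pattern, with an assumed Node type and std::atomic standing in for Atomic::xchg_ptr/cmpxchg_ptr, and the prefix/suffix splice-back omitted.

// Standalone sketch (illustration only): BUSY-sentinel overflow-list protocol.
#include <atomic>
#include <cstdio>

struct Node {
  Node* next = nullptr;   // plays the role of the mark/klass word used as a link
  int payload = 0;
};

static Node* const BUSY = reinterpret_cast<Node*>(0x1aff1aff);
static std::atomic<Node*> overflow_list{nullptr};

// Multi-producer prepend (cf. par_push_on_overflow_list): never link a node to
// the BUSY sentinel; if the list is currently claimed, the node starts a new chain.
void push(Node* n) {
  Node* observed = overflow_list.load(std::memory_order_relaxed);
  do {
    n->next = (observed == BUSY) ? nullptr : observed;
  } while (!overflow_list.compare_exchange_weak(observed, n,
                                                std::memory_order_release,
                                                std::memory_order_relaxed));
}

// Consumer (cf. par_take_from_overflow_list, without the prefix trimming):
// claim the entire chain by swapping in BUSY, then restore the empty state.
Node* take_all() {
  if (overflow_list.load(std::memory_order_relaxed) == nullptr) {
    return nullptr;                  // nothing there; don't disturb the list
  }
  Node* head = overflow_list.exchange(BUSY, std::memory_order_acquire);
  if (head == BUSY) {
    return nullptr;                  // another consumer holds the list right now
  }
  // Put the list back to "empty"; if a producer raced in after our exchange,
  // this CAS fails and that producer's chain simply remains the new list.
  Node* expected = BUSY;
  overflow_list.compare_exchange_strong(expected, nullptr,
                                        std::memory_order_release,
                                        std::memory_order_relaxed);
  return head;                       // may be null if the list emptied meanwhile
}

int main() {
  Node nodes[4];
  for (int i = 0; i < 4; i++) { nodes[i].payload = i; push(&nodes[i]); }
  int count = 0;
  for (Node* p = take_all(); p != nullptr; p = p->next) count++;
  std::printf("took %d nodes\n", count);   // expected: 4
  return 0;
}

The real code additionally walks only a prefix of the claimed chain and splices the remaining suffix back onto _overflow_list with the same cmpxchg loop; that part is left out of the sketch for brevity.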