diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 2014, Red Hat Inc. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // @@ -995,6 +995,7 @@ source_hpp %{ +#include "gc/shared/cardTable.hpp" #include "gc/shared/cardTableModRefBS.hpp" #include "gc/shenandoah/brooksPointer.hpp" #include "opto/addnode.hpp" @@ -4462,8 +4463,8 @@ __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, Assembler::byte, /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg); - %} - + %} + enc_class aarch64_enc_cmpxchg_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{ MacroAssembler _masm(&cbuf); @@ -5893,7 +5894,7 @@ %{ // Get base of card map predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef) && - (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base); + (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base()); match(ConP); op_cost(0); diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp --- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp @@ -2048,21 +2048,21 @@ starti; f(0,31), f((int)T & 1, 30); f(op1, 29, 21), f(0, 20, 16), f(op2, 15, 12); - f((int)T >> 1, 11, 10), rf(Xn, 5), rf(Vt, 0); + f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0); } void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn, int imm, int op1, int op2) { starti; f(0,31), f((int)T & 1, 30); f(op1 | 0b100, 29, 21), f(0b11111, 20, 16), f(op2, 15, 12); - f((int)T >> 1, 11, 10), rf(Xn, 5), rf(Vt, 0); + f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0); } void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn, Register Xm, int op1, int op2) { starti; f(0,31), f((int)T & 1, 30); f(op1 | 0b100, 29, 21), rf(Xm, 16), f(op2, 15, 12); - f((int)T >> 1, 11, 10), rf(Xn, 5), rf(Vt, 0); + f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0); } void ld_st(FloatRegister Vt, SIMD_Arrangement T, Address a, int op1, int op2) { diff --git a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp --- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -30,6 +30,8 @@ #include "c1/c1_MacroAssembler.hpp" #include "c1/c1_Runtime1.hpp" #include "compiler/disassembler.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_aarch64.hpp" #include "oops/compiledICHolder.hpp" @@ -42,6 +44,7 @@ #include "runtime/vframeArray.hpp" #include "vmreg_aarch64.inline.hpp" #if INCLUDE_ALL_GCS +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" #endif @@ -685,14 +688,14 @@ } if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && - UseTLAB && FastTLABRefill) { + UseTLAB && Universe::heap()->supports_inline_contig_alloc()) { Label slow_path; Register obj_size = r2; Register t1 = r19; Register t2 = r4; assert_different_registers(klass, obj, obj_size, t1, t2); - __ stp(r5, r19, Address(__ pre(sp, -2 * wordSize))); + __ stp(r19, zr, Address(__ pre(sp, -2 * wordSize))); if (id == fast_new_instance_init_check_id) { // make sure the klass is initialized @@ -717,24 +720,6 @@ } #endif // ASSERT - // if we got here then the TLAB allocation failed, so try - // refilling the TLAB or allocating directly from eden. - Label retry_tlab, try_eden; - __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy r3 (klass), returns r5 - - __ bind(retry_tlab); - - // get the instance size (size is postive so movl is fine for 64bit) - __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset())); - - __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path); - - __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ true); - __ verify_oop(obj); - __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize))); - __ ret(lr); - - __ bind(try_eden); // get the instance size (size is postive so movl is fine for 64bit) __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset())); @@ -743,11 +728,11 @@ __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false); __ verify_oop(obj); - __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize))); + __ ldp(r19, zr, Address(__ post(sp, 2 * wordSize))); __ ret(lr); __ bind(slow_path); - __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize))); + __ ldp(r19, zr, Address(__ post(sp, 2 * wordSize))); } __ enter(); @@ -815,7 +800,7 @@ } #endif // ASSERT - if (UseTLAB && FastTLABRefill) { + if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) { Register arr_size = r4; Register t1 = r2; Register t2 = r5; @@ -827,45 +812,10 @@ __ cmpw(length, rscratch1); __ br(Assembler::HI, slow_path); - // if we got here then the TLAB allocation failed, so try - // refilling the TLAB or allocating directly from eden. 
- Label retry_tlab, try_eden; - const Register thread = - __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves r19 & r3, returns rthread - - __ bind(retry_tlab); - // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) // since size is positive ldrw does right thing on 64bit __ ldrw(t1, Address(klass, Klass::layout_helper_offset())); - __ lslvw(arr_size, length, t1); - __ ubfx(t1, t1, Klass::_lh_header_size_shift, - exact_log2(Klass::_lh_header_size_mask + 1)); - __ add(arr_size, arr_size, t1); - __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up - __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask); - - __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size - - __ initialize_header(obj, klass, length, t1, t2); - __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte))); - assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); - assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); - __ andr(t1, t1, Klass::_lh_header_size_mask); - __ sub(arr_size, arr_size, t1); // body length - __ add(t1, t1, obj); // body start - if (!ZeroTLAB) { - __ initialize_body(t1, arr_size, 0, t2); - } - __ verify_oop(obj); - - __ ret(lr); - - __ bind(try_eden); - // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) - // since size is positive ldrw does right thing on 64bit - __ ldrw(t1, Address(klass, Klass::layout_helper_offset())); - // since size is postive movw does right thing on 64bit + // since size is positive movw does right thing on 64bit __ movw(arr_size, length); __ lslvw(arr_size, length, t1); __ ubfx(t1, t1, Klass::_lh_header_size_shift, @@ -875,7 +825,7 @@ __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask); __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size - __ incr_allocated_bytes(thread, arr_size, 0, rscratch1); + __ incr_allocated_bytes(rthread, arr_size, 0, rscratch1); __ initialize_header(obj, klass, length, t1, t2); __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte))); @@ -1227,17 +1177,6 @@ // arg0: store_address Address store_addr(rfp, 2*BytesPerWord); - BarrierSet* bs = Universe::heap()->barrier_set(); - if (bs->kind() == BarrierSet::Shenandoah) { - __ movptr(r0, (int)id); - __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0); - __ should_not_reach_here(); - break; - } - - CardTableModRefBS* ct = (CardTableModRefBS*)bs; - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); - Label done; Label runtime; @@ -1258,13 +1197,13 @@ assert_different_registers(card_offset, byte_map_base, rscratch1); f.load_argument(0, card_offset); - __ lsr(card_offset, card_offset, CardTableModRefBS::card_shift); + __ lsr(card_offset, card_offset, CardTable::card_shift); __ load_byte_map_base(byte_map_base); __ ldrb(rscratch1, Address(byte_map_base, card_offset)); - __ cmpw(rscratch1, (int)G1SATBCardTableModRefBS::g1_young_card_val()); + __ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val()); __ br(Assembler::EQ, done); - assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0"); + assert((int)CardTable::dirty_card_val() == 0, "must be 0"); __ membar(Assembler::StoreLoad); __ ldrb(rscratch1, Address(byte_map_base, card_offset)); diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp --- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp +++ 
b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp @@ -277,6 +277,8 @@ // Add in the index add(result, result, tmp); load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); + // The resulting oop is null if the reference is not yet resolved. + // It is Universe::the_null_sentinel() if the reference resolved to NULL via condy. } void InterpreterMacroAssembler::load_resolved_klass_at_offset( @@ -399,6 +401,13 @@ str(val, Address(esp, Interpreter::expr_offset_in_bytes(n))); } +void InterpreterMacroAssembler::load_float(Address src) { + ldrs(v0, src); +} + +void InterpreterMacroAssembler::load_double(Address src) { + ldrd(v0, src); +} void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() { // set sender sp diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp --- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp @@ -158,6 +158,10 @@ void load_ptr(int n, Register val); void store_ptr(int n, Register val); +// Load float value from 'address'. The value is loaded onto the FPU register v0. + void load_float(Address src); + void load_double(Address src); + // Generate a subtype check: branch to ok_is_subtype if sub_klass is // a subtype of super_klass. void gen_subtype_check( Register sub_klass, Label &ok_is_subtype ); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -29,8 +29,9 @@ #include "jvm.h" #include "asm/assembler.hpp" #include "asm/assembler.inline.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" - #include "compiler/disassembler.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shenandoah/brooksPointer.hpp" @@ -47,10 +48,12 @@ #include "runtime/biasedLocking.hpp" #include "runtime/icache.hpp" #include "runtime/interfaceSupport.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/thread.hpp" #if INCLUDE_ALL_GCS +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #include "gc/g1/heapRegion.hpp" @@ -1802,18 +1805,63 @@ void MacroAssembler::membar(Membar_mask_bits order_constraint) { address prev = pc() - NativeMembar::instruction_size; - if (prev == code()->last_membar()) { + address last = code()->last_insn(); + if (last != NULL && nativeInstruction_at(last)->is_Membar() && prev == last) { NativeMembar *bar = NativeMembar_at(prev); // We are merging two memory barrier instructions. On AArch64 we // can do this simply by ORing them together. 
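A note on the membar merge above: dmb ordering constraints behave as a bit set, so a barrier whose mask is the OR of two masks subsumes both, which is why the patch can fuse two adjacent barriers with a single OR. A minimal standalone sketch of the idea (the enum values here are illustrative, not HotSpot's actual CRm encoding):

```cpp
#include <cstdio>

// Illustrative barrier-kind bits; HotSpot encodes these in the dmb
// instruction's CRm field, with different concrete values.
enum MembarBits {
  LoadLoad   = 1 << 0,
  LoadStore  = 1 << 1,
  StoreLoad  = 1 << 2,
  StoreStore = 1 << 3
};

// A barrier ordering the union of two masks subsumes both, so two
// back-to-back barriers fuse into one by ORing their masks.
unsigned merge_membars(unsigned first, unsigned second) {
  return first | second;
}

int main() {
  unsigned acquire = LoadLoad | LoadStore;   // acquire-style fence
  unsigned release = LoadStore | StoreStore; // release-style fence
  printf("merged mask: 0x%x\n", merge_membars(acquire, release));
}
```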
bar->set_kind(bar->get_kind() | order_constraint); BLOCK_COMMENT("merged membar"); } else { - code()->set_last_membar(pc()); + code()->set_last_insn(pc()); dmb(Assembler::barrier(order_constraint)); } } +bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) { + if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) { + merge_ldst(rt, adr, size_in_bytes, is_store); + code()->clear_last_insn(); + return true; + } else { + assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported."); + const unsigned mask = size_in_bytes - 1; + if (adr.getMode() == Address::base_plus_offset && + (adr.offset() & mask) == 0) { // only supports base_plus_offset. + code()->set_last_insn(pc()); + } + return false; + } +} + +void MacroAssembler::ldr(Register Rx, const Address &adr) { + // We always try to merge two adjacent loads into one ldp. + if (!try_merge_ldst(Rx, adr, 8, false)) { + Assembler::ldr(Rx, adr); + } +} + +void MacroAssembler::ldrw(Register Rw, const Address &adr) { + // We always try to merge two adjacent loads into one ldp. + if (!try_merge_ldst(Rw, adr, 4, false)) { + Assembler::ldrw(Rw, adr); + } +} + +void MacroAssembler::str(Register Rx, const Address &adr) { + // We always try to merge two adjacent stores into one stp. + if (!try_merge_ldst(Rx, adr, 8, true)) { + Assembler::str(Rx, adr); + } +} + +void MacroAssembler::strw(Register Rw, const Address &adr) { + // We always try to merge two adjacent stores into one stp. + if (!try_merge_ldst(Rw, adr, 4, true)) { + Assembler::strw(Rw, adr); + } +} + // MacroAssembler routines found actually to be needed void MacroAssembler::push(Register src) @@ -2068,9 +2116,14 @@ } void MacroAssembler::unimplemented(const char* what) { - char* b = new char[1024]; - jio_snprintf(b, 1024, "unimplemented: %s", what); - stop(b); + const char* buf = NULL; + { + ResourceMark rm; + stringStream ss; + ss.print("unimplemented: %s", what); + buf = code_string(ss.as_string()); + } + stop(buf); } // If a constant does not fit in an immediate field, generate some @@ -2654,6 +2707,143 @@ return Address(base, offset); } +// Checks whether offset is aligned. +// Returns true if it is, else false. +bool MacroAssembler::merge_alignment_check(Register base, + size_t size, + long cur_offset, + long prev_offset) const { + if (AvoidUnalignedAccesses) { + if (base == sp) { + // Checks whether low offset if aligned to pair of registers. + long pair_mask = size * 2 - 1; + long offset = prev_offset > cur_offset ? cur_offset : prev_offset; + return (offset & pair_mask) == 0; + } else { // If base is not sp, we can't guarantee the access is aligned. + return false; + } + } else { + long mask = size - 1; + // Load/store pair instruction only supports element size aligned offset. + return (cur_offset & mask) == 0 && (prev_offset & mask) == 0; + } +} + +// Checks whether current and previous loads/stores can be merged. +// Returns true if it can be merged, else false. 
+bool MacroAssembler::ldst_can_merge(Register rt, + const Address &adr, + size_t cur_size_in_bytes, + bool is_store) const { + address prev = pc() - NativeInstruction::instruction_size; + address last = code()->last_insn(); + + if (last == NULL || !nativeInstruction_at(last)->is_Imm_LdSt()) { + return false; + } + + if (adr.getMode() != Address::base_plus_offset || prev != last) { + return false; + } + + NativeLdSt* prev_ldst = NativeLdSt_at(prev); + size_t prev_size_in_bytes = prev_ldst->size_in_bytes(); + + assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging."); + assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging."); + + if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) { + return false; + } + + long max_offset = 63 * prev_size_in_bytes; + long min_offset = -64 * prev_size_in_bytes; + + assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged."); + + // Only same base can be merged. + if (adr.base() != prev_ldst->base()) { + return false; + } + + long cur_offset = adr.offset(); + long prev_offset = prev_ldst->offset(); + size_t diff = abs(cur_offset - prev_offset); + if (diff != prev_size_in_bytes) { + return false; + } + + // Following cases can not be merged: + // ldr x2, [x2, #8] + // ldr x3, [x2, #16] + // or: + // ldr x2, [x3, #8] + // ldr x2, [x3, #16] + // If t1 and t2 is the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL. + if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) { + return false; + } + + long low_offset = prev_offset > cur_offset ? cur_offset : prev_offset; + // Offset range must be in ldp/stp instruction's range. + if (low_offset > max_offset || low_offset < min_offset) { + return false; + } + + if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) { + return true; + } + + return false; +} + +// Merge current load/store with previous load/store into ldp/stp. +void MacroAssembler::merge_ldst(Register rt, + const Address &adr, + size_t cur_size_in_bytes, + bool is_store) { + + assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged."); + + Register rt_low, rt_high; + address prev = pc() - NativeInstruction::instruction_size; + NativeLdSt* prev_ldst = NativeLdSt_at(prev); + + long offset; + + if (adr.offset() < prev_ldst->offset()) { + offset = adr.offset(); + rt_low = rt; + rt_high = prev_ldst->target(); + } else { + offset = prev_ldst->offset(); + rt_low = prev_ldst->target(); + rt_high = rt; + } + + Address adr_p = Address(prev_ldst->base(), offset); + // Overwrite previous generated binary. + code_section()->set_end(prev); + + const int sz = prev_ldst->size_in_bytes(); + assert(sz == 8 || sz == 4, "only supports 64/32bit merging."); + if (!is_store) { + BLOCK_COMMENT("merged ldr pair"); + if (sz == 8) { + ldp(rt_low, rt_high, adr_p); + } else { + ldpw(rt_low, rt_high, adr_p); + } + } else { + BLOCK_COMMENT("merged str pair"); + if (sz == 8) { + stp(rt_low, rt_high, adr_p); + } else { + stpw(rt_low, rt_high, adr_p); + } + } +} + /** * Multiply 64 bit by 64 bit first loop. */ @@ -3517,16 +3707,16 @@ // register obj is destroyed afterwards. 
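Restating ldst_can_merge's core tests in plain C++ may help review. This is a sketch under the rules spelled out in the patch — same base register, equal access sizes, offsets exactly one access apart, and the low offset within ldp/stp's signed, scaled 7-bit immediate — with the register-hazard and sp-only alignment subtleties reduced to comments; all names here are mine, not HotSpot's:

```cpp
#include <cstdio>
#include <cstdlib>

// One scalar load/store: base register id, byte offset, access size.
struct MemAccess { int base; long offset; size_t size; };

// Core mergeability tests, mirroring ldst_can_merge. Omitted here:
// the load-hazard checks (result register reused as base, or both
// loads targeting the same register) and HotSpot's rule that under
// AvoidUnalignedAccesses only sp-based addresses are trusted.
bool can_merge(const MemAccess& prev, const MemAccess& cur) {
  if (prev.base != cur.base || prev.size != cur.size) return false;
  // The two offsets must be exactly one access apart.
  if (labs(cur.offset - prev.offset) != (long)prev.size) return false;
  // ldp/stp immediate: signed 7 bits, scaled by the access size.
  long low = (cur.offset < prev.offset) ? cur.offset : prev.offset;
  if (low < -64 * (long)prev.size || low > 63 * (long)prev.size) return false;
  // Each offset needs element-size alignment for the scaled form.
  return (cur.offset % (long)prev.size == 0) &&
         (prev.offset % (long)prev.size == 0);
}

int main() {
  MemAccess a{2, 16, 8}, b{2, 24, 8};
  printf("mergeable: %d\n", can_merge(a, b)); // prints 1
}
```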
BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableForRS || - bs->kind() == BarrierSet::CardTableExtension, + assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); - CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS*>(bs); - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); - - lsr(obj, obj, CardTableModRefBS::card_shift); - - assert(CardTableModRefBS::dirty_card_val() == 0, "must be"); + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS*>(bs); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); + + lsr(obj, obj, CardTable::card_shift); + + assert(CardTable::dirty_card_val() == 0, "must be"); load_byte_map_base(rscratch1); @@ -4031,8 +4221,9 @@ DirtyCardQueue::byte_offset_of_buf())); BarrierSet* bs = Universe::heap()->barrier_set(); - CardTableModRefBS* ct = (CardTableModRefBS*)bs; - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS*>(bs); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); Label done; Label runtime; @@ -4049,20 +4240,20 @@ // storing region crossing non-NULL, is card already dirty? - ExternalAddress cardtable((address) ct->byte_map_base); - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + ExternalAddress cardtable((address) ct->byte_map_base()); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); const Register card_addr = tmp; - lsr(card_addr, store_addr, CardTableModRefBS::card_shift); + lsr(card_addr, store_addr, CardTable::card_shift); // get the address of the card load_byte_map_base(tmp2); add(card_addr, card_addr, tmp2); ldrb(tmp2, Address(card_addr)); - cmpw(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val()); + cmpw(tmp2, (int)G1CardTable::g1_young_card_val()); br(Assembler::EQ, done); - assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0"); + assert((int)CardTable::dirty_card_val() == 0, "must be 0"); membar(Assembler::StoreLoad); @@ -4311,131 +4502,6 @@ // verify_tlab(); } -// Preserves r19, and r3. -Register MacroAssembler::tlab_refill(Label& retry, - Label& try_eden, - Label& slow_case) { - Register top = r0; - Register t1 = r2; - Register t2 = r4; - assert_different_registers(top, rthread, t1, t2, /* preserve: */ r19, r3); - Label do_refill, discard_tlab; - - if (!Universe::heap()->supports_inline_contig_alloc()) { - // No allocation in the shared eden. - b(slow_case); - } - - ldr(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); - ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); - - // calculate amount of free space - sub(t1, t1, top); - lsr(t1, t1, LogHeapWordSize); - - // Retain tlab and allocate object in shared space if - // the amount free in the tlab is too large to discard. 
- - ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()))); - cmp(t1, rscratch1); - br(Assembler::LE, discard_tlab); - - // Retain - // ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()))); - mov(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment()); - add(rscratch1, rscratch1, t2); - str(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()))); - - if (TLABStats) { - // increment number of slow_allocations - addmw(Address(rthread, in_bytes(JavaThread::tlab_slow_allocations_offset())), - 1, rscratch1); - } - b(try_eden); - - bind(discard_tlab); - if (TLABStats) { - // increment number of refills - addmw(Address(rthread, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1, - rscratch1); - // accumulate wastage -- t1 is amount free in tlab - addmw(Address(rthread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1, - rscratch1); - } - - // if tlab is currently allocated (top or end != null) then - // fill [top, end + alignment_reserve) with array object - cbz(top, do_refill); - - // set up the mark word - mov(rscratch1, (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2)); - str(rscratch1, Address(top, oopDesc::mark_offset_in_bytes())); - // set the length to the remaining space - sub(t1, t1, typeArrayOopDesc::header_size(T_INT)); - add(t1, t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve()); - lsl(t1, t1, log2_intptr(HeapWordSize/sizeof(jint))); - strw(t1, Address(top, arrayOopDesc::length_offset_in_bytes())); - // set klass to intArrayKlass - { - unsigned long offset; - // dubious reloc why not an oop reloc? - adrp(rscratch1, ExternalAddress((address)Universe::intArrayKlassObj_addr()), - offset); - ldr(t1, Address(rscratch1, offset)); - } - // store klass last. concurrent gcs assumes klass length is valid if - // klass field is not null. - store_klass(top, t1); - - mov(t1, top); - ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); - sub(t1, t1, rscratch1); - incr_allocated_bytes(rthread, t1, 0, rscratch1); - - // refill the tlab with an eden allocation - bind(do_refill); - ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_size_offset()))); - lsl(t1, t1, LogHeapWordSize); - // allocate new tlab, address returned in top - eden_allocate(top, t1, 0, t2, slow_case); - - // Check that t1 was preserved in eden_allocate. -#ifdef ASSERT - if (UseTLAB) { - Label ok; - Register tsize = r4; - assert_different_registers(tsize, rthread, t1); - str(tsize, Address(pre(sp, -16))); - ldr(tsize, Address(rthread, in_bytes(JavaThread::tlab_size_offset()))); - lsl(tsize, tsize, LogHeapWordSize); - cmp(t1, tsize); - br(Assembler::EQ, ok); - STOP("assert(t1 != tlab size)"); - should_not_reach_here(); - - bind(ok); - ldr(tsize, Address(post(sp, 16))); - } -#endif - str(top, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); - str(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); - add(top, top, t1); - sub(top, top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); - str(top, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); - - if (ZeroTLAB) { - // This is a fast TLAB refill, therefore the GC is not notified of it. - // So compiled code must fill the new TLAB with zeroes. 
- ldr(top, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); - zero_memory(top,t1,t2); - } - - verify_tlab(); - b(retry); - - return rthread; // for use by caller -} - // Zero words; len is in bytes // Destroys all registers except addr // len must be a nonzero multiple of wordSize @@ -4492,7 +4558,7 @@ bind(loop); sub(len, len, unroll); for (int i = -unroll; i < 0; i++) - str(zr, Address(t1, i * wordSize)); + Assembler::str(zr, Address(t1, i * wordSize)); bind(entry); add(t1, t1, unroll * wordSize); cbnz(len, loop); @@ -4669,7 +4735,7 @@ void MacroAssembler::load_byte_map_base(Register reg) { jbyte *byte_map_base = - ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base; + ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base(); if (is_valid_AArch64_address((address)byte_map_base)) { // Strictly speaking the byte_map_base isn't an address at all, diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp @@ -150,11 +150,19 @@ void bind(Label& L) { Assembler::bind(L); - code()->clear_last_membar(); + code()->clear_last_insn(); } void membar(Membar_mask_bits order_constraint); + using Assembler::ldr; + using Assembler::str; + + void ldr(Register Rx, const Address &adr); + void ldrw(Register Rw, const Address &adr); + void str(Register Rx, const Address &adr); + void strw(Register Rx, const Address &adr); + // Frame creation and destruction shared between JITs. void build_frame(int framesize); void remove_frame(int framesize); @@ -893,7 +901,6 @@ Register t2, // temp register Label& slow_case // continuation point if fast allocation fails ); - Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address void zero_memory(Register addr, Register len, Register t1); void verify_tlab(); @@ -1344,6 +1351,17 @@ // Uses rscratch2 if the address is not directly reachable Address spill_address(int size, int offset, Register tmp=rscratch2); + bool merge_alignment_check(Register base, size_t size, long cur_offset, long prev_offset) const; + + // Check whether two loads/stores can be merged into ldp/stp. + bool ldst_can_merge(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store) const; + + // Merge current load/store with previous load/store into ldp/stp. + void merge_ldst(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store); + + // Try to merge two loads/stores into ldp/stp. If success, returns true else false. 
+ bool try_merge_ldst(Register rt, const Address &adr, size_t cur_size_in_bytes, bool is_store); + public: void spill(Register Rx, bool is64, int offset) { if (is64) { diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp --- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp @@ -131,6 +131,13 @@ return Instruction_aarch64::extract(insn, 31, 12) == 0b11010101000000110011 && Instruction_aarch64::extract(insn, 7, 0) == 0b10111111; } + + bool is_Imm_LdSt() { + unsigned int insn = uint_at(0); + return Instruction_aarch64::extract(insn, 29, 27) == 0b111 && + Instruction_aarch64::extract(insn, 23, 23) == 0b0 && + Instruction_aarch64::extract(insn, 26, 25) == 0b00; + } }; inline NativeInstruction* nativeInstruction_at(address address) { @@ -532,4 +539,57 @@ return (NativeMembar*)addr; } +class NativeLdSt : public NativeInstruction { +private: + int32_t size() { return Instruction_aarch64::extract(uint_at(0), 31, 30); } + // Check whether instruction is with unscaled offset. + bool is_ldst_ur() { + return (Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000010 || + Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000000) && + Instruction_aarch64::extract(uint_at(0), 11, 10) == 0b00; + } + bool is_ldst_unsigned_offset() { + return Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100101 || + Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100100; + } +public: + Register target() { + uint32_t r = Instruction_aarch64::extract(uint_at(0), 4, 0); + return r == 0x1f ? zr : as_Register(r); + } + Register base() { + uint32_t b = Instruction_aarch64::extract(uint_at(0), 9, 5); + return b == 0x1f ? sp : as_Register(b); + } + int64_t offset() { + if (is_ldst_ur()) { + return Instruction_aarch64::sextract(uint_at(0), 20, 12); + } else if (is_ldst_unsigned_offset()) { + return Instruction_aarch64::extract(uint_at(0), 21, 10) << size(); + } else { + // others like: pre-index or post-index. + ShouldNotReachHere(); + return 0; + } + } + size_t size_in_bytes() { return 1 << size(); } + bool is_not_pre_post_index() { return (is_ldst_ur() || is_ldst_unsigned_offset()); } + bool is_load() { + assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 || + Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str"); + + return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01; + } + bool is_store() { + assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 || + Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str"); + + return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00; + } +}; + +inline NativeLdSt *NativeLdSt_at(address addr) { + assert(nativeInstruction_at(addr)->is_Imm_LdSt(), "no immediate load/store found"); + return (NativeLdSt*)addr; +} #endif // CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp --- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
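The NativeLdSt accessors added above are plain bitfield plucking from the 32-bit instruction word. A standalone sketch of the same decoding for the unsigned-offset form, using a hand-checked encoding of ldr x1, [x2, #16]:

```cpp
#include <cstdint>
#include <cstdio>

// Extract bits [hi:lo] of a 32-bit instruction word.
static uint32_t bits(uint32_t insn, int hi, int lo) {
  return (insn >> lo) & ((1u << (hi - lo + 1)) - 1);
}

int main() {
  // ldr x1, [x2, #16] encodes as 0xF9400841: size=0b11, opc=0b01 (load),
  // imm12=2 (scaled by the 8-byte access size), Rn=2, Rt=1.
  uint32_t insn = 0xF9400841;

  int size_log2  = bits(insn, 31, 30);           // 3 -> 8-byte access
  bool is_load   = bits(insn, 23, 22) == 0b01;
  uint32_t rt    = bits(insn, 4, 0);
  uint32_t rn    = bits(insn, 9, 5);
  int64_t offset = (int64_t)bits(insn, 21, 10) << size_log2;

  printf("%s x%u, [x%u, #%lld]\n", is_load ? "ldr" : "str",
         rt, rn, (long long)offset);             // ldr x1, [x2, #16]
}
```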
* @@ -30,6 +30,8 @@ #include "gc/shenandoah/shenandoahBarrierSet.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" #include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_aarch64.hpp" #include "oops/instanceOop.hpp" @@ -709,9 +711,7 @@ __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); __ pop(saved_regs, sp); break; - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: - case BarrierSet::ModRef: + case BarrierSet::CardTableModRef: break; default: ShouldNotReachHere(); @@ -752,16 +752,16 @@ __ pop(saved_regs, sp); } break; - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: { - CardTableModRefBS* ct = (CardTableModRefBS*)bs; - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS*>(bs); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); Label L_loop; - __ lsr(start, start, CardTableModRefBS::card_shift); - __ lsr(end, end, CardTableModRefBS::card_shift); + __ lsr(start, start, CardTable::card_shift); + __ lsr(end, end, CardTable::card_shift); __ sub(end, end, start); // number of bytes to copy const Register count = end; // 'end' register contains bytes count now diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp --- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -210,8 +210,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: { if (val == noreg) { __ store_heap_oop_null(obj); @@ -396,7 +395,7 @@ void TemplateTable::ldc(bool wide) { transition(vtos, vtos); - Label call_ldc, notFloat, notClass, Done; + Label call_ldc, notFloat, notClass, notInt, Done; if (wide) { __ get_unsigned_2_byte_index_at_bcp(r1, 1); @@ -443,20 +442,19 @@ __ b(Done); __ bind(notFloat); -#ifdef ASSERT - { - Label L; - __ cmp(r3, JVM_CONSTANT_Integer); - __ br(Assembler::EQ, L); - // String and Object are rewritten to fast_aldc - __ stop("unexpected tag type in ldc"); - __ bind(L); - } -#endif - // itos JVM_CONSTANT_Integer only + + __ cmp(r3, JVM_CONSTANT_Integer); + __ br(Assembler::NE, notInt); + + // itos __ adds(r1, r2, r1, Assembler::LSL, 3); __ ldrw(r0, Address(r1, base_offset)); __ push_i(r0); + __ b(Done); + + __ bind(notInt); + condy_helper(Done); + __ bind(Done); } @@ -467,6 +465,8 @@ Register result = r0; Register tmp = r1; + Register rarg = r2; + int index_size = wide ? sizeof(u2) : sizeof(u1); Label resolved; @@ -481,12 +481,27 @@ address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); // first time invocation - must resolve first - __ mov(tmp, (int)bytecode()); - __ call_VM(result, entry, tmp); + __ mov(rarg, (int)bytecode()); + __ call_VM(result, entry, rarg); __ bind(resolved); + { // Check for the null sentinel. 
+ // If we just called the VM, that already did the mapping for us, + // but it's harmless to retry. + Label notNull; + + // Stash null_sentinel address to get its value later + __ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr()); + __ ldr(tmp, Address(rarg)); + __ cmp(result, tmp); + __ br(Assembler::NE, notNull); + __ mov(result, 0); // NULL object reference + __ bind(notNull); + } + if (VerifyOops) { + // Safe to call with 0 result __ verify_oop(result); } } @@ -494,7 +509,7 @@ void TemplateTable::ldc2_w() { transition(vtos, vtos); - Label Long, Done; + Label notDouble, notLong, Done; __ get_unsigned_2_byte_index_at_bcp(r0, 1); __ get_cpool_and_tags(r1, r2); @@ -505,22 +520,143 @@ __ lea(r2, Address(r2, r0, Address::lsl(0))); __ load_unsigned_byte(r2, Address(r2, tags_offset)); __ cmpw(r2, (int)JVM_CONSTANT_Double); - __ br(Assembler::NE, Long); + __ br(Assembler::NE, notDouble); + // dtos __ lea (r2, Address(r1, r0, Address::lsl(3))); __ ldrd(v0, Address(r2, base_offset)); __ push_d(); __ b(Done); - __ bind(Long); + __ bind(notDouble); + __ cmpw(r2, (int)JVM_CONSTANT_Long); + __ br(Assembler::NE, notLong); + // ltos __ lea(r0, Address(r1, r0, Address::lsl(3))); __ ldr(r0, Address(r0, base_offset)); __ push_l(); + __ b(Done); + + __ bind(notLong); + condy_helper(Done); __ bind(Done); } +void TemplateTable::condy_helper(Label& Done) +{ + Register obj = r0; + Register rarg = r1; + Register flags = r2; + Register off = r3; + + address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); + + __ mov(rarg, (int) bytecode()); + __ call_VM(obj, entry, rarg); + + __ get_vm_result_2(flags, rthread); + + // VMr = obj = base address to find primitive value to push + // VMr2 = flags = (tos, off) using format of CPCE::_flags + __ mov(off, flags); + __ andw(off, off, ConstantPoolCacheEntry::field_index_mask); + + const Address field(obj, off); + + // What sort of thing are we loading? + // x86 uses a shift and mask or wings it with a shift plus assert + // the mask is not needed. 
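Before the bitfield extract that follows, the operation in plain C++ terms: the tos state is a small unsigned field inside the CPCE flags word, recovered by a shift and mask. A sketch with illustrative field parameters (the real shift and width come from ConstantPoolCacheEntry):

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative field parameters; the real values come from
// ConstantPoolCacheEntry (a 4-bit tos state in the top of the word).
const int tos_state_shift = 28;
const int tos_state_bits  = 4;

// The C++ equivalent of ubfx: unsigned bitfield extract.
uint32_t tos_state(uint32_t flags) {
  return (flags >> tos_state_shift) & ((1u << tos_state_bits) - 1);
}

int main() {
  uint32_t flags = 7u << tos_state_shift; // pack a tos value of 7
  printf("tos = %u\n", tos_state(flags)); // prints 7
}
```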
aarch64 just uses bitfield extract + __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift, + ConstantPoolCacheEntry::tos_state_bits); + + switch (bytecode()) { + case Bytecodes::_ldc: + case Bytecodes::_ldc_w: + { + // tos in (itos, ftos, stos, btos, ctos, ztos) + Label notInt, notFloat, notShort, notByte, notChar, notBool; + __ cmpw(flags, itos); + __ br(Assembler::NE, notInt); + // itos + __ ldrw(r0, field); + __ push(itos); + __ b(Done); + + __ bind(notInt); + __ cmpw(flags, ftos); + __ br(Assembler::NE, notFloat); + // ftos + __ load_float(field); + __ push(ftos); + __ b(Done); + + __ bind(notFloat); + __ cmpw(flags, stos); + __ br(Assembler::NE, notShort); + // stos + __ load_signed_short(r0, field); + __ push(stos); + __ b(Done); + + __ bind(notShort); + __ cmpw(flags, btos); + __ br(Assembler::NE, notByte); + // btos + __ load_signed_byte(r0, field); + __ push(btos); + __ b(Done); + + __ bind(notByte); + __ cmpw(flags, ctos); + __ br(Assembler::NE, notChar); + // ctos + __ load_unsigned_short(r0, field); + __ push(ctos); + __ b(Done); + + __ bind(notChar); + __ cmpw(flags, ztos); + __ br(Assembler::NE, notBool); + // ztos + __ load_signed_byte(r0, field); + __ push(ztos); + __ b(Done); + + __ bind(notBool); + break; + } + + case Bytecodes::_ldc2_w: + { + Label notLong, notDouble; + __ cmpw(flags, ltos); + __ br(Assembler::NE, notLong); + // ltos + __ ldr(r0, field); + __ push(ltos); + __ b(Done); + + __ bind(notLong); + __ cmpw(flags, dtos); + __ br(Assembler::NE, notDouble); + // dtos + __ load_double(field); + __ push(dtos); + __ b(Done); + + __ bind(notDouble); + break; + } + + default: + ShouldNotReachHere(); + } + + __ stop("bad ldc/condy"); +} + void TemplateTable::locals_index(Register reg, int offset) { __ ldrb(reg, at_bcp(offset)); diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp --- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp @@ -193,7 +193,9 @@ } // Enable vendor specific features - if (_cpu == CPU_CAVIUM) { + + // ThunderX + if (_cpu == CPU_CAVIUM && (_model == 0xA1)) { if (_variant == 0) _features |= CPU_DMB_ATOMICS; if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) { FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true); @@ -202,6 +204,20 @@ FLAG_SET_DEFAULT(UseSIMDForMemoryOps, (_variant > 0)); } } + // ThunderX2 + if ((_cpu == CPU_CAVIUM && (_model == 0xAF)) || + (_cpu == CPU_BROADCOM && (_model == 0x516))) { + if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) { + FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true); + } + if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) { + FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true); + } + if (FLAG_IS_DEFAULT(UseFPUForSpilling)) { + FLAG_SET_DEFAULT(UseFPUForSpilling, true); + } + } + if (_cpu == CPU_ARM && (_model == 0xd03 || _model2 == 0xd03)) _features |= CPU_A53MAC; if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _features |= CPU_STXR_PREFETCH; // If an olde style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07) diff --git a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp --- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp +++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
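On the vm_version change above: the (_cpu, _model) pairs come from the MIDR_EL1 identification register, whose field layout is fixed by the architecture. A small sketch of that decoding (the sample MIDR value is fabricated for illustration; 0x43/'C' is the Cavium implementer code and part 0x0af is the ThunderX2 test in the patch):

```cpp
#include <cstdint>
#include <cstdio>

// MIDR_EL1 layout (architectural): implementer[31:24], variant[23:20],
// architecture[19:16], part number[15:4], revision[3:0].
struct Midr { uint32_t implementer, variant, part; };

Midr decode_midr(uint32_t midr) {
  return { (midr >> 24) & 0xffu, (midr >> 20) & 0xfu, (midr >> 4) & 0xfffu };
}

int main() {
  // Fabricated sample: implementer 'C' (0x43, Cavium), part 0x0af.
  uint32_t sample = (0x43u << 24) | (0x0afu << 4);
  Midr m = decode_midr(sample);
  bool thunderx2 = (m.implementer == 0x43 && m.part == 0x0af);
  printf("implementer=0x%x part=0x%x thunderx2=%d\n",
         m.implementer, m.part, thunderx2);
}
```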
* * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,8 @@ #include "ci/ciArray.hpp" #include "ci/ciObjArrayKlass.hpp" #include "ci/ciTypeArrayKlass.hpp" +#include "ci/ciUtilities.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/cardTableModRefBS.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" @@ -475,22 +477,21 @@ } void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) { - assert(CardTableModRefBS::dirty_card_val() == 0, + assert(CardTable::dirty_card_val() == 0, "Cannot use ZR register (aarch64) or the register containing the card table base address directly (aarch32) otherwise"); #ifdef AARCH64 // AARCH64 has a register that is constant zero. We can use that one to set the // value in the card table to dirty. __ move(FrameMap::ZR_opr, card_addr); #else // AARCH64 - CardTableModRefBS* ct = (CardTableModRefBS*)_bs; - if(((intx)ct->byte_map_base & 0xff) == 0) { + if((ci_card_table_address_as<intx>() & 0xff) == 0) { // If the card table base address is aligned to 256 bytes, we can use the register // that contains the card_table_base_address. __ move(value, card_addr); } else { // Otherwise we need to create a register containing that value. LIR_Opr tmp_zero = new_register(T_INT); - __ move(LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val()), tmp_zero); + __ move(LIR_OprFact::intConst(CardTable::dirty_card_val()), tmp_zero); __ move(tmp_zero, card_addr); } #endif // AARCH64 @@ -510,14 +511,14 @@ } #ifdef AARCH64 - LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE); + LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BYTE); LIR_Opr tmp2 = tmp; - __ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTableModRefBS::card_shift) + __ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTable::card_shift) LIR_Address* card_addr = new LIR_Address(tmp2, T_BYTE); #else // Use unsigned type T_BOOLEAN here rather than (signed) T_BYTE since signed load // byte instruction does not support the addressing mode we need. - LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BOOLEAN); + LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN); #endif if (UseCondCardMark) { if (UseConcMarkSweepGC) { @@ -527,7 +528,7 @@ __ move(card_addr, cur_value); LabelObj* L_already_dirty = new LabelObj(); - __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val())); + __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTable::dirty_card_val())); __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label()); set_card(tmp, card_addr); __ branch_destination(L_already_dirty->label()); diff --git a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp --- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp +++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,9 @@ #include "c1/c1_Defs.hpp" #include "c1/c1_LIRAssembler.hpp" #include "c1/c1_MacroAssembler.hpp" #include "c1/c1_Runtime1.hpp" +#include "ci/ciUtilities.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_arm.hpp" #include "oops/compiledICHolder.hpp" @@ -40,6 +43,7 @@ #include "utilities/align.hpp" #include "vmreg_arm.inline.hpp" #if INCLUDE_ALL_GCS +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #endif @@ -608,8 +612,6 @@ __ set_info("g1_post_barrier_slow_id", dont_gc_arguments); - BarrierSet* bs = Universe::heap()->barrier_set(); - CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS*>(bs); Label done; Label recheck; Label runtime; @@ -619,8 +621,7 @@ Address buffer(Rthread, in_bytes(JavaThread::dirty_card_queue_offset() + DirtyCardQueue::byte_offset_of_buf())); - AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none); - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + AddressLiteral cardtable(ci_card_table_address_as<address>
(), relocInfo::none); // save at least the registers that need saving if the runtime is called #ifdef AARCH64 @@ -649,12 +650,12 @@ // explicitly specify that 'cardtable' has a relocInfo::none // type. __ lea(r_card_base_1, cardtable); - __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTableModRefBS::card_shift)); + __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift)); // first quick check without barrier __ ldrb(r_tmp2, Address(r_card_addr_0)); - __ cmp(r_tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val()); + __ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val()); __ b(recheck, ne); __ bind(done); @@ -675,14 +676,14 @@ // reload card state after the barrier that ensures the stored oop was visible __ ldrb(r_tmp2, Address(r_card_addr_0)); - assert(CardTableModRefBS::dirty_card_val() == 0, "adjust this code"); + assert(CardTable::dirty_card_val() == 0, "adjust this code"); __ cbz(r_tmp2, done); // storing region crossing non-NULL, card is clean. // dirty card and log. - assert(0 == (int)CardTableModRefBS::dirty_card_val(), "adjust this code"); - if (((intptr_t)ct->byte_map_base & 0xff) == 0) { + assert(0 == (int)CardTable::dirty_card_val(), "adjust this code"); + if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) { // Card table is aligned so the lowest byte of the table address base is zero. __ strb(r_card_base_1, Address(r_card_addr_0)); } else { @@ -722,10 +723,10 @@ const Register result = R0; const Register klass = R1; - if (UseTLAB && FastTLABRefill && id != new_instance_id) { + if (UseTLAB && Universe::heap()->supports_inline_contig_alloc() && id != new_instance_id) { // We come here when TLAB allocation failed. - // In this case we either refill TLAB or allocate directly from eden. - Label retry_tlab, try_eden, slow_case, slow_case_no_pop; + // In this case we try to allocate directly from eden. + Label slow_case, slow_case_no_pop; // Make sure the class is fully initialized if (id == fast_new_instance_init_check_id) { @@ -742,17 +743,6 @@ __ raw_push(R4, R5, LR); - __ tlab_refill(result, obj_size, tmp1, tmp2, obj_end, try_eden, slow_case); - - __ bind(retry_tlab); - __ ldr_u32(obj_size, Address(klass, Klass::layout_helper_offset())); - __ tlab_allocate(result, obj_end, tmp1, obj_size, slow_case); // initializes result and obj_end - __ initialize_object(result, obj_end, klass, noreg /* len */, tmp1, tmp2, - instanceOopDesc::header_size() * HeapWordSize, -1, - /* is_tlab_allocated */ true); - __ raw_pop_and_ret(R4, R5); - - __ bind(try_eden); __ ldr_u32(obj_size, Address(klass, Klass::layout_helper_offset())); __ eden_allocate(result, obj_end, tmp1, tmp2, obj_size, slow_case); // initializes result and obj_end __ incr_allocated_bytes(obj_size, tmp2); @@ -803,10 +793,10 @@ const Register klass = R1; const Register length = R2; - if (UseTLAB && FastTLABRefill) { + if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) { // We come here when TLAB allocation failed. - // In this case we either refill TLAB or allocate directly from eden. - Label retry_tlab, try_eden, slow_case, slow_case_no_pop; + // In this case we try to allocate directly from eden. 
+ Label slow_case, slow_case_no_pop; #ifdef AARCH64 __ mov_slow(Rtemp, C1_MacroAssembler::max_array_allocation_length); @@ -825,40 +815,6 @@ __ raw_push(R4, R5, LR); - __ tlab_refill(result, arr_size, tmp1, tmp2, tmp3, try_eden, slow_case); - - __ bind(retry_tlab); - // Get the allocation size: round_up((length << (layout_helper & 0xff)) + header_size) - __ ldr_u32(tmp1, Address(klass, Klass::layout_helper_offset())); - __ mov(arr_size, MinObjAlignmentInBytesMask); - __ and_32(tmp2, tmp1, (unsigned int)(Klass::_lh_header_size_mask << Klass::_lh_header_size_shift)); - -#ifdef AARCH64 - __ lslv_w(tmp3, length, tmp1); - __ add(arr_size, arr_size, tmp3); -#else - __ add(arr_size, arr_size, AsmOperand(length, lsl, tmp1)); -#endif // AARCH64 - - __ add(arr_size, arr_size, AsmOperand(tmp2, lsr, Klass::_lh_header_size_shift)); - __ align_reg(arr_size, arr_size, MinObjAlignmentInBytes); - - // tlab_allocate initializes result and obj_end, and preserves tmp2 which contains header_size - __ tlab_allocate(result, obj_end, tmp1, arr_size, slow_case); - - assert_different_registers(result, obj_end, klass, length, tmp1, tmp2); - __ initialize_header(result, klass, length, tmp1); - - __ add(tmp2, result, AsmOperand(tmp2, lsr, Klass::_lh_header_size_shift)); - if (!ZeroTLAB) { - __ initialize_body(tmp2, obj_end, tmp1); - } - - __ membar(MacroAssembler::StoreStore, tmp1); - - __ raw_pop_and_ret(R4, R5); - - __ bind(try_eden); // Get the allocation size: round_up((length << (layout_helper & 0xff)) + header_size) __ ldr_u32(tmp1, Address(klass, Klass::layout_helper_offset())); __ mov(arr_size, MinObjAlignmentInBytesMask); diff --git a/src/hotspot/cpu/arm/interp_masm_arm.cpp b/src/hotspot/cpu/arm/interp_masm_arm.cpp --- a/src/hotspot/cpu/arm/interp_masm_arm.cpp +++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "jvm.h" #include "gc/shared/barrierSet.inline.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/cardTableModRefBS.inline.hpp" #include "gc/shared/collectedHeap.hpp" #include "interp_masm_arm.hpp" @@ -410,12 +411,12 @@ void InterpreterMacroAssembler::store_check_part1(Register card_table_base) { // Check barrier set type (should be card table) and element size BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableForRS || - bs->kind() == BarrierSet::CardTableExtension, + assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); - CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS*>(bs); - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "Adjust store check code"); + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS*>(bs); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code"); // Load card table base address. @@ -433,19 +434,19 @@ rarely accessed area of thread descriptor). */ // TODO-AARCH64 Investigate if mov_slow is faster than ldr from Rthread on AArch64 - mov_address(card_table_base, (address)ct->byte_map_base, symbolic_Relocation::card_table_reference); + mov_address(card_table_base, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference); } // The 2nd part of the store check. 
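The two store-check parts above implement the classic card-table post-barrier: index the card table by (address >> card_shift) and store the dirty value. A condensed end-to-end sketch (the card_shift of 9 and dirty == 0 match the patch's asserts; the clean value and table sizes are illustrative):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

const int    card_shift = 9;  // 512-byte cards, HotSpot's usual choice
const int8_t dirty_card = 0;  // the patch asserts dirty_card_val() == 0

// byte_map_base is pre-biased in HotSpot so (addr >> card_shift) indexes
// it directly; this sketch fakes that with a small table over a fake heap.
int8_t  card_table[16];
uint8_t heap[16 << card_shift];

void post_barrier(void* field_addr) {
  uintptr_t delta = (uintptr_t)field_addr - (uintptr_t)heap;
  card_table[delta >> card_shift] = dirty_card; // mark the card dirty
}

int main() {
  memset(card_table, 0xff, sizeof(card_table)); // all-ones = clean
  post_barrier(&heap[3 * 512 + 40]);
  printf("card 3 = %d\n", card_table[3]);       // prints 0 (dirty)
}
```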
void InterpreterMacroAssembler::store_check_part2(Register obj, Register card_table_base, Register tmp) { assert_different_registers(obj, card_table_base, tmp); - assert(CardTableModRefBS::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations."); + assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations."); #ifdef AARCH64 - add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTableModRefBS::card_shift)); + add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTable::card_shift)); Address card_table_addr(card_table_base); #else - Address card_table_addr(card_table_base, obj, lsr, CardTableModRefBS::card_shift); + Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift); #endif if (UseCondCardMark) { @@ -472,8 +473,9 @@ #ifdef AARCH64 strb(ZR, card_table_addr); #else - CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS*>(Universe::heap()->barrier_set()); - if ((((uintptr_t)ct->byte_map_base & 0xff) == 0)) { + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS*>(Universe::heap()->barrier_set()); + CardTable* ct = ctbs->card_table(); + if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) { // Card table is aligned so the lowest byte of the table address base is zero. // This works only if the code is not saved for later use, possibly // in a context where the base would no longer be aligned. diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.cpp b/src/hotspot/cpu/arm/macroAssembler_arm.cpp --- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp +++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "ci/ciEnv.hpp" #include "code/nativeInst.hpp" #include "compiler/disassembler.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/cardTableModRefBS.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" @@ -43,6 +44,7 @@ #include "runtime/stubRoutines.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #include "gc/g1/heapRegion.hpp" @@ -1316,98 +1318,6 @@ str(obj_end, Address(Rthread, JavaThread::tlab_top_offset())); } -void MacroAssembler::tlab_refill(Register top, Register tmp1, Register tmp2, - Register tmp3, Register tmp4, - Label& try_eden, Label& slow_case) { - if (!Universe::heap()->supports_inline_contig_alloc()) { - b(slow_case); - return; - } - - InlinedAddress intArrayKlass_addr((address)Universe::intArrayKlassObj_addr()); - Label discard_tlab, do_refill; - ldr(top, Address(Rthread, JavaThread::tlab_top_offset())); - ldr(tmp1, Address(Rthread, JavaThread::tlab_end_offset())); - ldr(tmp2, Address(Rthread, JavaThread::tlab_refill_waste_limit_offset())); - - // Calculate amount of free space - sub(tmp1, tmp1, top); - // Retain tlab and allocate in shared space - // if the amount of free space in tlab is too large to discard - cmp(tmp2, AsmOperand(tmp1, lsr, LogHeapWordSize)); - b(discard_tlab, ge); - - // Increment waste limit to prevent getting stuck on this slow path - mov_slow(tmp3, ThreadLocalAllocBuffer::refill_waste_limit_increment()); - add(tmp2, tmp2, tmp3); - str(tmp2, Address(Rthread, JavaThread::tlab_refill_waste_limit_offset())); - if (TLABStats) { - ldr_u32(tmp2, Address(Rthread, JavaThread::tlab_slow_allocations_offset())); - add_32(tmp2, tmp2, 1); - str_32(tmp2, Address(Rthread, JavaThread::tlab_slow_allocations_offset())); - } - b(try_eden); - bind_literal(intArrayKlass_addr); - - bind(discard_tlab); - if (TLABStats) { - ldr_u32(tmp2, Address(Rthread, JavaThread::tlab_number_of_refills_offset())); - ldr_u32(tmp3, Address(Rthread, JavaThread::tlab_fast_refill_waste_offset())); - add_32(tmp2, tmp2, 1); - add_32(tmp3, tmp3, AsmOperand(tmp1, lsr, LogHeapWordSize)); - str_32(tmp2, Address(Rthread, JavaThread::tlab_number_of_refills_offset())); - str_32(tmp3, Address(Rthread, JavaThread::tlab_fast_refill_waste_offset())); - } - // If tlab is currently allocated (top or end != null) - // then fill [top, end + alignment_reserve) with array object - cbz(top, do_refill); - - // Set up the mark word - mov_slow(tmp2, (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2)); - str(tmp2, Address(top, oopDesc::mark_offset_in_bytes())); - // Set klass to intArrayKlass and the length to the remaining space - ldr_literal(tmp2, intArrayKlass_addr); - add(tmp1, tmp1, ThreadLocalAllocBuffer::alignment_reserve_in_bytes() - - typeArrayOopDesc::header_size(T_INT) * HeapWordSize); - Register klass = tmp2; - ldr(klass, Address(tmp2)); - logical_shift_right(tmp1, tmp1, LogBytesPerInt); // divide by sizeof(jint) - str_32(tmp1, Address(top, arrayOopDesc::length_offset_in_bytes())); - store_klass(klass, top); // blows klass: - klass = noreg; - - ldr(tmp1, Address(Rthread, JavaThread::tlab_start_offset())); - sub(tmp1, top, tmp1); // size of tlab's allocated portion - incr_allocated_bytes(tmp1, tmp2); - - bind(do_refill); - // Refill the tlab with an eden allocation - ldr(tmp1, Address(Rthread, JavaThread::tlab_size_offset())); - 
logical_shift_left(tmp4, tmp1, LogHeapWordSize); - eden_allocate(top, tmp1, tmp2, tmp3, tmp4, slow_case); - str(top, Address(Rthread, JavaThread::tlab_start_offset())); - str(top, Address(Rthread, JavaThread::tlab_top_offset())); - -#ifdef ASSERT - // Verify that tmp1 contains tlab_end - ldr(tmp2, Address(Rthread, JavaThread::tlab_size_offset())); - add(tmp2, top, AsmOperand(tmp2, lsl, LogHeapWordSize)); - cmp(tmp1, tmp2); - breakpoint(ne); -#endif - - sub(tmp1, tmp1, ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); - str(tmp1, Address(Rthread, JavaThread::tlab_end_offset())); - - if (ZeroTLAB) { - // clobbers start and tmp - // top must be preserved! - add(tmp1, tmp1, ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); - ldr(tmp2, Address(Rthread, JavaThread::tlab_start_offset())); - zero_memory(tmp2, tmp1, tmp3); - } -} - // Fills memory regions [start..end] with zeroes. Clobbers `start` and `tmp` registers. void MacroAssembler::zero_memory(Register start, Register end, Register tmp) { Label loop; @@ -2357,7 +2267,8 @@ DirtyCardQueue::byte_offset_of_buf())); BarrierSet* bs = Universe::heap()->barrier_set(); - CardTableModRefBS* ct = (CardTableModRefBS*)bs; + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS*>(bs); + CardTable* ct = ctbs->card_table(); Label done; Label runtime; @@ -2378,18 +2289,18 @@ // storing region crossing non-NULL, is card already dirty? const Register card_addr = tmp1; - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); - - mov_address(tmp2, (address)ct->byte_map_base, symbolic_Relocation::card_table_reference); - add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTableModRefBS::card_shift)); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); + + mov_address(tmp2, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference); + add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTable::card_shift)); ldrb(tmp2, Address(card_addr)); - cmp(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val()); + cmp(tmp2, (int)G1CardTable::g1_young_card_val()); b(done, eq); membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp2); - assert(CardTableModRefBS::dirty_card_val() == 0, "adjust this code"); + assert(CardTable::dirty_card_val() == 0, "adjust this code"); ldrb(tmp2, Address(card_addr)); cbz(tmp2, done); @@ -3115,7 +3026,6 @@ } #endif // COMPILER2 - // Must preserve condition codes, or C2 encodeKlass_not_null rule // must be changed. void MacroAssembler::encode_klass_not_null(Register r) { @@ -3353,4 +3263,3 @@ } #endif // COMPILER2 - diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.hpp b/src/hotspot/cpu/arm/macroAssembler_arm.hpp --- a/src/hotspot/cpu/arm/macroAssembler_arm.hpp +++ b/src/hotspot/cpu/arm/macroAssembler_arm.hpp @@ -359,8 +359,6 @@ void tlab_allocate(Register obj, Register obj_end, Register tmp1, RegisterOrConstant size_expression, Label& slow_case); - void tlab_refill(Register top, Register tmp1, Register tmp2, Register tmp3, Register tmp4, - Label& try_eden, Label& slow_case); void zero_memory(Register start, Register end, Register tmp); void incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register tmp); diff --git a/src/hotspot/cpu/arm/stubGenerator_arm.cpp b/src/hotspot/cpu/arm/stubGenerator_arm.cpp --- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp +++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,8 @@ #include "precompiled.hpp" #include "asm/assembler.hpp" #include "assembler_arm.inline.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_arm.hpp" #include "oops/instanceOop.hpp" @@ -2907,8 +2909,7 @@ __ pop(saved_regs | R9ifScratched); #endif // AARCH64 } - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: break; default: ShouldNotReachHere(); @@ -2961,12 +2962,12 @@ #endif // !AARCH64 } break; - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: { BLOCK_COMMENT("CardTablePostBarrier"); - CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs); - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); Label L_cardtable_loop, L_done; @@ -2975,12 +2976,12 @@ __ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop); __ sub(count, count, BytesPerHeapOop); // last addr - __ logical_shift_right(addr, addr, CardTableModRefBS::card_shift); - __ logical_shift_right(count, count, CardTableModRefBS::card_shift); + __ logical_shift_right(addr, addr, CardTable::card_shift); + __ logical_shift_right(count, count, CardTable::card_shift); __ sub(count, count, addr); // nb of cards // warning: Rthread has not been preserved - __ mov_address(tmp, (address) ct->byte_map_base, symbolic_Relocation::card_table_reference); + __ mov_address(tmp, (address) ct->byte_map_base(), symbolic_Relocation::card_table_reference); __ add(addr,tmp, addr); Register zero = __ zero_register(tmp); @@ -2992,8 +2993,6 @@ __ BIND(L_done); } break; - case BarrierSet::ModRef: - break; default: ShouldNotReachHere(); } diff --git a/src/hotspot/cpu/arm/templateTable_arm.cpp b/src/hotspot/cpu/arm/templateTable_arm.cpp --- a/src/hotspot/cpu/arm/templateTable_arm.cpp +++ b/src/hotspot/cpu/arm/templateTable_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -228,8 +228,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: { if (is_null) { __ store_heap_oop_null(new_val, obj); diff --git a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp --- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2015 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* @@ -27,6 +27,9 @@ #include "c1/c1_Defs.hpp" #include "c1/c1_MacroAssembler.hpp" #include "c1/c1_Runtime1.hpp" +#include "ci/ciUtilities.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_ppc.hpp" #include "oops/compiledICHolder.hpp" @@ -40,6 +43,7 @@ #include "utilities/macros.hpp" #include "vmreg_ppc.inline.hpp" #if INCLUDE_ALL_GCS +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #endif @@ -413,34 +417,9 @@ assert(id == fast_new_instance_init_check_id, "bad StubID"); __ set_info("fast new_instance init check", dont_gc_arguments); } + // We don't support eden allocation. -// if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && -// UseTLAB && FastTLABRefill) { -// if (id == fast_new_instance_init_check_id) { -// // make sure the klass is initialized -// __ lbz(R0, in_bytes(InstanceKlass::init_state_offset()), R3_ARG1); -// __ cmpwi(CCR0, R0, InstanceKlass::fully_initialized); -// __ bne(CCR0, slow_path); -// } -//#ifdef ASSERT -// // assert object can be fast path allocated -// { -// Label ok, not_ok; -// __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R3_ARG1); -// // make sure it's an instance (LH > 0) -// __ cmpwi(CCR0, R0, 0); -// __ ble(CCR0, not_ok); -// __ testbitdi(CCR0, R0, R0, Klass::_lh_instance_slow_path_bit); -// __ beq(CCR0, ok); -// -// __ bind(not_ok); -// __ stop("assert(can be fast path allocated)"); -// __ bind(ok); -// } -//#endif // ASSERT -// // We don't support eden allocation. -// __ bind(slow_path); -// } + oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_instance), R4_ARG2); } break; @@ -820,7 +799,7 @@ Register tmp = R0; Register addr = R14; Register tmp2 = R15; - jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base; + jbyte* byte_map_base = ci_card_table_address(); Label restart, refill, ret; @@ -828,26 +807,26 @@ __ std(addr, -8, R1_SP); __ std(tmp2, -16, R1_SP); - __ srdi(addr, R0, CardTableModRefBS::card_shift); // Addr is passed in R0. + __ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0. __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp); __ add(addr, tmp2, addr); __ lbz(tmp, 0, addr); // tmp := [addr + cardtable] // Return if young card. - __ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::g1_young_card_val()); + __ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val()); __ beq(CCR0, ret); // Return if sequential consistent value is already dirty. __ membar(Assembler::StoreLoad); __ lbz(tmp, 0, addr); // tmp := [addr + cardtable] - __ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::dirty_card_val()); + __ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val()); __ beq(CCR0, ret); // Not dirty. // First, dirty it. - __ li(tmp, G1SATBCardTableModRefBS::dirty_card_val()); + __ li(tmp, G1CardTable::dirty_card_val()); __ stb(tmp, 0, addr); int dirty_card_q_index_byte_offset = diff --git a/src/hotspot/cpu/ppc/copy_ppc.hpp b/src/hotspot/cpu/ppc/copy_ppc.hpp --- a/src/hotspot/cpu/ppc/copy_ppc.hpp +++ b/src/hotspot/cpu/ppc/copy_ppc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2013 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -32,11 +32,11 @@ // Inline functions for memory copy and fill. 
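// For reference, the shape of the element-wise conjoint copy that the
// platform stubs below delegate to, as a minimal standalone C++ sketch: only
// the 'const' on the source pointer is new in this patch; the copy-direction
// logic is unchanged. When the ranges may overlap, the direction is chosen so
// that no source element is overwritten before it is read.
#include <cstddef>

template <class T>
static void conjoint_copy(const T* from, T* to, size_t count) {
  if (from > to) {
    while (count-- > 0) {
      *to++ = *from++;          // destination below source: copy forwards
    }
  } else {
    while (count-- > 0) {
      to[count] = from[count];  // destination above source: copy backwards
    }
  }
}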
-static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -52,7 +52,7 @@ } } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -70,25 +70,25 @@ } } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { (void)memmove(to, from, count); } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { (void)memmove(to, from, count); } // Template for atomic, element-wise copy. template <class T> -static void copy_conjoint_atomic(T* from, T* to, size_t count) { +static void copy_conjoint_atomic(const T* from, T* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -104,44 +104,44 @@ } } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { // TODO: contribute optimized version. copy_conjoint_atomic(from, to, count); } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { // TODO: contribute optimized version. copy_conjoint_atomic(from, to, count); } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { copy_conjoint_atomic(from, to, count); } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { copy_conjoint_atomic(from, to, count); } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_bytes_atomic(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { // TODO: contribute optimized version. - pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count); + pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { // TODO: contribute optimized version.
- pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); } static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) { diff --git a/src/hotspot/cpu/ppc/frame_ppc.cpp b/src/hotspot/cpu/ppc/frame_ppc.cpp --- a/src/hotspot/cpu/ppc/frame_ppc.cpp +++ b/src/hotspot/cpu/ppc/frame_ppc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2017 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -32,6 +32,7 @@ #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/monitorChunk.hpp" #include "runtime/signature.hpp" #include "runtime/stubCodeGenerator.hpp" diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp --- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp @@ -492,6 +492,8 @@ // Add in the index. add(result, tmp, result); load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result, is_null); + // The resulting oop is null if the reference is not yet resolved. + // It is Universe::the_null_sentinel() if the reference resolved to NULL via condy. 
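// The comment added above deserves a sketch: a NULL entry in the resolved
// references array means "not yet resolved", so a condy constant that
// legitimately resolves to NULL must be cached as a distinguished non-null
// sentinel object and mapped back to NULL on load. A hedged C++ sketch of
// that convention (the accessor names below are illustrative stand-ins, not
// HotSpot API; Universe::the_null_sentinel() is the real sentinel):
#include <cstddef>

typedef void* oop;                     // stand-in for HotSpot's oop

oop resolved_reference_at(int index);  // illustrative accessor
oop the_null_sentinel();               // stand-in for Universe::the_null_sentinel()

oop load_resolved_reference(int index) {
  oop result = resolved_reference_at(index);  // NULL here means "not resolved yet"
  if (result == the_null_sentinel()) {
    result = NULL;                            // the condy constant really was NULL
  }
  return result;
}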
} // load cpool->resolved_klass_at(index) diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp @@ -26,6 +26,7 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" #include "compiler/disassembler.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/cardTableModRefBS.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" @@ -43,6 +44,7 @@ #include "runtime/stubRoutines.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #include "gc/g1/heapRegion.hpp" @@ -2336,9 +2338,6 @@ std(new_top, in_bytes(JavaThread::tlab_top_offset()), R16_thread); //verify_tlab(); not implemented } -void MacroAssembler::tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case) { - unimplemented("tlab_refill"); -} void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register t1, Register t2) { unimplemented("incr_allocated_bytes"); } @@ -3039,20 +3038,20 @@ void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) { CardTableModRefBS* bs = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); - assert(bs->kind() == BarrierSet::CardTableForRS || - bs->kind() == BarrierSet::CardTableExtension, "wrong barrier"); + assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier"); + CardTable* ct = bs->card_table(); #ifdef ASSERT cmpdi(CCR0, Rnew_val, 0); asm_assert_ne("null oop not allowed", 0x321); #endif - card_table_write(bs->byte_map_base, Rtmp, Rstore_addr); + card_table_write(ct->byte_map_base(), Rtmp, Rstore_addr); } // Write the card table byte. void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) { assert_different_registers(Robj, Rtmp, R0); load_const_optimized(Rtmp, (address)byte_map_base, R0); - srdi(Robj, Robj, CardTableModRefBS::card_shift); + srdi(Robj, Robj, CardTable::card_shift); li(R0, 0); // dirty if (UseConcMarkSweepGC) membar(Assembler::StoreStore); stbx(R0, Rtmp, Robj); @@ -3174,6 +3173,7 @@ G1SATBCardTableLoggingModRefBS* bs = barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set()); + CardTable* ct = bs->card_table(); // Does store cross heap regions? if (G1RSBarrierRegionFilter) { @@ -3190,26 +3190,26 @@ #endif // Storing region crossing non-NULL, is card already dirty? - assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code"); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); const Register Rcard_addr = Rtmp1; Register Rbase = Rtmp2; - load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3); - - srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift); + load_const_optimized(Rbase, (address)ct->byte_map_base(), /*temp*/ Rtmp3); + + srdi(Rcard_addr, Rstore_addr, CardTable::card_shift); // Get the address of the card. lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr); - cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val()); + cmpwi(CCR0, Rtmp3, (int)G1CardTable::g1_young_card_val()); beq(CCR0, filtered); membar(Assembler::StoreLoad); lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr); // Reload after membar.
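// The young-card filter / StoreLoad / recheck dance above, written out as a
// minimal C++ sketch with the constants inlined (the young-card value and
// enqueue_card are illustrative stand-ins, not the HotSpot definitions):
#include <cstdint>

static const int         kCardShift = 9;  // 512-byte cards (CardTable::card_shift)
static const signed char kDirty     = 0;  // CardTable::dirty_card_val(), asserted == 0 elsewhere in this patch
static const signed char kYoung     = 2;  // stand-in for G1CardTable::g1_young_card_val()

static void enqueue_card(volatile signed char* card) {
  (void)card;  // stand-in for the dirty card queue push, elided in this sketch
}

static void g1_post_barrier_slow(uintptr_t store_addr, volatile signed char* byte_map_base) {
  volatile signed char* card = byte_map_base + (store_addr >> kCardShift);
  if (*card == kYoung) return;  // stores into young regions need no remembered-set update
  __sync_synchronize();         // full fence covering the required StoreLoad ordering
  if (*card == kDirty) return;  // already dirty: skip the store, avoids false sharing
  *card = kDirty;               // dirty the card ...
  enqueue_card(card);           // ... and log it for concurrent refinement
}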
- cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val()); + cmpwi(CCR0, Rtmp3 /* card value */, CardTable::dirty_card_val()); beq(CCR0, filtered); // Storing a region crossing, non-NULL oop, card is clean. // Dirty card and log. - li(Rtmp3, CardTableModRefBS::dirty_card_val()); + li(Rtmp3, CardTable::dirty_card_val()); //release(); // G1: oops are allowed to get visible after dirty marking. stbx(Rtmp3, Rbase, Rcard_addr); diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp @@ -602,7 +602,6 @@ Register t1, // temp register Label& slow_case // continuation point if fast allocation fails ); - void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); void incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register t1, Register t2); enum { trampoline_stub_size = 6 * 4 }; diff --git a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp --- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp @@ -25,6 +25,8 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_ppc.hpp" #include "oops/instanceOop.hpp" @@ -667,9 +669,7 @@ __ bind(filtered); } break; - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: - case BarrierSet::ModRef: + case BarrierSet::CardTableModRef: break; default: ShouldNotReachHere(); @@ -703,8 +703,7 @@ __ restore_LR_CR(R0); } break; - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: { Label Lskip_loop, Lstore_loop; if (UseConcMarkSweepGC) { @@ -712,19 +711,20 @@ __ release(); } - CardTableModRefBS* const ct = barrier_set_cast<CardTableModRefBS>(bs); - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + CardTableModRefBS* const ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTable* const ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); assert_different_registers(addr, count, tmp); __ sldi(count, count, LogBytesPerHeapOop); __ addi(count, count, -BytesPerHeapOop); __ add(count, addr, count); // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.) - __ srdi(addr, addr, CardTableModRefBS::card_shift); - __ srdi(count, count, CardTableModRefBS::card_shift); + __ srdi(addr, addr, CardTable::card_shift); + __ srdi(count, count, CardTable::card_shift); __ subf(count, addr, count); assert_different_registers(R0, addr, count, tmp); - __ load_const(tmp, (address)ct->byte_map_base); + __ load_const(tmp, (address)ct->byte_map_base()); __ addic_(count, count, 1); __ beq(CCR0, Lskip_loop); __ li(R0, 0); diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp --- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013, 2017 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* @@ -103,8 +103,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: { Label Lnull, Ldone; if (Rval != noreg) { @@ -314,7 +313,7 @@ Rcpool = R3_ARG1; transition(vtos, vtos); - Label notInt, notClass, exit; + Label notInt, notFloat, notClass, exit; __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags. if (wide) { // Read index. @@ -356,13 +355,16 @@ __ align(32, 12); __ bind(notInt); -#ifdef ASSERT - // String and Object are rewritten to fast_aldc __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float); - __ asm_assert_eq("unexpected type", 0x8765); -#endif + __ bne(CCR0, notFloat); __ lfsx(F15_ftos, Rcpool, Rscratch1); __ push(ftos); + __ b(exit); + + __ align(32, 12); + // assume the tag is for condy; if not, the VM runtime will tell us + __ bind(notFloat); + condy_helper(exit); __ align(32, 12); __ bind(exit); @@ -380,6 +382,19 @@ // non-null object (CallSite, etc.) __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index. __ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null); + + // Convert null sentinel to NULL. + int simm16_rest = __ load_const_optimized(Rscratch, Universe::the_null_sentinel_addr(), R0, true); + __ ld(Rscratch, simm16_rest, Rscratch); + __ cmpld(CCR0, R17_tos, Rscratch); + if (VM_Version::has_isel()) { + __ isel_0(R17_tos, CCR0, Assembler::equal); + } else { + Label not_sentinel; + __ bne(CCR0, not_sentinel); + __ li(R17_tos, 0); + __ bind(not_sentinel); + } __ verify_oop(R17_tos); __ dispatch_epilog(atos, Bytecodes::length_for(bytecode())); @@ -395,7 +410,7 @@ void TemplateTable::ldc2_w() { transition(vtos, vtos); - Label Llong, Lexit; + Label not_double, not_long, exit; Register Rindex = R11_scratch1, Rcpool = R12_scratch2, @@ -410,23 +425,129 @@ __ addi(Rtag, Rtag, tags_offset); __ lbzx(Rtag, Rtag, Rindex); - __ sldi(Rindex, Rindex, LogBytesPerWord); + __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double); - __ bne(CCR0, Llong); - // A double can be placed at word-aligned locations in the constant pool. - // Check out Conversions.java for an example. - // Also ConstantPool::header_size() is 20, which makes it very difficult - // to double-align double on the constant pool. SG, 11/7/97 + __ bne(CCR0, not_double); __ lfdx(F15_ftos, Rcpool, Rindex); __ push(dtos); - __ b(Lexit); - - __ bind(Llong); + __ b(exit); + + __ bind(not_double); + __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Long); + __ bne(CCR0, not_long); __ ldx(R17_tos, Rcpool, Rindex); __ push(ltos); - - __ bind(Lexit); + __ b(exit); + + __ bind(not_long); + condy_helper(exit); + + __ align(32, 12); + __ bind(exit); +} + +void TemplateTable::condy_helper(Label& Done) { + const Register obj = R31; + const Register off = R11_scratch1; + const Register flags = R12_scratch2; + const Register rarg = R4_ARG2; + __ li(rarg, (int)bytecode()); + call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg); + __ get_vm_result_2(flags); + + // VMr = obj = base address to find primitive value to push + // VMr2 = flags = (tos, off) using format of CPCE::_flags + __ andi(off, flags, ConstantPoolCacheEntry::field_index_mask); + + // What sort of thing are we loading? 
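// The flags word fetched above packs the field offset and the tos state in
// ConstantPoolCacheEntry::_flags format; the rldicl that follows is the PPC
// idiom for this bitfield extract. Minimal C++ sketch (the shift/bit counts
// mirror tos_state_shift = 28 and tos_state_bits = 4, taken here as given
// rather than from the HotSpot headers):
#include <cstdint>

static int tos_state_of(uint32_t flags) {
  const int tos_state_shift = 28;
  const int tos_state_bits  = 4;
  return (int)((flags >> tos_state_shift) & ((1u << tos_state_bits) - 1));
}
// The switch that follows dispatches on this value (itos, ftos, stos, btos,
// ctos, ztos for ldc; ltos, dtos for ldc2_w) and pushes an operand of the
// matching width.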
+ __ rldicl(flags, flags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); + + switch (bytecode()) { + case Bytecodes::_ldc: + case Bytecodes::_ldc_w: + { + // tos in (itos, ftos, stos, btos, ctos, ztos) + Label notInt, notFloat, notShort, notByte, notChar, notBool; + __ cmplwi(CCR0, flags, itos); + __ bne(CCR0, notInt); + // itos + __ lwax(R17_tos, obj, off); + __ push(itos); + __ b(Done); + + __ bind(notInt); + __ cmplwi(CCR0, flags, ftos); + __ bne(CCR0, notFloat); + // ftos + __ lfsx(F15_ftos, obj, off); + __ push(ftos); + __ b(Done); + + __ bind(notFloat); + __ cmplwi(CCR0, flags, stos); + __ bne(CCR0, notShort); + // stos + __ lhax(R17_tos, obj, off); + __ push(stos); + __ b(Done); + + __ bind(notShort); + __ cmplwi(CCR0, flags, btos); + __ bne(CCR0, notByte); + // btos + __ lbzx(R17_tos, obj, off); + __ extsb(R17_tos, R17_tos); + __ push(btos); + __ b(Done); + + __ bind(notByte); + __ cmplwi(CCR0, flags, ctos); + __ bne(CCR0, notChar); + // ctos + __ lhzx(R17_tos, obj, off); + __ push(ctos); + __ b(Done); + + __ bind(notChar); + __ cmplwi(CCR0, flags, ztos); + __ bne(CCR0, notBool); + // ztos + __ lbzx(R17_tos, obj, off); + __ push(ztos); + __ b(Done); + + __ bind(notBool); + break; + } + + case Bytecodes::_ldc2_w: + { + Label notLong, notDouble; + __ cmplwi(CCR0, flags, ltos); + __ bne(CCR0, notLong); + // ltos + __ ldx(R17_tos, obj, off); + __ push(ltos); + __ b(Done); + + __ bind(notLong); + __ cmplwi(CCR0, flags, dtos); + __ bne(CCR0, notDouble); + // dtos + __ lfdx(F15_ftos, obj, off); + __ push(dtos); + __ b(Done); + + __ bind(notDouble); + break; + } + + default: + ShouldNotReachHere(); + } + + __ stop("bad ldc/condy"); } // Get the locals index located in the bytecode stream at bcp + offset. diff --git a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp --- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp +++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -27,6 +27,9 @@ #include "c1/c1_Defs.hpp" #include "c1/c1_MacroAssembler.hpp" #include "c1/c1_Runtime1.hpp" +#include "ci/ciUtilities.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_s390.hpp" #include "oops/compiledICHolder.hpp" @@ -40,6 +43,7 @@ #include "vmreg_s390.inline.hpp" #include "registerSaver_s390.hpp" #if INCLUDE_ALL_GCS +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #endif @@ -346,11 +350,6 @@ __ set_info("fast new_instance init check", dont_gc_arguments); } - if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && - UseTLAB && FastTLABRefill) { - // Sapjvm: must call RT to generate allocation events. - } - OopMap* map = save_live_registers_except_r2(sasm); int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass); oop_maps = new OopMapSet(); @@ -411,10 +410,6 @@ } #endif // ASSERT - if (UseTLAB && FastTLABRefill) { - // sapjvm: must call RT to generate allocation events. - } - OopMap* map = save_live_registers_except_r2(sasm); int call_offset; if (id == new_type_array_id) { @@ -854,7 +849,7 @@ Register r1 = Z_R6; // Must be saved/restored. 
Register r2 = Z_R7; // Must be saved/restored. Register cardtable = r1; // Must be non-volatile, because it is used to save addr_card. - jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base; + jbyte* byte_map_base = ci_card_table_address(); // Save registers used below (see assertion in G1PreBarrierStub::emit_code()). __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP); @@ -863,17 +858,17 @@ // Calculate address of card corresponding to the updated oop slot. AddressLiteral rs(byte_map_base); - __ z_srlg(addr_card, addr_oop, CardTableModRefBS::card_shift); + __ z_srlg(addr_card, addr_oop, CardTable::card_shift); addr_oop = noreg; // dead now __ load_const_optimized(cardtable, rs); // cardtable := __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable - __ z_cli(0, addr_card, (int)G1SATBCardTableModRefBS::g1_young_card_val()); + __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val()); __ z_bre(young_card); __ z_sync(); // Required to support concurrent cleaning. - __ z_cli(0, addr_card, (int)CardTableModRefBS::dirty_card_val()); + __ z_cli(0, addr_card, (int)CardTable::dirty_card_val()); __ z_brne(not_already_dirty); __ bind(young_card); @@ -886,7 +881,7 @@ __ bind(not_already_dirty); // First, dirty it: [addr_card] := 0 - __ z_mvi(0, addr_card, CardTableModRefBS::dirty_card_val()); + __ z_mvi(0, addr_card, CardTable::dirty_card_val()); Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card. Register buf = r2; diff --git a/src/hotspot/cpu/s390/copy_s390.hpp b/src/hotspot/cpu/s390/copy_s390.hpp --- a/src/hotspot/cpu/s390/copy_s390.hpp +++ b/src/hotspot/cpu/s390/copy_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -73,7 +73,7 @@ #undef USE_INLINE_ASM -static void copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -89,7 +89,7 @@ } } -static void copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -105,7 +105,7 @@ } } -static bool has_destructive_overlap(char* from, char* to, size_t byte_count) { +static bool has_destructive_overlap(const char* from, char* to, size_t byte_count) { return (from < to) && ((to-from) < (ptrdiff_t)byte_count); } @@ -662,7 +662,7 @@ // D I S J O I N T C O P Y I N G // //*************************************// -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { // JVM2008: very frequent, some tests frequent. // Copy HeapWord (=DW) aligned storage. Use MVCLE in inline-asm code. @@ -740,13 +740,13 @@ #endif } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { // JVM2008: < 4k calls. assert(((((size_t)from) & 0x07L) | (((size_t)to) & 0x07L)) == 0, "No atomic copy w/o aligned data"); pd_aligned_disjoint_words(from, to, count); // Rare calls -> just delegate. 
} -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { // JVM2008: very rare. pd_aligned_disjoint_words(from, to, count); // Rare calls -> just delegate. } @@ -756,7 +756,7 @@ // C O N J O I N T C O P Y I N G // //*************************************// -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { // JVM2008: between some and lower end of frequent. #ifdef USE_INLINE_ASM @@ -836,13 +836,13 @@ #endif } -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { // Just delegate. HeapWords are optimally aligned anyway. pd_aligned_conjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { #ifdef USE_INLINE_ASM size_t count_in = count; @@ -866,16 +866,16 @@ // C O N J O I N T A T O M I C C O P Y I N G // //**************************************************// -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { // Call arraycopy stubs to do the job. pd_conjoint_bytes(from, to, count); // bytes are always accessed atomically. } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { #ifdef USE_INLINE_ASM size_t count_in = count; - if (has_destructive_overlap((char*)from, (char*)to, count_in*BytesPerShort)) { + if (has_destructive_overlap((const char*)from, (char*)to, count_in*BytesPerShort)) { // Use optimizations from shared code where no z-specific optimization exists. copy_conjoint_jshorts_atomic(from, to, count); } else { @@ -890,11 +890,11 @@ #endif } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { #ifdef USE_INLINE_ASM size_t count_in = count; - if (has_destructive_overlap((char*)from, (char*)to, count_in*BytesPerInt)) { + if (has_destructive_overlap((const char*)from, (char*)to, count_in*BytesPerInt)) { switch (count_in) { case 4: COPY4_ATOMIC_4(to,from) return; @@ -922,7 +922,7 @@ #endif } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { #ifdef USE_INLINE_ASM size_t count_in = count; @@ -970,11 +970,11 @@ } } else - pd_aligned_disjoint_words((HeapWord*)from, (HeapWord*)to, count_in); // rare calls -> just delegate. + pd_aligned_disjoint_words((const HeapWord*)from, (HeapWord*)to, count_in); // rare calls -> just delegate. 
#endif } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { #ifdef USE_INLINE_ASM size_t count_in = count; @@ -1011,24 +1011,24 @@ #endif } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_bytes_atomic(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count); +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); } //**********************************************// diff --git a/src/hotspot/cpu/s390/interp_masm_s390.cpp b/src/hotspot/cpu/s390/interp_masm_s390.cpp --- a/src/hotspot/cpu/s390/interp_masm_s390.cpp +++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp @@ -389,6 +389,8 @@ #endif z_agr(result, index); // Address of indexed array element. load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result); + // The resulting oop is null if the reference is not yet resolved. + // It is Universe::the_null_sentinel() if the reference resolved to NULL via condy. } // load cpool->resolved_klass_at(index) diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -27,6 +27,7 @@ #include "asm/codeBuffer.hpp" #include "asm/macroAssembler.inline.hpp" #include "compiler/disassembler.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "gc/shared/cardTableModRefBS.hpp" @@ -50,6 +51,7 @@ #include "utilities/events.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #include "gc/g1/heapRegion.hpp" @@ -3502,12 +3504,13 @@ // Write to card table for modification at store_addr - register is destroyed afterwards. 
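// The non-G1 write barrier defined below is far simpler than the G1 variant:
// a single unconditional byte store. byte_map_base is pre-biased so that
// indexing it with (address >> card_shift) yields the card for that address
// without first subtracting the heap base. Minimal C++ sketch with the
// constant inlined:
#include <cstdint>

static const int kCardShift = 9;  // 512-byte cards (CardTable::card_shift)

static void card_write_barrier_post(uintptr_t store_addr, signed char* byte_map_base) {
  byte_map_base[store_addr >> kCardShift] = 0;  // 0 == CardTable::dirty_card_val()
}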
void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) { - CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableForRS || - bs->kind() == BarrierSet::CardTableExtension, "wrong barrier"); + BarrierSet* bs = Universe::heap()->barrier_set(); + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTable* ct = ctbs->card_table(); + assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier"); assert_different_registers(store_addr, tmp); - z_srlg(store_addr, store_addr, CardTableModRefBS::card_shift); - load_absolute_address(tmp, (address)bs->byte_map_base); + z_srlg(store_addr, store_addr, CardTable::card_shift); + load_absolute_address(tmp, (address)ct->byte_map_base()); z_agr(store_addr, tmp); z_mvi(0, store_addr, 0); // Store byte 0. } @@ -3707,6 +3710,7 @@ assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3. G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set(); + CardTable* ct = bs->card_table(); assert(bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier"); BLOCK_COMMENT("g1_write_barrier_post {"); @@ -3733,33 +3737,33 @@ Rnew_val = noreg; // end of lifetime // Storing region crossing non-NULL, is card already dirty? - assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code"); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); assert_different_registers(Rtmp1, Rtmp2, Rtmp3); // Make sure not to use Z_R0 for any of these registers. Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3; Register Rbase = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3; // calculate address of card - load_const_optimized(Rbase, (address)bs->byte_map_base); // Card table base. - z_srlg(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift); // Index into card table. + load_const_optimized(Rbase, (address)ct->byte_map_base()); // Card table base. + z_srlg(Rcard_addr, Rstore_addr, CardTable::card_shift); // Index into card table. z_algr(Rcard_addr, Rbase); // Explicit calculation needed for cli. Rbase = noreg; // end of lifetime // Filter young. - assert((unsigned int)G1SATBCardTableModRefBS::g1_young_card_val() <= 255, "otherwise check this code"); - z_cli(0, Rcard_addr, (int)G1SATBCardTableModRefBS::g1_young_card_val()); + assert((unsigned int)G1CardTable::g1_young_card_val() <= 255, "otherwise check this code"); + z_cli(0, Rcard_addr, (int)G1CardTable::g1_young_card_val()); z_bre(filtered); // Check the card value. If dirty, we're done. // This also avoids false sharing of the (already dirty) card. z_sync(); // Required to support concurrent cleaning. - assert((unsigned int)CardTableModRefBS::dirty_card_val() <= 255, "otherwise check this code"); - z_cli(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); // Reload after membar. + assert((unsigned int)CardTable::dirty_card_val() <= 255, "otherwise check this code"); + z_cli(0, Rcard_addr, CardTable::dirty_card_val()); // Reload after membar. z_bre(filtered); // Storing a region crossing, non-NULL oop, card is clean. // Dirty card and log. - z_mvi(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); + z_mvi(0, Rcard_addr, CardTable::dirty_card_val()); Register Rcard_addr_x = Rcard_addr; Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ?
Rtmp2 : Rtmp1; diff --git a/src/hotspot/cpu/s390/stubGenerator_s390.cpp b/src/hotspot/cpu/s390/stubGenerator_s390.cpp --- a/src/hotspot/cpu/s390/stubGenerator_s390.cpp +++ b/src/hotspot/cpu/s390/stubGenerator_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,6 +26,8 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" #include "registerSaver_s390.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interp_masm.hpp" #include "nativeInst_s390.hpp" @@ -722,8 +724,7 @@ __ bind(filtered); } break; - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: case BarrierSet::ModRef: break; default: @@ -761,14 +762,14 @@ } } break; - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: // These cases formerly known as // void array_store_check(Register addr, Register count, bool branchToEnd). { NearLabel doXC, done; - CardTableModRefBS* ct = (CardTableModRefBS*)bs; - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); assert_different_registers(Z_R0, Z_R1, addr, count); // Nothing to do if count <= 0. @@ -787,11 +788,11 @@ __ add2reg_with_index(count, -BytesPerHeapOop, count, addr); // Get base address of card table. - __ load_const_optimized(Z_R1, (address)ct->byte_map_base); + __ load_const_optimized(Z_R1, (address)ct->byte_map_base()); // count = (count>>shift) - (addr>>shift) - __ z_srlg(addr, addr, CardTableModRefBS::card_shift); - __ z_srlg(count, count, CardTableModRefBS::card_shift); + __ z_srlg(addr, addr, CardTable::card_shift); + __ z_srlg(count, count, CardTable::card_shift); // Prefetch first elements of card table for update. if (VM_Version::has_Prefetch()) { diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp --- a/src/hotspot/cpu/s390/templateTable_s390.cpp +++ b/src/hotspot/cpu/s390/templateTable_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -260,8 +260,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: { if (val_is_null) { __ store_heap_oop_null(val, offset, base); @@ -450,7 +449,7 @@ void TemplateTable::ldc(bool wide) { transition(vtos, vtos); - Label call_ldc, notFloat, notClass, Done; + Label call_ldc, notFloat, notClass, notInt, Done; const Register RcpIndex = Z_tmp_1; const Register Rtags = Z_ARG2; @@ -500,22 +499,17 @@ __ z_bru(Done); __ bind(notFloat); -#ifdef ASSERT - { - Label L; - - __ z_cli(0, Raddr_type, JVM_CONSTANT_Integer); - __ z_bre(L); - // String and Object are rewritten to fast_aldc.
- __ stop("unexpected tag type in ldc"); - - __ bind(L); - } -#endif + __ z_cli(0, Raddr_type, JVM_CONSTANT_Integer); + __ z_brne(notInt); // itos __ mem2reg_opt(Z_tos, Address(Z_tmp_2, RcpOffset, base_offset), false); __ push_i(Z_tos); + __ z_bru(Done); + + // assume the tag is for condy; if not, the VM runtime will tell us + __ bind(notInt); + condy_helper(Done); __ bind(Done); } @@ -528,15 +522,23 @@ const Register index = Z_tmp_2; int index_size = wide ? sizeof(u2) : sizeof(u1); - Label L_resolved; + Label L_do_resolve, L_resolved; // We are resolved if the resolved reference cache entry contains a // non-null object (CallSite, etc.). __ get_cache_index_at_bcp(index, 1, index_size); // Load index. __ load_resolved_reference_at_index(Z_tos, index); __ z_ltgr(Z_tos, Z_tos); + __ z_bre(L_do_resolve); + + // Convert null sentinel to NULL. + __ load_const_optimized(Z_R1_scratch, (intptr_t)Universe::the_null_sentinel_addr()); + __ z_cg(Z_tos, Address(Z_R1_scratch)); __ z_brne(L_resolved); - + __ clear_reg(Z_tos); + __ z_bru(L_resolved); + + __ bind(L_do_resolve); // First time invocation - must resolve first. address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); __ load_const_optimized(Z_ARG1, (int)bytecode()); @@ -548,7 +550,7 @@ void TemplateTable::ldc2_w() { transition(vtos, vtos); - Label Long, Done; + Label notDouble, notLong, Done; // Z_tmp_1 = index of cp entry __ get_2_byte_integer_at_bcp(Z_tmp_1, 1, InterpreterMacroAssembler::Unsigned); @@ -566,21 +568,132 @@ // Check type. __ z_cli(0, Z_tos, JVM_CONSTANT_Double); - __ z_brne(Long); - + __ z_brne(notDouble); // dtos __ mem2freg_opt(Z_ftos, Address(Z_tmp_2, Z_tmp_1, base_offset)); __ push_d(); __ z_bru(Done); - __ bind(Long); + __ bind(notDouble); + __ z_cli(0, Z_tos, JVM_CONSTANT_Long); + __ z_brne(notLong); // ltos __ mem2reg_opt(Z_tos, Address(Z_tmp_2, Z_tmp_1, base_offset)); __ push_l(); + __ z_bru(Done); + + __ bind(notLong); + condy_helper(Done); __ bind(Done); } +void TemplateTable::condy_helper(Label& Done) { + const Register obj = Z_tmp_1; + const Register off = Z_tmp_2; + const Register flags = Z_ARG1; + const Register rarg = Z_ARG2; + __ load_const_optimized(rarg, (int)bytecode()); + call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg); + __ get_vm_result_2(flags); + + // VMr = obj = base address to find primitive value to push + // VMr2 = flags = (tos, off) using format of CPCE::_flags + assert(ConstantPoolCacheEntry::field_index_mask == 0xffff, "or use other instructions"); + __ z_llghr(off, flags); + const Address field(obj, off); + + // What sort of thing are we loading? + __ z_srl(flags, ConstantPoolCacheEntry::tos_state_shift); + // Make sure we don't need to mask flags for tos_state after the above shift. 
+ ConstantPoolCacheEntry::verify_tos_state_shift(); + + switch (bytecode()) { + case Bytecodes::_ldc: + case Bytecodes::_ldc_w: + { + // tos in (itos, ftos, stos, btos, ctos, ztos) + Label notInt, notFloat, notShort, notByte, notChar, notBool; + __ z_cghi(flags, itos); + __ z_brne(notInt); + // itos + __ z_l(Z_tos, field); + __ push(itos); + __ z_bru(Done); + + __ bind(notInt); + __ z_cghi(flags, ftos); + __ z_brne(notFloat); + // ftos + __ z_le(Z_ftos, field); + __ push(ftos); + __ z_bru(Done); + + __ bind(notFloat); + __ z_cghi(flags, stos); + __ z_brne(notShort); + // stos + __ z_lh(Z_tos, field); + __ push(stos); + __ z_bru(Done); + + __ bind(notShort); + __ z_cghi(flags, btos); + __ z_brne(notByte); + // btos + __ z_lb(Z_tos, field); + __ push(btos); + __ z_bru(Done); + + __ bind(notByte); + __ z_cghi(flags, ctos); + __ z_brne(notChar); + // ctos + __ z_llh(Z_tos, field); + __ push(ctos); + __ z_bru(Done); + + __ bind(notChar); + __ z_cghi(flags, ztos); + __ z_brne(notBool); + // ztos + __ z_lb(Z_tos, field); + __ push(ztos); + __ z_bru(Done); + + __ bind(notBool); + break; + } + + case Bytecodes::_ldc2_w: + { + Label notLong, notDouble; + __ z_cghi(flags, ltos); + __ z_brne(notLong); + // ltos + __ z_lg(Z_tos, field); + __ push(ltos); + __ z_bru(Done); + + __ bind(notLong); + __ z_cghi(flags, dtos); + __ z_brne(notDouble); + // dtos + __ z_ld(Z_ftos, field); + __ push(dtos); + __ z_bru(Done); + + __ bind(notDouble); + break; + } + + default: + ShouldNotReachHere(); + } + + __ stop("bad ldc/condy"); +} + void TemplateTable::locals_index(Register reg, int offset) { __ z_llgc(reg, at_bcp(offset)); __ z_lcgr(reg); diff --git a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp --- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp +++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp @@ -35,6 +35,7 @@ #include "gc/shared/collectedHeap.hpp" #include "nativeInst_sparc.hpp" #include "oops/objArrayKlass.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/safepointMechanism.inline.hpp" #include "runtime/sharedRuntime.hpp" diff --git a/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp b/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp --- a/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp +++ b/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,9 @@ #include "c1/c1_Defs.hpp" #include "c1/c1_MacroAssembler.hpp" #include "c1/c1_Runtime1.hpp" +#include "ci/ciUtilities.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_sparc.hpp" #include "oops/compiledICHolder.hpp" @@ -38,6 +41,7 @@ #include "utilities/align.hpp" #include "vmreg_sparc.inline.hpp" #if INCLUDE_ALL_GCS +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #endif @@ -389,7 +393,7 @@ } if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && - UseTLAB && FastTLABRefill) { + UseTLAB && Universe::heap()->supports_inline_contig_alloc()) { Label slow_path; Register G1_obj_size = G1; Register G3_t1 = G3; @@ -424,25 +428,8 @@ __ bind(ok); } #endif // ASSERT - // if we got here then the TLAB allocation failed, so try - // refilling the TLAB or allocating directly from eden. - Label retry_tlab, try_eden; - __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass - __ bind(retry_tlab); - - // get the instance size - __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size); - - __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path); - - __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2, /* is_tlab_allocated */ true); - __ verify_oop(O0_obj); - __ mov(O0, I0); - __ ret(); - __ delayed()->restore(); - - __ bind(try_eden); + // If we got here then the TLAB allocation failed, so try allocating directly from eden. // get the instance size __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size); __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path); @@ -508,73 +495,6 @@ } #endif // ASSERT - if (UseTLAB && FastTLABRefill) { - Label slow_path; - Register G1_arr_size = G1; - Register G3_t1 = G3; - Register O1_t2 = O1; - assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2); - - // check that array length is small enough for fast path - __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1); - __ cmp(G4_length, G3_t1); - __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path); - __ delayed()->nop(); - - // if we got here then the TLAB allocation failed, so try - // refilling the TLAB or allocating directly from eden. 
- Label retry_tlab, try_eden; - __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass - - __ bind(retry_tlab); - - // get the allocation size: (length << (layout_helper & 0x1F)) + header_size - __ ld(klass_lh, G3_t1); - __ sll(G4_length, G3_t1, G1_arr_size); - __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1); - __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1); - __ add(G1_arr_size, G3_t1, G1_arr_size); - __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size); // align up - __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size); - - __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path); // preserves G1_arr_size - - __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2); - __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset); - __ sub(G1_arr_size, G3_t1, O1_t2); // body length - __ add(O0_obj, G3_t1, G3_t1); // body start - if (!ZeroTLAB) { - __ initialize_body(G3_t1, O1_t2); - } - __ verify_oop(O0_obj); - __ retl(); - __ delayed()->nop(); - - __ bind(try_eden); - // get the allocation size: (length << (layout_helper & 0x1F)) + header_size - __ ld(klass_lh, G3_t1); - __ sll(G4_length, G3_t1, G1_arr_size); - __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1); - __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1); - __ add(G1_arr_size, G3_t1, G1_arr_size); - __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size); - __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size); - - __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path); // preserves G1_arr_size - __ incr_allocated_bytes(G1_arr_size, G3_t1, O1_t2); - - __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2); - __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset); - __ sub(G1_arr_size, G3_t1, O1_t2); // body length - __ add(O0_obj, G3_t1, G3_t1); // body start - __ initialize_body(G3_t1, O1_t2); - __ verify_oop(O0_obj); - __ retl(); - __ delayed()->nop(); - - __ bind(slow_path); - } - if (id == new_type_array_id) { oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length); } else { @@ -927,22 +847,22 @@ Register cardtable = G5; Register tmp = G1_scratch; Register tmp2 = G3_scratch; - jbyte* byte_map_base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base; + jbyte* byte_map_base = ci_card_table_address(); Label not_already_dirty, restart, refill, young_card; - __ srlx(addr, CardTableModRefBS::card_shift, addr); + __ srlx(addr, CardTable::card_shift, addr); AddressLiteral rs(byte_map_base); __ set(rs, cardtable); // cardtable := __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable] - __ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card); + __ cmp_and_br_short(tmp, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card); __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable] - assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code"); + assert(CardTable::dirty_card_val() == 0, "otherwise check this code"); __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty); __ bind(young_card); diff --git a/src/hotspot/cpu/sparc/copy_sparc.hpp b/src/hotspot/cpu/sparc/copy_sparc.hpp --- a/src/hotspot/cpu/sparc/copy_sparc.hpp +++ b/src/hotspot/cpu/sparc/copy_sparc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,11 +27,11 @@ // Inline functions for memory copy and fill. -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -47,7 +47,7 @@ } } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -65,23 +65,23 @@ } } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { (void)memmove(to, from, count); } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { (void)memmove(to, from, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -97,7 +97,7 @@ } } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -113,12 +113,12 @@ } } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { // Do better than this: inline memmove body NEEDS CLEANUP if (from > to) { while (count-- > 0) { @@ -135,24 +135,24 @@ } } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_bytes_atomic(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count); +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, 
size_t count) { - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); } static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) { diff --git a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp --- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp +++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "jvm.h" #include "asm/macroAssembler.inline.hpp" #include "compiler/disassembler.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/cardTableModRefBS.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" @@ -35,6 +36,7 @@ #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/interfaceSupport.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/os.inline.hpp" #include "runtime/safepoint.hpp" @@ -44,6 +46,7 @@ #include "utilities/align.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #include "gc/g1/heapRegion.hpp" @@ -658,7 +661,7 @@ void MacroAssembler::card_table_write(jbyte* byte_map_base, Register tmp, Register obj) { - srlx(obj, CardTableModRefBS::card_shift, obj); + srlx(obj, CardTable::card_shift, obj); assert(tmp != obj, "need separate temp reg"); set((address) byte_map_base, tmp); stb(G0, tmp, obj); @@ -1411,9 +1414,14 @@ void MacroAssembler::unimplemented(const char* what) { - char* b = new char[1024]; - jio_snprintf(b, 1024, "unimplemented: %s", what); - stop(b); + const char* buf = NULL; + { + ResourceMark rm; + stringStream ss; + ss.print("unimplemented: %s", what); + buf = code_string(ss.as_string()); + } + stop(buf); } @@ -3237,127 +3245,6 @@ verify_tlab(); } - -void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) { - Register top = O0; - Register t1 = G1; - Register t2 = G3; - Register t3 = O1; - assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */); - Label do_refill, discard_tlab; - - if (!Universe::heap()->supports_inline_contig_alloc()) { - // No allocation in the shared eden. 
- ba(slow_case); - delayed()->nop(); - } - - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top); - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1); - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2); - - // calculate amount of free space - sub(t1, top, t1); - srl_ptr(t1, LogHeapWordSize, t1); - - // Retain tlab and allocate object in shared space if - // the amount free in the tlab is too large to discard. - cmp(t1, t2); - - brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab); - // increment waste limit to prevent getting stuck on this slow path - if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) { - delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2); - } else { - delayed()->nop(); - // set64 does not use the temp register if the given constant is 32 bit. So - // we can just use any register; using G0 results in ignoring of the upper 32 bit - // of that value. - set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), t3, G0); - add(t2, t3, t2); - } - - st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())); - if (TLABStats) { - // increment number of slow_allocations - ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2); - add(t2, 1, t2); - stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset())); - } - ba(try_eden); - delayed()->nop(); - - bind(discard_tlab); - if (TLABStats) { - // increment number of refills - ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2); - add(t2, 1, t2); - stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset())); - // accumulate wastage - ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2); - add(t2, t1, t2); - stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())); - } - - // if tlab is currently allocated (top or end != null) then - // fill [top, end + alignment_reserve) with array object - br_null_short(top, Assembler::pn, do_refill); - - set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2); - st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word - // set klass to intArrayKlass - sub(t1, typeArrayOopDesc::header_size(T_INT), t1); - add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1); - sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1); - st(t1, top, arrayOopDesc::length_offset_in_bytes()); - set((intptr_t)Universe::intArrayKlassObj_addr(), t2); - ld_ptr(t2, 0, t2); - // store klass last. concurrent gcs assumes klass length is valid if - // klass field is not null. 
- store_klass(t2, top); - verify_oop(top); - - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1); - sub(top, t1, t1); // size of tlab's allocated portion - incr_allocated_bytes(t1, t2, t3); - - // refill the tlab with an eden allocation - bind(do_refill); - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1); - sll_ptr(t1, LogHeapWordSize, t1); - // allocate new tlab, address returned in top - eden_allocate(top, t1, 0, t2, t3, slow_case); - - st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset())); - st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset())); -#ifdef ASSERT - // check that tlab_size (t1) is still valid - { - Label ok; - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2); - sll_ptr(t2, LogHeapWordSize, t2); - cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok); - STOP("assert(t1 == tlab_size)"); - should_not_reach_here(); - - bind(ok); - } -#endif // ASSERT - add(top, t1, top); // t1 is tlab_size - sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top); - st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset())); - - if (ZeroTLAB) { - // This is a fast TLAB refill, therefore the GC is not notified of it. - // So compiled code must fill the new TLAB with zeroes. - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); - zero_memory(t2, t1); - } - verify_tlab(); - ba(retry); - delayed()->nop(); -} - void MacroAssembler::zero_memory(Register base, Register index) { assert_different_registers(base, index); Label loop; @@ -3690,17 +3577,17 @@ Label not_already_dirty, restart, refill, young_card; - __ srlx(O0, CardTableModRefBS::card_shift, O0); + __ srlx(O0, CardTable::card_shift, O0); AddressLiteral addrlit(byte_map_base); __ set(addrlit, O1); // O1 := <card table base> __ ldub(O0, O1, O2); // O2 := [O0 + O1] - __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card); + __ cmp_and_br_short(O2, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card); __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); __ ldub(O0, O1, O2); // O2 := [O0 + O1] - assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code"); + assert(CardTable::dirty_card_val() == 0, "otherwise check this code"); __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty); __ bind(young_card); @@ -3780,6 +3667,7 @@ G1SATBCardTableLoggingModRefBS* bs = barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set()); + CardTable* ct = bs->card_table(); if (G1RSBarrierRegionFilter) { xor3(store_addr, new_val, tmp); @@ -3820,7 +3708,8 @@ if (dirty_card_log_enqueue == 0) { G1SATBCardTableLoggingModRefBS* bs = barrier_set_cast<G1SATBCardTableLoggingModRefBS>(heap->barrier_set()); - generate_dirty_card_log_enqueue(bs->byte_map_base); + CardTable *ct = bs->card_table(); + generate_dirty_card_log_enqueue(ct->byte_map_base()); assert(dirty_card_log_enqueue != 0, "postcondition."); } if (satb_log_enqueue_with_frame == 0) { @@ -3842,9 +3731,10 @@ if (new_val == G0) return; CardTableModRefBS* bs = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); - assert(bs->kind() == BarrierSet::CardTableForRS || - bs->kind() == BarrierSet::CardTableExtension, "wrong barrier"); - card_table_write(bs->byte_map_base, tmp, store_addr); + CardTable* ct = bs->card_table(); + + assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier"); + card_table_write(ct->byte_map_base(), tmp, store_addr); } // ((OopHandle)result).resolve(); diff --git
a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp --- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp +++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp @@ -1266,7 +1266,6 @@ Register t1, // temp register Label& slow_case // continuation point if fast allocation fails ); - void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); void zero_memory(Register base, Register index); void incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register t1, Register t2); diff --git a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp --- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp +++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp @@ -24,6 +24,8 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_sparc.hpp" #include "oops/instanceOop.hpp" @@ -875,9 +877,7 @@ DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp } break; - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: - case BarrierSet::ModRef: + case BarrierSet::CardTableModRef: break; default: ShouldNotReachHere(); @@ -908,11 +908,11 @@ __ restore(); } break; - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: { - CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs); - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); assert_different_registers(addr, count, tmp); Label L_loop, L_done; @@ -923,10 +923,10 @@ __ sub(count, BytesPerHeapOop, count); __ add(count, addr, count); // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.) - __ srl_ptr(addr, CardTableModRefBS::card_shift, addr); - __ srl_ptr(count, CardTableModRefBS::card_shift, count); + __ srl_ptr(addr, CardTable::card_shift, addr); + __ srl_ptr(count, CardTable::card_shift, count); __ sub(count, addr, count); - AddressLiteral rs(ct->byte_map_base); + AddressLiteral rs(ct->byte_map_base()); __ set(rs, tmp); __ BIND(L_loop); __ stb(G0, tmp, addr); diff --git a/src/hotspot/cpu/sparc/templateTable_sparc.cpp b/src/hotspot/cpu/sparc/templateTable_sparc.cpp --- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp +++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -90,8 +90,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: { if (index == noreg ) { assert(Assembler::is_simm13(offset), "fix this code"); diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp @@ -529,12 +529,16 @@ if (SafepointMechanism::uses_thread_local_poll()) { #ifdef _LP64 - __ movptr(rscratch1, Address(r15_thread, Thread::polling_page_offset())); + const Register poll_addr = rscratch1; + __ movptr(poll_addr, Address(r15_thread, Thread::polling_page_offset())); +#else + const Register poll_addr = rbx; + assert(FrameMap::is_caller_save_register(poll_addr), "will overwrite"); + __ get_thread(poll_addr); + __ movptr(poll_addr, Address(poll_addr, Thread::polling_page_offset())); +#endif __ relocate(relocInfo::poll_return_type); - __ testl(rax, Address(rscratch1, 0)); -#else - ShouldNotReachHere(); -#endif + __ testl(rax, Address(poll_addr, 0)); } else { AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type); @@ -555,16 +559,20 @@ int offset = __ offset(); if (SafepointMechanism::uses_thread_local_poll()) { #ifdef _LP64 - __ movptr(rscratch1, Address(r15_thread, Thread::polling_page_offset())); + const Register poll_addr = rscratch1; + __ movptr(poll_addr, Address(r15_thread, Thread::polling_page_offset())); +#else + assert(tmp->is_cpu_register(), "needed"); + const Register poll_addr = tmp->as_register(); + __ get_thread(poll_addr); + __ movptr(poll_addr, Address(poll_addr, in_bytes(Thread::polling_page_offset()))); +#endif add_debug_info_for_branch(info); __ relocate(relocInfo::poll_type); address pre_pc = __ pc(); - __ testl(rax, Address(rscratch1, 0)); + __ testl(rax, Address(poll_addr, 0)); address post_pc = __ pc(); - guarantee(pointer_delta(post_pc, pre_pc, 1) == 3, "must be exact length"); -#else - ShouldNotReachHere(); -#endif + guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length"); } else { AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type); if (Assembler::is_polling_page_far()) { diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp --- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp @@ -143,6 +143,7 @@ LIR_Opr LIRGenerator::safepoint_poll_register() { + NOT_LP64( if (SafepointMechanism::uses_thread_local_poll()) { return new_register(T_ADDRESS); } ) return LIR_OprFact::illegalOpr; } @@ -1546,7 +1547,7 @@ if (x->is_safepoint()) { // increment backedge counter if needed increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci()); - __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before())); + __ safepoint(safepoint_poll_register(), state_for(x, x->state_before())); } set_no_result(x); diff --git a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp --- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp +++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,9 @@ #include "c1/c1_Defs.hpp" #include "c1/c1_MacroAssembler.hpp" #include "c1/c1_Runtime1.hpp" +#include "ci/ciUtilities.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_x86.hpp" #include "oops/compiledICHolder.hpp" @@ -39,6 +42,7 @@ #include "utilities/macros.hpp" #include "vmreg_x86.inline.hpp" #if INCLUDE_ALL_GCS +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" #endif @@ -995,8 +999,8 @@ __ set_info("fast new_instance init check", dont_gc_arguments); } - if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && - UseTLAB && FastTLABRefill) { + if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && UseTLAB + && Universe::heap()->supports_inline_contig_alloc()) { Label slow_path; Register obj_size = rcx; Register t1 = rbx; @@ -1031,21 +1035,8 @@ // if we got here then the TLAB allocation failed, so try // refilling the TLAB or allocating directly from eden. Label retry_tlab, try_eden; - const Register thread = - __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi - - __ bind(retry_tlab); - - // get the instance size (size is postive so movl is fine for 64bit) - __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); - - __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path); - - __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ true); - __ verify_oop(obj); - __ pop(rbx); - __ pop(rdi); - __ ret(0); + const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); + NOT_LP64(__ get_thread(thread)); __ bind(try_eden); // get the instance size (size is postive so movl is fine for 64bit) @@ -1129,24 +1120,13 @@ } #endif // ASSERT - if (UseTLAB && FastTLABRefill) { + // If we got here, the TLAB allocation failed, so try allocating from + // eden if inline contiguous allocations are supported. + if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) { Register arr_size = rsi; Register t1 = rcx; // must be rcx for use as shift count Register t2 = rdi; Label slow_path; - assert_different_registers(length, klass, obj, arr_size, t1, t2); - - // check that array length is small enough for fast path. - __ cmpl(length, C1_MacroAssembler::max_array_allocation_length); - __ jcc(Assembler::above, slow_path); - - // if we got here then the TLAB allocation failed, so try - // refilling the TLAB or allocating directly from eden. 
- Label retry_tlab, try_eden; - const Register thread = - __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi - - __ bind(retry_tlab); // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) // since size is positive movl does right thing on 64bit __ movl(t1, Address(klass, Klass::layout_helper_offset())); // since size is postive movl does right thing on 64bit __ movl(arr_size, length); assert(t1 == rcx, "fixed register usage"); __ shlptr(arr_size /* by t1=rcx, mod 32 */); __ shrptr(t1, Klass::_lh_header_size_shift); __ andptr(t1, Klass::_lh_header_size_mask); __ addptr(arr_size, t1); __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up __ andptr(arr_size, ~MinObjAlignmentInBytesMask); - __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size + __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size - __ initialize_header(obj, klass, length, t1, t2); - __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte))); - assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); - assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); - __ andptr(t1, Klass::_lh_header_size_mask); - __ subptr(arr_size, t1); // body length - __ addptr(t1, obj); // body start - if (!ZeroTLAB) { - __ initialize_body(t1, arr_size, 0, t2); - } - __ verify_oop(obj); - __ ret(0); - - __ bind(try_eden); - // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) - // since size is positive movl does right thing on 64bit - __ movl(t1, Address(klass, Klass::layout_helper_offset())); - // since size is postive movl does right thing on 64bit - __ movl(arr_size, length); - assert(t1 == rcx, "fixed register usage"); - __ shlptr(arr_size /* by t1=rcx, mod 32 */); - __ shrptr(t1, Klass::_lh_header_size_shift); - __ andptr(t1, Klass::_lh_header_size_mask); - __ addptr(arr_size, t1); - __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up - __ andptr(arr_size, ~MinObjAlignmentInBytesMask); - - __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size + // Using t2 for non 64-bit. + const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread); + NOT_LP64(__ get_thread(thread)); __ incr_allocated_bytes(thread, arr_size, 0); __ initialize_header(obj, klass, length, t1, t2); @@ -1701,10 +1656,6 @@ __ should_not_reach_here(); break; } - CardTableModRefBS* ct = - barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); - Label done; Label enqueued; Label runtime; @@ -1726,25 +1677,25 @@ const Register card_addr = rcx; f.load_argument(0, card_addr); - __ shrptr(card_addr, CardTableModRefBS::card_shift); + __ shrptr(card_addr, CardTable::card_shift); // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT // a valid address and therefore is not properly handled by the relocation code. - __ movptr(cardtable, (intptr_t)ct->byte_map_base); + __ movptr(cardtable, ci_card_table_address_as<intptr_t>()); __ addptr(card_addr, cardtable); NOT_LP64(__ get_thread(thread);) - __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val()); + __ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val()); __ jcc(Assembler::equal, done); __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); - __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); + __ cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val()); __ jcc(Assembler::equal, done); // storing region crossing non-NULL, card is clean. // dirty card and log.
- __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); + __ movb(Address(card_addr, 0), (int)CardTable::dirty_card_val()); const Register tmp = rdx; __ push(rdx); diff --git a/src/hotspot/cpu/x86/globalDefinitions_x86.hpp b/src/hotspot/cpu/x86/globalDefinitions_x86.hpp --- a/src/hotspot/cpu/x86/globalDefinitions_x86.hpp +++ b/src/hotspot/cpu/x86/globalDefinitions_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,9 +65,6 @@ #define SUPPORT_RESERVED_STACK_AREA #endif -#ifdef _LP64 -// X64 have implemented the local polling -#define THREAD_LOCAL_POLL -#endif #endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP diff --git a/src/hotspot/cpu/x86/globals_x86.hpp b/src/hotspot/cpu/x86/globals_x86.hpp --- a/src/hotspot/cpu/x86/globals_x86.hpp +++ b/src/hotspot/cpu/x86/globals_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -97,9 +97,10 @@ define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong); -#ifdef _LP64 +#if defined(_LP64) || defined(_WINDOWS) define_pd_global(bool, ThreadLocalHandshakes, true); #else +// get_thread() is slow on linux 32 bit, therefore off by default define_pd_global(bool, ThreadLocalHandshakes, false); #endif diff --git a/src/hotspot/cpu/x86/interp_masm_x86.cpp b/src/hotspot/cpu/x86/interp_masm_x86.cpp --- a/src/hotspot/cpu/x86/interp_masm_x86.cpp +++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -830,13 +830,12 @@ if (verifyoop) { verify_oop(rax, state); } + + address* const safepoint_table = Interpreter::safept_table(state); #ifdef _LP64 - Label no_safepoint, dispatch; - address* const safepoint_table = Interpreter::safept_table(state); if (SafepointMechanism::uses_thread_local_poll() && table != safepoint_table && generate_poll) { NOT_PRODUCT(block_comment("Thread-local Safepoint poll")); - testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit()); jccb(Assembler::zero, no_safepoint); @@ -851,9 +850,23 @@ #else Address index(noreg, rbx, Address::times_ptr); - ExternalAddress tbl((address)table); - ArrayAddress dispatch(tbl, index); - jump(dispatch); + if (SafepointMechanism::uses_thread_local_poll() && table != safepoint_table && generate_poll) { + NOT_PRODUCT(block_comment("Thread-local Safepoint poll")); + Label no_safepoint; + const Register thread = rcx; + get_thread(thread); + testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit()); + + jccb(Assembler::zero, no_safepoint); + ArrayAddress dispatch_addr(ExternalAddress((address)safepoint_table), index); + jump(dispatch_addr); + bind(no_safepoint); + } + + { + ArrayAddress dispatch_addr(ExternalAddress((address)table), index); + jump(dispatch_addr); + } #endif // _LP64 } diff --git a/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp b/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp --- a/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp +++ b/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -346,8 +346,9 @@ _from -= Interpreter::stackElementSize; if (_num_args < Argument::n_float_register_parameters_c-1) { + assert((_num_args*2) < BitsPerWord, "_num_args*2 is out of range"); *_reg_args++ = from_obj; - *_fp_identifiers |= (intptr_t)(0x01 << (_num_args*2)); // mark as float + *_fp_identifiers |= ((intptr_t)0x01 << (_num_args*2)); // mark as float _num_args++; } else { *_to++ = from_obj; @@ -360,8 +361,9 @@ _from -= 2*Interpreter::stackElementSize; if (_num_args < Argument::n_float_register_parameters_c-1) { + assert((_num_args*2) < BitsPerWord, "_num_args*2 is out of range"); *_reg_args++ = from_obj; - *_fp_identifiers |= (intptr_t)(0x3 << (_num_args*2)); // mark as double + *_fp_identifiers |= ((intptr_t)0x3 << (_num_args*2)); // mark as double _num_args++; } else { *_to++ = from_obj; diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "asm/assembler.inline.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/barrierSet.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/cardTableModRefBS.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" @@ -47,6 +48,7 @@ #include "runtime/thread.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #include "gc/g1/heapRegion.hpp" @@ -3685,9 +3687,14 @@ } void MacroAssembler::unimplemented(const char* what) { - char* b = new char[1024]; - jio_snprintf(b, 1024, "unimplemented: %s", what); - stop(b); + const char* buf = NULL; + { + ResourceMark rm; + stringStream ss; + ss.print("unimplemented: %s", what); + buf = code_string(ss.as_string()); + } + stop(buf); } #ifdef _LP64 @@ -3883,10 +3890,17 @@ } #endif -#ifdef _LP64 void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg) { if (SafepointMechanism::uses_thread_local_poll()) { - testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit()); +#ifdef _LP64 + assert(thread_reg == r15_thread, "should be"); +#else + if (thread_reg == noreg) { + thread_reg = temp_reg; + get_thread(thread_reg); + } +#endif + testb(Address(thread_reg, Thread::polling_page_offset()), SafepointMechanism::poll_bit()); jcc(Assembler::notZero, slow_path); // handshake bit set implies poll } else { cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), @@ -3894,13 +3908,6 @@ jcc(Assembler::notEqual, slow_path); } } -#else -void MacroAssembler::safepoint_poll(Label& slow_path) { - cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), - SafepointSynchronize::_not_synchronized); - jcc(Assembler::notEqual, slow_path); -} -#endif // Calls to C land // @@ -5590,9 +5597,10 @@ Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + DirtyCardQueue::byte_offset_of_buf())); - CardTableModRefBS* ct = + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); Label done; Label runtime; @@ -5615,24 +5623,24 @@ const Register cardtable = tmp2; movptr(card_addr, store_addr); - shrptr(card_addr, CardTableModRefBS::card_shift); + shrptr(card_addr, CardTable::card_shift); // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT // a valid address and therefore is not properly handled by the relocation code. - movptr(cardtable, (intptr_t)ct->byte_map_base); + movptr(cardtable, (intptr_t)ct->byte_map_base()); addptr(card_addr, cardtable); - cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val()); + cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val()); jcc(Assembler::equal, done); membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); - cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); + cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val()); jcc(Assembler::equal, done); // storing a region crossing, non-NULL oop, card is clean. // dirty card and log.
- movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); + movb(Address(card_addr, 0), (int)CardTable::dirty_card_val()); cmpl(queue_index, 0); jcc(Assembler::equal, runtime); @@ -5736,14 +5744,14 @@ // Does a store check for the oop in register obj. The content of // register obj is destroyed afterwards. BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableForRS || - bs->kind() == BarrierSet::CardTableExtension, + assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); - CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs); - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); - - shrptr(obj, CardTableModRefBS::card_shift); + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); + + shrptr(obj, CardTable::card_shift); Address card_addr; @@ -5752,7 +5760,7 @@ // So this essentially converts an address to a displacement and it will // never need to be relocated. On 64bit however the value may be too // large for a 32bit displacement. - intptr_t disp = (intptr_t) ct->byte_map_base; + intptr_t disp = (intptr_t) ct->byte_map_base(); if (is_simm32(disp)) { card_addr = Address(noreg, obj, Address::times_1, disp); } else { @@ -5760,12 +5768,12 @@ // displacement and done in a single instruction given favorable mapping and a // smarter version of as_Address. However, 'ExternalAddress' generates a relocation // entry and that entry is not properly handled by the relocation code. - AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none); + AddressLiteral cardtable((address)ct->byte_map_base(), relocInfo::none); Address index(noreg, obj, Address::times_1); card_addr = as_Address(ArrayAddress(cardtable, index)); } - int dirty = CardTableModRefBS::dirty_card_val(); + int dirty = CardTable::dirty_card_val(); if (UseCondCardMark) { Label L_already_dirty; if (UseConcMarkSweepGC) { @@ -5853,121 +5861,6 @@ verify_tlab(); } -// Preserves rbx, and rdx. -Register MacroAssembler::tlab_refill(Label& retry, - Label& try_eden, - Label& slow_case) { - Register top = rax; - Register t1 = rcx; // object size - Register t2 = rsi; - Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread); - assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx); - Label do_refill, discard_tlab; - - if (!Universe::heap()->supports_inline_contig_alloc()) { - // No allocation in the shared eden. - jmp(slow_case); - } - - NOT_LP64(get_thread(thread_reg)); - - movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); - movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); - - // calculate amount of free space - subptr(t1, top); - shrptr(t1, LogHeapWordSize); - - // Retain tlab and allocate object in shared space if - // the amount free in the tlab is too large to discard. - cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset()))); - jcc(Assembler::lessEqual, discard_tlab); - - // Retain - // %%% yuck as movptr...
- movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment()); - addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2); - if (TLABStats) { - // increment number of slow_allocations - addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1); - } - jmp(try_eden); - - bind(discard_tlab); - if (TLABStats) { - // increment number of refills - addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1); - // accumulate wastage -- t1 is amount free in tlab - addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1); - } - - // if tlab is currently allocated (top or end != null) then - // fill [top, end + alignment_reserve) with array object - testptr(top, top); - jcc(Assembler::zero, do_refill); - - // set up the mark word - movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2)); - // set the length to the remaining space - subptr(t1, typeArrayOopDesc::header_size(T_INT)); - addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve()); - shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint))); - movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1); - // set klass to intArrayKlass - // dubious reloc why not an oop reloc? - movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr())); - // store klass last. concurrent gcs assumes klass length is valid if - // klass field is not null. - store_klass(top, t1); - - movptr(t1, top); - subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); - incr_allocated_bytes(thread_reg, t1, 0); - - // refill the tlab with an eden allocation - bind(do_refill); - movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset()))); - shlptr(t1, LogHeapWordSize); - // allocate new tlab, address returned in top - eden_allocate(top, t1, 0, t2, slow_case); - - // Check that t1 was preserved in eden_allocate. -#ifdef ASSERT - if (UseTLAB) { - Label ok; - Register tsize = rsi; - assert_different_registers(tsize, thread_reg, t1); - push(tsize); - movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset()))); - shlptr(tsize, LogHeapWordSize); - cmpptr(t1, tsize); - jcc(Assembler::equal, ok); - STOP("assert(t1 != tlab size)"); - should_not_reach_here(); - - bind(ok); - pop(tsize); - } -#endif - movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top); - movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top); - addptr(top, t1); - subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); - movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top); - - if (ZeroTLAB) { - // This is a fast TLAB refill, therefore the GC is not notified of it. - // So compiled code must fill the new TLAB with zeroes. - movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); - zero_memory(top, t1, 0, t2); - } - - verify_tlab(); - jmp(retry); - - return thread_reg; // for use by caller -} - // Preserves the contents of address, destroys the contents length_in_bytes and temp. 
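The removals above delete MacroAssembler::tlab_refill() on every platform: generated code no longer discards and refills a TLAB. The fast paths that remain may only bump-allocate, either inside the current TLAB or, when the heap supports inline contiguous allocation, directly out of eden; everything else falls back to the runtime, which is now the only place a TLAB is refilled. A minimal self-contained sketch of that split follows; it is illustrative only, not HotSpot code, and every type and helper name in it is invented.

#include <atomic>
#include <cstddef>

struct Tlab { char* top = nullptr; char* end = nullptr; };               // per-thread buffer
struct Eden { std::atomic<char*> top{nullptr}; char* end = nullptr; };   // shared space

// Fast path 1: what tlab_allocate() still does - a plain bump allocation.
static void* tlab_allocate(Tlab& t, std::size_t bytes) {
  if (t.top != nullptr && static_cast<std::size_t>(t.end - t.top) >= bytes) {
    void* obj = t.top;
    t.top += bytes;
    return obj;
  }
  return nullptr;  // no inline refill any more; fall through to the next path
}

// Fast path 2: what eden_allocate() does - a CAS bump on the shared top.
static void* eden_allocate(Eden& e, std::size_t bytes) {
  char* old_top = e.top.load(std::memory_order_relaxed);
  while (old_top != nullptr && static_cast<std::size_t>(e.end - old_top) >= bytes) {
    if (e.top.compare_exchange_weak(old_top, old_top + bytes)) {
      return old_top;  // won the race
    }                  // lost the race: old_top was reloaded, try again
  }
  return nullptr;      // eden exhausted, or inline allocation unsupported
}

// Slow path: only the runtime refills the TLAB (a real VM may run a GC first).
static void* slow_allocate(Tlab& t, Eden& e, std::size_t bytes, std::size_t tlab_bytes) {
  char* chunk = static_cast<char*>(eden_allocate(e, tlab_bytes));
  if (chunk == nullptr) return nullptr;
  t.top = chunk;
  t.end = chunk + tlab_bytes;
  return tlab_allocate(t, bytes);
}

int main() {
  static char heap[1 << 20];
  Eden eden; eden.top = heap; eden.end = heap + sizeof heap;
  Tlab tlab;  // starts empty, so the first request takes the slow path
  void* p = tlab_allocate(tlab, 64);
  if (p == nullptr) p = slow_allocate(tlab, eden, 64, 4096);
  return p != nullptr ? 0 : 1;
}

Centralizing refills in the runtime removes the duplicated bookkeeping (waste limits, TLAB statistics, dummy filler arrays) that every platform's assembly version had to carry.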
void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) { assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different"); diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp --- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -550,7 +550,6 @@ Register t2, // temp register Label& slow_case // continuation point if fast allocation fails ); - Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp); void incr_allocated_bytes(Register thread, @@ -687,11 +686,9 @@ // Support for serializing memory accesses between threads void serialize_memory(Register thread, Register tmp); -#ifdef _LP64 + // If thread_reg is != noreg the code assumes the register passed contains + // the thread (required on 64 bit). void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg); -#else - void safepoint_poll(Label& slow_path); -#endif void verify_tlab(); diff --git a/src/hotspot/cpu/x86/nativeInst_x86.hpp b/src/hotspot/cpu/x86/nativeInst_x86.hpp --- a/src/hotspot/cpu/x86/nativeInst_x86.hpp +++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -704,14 +704,18 @@ inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ || (ubyte_at(0) & 0xF0) == 0x70; /* short jump */ } inline bool NativeInstruction::is_safepoint_poll() { + if (SafepointMechanism::uses_thread_local_poll()) { #ifdef AMD64 - if (SafepointMechanism::uses_thread_local_poll()) { const bool has_rex_prefix = ubyte_at(0) == NativeTstRegMem::instruction_rex_b_prefix; const int test_offset = has_rex_prefix ? 
1 : 0; +#else + const int test_offset = 0; +#endif const bool is_test_opcode = ubyte_at(test_offset) == NativeTstRegMem::instruction_code_memXregl; const bool is_rax_target = (ubyte_at(test_offset + 1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg; return is_test_opcode && is_rax_target; } +#ifdef AMD64 // Try decoding a near safepoint first: if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl && ubyte_at(1) == 0x05) { // 00 rax 101 diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp --- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp @@ -2111,16 +2111,13 @@ Label after_transition; // check for safepoint operation in progress and/or pending suspend requests - { Label Continue; - - __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()), - SafepointSynchronize::_not_synchronized); - - Label L; - __ jcc(Assembler::notEqual, L); + { Label Continue, slow_path; + + __ safepoint_poll(slow_path, thread, noreg); + __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0); __ jcc(Assembler::equal, Continue); - __ bind(L); + __ bind(slow_path); // Don't use call_VM as it will see a possible pending exception and forward it // and never return here preventing us from clearing _last_native_pc down below. @@ -2996,8 +2993,11 @@ // if this was not a poll_return then we need to correct the return address now. if (!cause_return) { - __ movptr(rax, Address(java_thread, JavaThread::saved_exception_pc_offset())); - __ movptr(Address(rbp, wordSize), rax); + // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack. + // Additionally, rbx is a callee saved register and we can look at it later to determine + // if someone changed the return address for us! + __ movptr(rbx, Address(java_thread, JavaThread::saved_exception_pc_offset())); + __ movptr(Address(rbp, wordSize), rbx); } // do the call @@ -3029,11 +3029,63 @@ __ bind(noException); + Label no_adjust, bail, not_special; + if (SafepointMechanism::uses_thread_local_poll() && !cause_return) { + // If our stashed return pc was modified by the runtime we avoid touching it + __ cmpptr(rbx, Address(rbp, wordSize)); + __ jccb(Assembler::notEqual, no_adjust); + + // Skip over the poll instruction. + // See NativeInstruction::is_safepoint_poll() + // Possible encodings: + // 85 00 test %eax,(%rax) + // 85 01 test %eax,(%rcx) + // 85 02 test %eax,(%rdx) + // 85 03 test %eax,(%rbx) + // 85 06 test %eax,(%rsi) + // 85 07 test %eax,(%rdi) + // + // 85 04 24 test %eax,(%rsp) + // 85 45 00 test %eax,0x0(%rbp) + +#ifdef ASSERT + __ movptr(rax, rbx); // remember where 0x85 should be, for verification below +#endif + // rsp/rbp base encoding takes 3 bytes with the following register values: + // rsp 0x04 + // rbp 0x05 + __ movzbl(rcx, Address(rbx, 1)); + __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05 + __ subptr(rcx, 4); // looking for 0x00 .. 0x01 + __ cmpptr(rcx, 1); + __ jcc(Assembler::above, not_special); + __ addptr(rbx, 1); + __ bind(not_special); +#ifdef ASSERT + // Verify the correct encoding of the poll we're about to skip. 
+ __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl); + __ jcc(Assembler::notEqual, bail); + // Mask out the modrm bits + __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask); + // rax encodes to 0, so if the bits are nonzero it's incorrect + __ jcc(Assembler::notZero, bail); +#endif + // Adjust return pc forward to step over the safepoint poll instruction + __ addptr(rbx, 2); + __ movptr(Address(rbp, wordSize), rbx); + } + + __ bind(no_adjust); // Normal exit, register restoring and exit RegisterSaver::restore_live_registers(masm, save_vectors); __ ret(0); +#ifdef ASSERT + __ bind(bail); + __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected"); +#endif + // make sure all code is generated masm->flush(); diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp --- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp @@ -25,6 +25,8 @@ #include "precompiled.hpp" #include "asm/macroAssembler.hpp" #include "asm/macroAssembler.inline.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_x86.hpp" #include "oops/instanceOop.hpp" @@ -705,9 +707,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: - case BarrierSet::ModRef: + case BarrierSet::CardTableModRef: break; default : ShouldNotReachHere(); @@ -739,22 +739,22 @@ break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: { - CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs); - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); Label L_loop; const Register end = count; // elements count; end == start+count-1 assert_different_registers(start, end); __ lea(end, Address(start, count, Address::times_ptr, -wordSize)); - __ shrptr(start, CardTableModRefBS::card_shift); - __ shrptr(end, CardTableModRefBS::card_shift); + __ shrptr(start, CardTable::card_shift); + __ shrptr(end, CardTable::card_shift); __ subptr(end, start); // end --> count __ BIND(L_loop); - intptr_t disp = (intptr_t) ct->byte_map_base; + intptr_t disp = (intptr_t) ct->byte_map_base(); Address cardtable(start, count, Address::times_1, disp); __ movb(cardtable, 0); __ decrement(count); diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp @@ -29,6 +29,9 @@ #include "gc/shenandoah/shenandoahBarrierSet.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" #include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "ci/ciUtilities.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_x86.hpp" #include "oops/instanceOop.hpp" @@ -1326,9 +1329,7 @@ __ bind(filtered); } break; - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: - case BarrierSet::ModRef: + case BarrierSet::CardTableModRef: break; default: ShouldNotReachHere(); @@ -1367,12 +1368,8 @@ __ popa(); } break; - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case
BarrierSet::CardTableModRef: { - CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs); - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); - Label L_loop, L_done; const Register end = count; @@ -1381,11 +1378,11 @@ __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive - __ shrptr(start, CardTableModRefBS::card_shift); - __ shrptr(end, CardTableModRefBS::card_shift); + __ shrptr(start, CardTable::card_shift); + __ shrptr(end, CardTable::card_shift); __ subptr(end, start); // end --> cards count - int64_t disp = (int64_t) ct->byte_map_base; + int64_t disp = ci_card_table_address_as<int64_t>(); __ mov64(scratch, disp); __ addptr(start, scratch); __ BIND(L_loop); diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.cpp b/src/hotspot/cpu/x86/stubRoutines_x86.cpp --- a/src/hotspot/cpu/x86/stubRoutines_x86.cpp +++ b/src/hotspot/cpu/x86/stubRoutines_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -303,45 +303,45 @@ // used in MacroAssembler::sha512_AVX2 ALIGNED_(64) julong StubRoutines::x86::_k512_W[] = { - 0x428a2f98d728ae22LL, 0x7137449123ef65cdLL, - 0xb5c0fbcfec4d3b2fLL, 0xe9b5dba58189dbbcLL, - 0x3956c25bf348b538LL, 0x59f111f1b605d019LL, - 0x923f82a4af194f9bLL, 0xab1c5ed5da6d8118LL, - 0xd807aa98a3030242LL, 0x12835b0145706fbeLL, - 0x243185be4ee4b28cLL, 0x550c7dc3d5ffb4e2LL, - 0x72be5d74f27b896fLL, 0x80deb1fe3b1696b1LL, - 0x9bdc06a725c71235LL, 0xc19bf174cf692694LL, - 0xe49b69c19ef14ad2LL, 0xefbe4786384f25e3LL, - 0x0fc19dc68b8cd5b5LL, 0x240ca1cc77ac9c65LL, - 0x2de92c6f592b0275LL, 0x4a7484aa6ea6e483LL, - 0x5cb0a9dcbd41fbd4LL, 0x76f988da831153b5LL, - 0x983e5152ee66dfabLL, 0xa831c66d2db43210LL, - 0xb00327c898fb213fLL, 0xbf597fc7beef0ee4LL, - 0xc6e00bf33da88fc2LL, 0xd5a79147930aa725LL, - 0x06ca6351e003826fLL, 0x142929670a0e6e70LL, - 0x27b70a8546d22ffcLL, 0x2e1b21385c26c926LL, - 0x4d2c6dfc5ac42aedLL, 0x53380d139d95b3dfLL, - 0x650a73548baf63deLL, 0x766a0abb3c77b2a8LL, - 0x81c2c92e47edaee6LL, 0x92722c851482353bLL, - 0xa2bfe8a14cf10364LL, 0xa81a664bbc423001LL, - 0xc24b8b70d0f89791LL, 0xc76c51a30654be30LL, - 0xd192e819d6ef5218LL, 0xd69906245565a910LL, - 0xf40e35855771202aLL, 0x106aa07032bbd1b8LL, - 0x19a4c116b8d2d0c8LL, 0x1e376c085141ab53LL, - 0x2748774cdf8eeb99LL, 0x34b0bcb5e19b48a8LL, - 0x391c0cb3c5c95a63LL, 0x4ed8aa4ae3418acbLL, - 0x5b9cca4f7763e373LL, 0x682e6ff3d6b2b8a3LL, - 0x748f82ee5defb2fcLL, 0x78a5636f43172f60LL, - 0x84c87814a1f0ab72LL, 0x8cc702081a6439ecLL, - 0x90befffa23631e28LL, 0xa4506cebde82bde9LL, - 0xbef9a3f7b2c67915LL, 0xc67178f2e372532bLL, - 0xca273eceea26619cLL, 0xd186b8c721c0c207LL, - 0xeada7dd6cde0eb1eLL, 0xf57d4f7fee6ed178LL, - 0x06f067aa72176fbaLL, 0x0a637dc5a2c898a6LL, - 0x113f9804bef90daeLL, 0x1b710b35131c471bLL, - 0x28db77f523047d84LL, 0x32caab7b40c72493LL, - 0x3c9ebe0a15c9bebcLL, 0x431d67c49c100d4cLL, - 0x4cc5d4becb3e42b6LL, 0x597f299cfc657e2aLL, - 0x5fcb6fab3ad6faecLL, 0x6c44198c4a475817LL, + 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, + 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, + 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, + 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, + 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, + 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, + 0x72be5d74f27b896fULL,
0x80deb1fe3b1696b1ULL, + 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL, + 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, + 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, + 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, + 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, + 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, + 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL, + 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, + 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, + 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, + 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL, + 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, + 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL, + 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, + 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, + 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, + 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, + 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, + 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL, + 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, + 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, + 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, + 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, + 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, + 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL, + 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, + 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, + 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, + 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, + 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, + 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL, + 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, + 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL, }; #endif diff --git a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp --- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp +++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1149,7 +1149,7 @@ Label slow_path; #ifndef _LP64 - __ safepoint_poll(slow_path); + __ safepoint_poll(slow_path, thread, noreg); #else __ safepoint_poll(slow_path, r15_thread, rscratch1); #endif diff --git a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86_32.cpp b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86_32.cpp --- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86_32.cpp +++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86_32.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,10 +61,7 @@ Label slow_path; // If we need a safepoint check, generate full interpreter entry. - ExternalAddress state(SafepointSynchronize::address_of_state()); - __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), - SafepointSynchronize::_not_synchronized); - __ jcc(Assembler::notEqual, slow_path); + __ safepoint_poll(slow_path, noreg, rdi); // We don't generate local frame and don't align stack because // we call stub code and there is no safepoint on this path. 
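Both hunks in this file swap the interpreter's inline compare of the global SafepointSynchronize state for MacroAssembler::safepoint_poll(), which can emit either polling flavor. Under the legacy scheme every thread tests one global word; with ThreadLocalHandshakes each thread tests a poll bit in its own thread structure, which is what allows a single thread to be stopped without arming every thread's poll. A schematic model of the two checks, with invented names rather than the VM's actual types:

#include <cstdint>

enum SafepointState : std::int32_t { not_synchronized = 0, synchronizing = 1, fully_synchronized = 2 };

static volatile std::int32_t global_safepoint_state = not_synchronized;  // legacy global flag

struct ToyThread {
  std::uintptr_t polling_word;  // models the word at Thread::polling_page_offset()
};

static const std::uintptr_t kPollBit = 1;  // models SafepointMechanism::poll_bit()

// Models the branch safepoint_poll() emits: go to the slow path whenever a
// safepoint or handshake is pending for this thread.
static bool needs_slow_path(const ToyThread* self, bool thread_local_poll) {
  if (thread_local_poll) {
    // testb(Address(thread, polling_page_offset()), poll_bit); jcc(notZero, slow_path);
    return (self->polling_word & kPollBit) != 0;
  }
  // cmp32(address_of_state(), _not_synchronized); jcc(notEqual, slow_path);
  return global_safepoint_state != not_synchronized;
}

int main() {
  ToyThread t = { 0 };
  return needs_slow_path(&t, true) ? 1 : 0;
}

On 32-bit x86 the thread pointer is not pinned in a register, which is why the new safepoint_poll() first materializes it with get_thread() into a temporary before the testb.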
@@ -113,10 +110,7 @@ Label slow_path; // If we need a safepoint check, generate full interpreter entry. - ExternalAddress state(SafepointSynchronize::address_of_state()); - __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), - SafepointSynchronize::_not_synchronized); - __ jcc(Assembler::notEqual, slow_path); + __ safepoint_poll(slow_path, noreg, rdi); // We don't generate local frame and don't align stack because // we call stub code and there is no safepoint on this path. diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp --- a/src/hotspot/cpu/x86/templateTable_x86.cpp +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -223,8 +223,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: { if (val == noreg) { __ store_heap_oop_null(obj); @@ -2734,11 +2733,16 @@ __ bind(skip_register_finalizer); } -#ifdef _LP64 if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) { Label no_safepoint; NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll")); +#ifdef _LP64 __ testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit()); +#else + const Register thread = rdi; + __ get_thread(thread); + __ testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit()); +#endif __ jcc(Assembler::zero, no_safepoint); __ push(state); __ call_VM(noreg, CAST_FROM_FN_PTR(address, @@ -2746,7 +2750,6 @@ __ pop(state); __ bind(no_safepoint); } -#endif // Narrow result if state is itos but result type is smaller. // Need to narrow in the return bytecode rather than in generate_return_entry diff --git a/src/hotspot/cpu/x86/x86_32.ad b/src/hotspot/cpu/x86/x86_32.ad --- a/src/hotspot/cpu/x86/x86_32.ad +++ b/src/hotspot/cpu/x86/x86_32.ad @@ -317,7 +317,7 @@ // Indicate if the safepoint node needs the polling page as an input. // Since x86 does have absolute addressing, it doesn't. bool SafePointNode::needs_polling_address_input() { - return false; + return SafepointMechanism::uses_thread_local_poll(); } // @@ -706,34 +706,25 @@ } if (do_polling() && C->is_method_compilation()) { - cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0); - emit_opcode(cbuf,0x85); - emit_rm(cbuf, 0x0, EAX_enc, 0x5); // EAX - emit_d32(cbuf, (intptr_t)os::get_polling_page()); + if (SafepointMechanism::uses_thread_local_poll()) { + Register pollReg = as_Register(EBX_enc); + MacroAssembler masm(&cbuf); + masm.get_thread(pollReg); + masm.movl(pollReg, Address(pollReg, in_bytes(Thread::polling_page_offset()))); + masm.relocate(relocInfo::poll_return_type); + masm.testl(rax, Address(pollReg, 0)); + } else { + cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0); + emit_opcode(cbuf,0x85); + emit_rm(cbuf, 0x0, EAX_enc, 0x5); // EAX + emit_d32(cbuf, (intptr_t)os::get_polling_page()); + } } } uint MachEpilogNode::size(PhaseRegAlloc *ra_) const { - Compile *C = ra_->C; - // If method set FPU control word, restore to standard control word - int size = C->in_24_bit_fp_mode() ? 
6 : 0; - if (C->max_vector_size() > 16) size += 3; // vzeroupper - if (do_polling() && C->is_method_compilation()) size += 6; - - int framesize = C->frame_size_in_bytes(); - assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); - // Remove two words for return addr and rbp, - framesize -= 2*wordSize; - - size++; // popl rbp, - - if (framesize >= 128) { - size += 6; - } else { - size += framesize ? 3 : 0; - } - size += 64; // added to support ReservedStackAccess - return size; + return MachNode::size(ra_); // too many variables; just compute it + // the hard way } int MachEpilogNode::reloc() const { @@ -13336,6 +13327,7 @@ // ============================================================================ // Safepoint Instruction instruct safePoint_poll(eFlagsReg cr) %{ + predicate(SafepointMechanism::uses_global_page_poll()); match(SafePoint); effect(KILL cr); @@ -13354,6 +13346,25 @@ ins_pipe( ialu_reg_mem ); %} +instruct safePoint_poll_tls(eFlagsReg cr, eRegP_no_EBP poll) %{ + predicate(SafepointMechanism::uses_thread_local_poll()); + match(SafePoint poll); + effect(KILL cr, USE poll); + + format %{ "TSTL #EAX,[$poll]\t! Safepoint: poll for GC" %} + ins_cost(125); + // EBP would need size(3) + size(2); /* setting an explicit size will cause debug builds to assert if size is incorrect */ + ins_encode %{ + __ relocate(relocInfo::poll_type); + address pre_pc = __ pc(); + __ testl(rax, Address($poll$$Register, 0)); + address post_pc = __ pc(); + guarantee(pre_pc[0] == 0x85, "must emit test-ax [reg]"); + %} + ins_pipe(ialu_reg_mem); +%} + // ============================================================================ // This name is KNOWN by the ADLC and cannot be changed. diff --git a/src/hotspot/cpu/zero/copy_zero.hpp b/src/hotspot/cpu/zero/copy_zero.hpp --- a/src/hotspot/cpu/zero/copy_zero.hpp +++ b/src/hotspot/cpu/zero/copy_zero.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright 2007 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -28,11 +28,11 @@ // Inline functions for memory copy and fill. 
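The copy_zero.hpp hunks below mirror the copy_sparc.hpp change earlier in this patch: the only edit to each pd_* routine is const-qualifying the source pointer. That documents that the copy never writes through from, lets callers pass read-only buffers without a cast, and matches the const void* source parameter of memmove. A trivial standalone illustration, with generic names rather than the HotSpot routines:

#include <cstddef>
#include <cstring>

// After the change the source side is 'const', mirroring std::memmove.
static void copy_bytes(const void* from, void* to, std::size_t count) {
  (void)std::memmove(to, from, count);  // memmove(void*, const void*, size_t)
}

int main() {
  const char src[8] = "abcdefg";  // a read-only source now passes with no cast
  char dst[8];
  copy_bytes(src, dst, sizeof src);
  return dst[0] == 'a' ? 0 : 1;
}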
-static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { memmove(to, from, count * HeapWordSize); } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -49,7 +49,7 @@ } } -static void pd_disjoint_words_atomic(HeapWord* from, +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { @@ -70,73 +70,73 @@ } } -static void pd_aligned_conjoint_words(HeapWord* from, +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { memmove(to, from, count * HeapWordSize); } -static void pd_aligned_disjoint_words(HeapWord* from, +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { memmove(to, from, count); } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { memmove(to, from, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { _Copy_conjoint_jshorts_atomic(from, to, count); } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { _Copy_conjoint_jints_atomic(from, to, count); } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { _Copy_conjoint_jlongs_atomic(from, to, count); } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { #ifdef _LP64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - _Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #else assert(BytesPerInt == BytesPerOop, "jints and oops must be the same size"); - _Copy_conjoint_jints_atomic((jint*)from, (jint*)to, count); + _Copy_conjoint_jints_atomic((const jint*)from, (jint*)to, count); #endif // _LP64 } -static void pd_arrayof_conjoint_bytes(HeapWord* from, +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_bytes(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jshorts(from, to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jints(from, to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jlongs(from, to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { 
#ifdef _LP64 diff --git a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp --- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp +++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -41,6 +41,7 @@ #include "runtime/deoptimization.hpp" #include "runtime/frame.inline.hpp" #include "runtime/interfaceSupport.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" diff --git a/src/hotspot/os/bsd/decoder_machO.cpp b/src/hotspot/os/bsd/decoder_machO.cpp --- a/src/hotspot/os/bsd/decoder_machO.cpp +++ b/src/hotspot/os/bsd/decoder_machO.cpp @@ -27,6 +27,7 @@ #ifdef __APPLE__ #include "jvm.h" #include "decoder_machO.hpp" +#include "memory/allocation.inline.hpp" #include #include diff --git a/src/hotspot/os/linux/decoder_linux.cpp b/src/hotspot/os/linux/decoder_linux.cpp --- a/src/hotspot/os/linux/decoder_linux.cpp +++ b/src/hotspot/os/linux/decoder_linux.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "jvm.h" #include "utilities/decoder_elf.hpp" +#include "utilities/elfFile.hpp" #include @@ -50,3 +51,38 @@ return false; } +// Returns true if the elf file is marked NOT to require an executable stack, +// or if the file could not be opened. +// Returns false if the elf file requires an executable stack, the stack flag +// is not set at all, or if the file can not be read. +bool ElfFile::specifies_noexecstack(const char* filepath) { + if (filepath == NULL) return true; + + FILE* file = fopen(filepath, "r"); + if (file == NULL) return true; + + // AARCH64 defaults to noexecstack. All others default to execstack. + bool result = AARCH64_ONLY(true) NOT_AARCH64(false); + + // Read file header + Elf_Ehdr head; + if (fread(&head, sizeof(Elf_Ehdr), 1, file) == 1 && + is_elf_file(head) && + fseek(file, head.e_phoff, SEEK_SET) == 0) { + + // Read program header table + Elf_Phdr phdr; + for (int index = 0; index < head.e_phnum; index ++) { + if (fread((void*)&phdr, sizeof(Elf_Phdr), 1, file) != 1) { + result = false; + break; + } + if (phdr.p_type == PT_GNU_STACK) { + result = (phdr.p_flags == (PF_R | PF_W)); + break; + } + } + } + fclose(file); + return result; +} diff --git a/src/hotspot/os/linux/globals_linux.hpp b/src/hotspot/os/linux/globals_linux.hpp --- a/src/hotspot/os/linux/globals_linux.hpp +++ b/src/hotspot/os/linux/globals_linux.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -62,6 +62,11 @@
   product(bool, UseContainerSupport, true,                          \
           "Enable detection and runtime container configuration support") \
                                                                     \
+  product(bool, PreferContainerQuotaForCPUCount, true,              \
+          "Calculate the container CPU availability based on the value" \
+          " of quotas (if set), when true. Otherwise, use the CPU" \
+          " shares value, provided it is less than quota.")         \
+                                                                    \
   diagnostic(bool, UseCpuAllocPath, false,                          \
           "Use CPU_ALLOC code path in os::active_processor_count ")

diff --git a/src/hotspot/os/linux/osContainer_linux.cpp b/src/hotspot/os/linux/osContainer_linux.cpp
--- a/src/hotspot/os/linux/osContainer_linux.cpp
+++ b/src/hotspot/os/linux/osContainer_linux.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -414,9 +414,9 @@
 }

-char * OSContainer::container_type() {
+const char * OSContainer::container_type() {
   if (is_containerized()) {
-    return (char *)"cgroupv1";
+    return "cgroupv1";
   } else {
     return NULL;
   }
@@ -499,11 +499,11 @@

 /* active_processor_count
 *
 * Calculate an appropriate number of active processors for the
- * VM to use based on these three cgroup options.
+ * VM to use based on these three inputs.
 *
 * cpu affinity
- * cpu quota & cpu period
- * cpu shares
+ * cgroup cpu quota & cpu period
+ * cgroup cpu shares
 *
 * Algorithm:
 *
@@ -513,42 +513,61 @@
 * required CPUs by dividing quota by period.
 *
 * If shares are in effect (shares != -1), calculate the number
- * of cpus required for the shares by dividing the share value
+ * of CPUs required for the shares by dividing the share value
 * by PER_CPU_SHARES.
 *
 * All results of division are rounded up to the next whole number.
 *
- * Return the smaller number from the three different settings.
+ * If neither shares nor quotas have been specified, return the
+ * number of active processors in the system.
+ *
+ * If both shares and quotas have been specified, the result is
+ * based on the flag PreferContainerQuotaForCPUCount. If true,
+ * return the quota value. If false, return the smaller value of
+ * shares or quotas.
+ *
+ * If shares and/or quotas have been specified, the number
+ * returned will never exceed the number of active processors.
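+ *
+ * Worked example (illustrative values; PER_CPU_SHARES is 1024):
+ * quota=150000 and period=100000 give quota_count = ceil(1.5) = 2,
+ * and shares=2048 gives share_count = ceil(2048/1024) = 2; with the
+ * default PreferContainerQuotaForCPUCount == true the quota wins, and
+ * the final result is MIN2(active processors, 2).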
 *
 * return:
- *    number of cpus
- *    OSCONTAINER_ERROR if failure occured during extract of cpuset info
+ *    number of CPUs
 */
 int OSContainer::active_processor_count() {
-  int cpu_count, share_count, quota_count;
-  int share, quota, period;
+  int quota_count = 0, share_count = 0;
+  int cpu_count, limit_count;
   int result;

-  cpu_count = os::Linux::active_processor_count();
+  cpu_count = limit_count = os::Linux::active_processor_count();
+  int quota  = cpu_quota();
+  int period = cpu_period();
+  int share  = cpu_shares();

-  share = cpu_shares();
+  if (quota > -1 && period > 0) {
+    quota_count = ceilf((float)quota / (float)period);
+    log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
+  }
   if (share > -1) {
     share_count = ceilf((float)share / (float)PER_CPU_SHARES);
-    log_trace(os, container)("cpu_share count: %d", share_count);
-  } else {
-    share_count = cpu_count;
+    log_trace(os, container)("CPU Share count based on shares: %d", share_count);
   }

-  quota = cpu_quota();
-  period = cpu_period();
-  if (quota > -1 && period > 0) {
-    quota_count = ceilf((float)quota / (float)period);
-    log_trace(os, container)("quota_count: %d", quota_count);
-  } else {
-    quota_count = cpu_count;
+  // If both shares and quotas are set up, the result depends on the
+  // flag PreferContainerQuotaForCPUCount.
+  // If true, limit the CPU count to the quota value.
+  // If false, use the minimum of shares and quotas.
+  if (quota_count != 0 && share_count != 0) {
+    if (PreferContainerQuotaForCPUCount) {
+      limit_count = quota_count;
+    } else {
+      limit_count = MIN2(quota_count, share_count);
+    }
+  } else if (quota_count != 0) {
+    limit_count = quota_count;
+  } else if (share_count != 0) {
+    limit_count = share_count;
   }

-  result = MIN2(cpu_count, MIN2(share_count, quota_count));
+  result = MIN2(cpu_count, limit_count);
   log_trace(os, container)("OSContainer::active_processor_count: %d", result);
   return result;
 }

diff --git a/src/hotspot/os/linux/osContainer_linux.hpp b/src/hotspot/os/linux/osContainer_linux.hpp
--- a/src/hotspot/os/linux/osContainer_linux.hpp
+++ b/src/hotspot/os/linux/osContainer_linux.hpp
@@ -40,7 +40,7 @@
 public:
   static void init();
   static inline bool is_containerized();
-  static char * container_type();
+  static const char * container_type();

   static jlong memory_limit_in_bytes();
   static jlong memory_and_swap_limit_in_bytes();

diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -177,20 +177,17 @@

   if (OSContainer::is_containerized()) {
     jlong mem_limit, mem_usage;
-    if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
-      if ((mem_usage = OSContainer::memory_usage_in_bytes()) > 0) {
-        if (mem_limit > mem_usage) {
-          avail_mem = (julong)mem_limit - (julong)mem_usage;
-        } else {
-          avail_mem = 0;
-        }
-        log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
-        return avail_mem;
-      } else {
-        log_debug(os,container)("container memory usage call failed: " JLONG_FORMAT, mem_usage);
-      }
-    } else {
-      log_debug(os,container)("container memory unlimited or failed: " JLONG_FORMAT, mem_limit);
+    if ((mem_limit = OSContainer::memory_limit_in_bytes()) < 1) {
+      log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value",
+                               mem_limit == OSCONTAINER_ERROR ? 
"failed" : "unlimited", mem_limit); + } + if (mem_limit > 0 && (mem_usage = OSContainer::memory_usage_in_bytes()) < 1) { + log_debug(os, container)("container memory usage failed: " JLONG_FORMAT ", using host value", mem_usage); + } + if (mem_limit > 0 && mem_usage > 0 ) { + avail_mem = mem_limit > mem_usage ? (julong)mem_limit - (julong)mem_usage : 0; + log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem); + return avail_mem; } } @@ -201,22 +198,18 @@ } julong os::physical_memory() { + jlong phys_mem = 0; if (OSContainer::is_containerized()) { jlong mem_limit; if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) { log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit); - return (julong)mem_limit; - } else { - if (mem_limit == OSCONTAINER_ERROR) { - log_debug(os,container)("container memory limit call failed"); - } - if (mem_limit == -1) { - log_debug(os,container)("container memory unlimited, using host value"); - } + return mem_limit; } - } - - jlong phys_mem = Linux::physical_memory(); + log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value", + mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit); + } + + phys_mem = Linux::physical_memory(); log_trace(os)("total system memory: " JLONG_FORMAT, phys_mem); return phys_mem; } @@ -2135,63 +2128,54 @@ } void os::Linux::print_container_info(outputStream* st) { - if (OSContainer::is_containerized()) { - st->print("container (cgroup) information:\n"); - - char *p = OSContainer::container_type(); - if (p == NULL) - st->print("container_type() failed\n"); - else { - st->print("container_type: %s\n", p); - } - - p = OSContainer::cpu_cpuset_cpus(); - if (p == NULL) - st->print("cpu_cpuset_cpus() failed\n"); - else { - st->print("cpu_cpuset_cpus: %s\n", p); - free(p); - } - - p = OSContainer::cpu_cpuset_memory_nodes(); - if (p < 0) - st->print("cpu_memory_nodes() failed\n"); - else { - st->print("cpu_memory_nodes: %s\n", p); - free(p); - } - - int i = OSContainer::active_processor_count(); - if (i < 0) - st->print("active_processor_count() failed\n"); - else - st->print("active_processor_count: %d\n", i); - - i = OSContainer::cpu_quota(); - st->print("cpu_quota: %d\n", i); - - i = OSContainer::cpu_period(); - st->print("cpu_period: %d\n", i); - - i = OSContainer::cpu_shares(); - st->print("cpu_shares: %d\n", i); - - jlong j = OSContainer::memory_limit_in_bytes(); - st->print("memory_limit_in_bytes: " JLONG_FORMAT "\n", j); - - j = OSContainer::memory_and_swap_limit_in_bytes(); - st->print("memory_and_swap_limit_in_bytes: " JLONG_FORMAT "\n", j); - - j = OSContainer::memory_soft_limit_in_bytes(); - st->print("memory_soft_limit_in_bytes: " JLONG_FORMAT "\n", j); - - j = OSContainer::OSContainer::memory_usage_in_bytes(); - st->print("memory_usage_in_bytes: " JLONG_FORMAT "\n", j); - - j = OSContainer::OSContainer::memory_max_usage_in_bytes(); - st->print("memory_max_usage_in_bytes: " JLONG_FORMAT "\n", j); - st->cr(); - } + if (!OSContainer::is_containerized()) { + return; + } + + st->print("container (cgroup) information:\n"); + + const char *p_ct = OSContainer::container_type(); + st->print("container_type: %s\n", p_ct != NULL ? p_ct : "failed"); + + char *p = OSContainer::cpu_cpuset_cpus(); + st->print("cpu_cpuset_cpus: %s\n", p != NULL ? p : "failed"); + free(p); + + p = OSContainer::cpu_cpuset_memory_nodes(); + st->print("cpu_memory_nodes: %s\n", p != NULL ? 
p : "failed"); + free(p); + + int i = OSContainer::active_processor_count(); + if (i > 0) { + st->print("active_processor_count: %d\n", i); + } else { + st->print("active_processor_count: failed\n"); + } + + i = OSContainer::cpu_quota(); + st->print("cpu_quota: %d\n", i); + + i = OSContainer::cpu_period(); + st->print("cpu_period: %d\n", i); + + i = OSContainer::cpu_shares(); + st->print("cpu_shares: %d\n", i); + + jlong j = OSContainer::memory_limit_in_bytes(); + st->print("memory_limit_in_bytes: " JLONG_FORMAT "\n", j); + + j = OSContainer::memory_and_swap_limit_in_bytes(); + st->print("memory_and_swap_limit_in_bytes: " JLONG_FORMAT "\n", j); + + j = OSContainer::memory_soft_limit_in_bytes(); + st->print("memory_soft_limit_in_bytes: " JLONG_FORMAT "\n", j); + + j = OSContainer::OSContainer::memory_usage_in_bytes(); + st->print("memory_usage_in_bytes: " JLONG_FORMAT "\n", j); + + j = OSContainer::OSContainer::memory_max_usage_in_bytes(); + st->print("memory_max_usage_in_bytes: " JLONG_FORMAT "\n", j); + st->cr(); } void os::print_memory_info(outputStream* st) { @@ -3077,10 +3061,12 @@ return res != (uintptr_t) MAP_FAILED; } -static address get_stack_commited_bottom(address bottom, size_t size) { - address nbot = bottom; - address ntop = bottom + size; - +// If there is no page mapped/committed, top (bottom + size) is returned +static address get_stack_mapped_bottom(address bottom, + size_t size, + bool committed_only /* must have backing pages */) { + // address used to test if the page is mapped/committed + address test_addr = bottom + size; size_t page_sz = os::vm_page_size(); unsigned pages = size / page_sz; @@ -3092,38 +3078,39 @@ while (imin < imax) { imid = (imax + imin) / 2; - nbot = ntop - (imid * page_sz); + test_addr = bottom + (imid * page_sz); // Use a trick with mincore to check whether the page is mapped or not. 
    // mincore sets vec to 1 if page resides in memory and to 0 if page
    // is swapped out, but if the page we are asking for is unmapped
    // it returns -1 and sets errno to ENOMEM
-   mincore_return_value = mincore(nbot, page_sz, vec);
-
-   if (mincore_return_value == -1) {
-     // Page is not mapped go up
-     // to find first mapped page
-     if (errno != EAGAIN) {
-       assert(errno == ENOMEM, "Unexpected mincore errno");
-       imax = imid;
+   mincore_return_value = mincore(test_addr, page_sz, vec);
+
+   if (mincore_return_value == -1 || (committed_only && (vec[0] & 0x01) == 0)) {
+     // Page is not mapped/committed, go up
+     // to find the first mapped/committed page
+     if ((mincore_return_value == -1 && errno != EAGAIN)
+         || (committed_only && (vec[0] & 0x01) == 0)) {
+       assert(mincore_return_value != -1 || errno == ENOMEM, "Unexpected mincore errno");
+
+       imin = imid + 1;
+     }
    } else {
-     // Page is mapped go down
-     // to find first not mapped page
-     imin = imid + 1;
+     // mapped/committed, go down
+     imax = imid;
    }
  }

- nbot = nbot + page_sz;
-
- // Adjust stack bottom one page up if last checked page is not mapped
- if (mincore_return_value == -1) {
-   nbot = nbot + page_sz;
- }
-
- return nbot;
-}
-
+ // Adjust stack bottom one page up if last checked page is not mapped/committed
+ if (mincore_return_value == -1 || (committed_only && (vec[0] & 0x01) == 0)) {
+   assert(mincore_return_value != -1 || (errno != EAGAIN && errno != ENOMEM),
+          "Should not get to here");
+
+   test_addr = test_addr + page_sz;
+ }
+
+ return test_addr;
+}

 // Linux uses a growable mapping for the stack, and if the mapping for
 // the stack guard pages is not removed when we detach a thread the
 // stack cannot grow beyond the pages where the stack guard was
@@ -3161,9 +3148,9 @@

   if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
     // Fallback to slow path on all errors, including EAGAIN
-    stack_extent = (uintptr_t) get_stack_commited_bottom(
-                     os::Linux::initial_thread_stack_bottom(),
-                     (size_t)addr - stack_extent);
+    stack_extent = (uintptr_t) get_stack_mapped_bottom(os::Linux::initial_thread_stack_bottom(),
+                     (size_t)addr - stack_extent,
+                     false /* committed_only */);
   }

   if (stack_extent < (uintptr_t)addr) {
@@ -3190,6 +3177,11 @@
   return os::uncommit_memory(addr, size);
 }

+size_t os::committed_stack_size(address bottom, size_t size) {
+  address bot = get_stack_mapped_bottom(bottom, size, true /* committed_only */);
+  return size_t(bottom + size - bot);
+}
+
 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
 // at 'requested_addr'. If there are existing memory mappings at the same
 // location, however, they will be overwritten. If 'fixed' is false,

diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -331,8 +331,15 @@
   return aligned_base;
 }

-int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
-  return vsnprintf(buf, len, fmt, args);
+int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
+  // All supported POSIX platforms provide C99 semantics.
+  int result = ::vsnprintf(buf, len, fmt, args);
+  // If an encoding error occurred (result < 0) then it's not clear
+  // whether the buffer is NUL terminated, so ensure it is.
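+  // (Per C99, a non-negative result is the would-be output length, and on
+  // truncation the buffer is still NUL terminated when len > 0; only the
+  // encoding-error case needs the explicit termination below.)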
+ if ((result < 0) && (len > 0)) { + buf[len - 1] = '\0'; + } + return result; } int os::get_fileno(FILE* fp) { diff --git a/src/hotspot/os/solaris/dtrace/jhelper.d b/src/hotspot/os/solaris/dtrace/jhelper.d --- a/src/hotspot/os/solaris/dtrace/jhelper.d +++ b/src/hotspot/os/solaris/dtrace/jhelper.d @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. - * + * */ /* This file is auto-generated */ @@ -30,7 +30,7 @@ #ifdef DEBUG #define MARK_LINE this->line = __LINE__ #else -#define MARK_LINE +#define MARK_LINE #endif #ifdef _LP64 @@ -59,9 +59,8 @@ #define copyin_int32(ADDR) *(int32_t*) copyin((pointer) (ADDR), sizeof(int32_t)) #define copyin_uint8(ADDR) *(uint8_t*) copyin((pointer) (ADDR), sizeof(uint8_t)) -#define SAME(x) x #define copyin_offset(JVM_CONST) JVM_CONST = \ - copyin_int32(JvmOffsetsPtr + SAME(IDX_)JVM_CONST * sizeof(int32_t)) + copyin_int32(JvmOffsetsPtr + IDX_##JVM_CONST * sizeof(int32_t)) int init_done; @@ -97,7 +96,7 @@ /!init_done && !this->done/ { MARK_LINE; - + copyin_offset(POINTER_SIZE); copyin_offset(COMPILER); copyin_offset(OFFSET_CollectedHeap_reserved); @@ -158,7 +157,9 @@ #endif /* Read address of GrowableArray */ - this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_); + // this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_); + this->code_heaps_address = * ( uint64_t * ) copyin ( ( uint64_t ) ( &``__1cJCodeCacheG_heaps_ ) , sizeof ( uint64_t ) ); + /* Read address of _data array field in GrowableArray */ this->code_heaps_array_address = copyin_ptr(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_data); this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len); @@ -168,7 +169,9 @@ /* * Get Java heap bounds */ - this->Universe_collectedHeap = copyin_ptr(&``__1cIUniverseO_collectedHeap_); + // this->Universe_collectedHeap = copyin_ptr(&``__1cIUniverseO_collectedHeap_); + this->Universe_collectedHeap = * ( uint64_t * ) copyin ( ( uint64_t ) ( &``__1cIUniverseO_collectedHeap_ ) , sizeof ( uint64_t ) ); + this->heap_start = copyin_ptr(this->Universe_collectedHeap + OFFSET_CollectedHeap_reserved + OFFSET_MemRegion_start); @@ -181,8 +184,8 @@ } /* - * IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in - * the code cache. If more code heaps are added the following probes have to + * IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in + * the code cache. If more code heaps are added the following probes have to * be extended. This is done by simply adding a probe to get the heap bounds * and another probe to set the code heap address of the newly created heap. 
*/ @@ -197,7 +200,7 @@ /* CodeHeap 1 */ init_done = 1; this->code_heap1_address = copyin_ptr(this->code_heaps_array_address); - this->code_heap1_low = copyin_ptr(this->code_heap1_address + + this->code_heap1_low = copyin_ptr(this->code_heap1_address + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); this->code_heap1_high = copyin_ptr(this->code_heap1_address + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); @@ -211,7 +214,7 @@ init_done = 2; this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE; this->code_heap2_address = copyin_ptr(this->code_heaps_array_address); - this->code_heap2_low = copyin_ptr(this->code_heap2_address + + this->code_heap2_low = copyin_ptr(this->code_heap2_address + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); this->code_heap2_high = copyin_ptr(this->code_heap2_address + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); @@ -224,7 +227,7 @@ init_done = 3; this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE; this->code_heap3_address = copyin_ptr(this->code_heaps_array_address); - this->code_heap3_low = copyin_ptr(this->code_heap3_address + + this->code_heap3_low = copyin_ptr(this->code_heap3_address + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); this->code_heap3_high = copyin_ptr(this->code_heap3_address + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); @@ -237,7 +240,7 @@ init_done = 4; this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE; this->code_heap4_address = copyin_ptr(this->code_heaps_array_address); - this->code_heap4_low = copyin_ptr(this->code_heap4_address + + this->code_heap4_low = copyin_ptr(this->code_heap4_address + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); this->code_heap4_high = copyin_ptr(this->code_heap4_address + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); @@ -250,7 +253,7 @@ init_done = 5; this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE; this->code_heap5_address = copyin_ptr(this->code_heaps_array_address); - this->code_heap5_low = copyin_ptr(this->code_heap5_address + + this->code_heap5_low = copyin_ptr(this->code_heap5_address + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); this->code_heap5_high = copyin_ptr(this->code_heap5_address + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); @@ -309,10 +312,10 @@ /!this->done && this->codecache/ { MARK_LINE; - /* + /* * Get code heap configuration */ - this->code_heap_low = copyin_ptr(this->code_heap_address + + this->code_heap_low = copyin_ptr(this->code_heap_address + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); this->code_heap_segmap_low = copyin_ptr(this->code_heap_address + OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low); @@ -506,10 +509,10 @@ /* * Now we need to add a trailing '\0' and possibly a tag character. 
*/ - this->result[this->klassSymbolLength + 1 + + this->result[this->klassSymbolLength + 1 + this->nameSymbolLength + this->signatureSymbolLength] = this->suffix; - this->result[this->klassSymbolLength + 2 + + this->result[this->klassSymbolLength + 2 + this->nameSymbolLength + this->signatureSymbolLength] = '\0'; @@ -519,7 +522,7 @@ dtrace:helper:ustack: /this->done && this->error == (char *) NULL/ { - this->result; + this->result; } dtrace:helper:ustack: diff --git a/src/hotspot/os/windows/globals_windows.hpp b/src/hotspot/os/windows/globals_windows.hpp --- a/src/hotspot/os/windows/globals_windows.hpp +++ b/src/hotspot/os/windows/globals_windows.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,10 +37,7 @@ notproduct, \ range, \ constraint, \ - writeable) \ - \ - product(bool, UseUTCFileTimestamp, true, \ - "Adjust the timestamp returned from stat() to be UTC") + writeable) // diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp --- a/src/hotspot/os/windows/os_windows.cpp +++ b/src/hotspot/os/windows/os_windows.cpp @@ -363,6 +363,25 @@ return sz; } +size_t os::committed_stack_size(address bottom, size_t size) { + MEMORY_BASIC_INFORMATION minfo; + address top = bottom + size; + size_t committed_size = 0; + + while (committed_size < size) { + // top is exclusive + VirtualQuery(top - 1, &minfo, sizeof(minfo)); + if ((minfo.State & MEM_COMMIT) != 0) { + committed_size += minfo.RegionSize; + top -= minfo.RegionSize; + } else { + break; + } + } + + return MIN2(committed_size, size); +} + struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { const struct tm* time_struct_ptr = localtime(clock); if (time_struct_ptr != NULL) { @@ -979,11 +998,6 @@ } -static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, - PMINIDUMP_EXCEPTION_INFORMATION, - PMINIDUMP_USER_STREAM_INFORMATION, - PMINIDUMP_CALLBACK_INFORMATION); - static HANDLE dumpFile = NULL; // Check if dump file can be created. @@ -1499,13 +1513,39 @@ if (nl != NULL) *nl = '\0'; } -int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) { - int ret = vsnprintf(buf, len, fmt, args); - // Get the correct buffer size if buf is too small - if (ret < 0) { - return _vscprintf(fmt, args); - } - return ret; +int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) { +#if _MSC_VER >= 1900 + // Starting with Visual Studio 2015, vsnprint is C99 compliant. + int result = ::vsnprintf(buf, len, fmt, args); + // If an encoding error occurred (result < 0) then it's not clear + // whether the buffer is NUL terminated, so ensure it is. + if ((result < 0) && (len > 0)) { + buf[len - 1] = '\0'; + } + return result; +#else + // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use + // _vsnprintf, whose behavior seems to be *mostly* consistent across + // versions. However, when len == 0, avoid _vsnprintf too, and just + // go straight to _vscprintf. The output is going to be truncated in + // that case, except in the unusual case of empty output. More + // importantly, the documentation for various versions of Visual Studio + // are inconsistent about the behavior of _vsnprintf when len == 0, + // including it possibly being an error. 
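+  // Illustrative example (hypothetical values): formatting "abcdef" into a
+  // 4-byte buffer makes the pre-2015 _vsnprintf return a negative value and
+  // can leave the buffer without a trailing NUL, so the code below both
+  // stores '\0' at buf[len - 1] and falls back to _vscprintf for the
+  // would-be length.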
+ int result = -1; + if (len > 0) { + result = _vsnprintf(buf, len, fmt, args); + // If output (including NUL terminator) is truncated, the buffer + // won't be NUL terminated. Add the trailing NUL specified by C99. + if ((result < 0) || (result >= len)) { + buf[len - 1] = '\0'; + } + } + if (result < 0) { + result = _vscprintf(fmt, args); + } + return result; +#endif // _MSC_VER dispatch } static inline time_t get_mtime(const char* filename) { diff --git a/src/hotspot/os/windows/semaphore_windows.hpp b/src/hotspot/os/windows/semaphore_windows.hpp --- a/src/hotspot/os/windows/semaphore_windows.hpp +++ b/src/hotspot/os/windows/semaphore_windows.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "memory/allocation.hpp" -#include +#include class WindowsSemaphore : public CHeapObj { HANDLE _semaphore; diff --git a/src/hotspot/os/windows/sharedRuntimeRem.cpp b/src/hotspot/os/windows/sharedRuntimeRem.cpp --- a/src/hotspot/os/windows/sharedRuntimeRem.cpp +++ b/src/hotspot/os/windows/sharedRuntimeRem.cpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "runtime/sharedRuntime.hpp" #ifdef _WIN64 // These are copied defines from fdlibm.h, this allows us to keep the code diff --git a/src/hotspot/os/windows/symbolengine.cpp b/src/hotspot/os/windows/symbolengine.cpp --- a/src/hotspot/os/windows/symbolengine.cpp +++ b/src/hotspot/os/windows/symbolengine.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "utilities/globalDefinitions.hpp" #include "symbolengine.hpp" #include "utilities/debug.hpp" +#include "utilities/ostream.hpp" #include "windbghelp.hpp" #include diff --git a/src/hotspot/os/windows/windbghelp.hpp b/src/hotspot/os/windows/windbghelp.hpp --- a/src/hotspot/os/windows/windbghelp.hpp +++ b/src/hotspot/os/windows/windbghelp.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,8 @@ * */ -#ifndef OS_WINDOWS_VM_DBGHELPLOADER_HPP -#define OS_WINDOWS_VM_DBGHELPLOADER_HPP +#ifndef OS_WINDOWS_WINDBGHELP_HPP +#define OS_WINDOWS_WINDBGHELP_HPP #include #include @@ -71,6 +71,5 @@ }; +#endif // OS_WINDOWS_WINDBGHELP_HPP -#endif // OS_WINDOWS_VM_DBGHELPLOADER_HPP - diff --git a/src/hotspot/os_cpu/bsd_x86/copy_bsd_x86.inline.hpp b/src/hotspot/os_cpu/bsd_x86/copy_bsd_x86.inline.hpp --- a/src/hotspot/os_cpu/bsd_x86/copy_bsd_x86.inline.hpp +++ b/src/hotspot/os_cpu/bsd_x86/copy_bsd_x86.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef OS_CPU_BSD_X86_VM_COPY_BSD_X86_INLINE_HPP #define OS_CPU_BSD_X86_VM_COPY_BSD_X86_INLINE_HPP -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 (void)memmove(to, from, count * HeapWordSize); #else @@ -70,7 +70,7 @@ #endif // AMD64 } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 switch (count) { case 8: to[7] = from[7]; @@ -108,7 +108,7 @@ #endif // AMD64 } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 switch (count) { case 8: to[7] = from[7]; @@ -132,15 +132,15 @@ #endif // AMD64 } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_words(from, to, count); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { #ifdef AMD64 (void)memmove(to, from, count); #else @@ -219,25 +219,25 @@ #endif // AMD64 } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { _Copy_conjoint_jshorts_atomic(from, to, count); } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { #ifdef AMD64 _Copy_conjoint_jints_atomic(from, to, count); #else assert(HeapWordSize == BytesPerInt, "heapwords and jints must be the same size"); // pd_conjoint_words is word-atomic in this implementation. 
- pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count); + pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count); #endif // AMD64 } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { #ifdef AMD64 _Copy_conjoint_jlongs_atomic(from, to, count); #else @@ -262,47 +262,47 @@ #endif // AMD64 } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - _Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #else assert(HeapWordSize == BytesPerOop, "heapwords and oops must be the same size"); // pd_conjoint_words is word-atomic in this implementation. - pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count); + pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_bytes(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jshorts(from, to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 _Copy_arrayof_conjoint_jints(from, to, count); #else - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 _Copy_arrayof_conjoint_jlongs(from, to, count); #else - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); _Copy_arrayof_conjoint_jlongs(from, to, count); #else - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); #endif // AMD64 } diff --git a/src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.inline.hpp b/src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.inline.hpp --- a/src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.inline.hpp +++ b/src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -99,7 +99,7 @@ : "memory", "cc"); \ } -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory"); if (__builtin_expect(count <= 8, 1)) { COPY_SMALL(from, to, count); @@ -108,7 +108,7 @@ _Copy_conjoint_words(from, to, count); } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { if (__builtin_constant_p(count)) { memcpy(to, from, count * sizeof(HeapWord)); return; @@ -121,7 +121,7 @@ _Copy_disjoint_words(from, to, count); } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory"); if (__builtin_expect(count <= 8, 1)) { COPY_SMALL(from, to, count); @@ -130,56 +130,56 @@ _Copy_disjoint_words(from, to, count); } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_words(from, to, count); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { (void)memmove(to, from, count); } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { _Copy_conjoint_jshorts_atomic(from, to, count); } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { _Copy_conjoint_jints_atomic(from, to, count); } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { _Copy_conjoint_jlongs_atomic(from, to, count); } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - _Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_bytes(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jshorts(from, to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { 
_Copy_arrayof_conjoint_jints(from, to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jlongs(from, to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { assert(!UseCompressedOops, "foo!"); assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); _Copy_arrayof_conjoint_jlongs(from, to, count); diff --git a/src/hotspot/os_cpu/linux_arm/copy_linux_arm.inline.hpp b/src/hotspot/os_cpu/linux_arm/copy_linux_arm.inline.hpp --- a/src/hotspot/os_cpu/linux_arm/copy_linux_arm.inline.hpp +++ b/src/hotspot/os_cpu/linux_arm/copy_linux_arm.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef OS_CPU_LINUX_ARM_VM_COPY_LINUX_ARM_INLINE_HPP #define OS_CPU_LINUX_ARM_VM_COPY_LINUX_ARM_INLINE_HPP -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AARCH64 _Copy_conjoint_words(from, to, count * HeapWordSize); #else @@ -34,7 +34,7 @@ #endif } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AARCH64 _Copy_disjoint_words(from, to, count * HeapWordSize); #else @@ -42,27 +42,27 @@ #endif // AARCH64 } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_words(from, to, count); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { memmove(to, from, count); } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { #ifdef AARCH64 _Copy_conjoint_jshorts_atomic(from, to, count * BytesPerShort); #else @@ -70,58 +70,58 @@ #endif } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { #ifdef AARCH64 _Copy_conjoint_jints_atomic(from, to, count * BytesPerInt); #else assert(HeapWordSize == BytesPerInt, "heapwords and jints must be the same size"); // pd_conjoint_words is word-atomic in this implementation. 
- pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count); + pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count); #endif } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { #ifdef AARCH64 assert(HeapWordSize == BytesPerLong, "64-bit architecture"); - pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count); + pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count); #else _Copy_conjoint_jlongs_atomic(to, from, count * BytesPerLong); #endif } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { #ifdef AARCH64 if (UseCompressedOops) { assert(BytesPerHeapOop == BytesPerInt, "compressed oops"); - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } else { assert(BytesPerHeapOop == BytesPerLong, "64-bit architecture"); - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } #else assert(BytesPerHeapOop == BytesPerInt, "32-bit architecture"); - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); #endif } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_bytes_atomic((void*)from, (void*)to, count); +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_bytes_atomic((const void*)from, (void*)to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count); +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); } #endif // OS_CPU_LINUX_ARM_VM_COPY_LINUX_ARM_INLINE_HPP diff --git a/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp --- a/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp +++ b/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "gc/shared/barrierSet.inline.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/cardTableModRefBS.inline.hpp" #include "gc/shared/collectedHeap.hpp" #include "memory/metaspaceShared.hpp" @@ -42,7 +43,7 @@ } if (bs->is_a(BarrierSet::CardTableModRef)) { - _card_table_base = (address) (barrier_set_cast(bs)->byte_map_base); + _card_table_base = (address) (barrier_set_cast(bs)->card_table()->byte_map_base()); } else { _card_table_base = NULL; } diff --git a/src/hotspot/os_cpu/linux_x86/copy_linux_x86.inline.hpp b/src/hotspot/os_cpu/linux_x86/copy_linux_x86.inline.hpp --- a/src/hotspot/os_cpu/linux_x86/copy_linux_x86.inline.hpp +++ b/src/hotspot/os_cpu/linux_x86/copy_linux_x86.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef OS_CPU_LINUX_X86_VM_COPY_LINUX_X86_INLINE_HPP #define OS_CPU_LINUX_X86_VM_COPY_LINUX_X86_INLINE_HPP -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 (void)memmove(to, from, count * HeapWordSize); #else @@ -70,7 +70,7 @@ #endif // AMD64 } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 switch (count) { case 8: to[7] = from[7]; @@ -108,7 +108,7 @@ #endif // AMD64 } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 switch (count) { case 8: to[7] = from[7]; @@ -132,15 +132,15 @@ #endif // AMD64 } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_words(from, to, count); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { #ifdef AMD64 (void)memmove(to, from, count); #else @@ -219,25 +219,25 @@ #endif // AMD64 } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { _Copy_conjoint_jshorts_atomic(from, to, count); } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { #ifdef AMD64 _Copy_conjoint_jints_atomic(from, to, count); #else assert(HeapWordSize == BytesPerInt, "heapwords and jints must be the same size"); // pd_conjoint_words is word-atomic in this implementation. 
- pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count); + pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count); #endif // AMD64 } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { #ifdef AMD64 _Copy_conjoint_jlongs_atomic(from, to, count); #else @@ -262,47 +262,47 @@ #endif // AMD64 } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - _Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #else assert(HeapWordSize == BytesPerOop, "heapwords and oops must be the same size"); // pd_conjoint_words is word-atomic in this implementation. - pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count); + pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_bytes(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jshorts(from, to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 _Copy_arrayof_conjoint_jints(from, to, count); #else - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 _Copy_arrayof_conjoint_jlongs(from, to, count); #else - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); _Copy_arrayof_conjoint_jlongs(from, to, count); #else - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); #endif // AMD64 } diff --git a/src/hotspot/os_cpu/solaris_x86/copy_solaris_x86.inline.hpp b/src/hotspot/os_cpu/solaris_x86/copy_solaris_x86.inline.hpp --- a/src/hotspot/os_cpu/solaris_x86/copy_solaris_x86.inline.hpp +++ b/src/hotspot/os_cpu/solaris_x86/copy_solaris_x86.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,11 +25,11 @@ #ifndef OS_CPU_SOLARIS_X86_VM_COPY_SOLARIS_X86_INLINE_HPP #define OS_CPU_SOLARIS_X86_VM_COPY_SOLARIS_X86_INLINE_HPP -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifndef AMD64 (void)memcpy(to, from, count * HeapWordSize); #else @@ -50,7 +50,7 @@ #endif // AMD64 } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -68,15 +68,15 @@ } } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { #ifdef AMD64 (void)memmove(to, from, count); #else @@ -84,53 +84,53 @@ #endif // AMD64 } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { _Copy_conjoint_jshorts_atomic(from, to, count); } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { _Copy_conjoint_jints_atomic(from, to, count); } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { // Guarantee use of fild/fistp or xmm regs via some asm code, because compilers won't. 
_Copy_conjoint_jlongs_atomic(from, to, count); } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - _Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #else - _Copy_conjoint_jints_atomic((jint*)from, (jint*)to, count); + _Copy_conjoint_jints_atomic((const jint*)from, (jint*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_bytes(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jshorts(from, to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jints(from, to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 _Copy_arrayof_conjoint_jlongs(from, to, count); #else - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); _Copy_arrayof_conjoint_jlongs(from, to, count); diff --git a/src/hotspot/os_cpu/windows_x86/copy_windows_x86.inline.hpp b/src/hotspot/os_cpu/windows_x86/copy_windows_x86.inline.hpp --- a/src/hotspot/os_cpu/windows_x86/copy_windows_x86.inline.hpp +++ b/src/hotspot/os_cpu/windows_x86/copy_windows_x86.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
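The `pd_disjoint_words_atomic` variants above use a fall-through switch so short copies dispatch without loop overhead and each word is written by one aligned store. A minimal sketch of the idiom (standalone C++; identifiers are illustrative, and the per-word "atomicity" relies on aligned word stores being single stores on x86, as the HotSpot stubs do):

    #include <cstddef>

    typedef unsigned long Word;   // stand-in for HeapWord

    static void disjoint_words_atomic_sketch(const Word* from, Word* to, std::size_t count) {
      switch (count) {
      case 8: to[7] = from[7];    // fall through
      case 7: to[6] = from[6];    // fall through
      case 6: to[5] = from[5];    // fall through
      case 5: to[4] = from[4];    // fall through
      case 4: to[3] = from[3];    // fall through
      case 3: to[2] = from[2];    // fall through
      case 2: to[1] = from[1];    // fall through
      case 1: to[0] = from[0];    // fall through
      case 0: break;
      default:
        while (count-- > 0) {
          *to++ = *from++;        // a racing reader sees old or new, never a torn word
        }
        break;
      }
    }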
* * This code is free software; you can redistribute it and/or modify it @@ -25,11 +25,11 @@ #ifndef OS_CPU_WINDOWS_X86_VM_COPY_WINDOWS_X86_INLINE_HPP #define OS_CPU_WINDOWS_X86_VM_COPY_WINDOWS_X86_INLINE_HPP -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 switch (count) { case 8: to[7] = from[7]; @@ -50,7 +50,7 @@ #endif // AMD64 } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -68,23 +68,23 @@ } } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { (void)memmove(to, from, count); } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -100,7 +100,7 @@ } } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -116,10 +116,10 @@ } } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); #else // Guarantee use of fild/fistp or xmm regs via some asm code, because compilers won't. 
__asm { @@ -149,7 +149,7 @@ #endif // AMD64 } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { // Do better than this: inline memmove body NEEDS CLEANUP if (from > to) { while (count-- > 0) { @@ -166,7 +166,7 @@ } } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 pd_conjoint_bytes_atomic(from, to, count); #else @@ -174,20 +174,20 @@ #endif // AMD64 } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count); +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); } #endif // OS_CPU_WINDOWS_X86_VM_COPY_WINDOWS_X86_INLINE_HPP diff --git a/src/hotspot/share/adlc/arena.hpp b/src/hotspot/share/adlc/arena.hpp --- a/src/hotspot/share/adlc/arena.hpp +++ b/src/hotspot/share/adlc/arena.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,6 +69,11 @@ //------------------------------Chunk------------------------------------------ // Linked list of raw memory chunks class Chunk: public CHeapObj { + private: + // This ordinary operator delete is needed even though not used, so the + // below two-argument operator delete will be treated as a placement + // delete rather than an ordinary sized delete; see C++14 3.7.4.2/p2. + void operator delete(void* p); public: void* operator new(size_t size, size_t length) throw(); void operator delete(void* p, size_t length); diff --git a/src/hotspot/share/aot/aotCodeHeap.cpp b/src/hotspot/share/aot/aotCodeHeap.cpp --- a/src/hotspot/share/aot/aotCodeHeap.cpp +++ b/src/hotspot/share/aot/aotCodeHeap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
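The new private `operator delete` in adlc's Chunk is worth unpacking. Under C++14 3.7.4.2/p2, a member `operator delete(void*, size_t)` counts as the usual (sized) deallocation function unless the class also declares the one-argument form; declaring the one-argument form, even unused and private, is therefore what makes the two-argument overload pair with the placement `operator new`. A compilable sketch of the rule (hypothetical `Blob` type, not the JDK class):

    #include <cstddef>
    #include <cstdlib>

    struct Blob {
      void* operator new(std::size_t size, std::size_t length) throw() {
        return std::malloc(size + length);   // room for a trailing payload
      }
      // Placement delete: invoked only if construction after the new above throws.
      void operator delete(void* p, std::size_t /*length*/) {
        std::free(p);
      }
     private:
      // Ordinary delete; declared (never defined) so the two-argument overload
      // is not taken to be the C++14 sized usual deallocation function.
      void operator delete(void* p);
    };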
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,10 @@ #include "aot/aotCodeHeap.hpp" #include "aot/aotLoader.hpp" +#include "ci/ciUtilities.hpp" #include "classfile/javaAssertions.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "gc/g1/heapRegion.hpp" #include "gc/shared/gcLocker.hpp" #include "interpreter/abstractInterpreter.hpp" @@ -194,7 +197,7 @@ } AOTLib::~AOTLib() { - free((void*) _name); + os::free((void*) _name); } AOTCodeHeap::~AOTCodeHeap() { @@ -207,7 +210,7 @@ } AOTLib::AOTLib(void* handle, const char* name, int dso_id) : _valid(true), _dl_handle(handle), _dso_id(dso_id) { - _name = (const char*) strdup(name); + _name = (const char*) os::strdup(name); // Verify that VM runs with the same parameters as AOT tool. _config = (AOTConfiguration*) load_symbol("A.config"); @@ -539,8 +542,7 @@ _lib_symbols_initialized = true; CollectedHeap* heap = Universe::heap(); - CardTableModRefBS* ct = (CardTableModRefBS*)(heap->barrier_set()); - SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_card_table_address", address, ct->byte_map_base); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_card_table_address", address, ci_card_table_address()); SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_top_address", address, (heap->supports_inline_contig_alloc() ? heap->top_addr() : NULL)); SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_end_address", address, (heap->supports_inline_contig_alloc() ? heap->end_addr() : NULL)); SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_polling_page", address, os::get_polling_page()); diff --git a/src/hotspot/share/asm/codeBuffer.hpp b/src/hotspot/share/asm/codeBuffer.hpp --- a/src/hotspot/share/asm/codeBuffer.hpp +++ b/src/hotspot/share/asm/codeBuffer.hpp @@ -380,7 +380,7 @@ OopRecorder _default_oop_recorder; // override with initialize_oop_recorder Arena* _overflow_arena; - address _last_membar; // used to merge consecutive memory barriers + address _last_insn; // used to merge consecutive memory barriers, loads or stores. address _decode_begin; // start address for decode address decode_begin(); @@ -395,7 +395,7 @@ _decode_begin = NULL; _overflow_arena = NULL; _code_strings = CodeStrings(); - _last_membar = NULL; + _last_insn = NULL; } void initialize(address code_start, csize_t code_size) { @@ -587,9 +587,9 @@ OopRecorder* oop_recorder() const { return _oop_recorder; } CodeStrings& strings() { return _code_strings; } - address last_membar() const { return _last_membar; } - void set_last_membar(address a) { _last_membar = a; } - void clear_last_membar() { set_last_membar(NULL); } + address last_insn() const { return _last_insn; } + void set_last_insn(address a) { _last_insn = a; } + void clear_last_insn() { set_last_insn(NULL); } void free_strings() { if (!_code_strings.is_null()) { diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp --- a/src/hotspot/share/c1/c1_LIRGenerator.cpp +++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
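The `_last_membar` to `_last_insn` rename above generalizes a peephole hook: the code buffer remembers where the previous instruction started so the assembler can merge a new emission with it, for barriers and now also for adjacent loads and stores. A toy sketch of the bookkeeping (standalone C++; `EmitBuffer` and the instruction enum are invented for illustration):

    #include <cstdint>
    #include <vector>

    enum Insn : std::uint8_t { kBarrier, kLoad, kStore };

    struct EmitBuffer {
      std::vector<Insn> code;
      int last_insn = -1;                 // index of previous insn; -1 = merging not allowed

      void emit(Insn i) {
        // A barrier directly after another barrier adds no ordering: drop it.
        if (i == kBarrier && last_insn >= 0 && code[last_insn] == kBarrier) {
          return;
        }
        last_insn = static_cast<int>(code.size());
        code.push_back(i);
      }

      void clear_last_insn() { last_insn = -1; }   // e.g. at branch targets
    };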
* * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,8 @@ #include "ci/ciArrayKlass.hpp" #include "ci/ciInstance.hpp" #include "ci/ciObjArray.hpp" +#include "ci/ciUtilities.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/cardTableModRefBS.hpp" #include "gc/shenandoah/brooksPointer.hpp" #include "runtime/arguments.hpp" @@ -1472,11 +1474,7 @@ Shenandoah_pre_barrier(addr_opr, pre_val, do_load, patch, info); break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: - // No pre barriers - break; - case BarrierSet::ModRef: + case BarrierSet::CardTableModRef: // No pre barriers break; default : @@ -1505,10 +1503,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: - break; - case BarrierSet::ModRef: + case BarrierSet::CardTableModRef: break; default : ShouldNotReachHere(); @@ -1525,13 +1520,9 @@ Shenandoah_post_barrier(addr, new_val); break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableForRS: - case BarrierSet::CardTableExtension: + case BarrierSet::CardTableModRef: CardTableModRef_post_barrier(addr, new_val); break; - case BarrierSet::ModRef: - // No post barriers - break; default : ShouldNotReachHere(); } @@ -1780,9 +1771,7 @@ //////////////////////////////////////////////////////////////////////// void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) { - CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(_bs); - assert(sizeof(*(ct->byte_map_base)) == sizeof(jbyte), "adjust this code"); - LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base); + LIR_Const* card_table_base = new LIR_Const(ci_card_table_address()); if (addr->is_address()) { LIR_Address* address = addr->as_address_ptr(); // ptr cannot be an object because we use this barrier for array card marks @@ -1804,9 +1793,9 @@ LIR_Opr tmp = new_pointer_register(); if (TwoOperandLIRForm) { __ move(addr, tmp); - __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp); + __ unsigned_shift_right(tmp, CardTable::card_shift, tmp); } else { - __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp); + __ unsigned_shift_right(addr, CardTable::card_shift, tmp); } LIR_Address* card_addr; @@ -1816,7 +1805,7 @@ card_addr = new LIR_Address(tmp, load_constant(card_table_base), T_BYTE); } - LIR_Opr dirty = LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val()); + LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val()); if (UseCondCardMark) { LIR_Opr cur_value = new_register(T_INT); if (UseConcMarkSweepGC) { diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp --- a/src/hotspot/share/c1/c1_Runtime1.cpp +++ b/src/hotspot/share/c1/c1_Runtime1.cpp @@ -47,6 +47,7 @@ #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "oops/access.inline.hpp" +#include "oops/objArrayOop.inline.hpp" #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" diff --git a/src/hotspot/share/ci/ciArray.cpp b/src/hotspot/share/ci/ciArray.cpp --- a/src/hotspot/share/ci/ciArray.cpp +++ b/src/hotspot/share/ci/ciArray.cpp @@ -30,7 +30,7 @@ #include "ci/ciUtilities.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" -#include "oops/typeArrayOop.hpp" +#include "oops/typeArrayOop.inline.hpp" // ciArray // diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp --- a/src/hotspot/share/ci/ciEnv.cpp +++
b/src/hotspot/share/ci/ciEnv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,6 +53,7 @@ #include "prims/jvmtiExport.hpp" #include "runtime/init.hpp" #include "runtime/reflection.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/thread.inline.hpp" #include "trace/tracing.hpp" diff --git a/src/hotspot/share/ci/ciInstanceKlass.cpp b/src/hotspot/share/ci/ciInstanceKlass.cpp --- a/src/hotspot/share/ci/ciInstanceKlass.cpp +++ b/src/hotspot/share/ci/ciInstanceKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,6 +35,7 @@ #include "oops/oop.inline.hpp" #include "oops/fieldStreams.hpp" #include "runtime/fieldDescriptor.hpp" +#include "runtime/jniHandles.inline.hpp" // ciInstanceKlass // diff --git a/src/hotspot/share/ci/ciObject.cpp b/src/hotspot/share/ci/ciObject.cpp --- a/src/hotspot/share/ci/ciObject.cpp +++ b/src/hotspot/share/ci/ciObject.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "ci/ciUtilities.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "oops/oop.inline.hpp" +#include "runtime/jniHandles.inline.hpp" // ciObject // @@ -98,6 +99,14 @@ } // ------------------------------------------------------------------ +// ciObject::get_oop +// +// Get the oop of this ciObject. +oop ciObject::get_oop() const { + return JNIHandles::resolve_non_null(_handle); +} + +// ------------------------------------------------------------------ // ciObject::klass // // Get the ciKlass of this ciObject. diff --git a/src/hotspot/share/ci/ciObject.hpp b/src/hotspot/share/ci/ciObject.hpp --- a/src/hotspot/share/ci/ciObject.hpp +++ b/src/hotspot/share/ci/ciObject.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -67,10 +67,7 @@ jobject handle() const { return _handle; } // Get the VM oop that this object holds. 
- oop get_oop() const { - assert(_handle != NULL, "null oop"); - return JNIHandles::resolve_non_null(_handle); - } + oop get_oop() const; void init_flags_from(oop x); diff --git a/src/hotspot/share/ci/ciTypeArray.cpp b/src/hotspot/share/ci/ciTypeArray.cpp --- a/src/hotspot/share/ci/ciTypeArray.cpp +++ b/src/hotspot/share/ci/ciTypeArray.cpp @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "ci/ciTypeArray.hpp" #include "ci/ciUtilities.hpp" +#include "oops/typeArrayOop.inline.hpp" // ciTypeArray // diff --git a/src/hotspot/share/ci/ciUtilities.cpp b/src/hotspot/share/ci/ciUtilities.cpp --- a/src/hotspot/share/ci/ciUtilities.cpp +++ b/src/hotspot/share/ci/ciUtilities.cpp @@ -24,6 +24,9 @@ #include "precompiled.hpp" #include "ci/ciUtilities.hpp" +#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTable.hpp" +#include "memory/universe.hpp" // ciUtilities // @@ -43,3 +46,13 @@ char c = type2char(t); return c ? c : 'X'; } + +// ------------------------------------------------------------------ +// card_table_base +jbyte *ci_card_table_address() { + BarrierSet* bs = Universe::heap()->barrier_set(); + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust users of this code"); + return ct->byte_map_base(); +} diff --git a/src/hotspot/share/ci/ciUtilities.hpp b/src/hotspot/share/ci/ciUtilities.hpp --- a/src/hotspot/share/ci/ciUtilities.hpp +++ b/src/hotspot/share/ci/ciUtilities.hpp @@ -27,6 +27,7 @@ #include "ci/ciEnv.hpp" #include "runtime/interfaceSupport.hpp" +#include "utilities/globalDefinitions.hpp" // The following routines and definitions are used internally in the // compiler interface. @@ -114,4 +115,9 @@ const char* basictype_to_str(BasicType t); const char basictype_to_char(BasicType t); +jbyte *ci_card_table_address(); +template <typename T> T ci_card_table_address_as() { return reinterpret_cast<T>(ci_card_table_address()); } + #endif // SHARE_VM_CI_CIUTILITIES_HPP diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp --- a/src/hotspot/share/classfile/classLoader.cpp +++ b/src/hotspot/share/classfile/classLoader.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -137,7 +137,6 @@ PerfCounter* ClassLoader::_sync_JVMDefineClassLockFreeCounter = NULL; PerfCounter* ClassLoader::_sync_JNIDefineClassLockFreeCounter = NULL; PerfCounter* ClassLoader::_unsafe_defineClassCallCounter = NULL; -PerfCounter* ClassLoader::_isUnsyncloadClass = NULL; PerfCounter* ClassLoader::_load_instance_class_failCounter = NULL; GrowableArray<ModulePatchPath*>* ClassLoader::_patch_mod_entries = NULL; @@ -1642,9 +1641,6 @@ // of the bug fix of 6365597. They are mainly focused on finding out // the behavior of system & user-defined classloader lock, whether // ClassLoader.loadClass/findClass is being called synchronized or not. - // Also two additional counters are created to see whether 'UnsyncloadClass' - // flag is being set or not and how many times load_instance_class call - // fails with linkageError etc.
NEWPERFEVENTCOUNTER(_sync_systemLoaderLockContentionRate, SUN_CLS, "systemLoaderLockContentionRate"); NEWPERFEVENTCOUNTER(_sync_nonSystemLoaderLockContentionRate, SUN_CLS, @@ -1660,14 +1656,8 @@ NEWPERFEVENTCOUNTER(_unsafe_defineClassCallCounter, SUN_CLS, "unsafeDefineClassCalls"); - NEWPERFEVENTCOUNTER(_isUnsyncloadClass, SUN_CLS, "isUnsyncloadClassSet"); NEWPERFEVENTCOUNTER(_load_instance_class_failCounter, SUN_CLS, "loadInstanceClassFailRate"); - - // increment the isUnsyncloadClass counter if UnsyncloadClass is set. - if (UnsyncloadClass) { - _isUnsyncloadClass->inc(); - } } // lookup zip library entry points diff --git a/src/hotspot/share/classfile/classLoader.hpp b/src/hotspot/share/classfile/classLoader.hpp --- a/src/hotspot/share/classfile/classLoader.hpp +++ b/src/hotspot/share/classfile/classLoader.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -207,7 +207,6 @@ static PerfCounter* _sync_JNIDefineClassLockFreeCounter; static PerfCounter* _unsafe_defineClassCallCounter; - static PerfCounter* _isUnsyncloadClass; static PerfCounter* _load_instance_class_failCounter; // The boot class path consists of 3 ordered pieces: diff --git a/src/hotspot/share/classfile/classLoaderData.cpp b/src/hotspot/share/classfile/classLoaderData.cpp --- a/src/hotspot/share/classfile/classLoaderData.cpp +++ b/src/hotspot/share/classfile/classLoaderData.cpp @@ -999,9 +999,8 @@ if (!is_anonymous) { - ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(BarrierSet::barrier_set()->write_barrier(loader())); // First, Atomically set it - ClassLoaderData* old = Atomic::cmpxchg(cld, cld_addr, (ClassLoaderData*)NULL); + ClassLoaderData* old = java_lang_ClassLoader::cmpxchg_loader_data(cld, loader(), NULL); if (old != NULL) { delete cld; // Returns the data. diff --git a/src/hotspot/share/classfile/compactHashtable.cpp b/src/hotspot/share/classfile/compactHashtable.cpp --- a/src/hotspot/share/classfile/compactHashtable.cpp +++ b/src/hotspot/share/classfile/compactHashtable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -146,27 +146,23 @@ cht->init(base_address, _num_entries, _num_buckets, _compact_buckets->data(), _compact_entries->data()); - if (log_is_enabled(Info, cds, hashtables)) { - ResourceMark rm; - LogMessage(cds, hashtables) msg; - stringStream info_stream; - + LogMessage(cds, hashtables) msg; + if (msg.is_info()) { double avg_cost = 0.0; if (_num_entries > 0) { avg_cost = double(table_bytes)/double(_num_entries); } - info_stream.print_cr("Shared %s table stats -------- base: " PTR_FORMAT, + msg.info("Shared %s table stats -------- base: " PTR_FORMAT, table_name, (intptr_t)base_address); - info_stream.print_cr("Number of entries : %9d", _num_entries); - info_stream.print_cr("Total bytes used : %9d", table_bytes); - info_stream.print_cr("Average bytes per entry : %9.3f", avg_cost); - info_stream.print_cr("Average bucket size : %9.3f", summary.avg()); - info_stream.print_cr("Variance of bucket size : %9.3f", summary.variance()); - info_stream.print_cr("Std. dev. of bucket size: %9.3f", summary.sd()); - info_stream.print_cr("Empty buckets : %9d", _num_empty_buckets); - info_stream.print_cr("Value_Only buckets : %9d", _num_value_only_buckets); - info_stream.print_cr("Other buckets : %9d", _num_other_buckets); - msg.info("%s", info_stream.as_string()); + msg.info("Number of entries : %9d", _num_entries); + msg.info("Total bytes used : %9d", table_bytes); + msg.info("Average bytes per entry : %9.3f", avg_cost); + msg.info("Average bucket size : %9.3f", summary.avg()); + msg.info("Variance of bucket size : %9.3f", summary.variance()); + msg.info("Std. dev. of bucket size: %9.3f", summary.sd()); + msg.info("Empty buckets : %9d", _num_empty_buckets); + msg.info("Value_Only buckets : %9d", _num_value_only_buckets); + msg.info("Other buckets : %9d", _num_other_buckets); } } diff --git a/src/hotspot/share/classfile/javaAssertions.cpp b/src/hotspot/share/classfile/javaAssertions.cpp --- a/src/hotspot/share/classfile/javaAssertions.cpp +++ b/src/hotspot/share/classfile/javaAssertions.cpp @@ -31,6 +31,7 @@ #include "memory/oopFactory.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "runtime/handles.inline.hpp" bool JavaAssertions::_userDefault = false; diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp --- a/src/hotspot/share/classfile/javaClasses.cpp +++ b/src/hotspot/share/classfile/javaClasses.cpp @@ -46,13 +46,14 @@ #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" -#include "oops/typeArrayOop.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "prims/resolvedMethodTable.hpp" #include "runtime/fieldDescriptor.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/safepoint.hpp" #include "runtime/thread.inline.hpp" #include "runtime/vframe.hpp" @@ -3405,9 +3406,7 @@ DependencyContext java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(oop call_site) { assert(java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(call_site), ""); - // DependencyContext can write to the field address -> need write barrier. 
- call_site = BarrierSet::barrier_set()->write_barrier(call_site); - intptr_t* vmdeps_addr = (intptr_t*)call_site->address_field_addr(_vmdependencies_offset); + intptr_t* vmdeps_addr = (intptr_t*)call_site->field_addr(_vmdependencies_offset); DependencyContext dep_ctx(vmdeps_addr); return dep_ctx; } @@ -3462,14 +3461,14 @@ int java_lang_ClassLoader::name_offset = -1; int java_lang_ClassLoader::unnamedModule_offset = -1; -ClassLoaderData** java_lang_ClassLoader::loader_data_addr(oop loader) { - assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop"); - return (ClassLoaderData**) loader->address_field_addr(_loader_data_offset); -} - ClassLoaderData* java_lang_ClassLoader::loader_data(oop loader) { - loader = BarrierSet::barrier_set()->read_barrier(loader); - return *java_lang_ClassLoader::loader_data_addr(loader); + assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop"); + return HeapAccess<>::load_at(loader, _loader_data_offset); +} + +ClassLoaderData* java_lang_ClassLoader::cmpxchg_loader_data(ClassLoaderData* new_data, oop loader, ClassLoaderData* expected_data) { + assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop"); + return HeapAccess<>::atomic_cmpxchg_at(new_data, loader, _loader_data_offset, expected_data); } void java_lang_ClassLoader::compute_offsets() { diff --git a/src/hotspot/share/classfile/javaClasses.hpp b/src/hotspot/share/classfile/javaClasses.hpp --- a/src/hotspot/share/classfile/javaClasses.hpp +++ b/src/hotspot/share/classfile/javaClasses.hpp @@ -881,15 +881,15 @@ static inline oop referent(oop ref); static inline void set_referent(oop ref, oop value); static inline void set_referent_raw(oop ref, oop value); - static inline HeapWord* referent_addr(oop ref); + static inline HeapWord* referent_addr_raw(oop ref); static inline oop next(oop ref); static inline void set_next(oop ref, oop value); static inline void set_next_raw(oop ref, oop value); - static inline HeapWord* next_addr(oop ref); + static inline HeapWord* next_addr_raw(oop ref); static inline oop discovered(oop ref); static inline void set_discovered(oop ref, oop value); static inline void set_discovered_raw(oop ref, oop value); - static inline HeapWord* discovered_addr(oop ref); + static inline HeapWord* discovered_addr_raw(oop ref); static bool is_referent_field(oop obj, ptrdiff_t offset); static inline bool is_phantom(oop ref); }; @@ -1229,8 +1229,8 @@ public: static void compute_offsets(); - static ClassLoaderData** loader_data_addr(oop loader); static ClassLoaderData* loader_data(oop loader); + static ClassLoaderData* cmpxchg_loader_data(ClassLoaderData* new_data, oop loader, ClassLoaderData* expected_data); static oop parent(oop loader); static oop name(oop loader); diff --git a/src/hotspot/share/classfile/javaClasses.inline.hpp b/src/hotspot/share/classfile/javaClasses.inline.hpp --- a/src/hotspot/share/classfile/javaClasses.inline.hpp +++ b/src/hotspot/share/classfile/javaClasses.inline.hpp @@ -100,8 +100,8 @@ void java_lang_ref_Reference::set_referent_raw(oop ref, oop value) { ref->obj_field_put_raw(referent_offset, value); } -HeapWord* java_lang_ref_Reference::referent_addr(oop ref) { - return ref->obj_field_addr<HeapWord>(referent_offset); +HeapWord* java_lang_ref_Reference::referent_addr_raw(oop ref) { + return ref->obj_field_addr_raw<HeapWord>(referent_offset); } oop java_lang_ref_Reference::next(oop ref) { return ref->obj_field(next_offset); @@ -112,8 +112,8 @@ void java_lang_ref_Reference::set_next_raw(oop ref, oop value) { ref->obj_field_put_raw(next_offset,
value); } -HeapWord* java_lang_ref_Reference::next_addr(oop ref) { - return ref->obj_field_addr<HeapWord>(next_offset); +HeapWord* java_lang_ref_Reference::next_addr_raw(oop ref) { + return ref->obj_field_addr_raw<HeapWord>(next_offset); } oop java_lang_ref_Reference::discovered(oop ref) { return ref->obj_field(discovered_offset); @@ -124,8 +124,8 @@ void java_lang_ref_Reference::set_discovered_raw(oop ref, oop value) { ref->obj_field_put_raw(discovered_offset, value); } -HeapWord* java_lang_ref_Reference::discovered_addr(oop ref) { - return ref->obj_field_addr<HeapWord>(discovered_offset); +HeapWord* java_lang_ref_Reference::discovered_addr_raw(oop ref) { + return ref->obj_field_addr_raw<HeapWord>(discovered_offset); } bool java_lang_ref_Reference::is_phantom(oop ref) { return InstanceKlass::cast(ref->klass())->reference_type() == REF_PHANTOM; } diff --git a/src/hotspot/share/classfile/modules.cpp b/src/hotspot/share/classfile/modules.cpp --- a/src/hotspot/share/classfile/modules.cpp +++ b/src/hotspot/share/classfile/modules.cpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. +* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,6 +44,7 @@ #include "runtime/arguments.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/reflection.hpp" #include "utilities/stringUtils.hpp" #include "utilities/utf8.hpp" diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp --- a/src/hotspot/share/classfile/stringTable.cpp +++ b/src/hotspot/share/classfile/stringTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ #include "memory/resourceArea.hpp" #include "oops/access.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/mutexLocker.hpp" #include "services/diagnosticCommand.hpp" diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp --- a/src/hotspot/share/classfile/systemDictionary.cpp +++ b/src/hotspot/share/classfile/systemDictionary.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
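The `cmpxchg_loader_data` introduced above replaces a raw pointer CAS with an Access API call so the required GC barriers are applied. The underlying claim-or-reuse pattern, sketched with std::atomic standing in for `HeapAccess<>::atomic_cmpxchg_at` (types and names here are illustrative only):

    #include <atomic>

    struct LoaderData { /* payload elided */ };

    static LoaderData* claim_loader_data(std::atomic<LoaderData*>& slot) {
      LoaderData* fresh = new LoaderData();
      LoaderData* expected = nullptr;
      // First thread to install its record wins; losers delete their copy and
      // adopt the winner's, mirroring the "if (old != NULL) delete cld" path.
      if (!slot.compare_exchange_strong(expected, fresh)) {
        delete fresh;
        return expected;
      }
      return fresh;
    }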
* * This code is free software; you can redistribute it and/or modify it @@ -106,7 +106,6 @@ oop SystemDictionary::_java_system_loader = NULL; oop SystemDictionary::_java_platform_loader = NULL; -bool SystemDictionary::_has_loadClassInternal = false; bool SystemDictionary::_has_checkPackageAccess = false; // lazily initialized klass variables @@ -159,7 +158,7 @@ // Parallel class loading check bool SystemDictionary::is_parallelCapable(Handle class_loader) { - if (UnsyncloadClass || class_loader.is_null()) return true; + if (class_loader.is_null()) return true; if (AlwaysLockClassLoader) return false; return java_lang_ClassLoader::parallelCapable(class_loader()); } @@ -503,8 +502,7 @@ // // We only get here if // 1) custom classLoader, i.e. not bootstrap classloader -// 2) UnsyncloadClass not set -// 3) custom classLoader has broken the class loader objectLock +// 2) custom classLoader has broken the class loader objectLock // so another thread got here in parallel // // lockObject must be held. @@ -594,7 +592,6 @@ } else { placeholder = placeholders()->get_entry(p_index, p_hash, name, loader_data); if (placeholder && placeholder->super_load_in_progress() ){ - // Before UnsyncloadClass: // We only get here if the application has released the // classloader lock when another thread was in the middle of loading a // superclass/superinterface for this class, and now @@ -687,9 +684,9 @@ // defining the class in parallel by accident. // This lock must be acquired here so the waiter will find // any successful result in the SystemDictionary and not attempt - // the define - // ParallelCapable Classloaders and the bootstrap classloader, - // or all classloaders with UnsyncloadClass do not acquire lock here + // the define. + // ParallelCapable Classloaders and the bootstrap classloader + // do not acquire lock here. bool DoObjectLock = true; if (is_parallelCapable(class_loader)) { DoObjectLock = false; @@ -765,14 +762,11 @@ // and that lock is still held when calling classloader's loadClass. // For these classloaders, we ensure that the first requestor // completes the load and other requestors wait for completion. - // case 3. UnsyncloadClass - don't use objectLocker - // With this flag, we allow parallel classloading of a - // class/classloader pair - // case4. Bootstrap classloader - don't own objectLocker + // case 3. Bootstrap classloader - don't own objectLocker // This classloader supports parallelism at the classloader level, // but only allows a single load of a class/classloader pair. // No performance benefit and no deadlock issues. - // case 5. parallelCapable user level classloaders - without objectLocker + // case 4. parallelCapable user level classloaders - without objectLocker // Allow parallel classloading of a class/classloader pair { @@ -788,7 +782,7 @@ // case 1: traditional: should never see load_in_progress. 
while (!class_has_been_loaded && oldprobe && oldprobe->instance_load_in_progress()) { - // case 4: bootstrap classloader: prevent futile classloading, + // case 3: bootstrap classloader: prevent futile classloading, // wait on first requestor if (class_loader.is_null()) { SystemDictionary_lock->wait(); @@ -811,7 +805,7 @@ } } // All cases: add LOAD_INSTANCE holding SystemDictionary_lock - // case 3: UnsyncloadClass || case 5: parallelCapable: allow competing threads to try + // case 4: parallelCapable: allow competing threads to try // LOAD_INSTANCE in parallel if (!throw_circularity_error && !class_has_been_loaded) { @@ -844,28 +838,6 @@ // Do actual loading k = load_instance_class(name, class_loader, THREAD); - // For UnsyncloadClass only - // If they got a linkageError, check if a parallel class load succeeded. - // If it did, then for bytecode resolution the specification requires - // that we return the same result we did for the other thread, i.e. the - // successfully loaded InstanceKlass - // Should not get here for classloaders that support parallelism - // with the new cleaner mechanism, even with AllowParallelDefineClass - // Bootstrap goes through here to allow for an extra guarantee check - if (UnsyncloadClass || (class_loader.is_null())) { - if (k == NULL && HAS_PENDING_EXCEPTION - && PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) { - MutexLocker mu(SystemDictionary_lock, THREAD); - InstanceKlass* check = find_class(d_hash, name, dictionary); - if (check != NULL) { - // Klass is already loaded, so just use it - k = check; - CLEAR_PENDING_EXCEPTION; - guarantee((!class_loader.is_null()), "dup definition for bootstrap loader?"); - } - } - } - // If everything was OK (no exceptions, no null return value), and // class_loader is NOT the defining loader, do a little more bookkeeping. if (!HAS_PENDING_EXCEPTION && k != NULL && @@ -1097,7 +1069,7 @@ HandleMark hm(THREAD); // Classloaders that support parallelism, e.g. bootstrap classloader, - // or all classloaders with UnsyncloadClass do not acquire lock here + // do not acquire lock here bool DoObjectLock = true; if (is_parallelCapable(class_loader)) { DoObjectLock = false; @@ -1556,40 +1528,17 @@ InstanceKlass* spec_klass = SystemDictionary::ClassLoader_klass(); - // Call public unsynchronized loadClass(String) directly for all class loaders - // for parallelCapable class loaders. JDK >=7, loadClass(String, boolean) will + // Call public unsynchronized loadClass(String) directly for all class loaders. + // For parallelCapable class loaders, JDK >=7, loadClass(String, boolean) will // acquire a class-name based lock rather than the class loader object lock. - // JDK < 7 already acquire the class loader lock in loadClass(String, boolean), - // so the call to loadClassInternal() was not required. - // - // UnsyncloadClass flag means both call loadClass(String) and do - // not acquire the class loader lock even for class loaders that are - // not parallelCapable. This was a risky transitional - // flag for diagnostic purposes only. It is risky to call - // custom class loaders without synchronization. - // WARNING If a custom class loader does NOT synchronizer findClass, or callers of - // findClass, the UnsyncloadClass flag risks unexpected timing bugs in the field. - // Do NOT assume this will be supported in future releases. 
- // - // Added MustCallLoadClassInternal in case we discover in the field - // a customer that counts on this call - if (MustCallLoadClassInternal && has_loadClassInternal()) { - JavaCalls::call_special(&result, - class_loader, - spec_klass, - vmSymbols::loadClassInternal_name(), - vmSymbols::string_class_signature(), - string, - CHECK_NULL); - } else { - JavaCalls::call_virtual(&result, - class_loader, - spec_klass, - vmSymbols::loadClass_name(), - vmSymbols::string_class_signature(), - string, - CHECK_NULL); - } + // JDK < 7 already acquire the class loader lock in loadClass(String, boolean). + JavaCalls::call_virtual(&result, + class_loader, + spec_klass, + vmSymbols::loadClass_name(), + vmSymbols::string_class_signature(), + string, + CHECK_NULL); assert(result.get_type() == T_OBJECT, "just checking"); oop obj = (oop) result.get_jobject(); @@ -1718,7 +1667,7 @@ { MutexLocker mu(SystemDictionary_lock, THREAD); // First check if class already defined - if (UnsyncloadClass || (is_parallelDefine(class_loader))) { + if (is_parallelDefine(class_loader)) { InstanceKlass* check = find_class(d_hash, name_h, dictionary); if (check != NULL) { return check; @@ -1737,7 +1686,7 @@ // Only special cases allow parallel defines and can use other thread's results // Other cases fall through, and may run into duplicate defines // caught by finding an entry in the SystemDictionary - if ((UnsyncloadClass || is_parallelDefine(class_loader)) && (probe->instance_klass() != NULL)) { + if (is_parallelDefine(class_loader) && (probe->instance_klass() != NULL)) { placeholders()->find_and_remove(p_index, p_hash, name_h, loader_data, PlaceholderTable::DEFINE_CLASS, THREAD); SystemDictionary_lock->notify_all(); #ifdef ASSERT @@ -2174,10 +2123,6 @@ //_box_klasses[T_OBJECT] = WK_KLASS(object_klass); //_box_klasses[T_ARRAY] = WK_KLASS(object_klass); - { // Compute whether we should use loadClass or loadClassInternal when loading classes. - Method* method = InstanceKlass::cast(ClassLoader_klass())->find_method(vmSymbols::loadClassInternal_name(), vmSymbols::string_class_signature()); - _has_loadClassInternal = (method != NULL); - } { // Compute whether we should use checkPackageAccess or NOT Method* method = InstanceKlass::cast(ClassLoader_klass())->find_method(vmSymbols::checkPackageAccess_name(), vmSymbols::class_protectiondomain_signature()); _has_checkPackageAccess = (method != NULL); diff --git a/src/hotspot/share/classfile/systemDictionary.hpp b/src/hotspot/share/classfile/systemDictionary.hpp --- a/src/hotspot/share/classfile/systemDictionary.hpp +++ b/src/hotspot/share/classfile/systemDictionary.hpp @@ -467,9 +467,6 @@ static void load_abstract_ownable_synchronizer_klass(TRAPS); protected: - // Tells whether ClassLoader.loadClassInternal is present - static bool has_loadClassInternal() { return _has_loadClassInternal; } - // Returns the class loader data to be used when looking up/updating the // system dictionary. static ClassLoaderData *class_loader_data(Handle class_loader) { @@ -746,7 +743,6 @@ static oop _java_system_loader; static oop _java_platform_loader; - static bool _has_loadClassInternal; static bool _has_checkPackageAccess; }; diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp --- a/src/hotspot/share/classfile/systemDictionaryShared.cpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,6 +48,7 @@ #include "oops/klass.inline.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" diff --git a/src/hotspot/share/classfile/verifier.cpp b/src/hotspot/share/classfile/verifier.cpp --- a/src/hotspot/share/classfile/verifier.cpp +++ b/src/hotspot/share/classfile/verifier.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,6 +45,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/os.hpp" #include "runtime/thread.hpp" diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp --- a/src/hotspot/share/classfile/vmSymbols.hpp +++ b/src/hotspot/share/classfile/vmSymbols.hpp @@ -358,10 +358,8 @@ template(reference_lock_name, "lock") \ template(reference_discovered_name, "discovered") \ template(run_finalization_name, "runFinalization") \ - template(run_finalizers_on_exit_name, "runFinalizersOnExit") \ template(dispatchUncaughtException_name, "dispatchUncaughtException") \ template(loadClass_name, "loadClass") \ - template(loadClassInternal_name, "loadClassInternal") \ template(get_name, "get") \ template(put_name, "put") \ template(type_name, "type") \ diff --git a/src/hotspot/share/code/compiledMethod.cpp b/src/hotspot/share/code/compiledMethod.cpp --- a/src/hotspot/share/code/compiledMethod.cpp +++ b/src/hotspot/share/code/compiledMethod.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -439,11 +439,11 @@ } void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) { - OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock); + OrderAccess::release_store(&_unloading_clock, unloading_clock); } unsigned char CompiledMethod::unloading_clock() { - return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock); + return OrderAccess::load_acquire(&_unloading_clock); } // Processing of oop references should have been sufficient to keep diff --git a/src/hotspot/share/code/debugInfo.cpp b/src/hotspot/share/code/debugInfo.cpp --- a/src/hotspot/share/code/debugInfo.cpp +++ b/src/hotspot/share/code/debugInfo.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
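The `_unloading_clock` change above drops the casts by letting `release_store`/`load_acquire` match the field's declared type; the ordering contract itself is unchanged. In std::atomic terms (a sketch, not HotSpot's OrderAccess implementation):

    #include <atomic>

    static std::atomic<unsigned char> unloading_clock{0};

    // Writer: everything stored before this release is visible to a reader
    // whose acquire load observes the new clock value.
    static void set_clock(unsigned char v) {
      unloading_clock.store(v, std::memory_order_release);
    }

    static unsigned char get_clock() {
      return unloading_clock.load(std::memory_order_acquire);
    }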
* * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/thread.hpp" // Constructors diff --git a/src/hotspot/share/code/dependencies.cpp b/src/hotspot/share/code/dependencies.cpp --- a/src/hotspot/share/code/dependencies.cpp +++ b/src/hotspot/share/code/dependencies.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ #include "oops/objArrayKlass.hpp" #include "runtime/handles.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/thread.inline.hpp" #include "utilities/copy.hpp" diff --git a/src/hotspot/share/code/dependencyContext.cpp b/src/hotspot/share/code/dependencyContext.cpp --- a/src/hotspot/share/code/dependencyContext.cpp +++ b/src/hotspot/share/code/dependencyContext.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -270,5 +270,5 @@ #endif //PRODUCT int nmethodBucket::decrement() { - return Atomic::add(-1, (volatile int *)&_count); + return Atomic::sub(1, &_count); } diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -44,6 +44,7 @@ #include "oops/oop.inline.hpp" #include "prims/jvmtiImpl.hpp" #include "runtime/atomic.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp" diff --git a/src/hotspot/share/code/oopRecorder.cpp b/src/hotspot/share/code/oopRecorder.cpp --- a/src/hotspot/share/code/oopRecorder.cpp +++ b/src/hotspot/share/code/oopRecorder.cpp @@ -29,6 +29,7 @@ #include "code/oopRecorder.hpp" #include "memory/allocation.inline.hpp" #include "oops/oop.inline.hpp" +#include "runtime/jniHandles.inline.hpp" #ifdef ASSERT template <class T> int ValueRecorder<T>::_find_index_calls = 0; diff --git a/src/hotspot/share/code/relocInfo_ext.cpp b/src/hotspot/share/code/relocInfo_ext.cpp --- a/src/hotspot/share/code/relocInfo_ext.cpp +++ b/src/hotspot/share/code/relocInfo_ext.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "code/codeCache.hpp" #include "code/relocInfo.hpp" #include "code/relocInfo_ext.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/cardTableModRefBS.hpp" #include "gc/shared/collectedHeap.hpp" #include "memory/universe.hpp" @@ -59,8 +60,9 @@ } case symbolic_Relocation::card_table_reference: { BarrierSet* bs = Universe::heap()->barrier_set(); - CardTableModRefBS* ct = (CardTableModRefBS*)bs; - return (address)ct->byte_map_base; + CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTable* ct = ctbs->card_table(); + return (address)ct->byte_map_base(); } case symbolic_Relocation::mark_bits_reference: { return (address)Universe::verify_mark_bits(); } diff --git a/src/hotspot/share/compiler/disassembler.cpp b/src/hotspot/share/compiler/disassembler.cpp --- a/src/hotspot/share/compiler/disassembler.cpp +++ b/src/hotspot/share/compiler/disassembler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,9 +23,11 @@ */ #include "precompiled.hpp" +#include "ci/ciUtilities.hpp" #include "classfile/javaClasses.hpp" #include "code/codeCache.hpp" #include "compiler/disassembler.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/cardTableModRefBS.hpp" #include "gc/shared/collectedHeap.hpp" #include "memory/resourceArea.hpp" @@ -318,7 +320,7 @@ BarrierSet* bs = Universe::heap()->barrier_set(); if (bs->is_a(BarrierSet::CardTableModRef) && - adr == (address)(barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base)) { + adr == ci_card_table_address_as<address>()) {
st->print("word_map_base"); if (WizardMode) st->print(" " INTPTR_FORMAT, p2i(adr)); return; diff --git a/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp b/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp --- a/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp +++ b/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp @@ -51,26 +51,3 @@ _space_alignment = _gen_alignment = (uintx)Generation::GenGrain; _heap_alignment = compute_heap_alignment(); } - -void ConcurrentMarkSweepPolicy::initialize_generations() { - _young_gen_spec = new GenerationSpec(Generation::ParNew, _initial_young_size, - _max_young_size, _gen_alignment); - _old_gen_spec = new GenerationSpec(Generation::ConcurrentMarkSweep, - _initial_old_size, _max_old_size, _gen_alignment); -} - -void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size, - size_t init_promo_size, - size_t init_survivor_size) { - double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0; - _size_policy = new AdaptiveSizePolicy(init_eden_size, - init_promo_size, - init_survivor_size, - max_gc_pause_sec, - GCTimeRatio); -} - -void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() { - // initialize the policy counters - 2 collectors, 2 generations - _gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 2); -} diff --git a/src/hotspot/share/gc/cms/cmsCollectorPolicy.hpp b/src/hotspot/share/gc/cms/cmsCollectorPolicy.hpp --- a/src/hotspot/share/gc/cms/cmsCollectorPolicy.hpp +++ b/src/hotspot/share/gc/cms/cmsCollectorPolicy.hpp @@ -30,18 +30,9 @@ class ConcurrentMarkSweepPolicy : public GenCollectorPolicy { protected: void initialize_alignments(); - void initialize_generations(); public: ConcurrentMarkSweepPolicy() {} - - ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return this; } - - void initialize_gc_policy_counters(); - - virtual void initialize_size_policy(size_t init_eden_size, - size_t init_promo_size, - size_t init_survivor_size); }; #endif // SHARE_VM_GC_CMS_CMSCOLLECTORPOLICY_HPP diff --git a/src/hotspot/share/gc/cms/cmsHeap.cpp b/src/hotspot/share/gc/cms/cmsHeap.cpp --- a/src/hotspot/share/gc/cms/cmsHeap.cpp +++ b/src/hotspot/share/gc/cms/cmsHeap.cpp @@ -64,7 +64,13 @@ }; CMSHeap::CMSHeap(GenCollectorPolicy *policy) : - GenCollectedHeap(policy), _eden_pool(NULL), _survivor_pool(NULL), _old_pool(NULL) { + GenCollectedHeap(policy, + Generation::ParNew, + Generation::ConcurrentMarkSweep, + "ParNew::CMS"), + _eden_pool(NULL), + _survivor_pool(NULL), + _old_pool(NULL) { _workers = new WorkGang("GC Thread", ParallelGCThreads, /* are_GC_task_threads */true, /* are_ConcurrentGC_threads */false); @@ -77,7 +83,6 @@ // If we are running CMS, create the collector responsible // for collecting the CMS generations.
- assert(collector_policy()->is_concurrent_mark_sweep_policy(), "must be CMS policy"); if (!create_cms_collector()) { return JNI_ENOMEM; } @@ -152,11 +157,10 @@ bool CMSHeap::create_cms_collector() { assert(old_gen()->kind() == Generation::ConcurrentMarkSweep, "Unexpected generation kinds"); - assert(gen_policy()->is_concurrent_mark_sweep_policy(), "Unexpected policy type"); CMSCollector* collector = new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(), rem_set(), - gen_policy()->as_concurrent_mark_sweep_policy()); + (ConcurrentMarkSweepPolicy*) gen_policy()); if (collector == NULL || !collector->completed_initialization()) { if (collector) { diff --git a/src/hotspot/share/gc/cms/cmsHeap.hpp b/src/hotspot/share/gc/cms/cmsHeap.hpp --- a/src/hotspot/share/gc/cms/cmsHeap.hpp +++ b/src/hotspot/share/gc/cms/cmsHeap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,10 +75,6 @@ // supports. Caller does not hold the Heap_lock on entry. void collect(GCCause::Cause cause); - bool card_mark_must_follow_store() const { - return true; - } - void stop(); void safepoint_synchronize_begin(); void safepoint_synchronize_end(); diff --git a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp --- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp +++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -88,9 +88,9 @@ _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1 "CompactibleFreeListSpace._dict_par_lock", true, Monitor::_safepoint_check_never), - _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord * + _rescan_task_size(CardTable::card_size_in_words * BitsPerWord * CMSRescanMultiple), - _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord * + _marking_task_size(CardTable::card_size_in_words * BitsPerWord * CMSConcMarkMultiple), _collector(NULL), _preconsumptionDirtyCardClosure(NULL) @@ -609,7 +609,7 @@ FreeListSpaceDCTOC(CompactibleFreeListSpace* sp, CMSCollector* collector, ExtendedOopClosure* cl, - CardTableModRefBS::PrecisionStyle precision, + CardTable::PrecisionStyle precision, HeapWord* boundary, bool parallel) : FilteringDCTOC(sp, cl, precision, boundary), @@ -693,7 +693,7 @@ DirtyCardToOopClosure* CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl, - CardTableModRefBS::PrecisionStyle precision, + CardTable::PrecisionStyle precision, HeapWord* boundary, bool parallel) { return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel); @@ -2828,7 +2828,7 @@ } const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const { - const size_t ergo_max = _old_gen->reserved().word_size() / (CardTableModRefBS::card_size_in_words * BitsPerWord); + const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord); return ergo_max; } @@ -2865,15 +2865,15 @@ // The "size" of each task is fixed according to rescan_task_size. 
assert(n_threads > 0, "Unexpected n_threads argument"); const size_t task_size = marking_task_size(); - assert(task_size > CardTableModRefBS::card_size_in_words && - (task_size % CardTableModRefBS::card_size_in_words == 0), + assert(task_size > CardTable::card_size_in_words && + (task_size % CardTable::card_size_in_words == 0), "Otherwise arithmetic below would be incorrect"); MemRegion span = _old_gen->reserved(); if (low != NULL) { if (span.contains(low)) { // Align low down to a card boundary so that // we can use block_offset_careful() on span boundaries. - HeapWord* aligned_low = align_down(low, CardTableModRefBS::card_size); + HeapWord* aligned_low = align_down(low, CardTable::card_size); // Clip span prefix at aligned_low span = span.intersection(MemRegion(aligned_low, span.end())); } else if (low > span.end()) { @@ -2881,7 +2881,7 @@ } // else use entire span } assert(span.is_empty() || - ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0), + ((uintptr_t)span.start() % CardTable::card_size == 0), "span should start at a card boundary"); size_t n_tasks = (span.word_size() + task_size - 1)/task_size; assert((n_tasks == 0) == span.is_empty(), "Inconsistency"); diff --git a/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp b/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp --- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp +++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "gc/cms/adaptiveFreeList.hpp" #include "gc/cms/promotionInfo.hpp" #include "gc/shared/blockOffsetTable.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/space.hpp" #include "logging/log.hpp" #include "memory/binaryTreeDictionary.hpp" @@ -432,7 +433,7 @@ // Override: provides a DCTO_CL specific to this kind of space. DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, - CardTableModRefBS::PrecisionStyle precision, + CardTable::PrecisionStyle precision, HeapWord* boundary, bool parallel); diff --git a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp --- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp +++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -300,8 +300,7 @@ } AdaptiveSizePolicy* CMSCollector::size_policy() { - CMSHeap* heap = CMSHeap::heap(); - return heap->gen_policy()->size_policy(); + return CMSHeap::heap()->size_policy(); } void ConcurrentMarkSweepGeneration::initialize_performance_counters() { @@ -449,7 +448,7 @@ _start_sampling(false), _between_prologue_and_epilogue(false), _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"), - _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize), + _modUnionTable((CardTable::card_shift - LogHeapWordSize), -1 /* lock-free */, "No_lock" /* dummy */), _modUnionClosurePar(&_modUnionTable), // Adjust my span to cover old (cms) gen @@ -901,7 +900,7 @@ // card size. MemRegion mr(start, align_up(start + obj_size, - CardTableModRefBS::card_size /* bytes */)); + CardTable::card_size /* bytes */)); if (par) { _modUnionTable.par_mark_range(mr); } else { @@ -1182,8 +1181,6 @@ // this is not likely to be productive in practice because it's probably too // late anyway. CMSHeap* heap = CMSHeap::heap(); - assert(heap->collector_policy()->is_generation_policy(), - "You may want to check the correctness of the following"); if (heap->incremental_collection_will_fail(true /* consult_young */)) { log.print("CMSCollector: collect because incremental collection will fail "); return true; @@ -1498,7 +1495,7 @@ max_eden_size, full, gc_cause, - heap->collector_policy()); + heap->soft_ref_policy()); // Reset the expansion cause, now that we just completed // a collection cycle. @@ -1890,7 +1887,7 @@ } // Should this be in gc_epilogue? - collector_policy()->counters()->update_counters(); + heap->counters()->update_counters(); { // Clear _foregroundGCShouldWait and, in the event that the @@ -3226,7 +3223,7 @@ if (sp->used_region().contains(_restart_addr)) { // Align down to a card boundary for the start of 0th task // for this space. 
- aligned_start = align_down(_restart_addr, CardTableModRefBS::card_size); + aligned_start = align_down(_restart_addr, CardTable::card_size); } size_t chunk_size = sp->marking_task_size(); @@ -4029,17 +4026,16 @@ startTimer(); sample_eden(); // Get and clear dirty region from card table - dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset( - MemRegion(nextAddr, endAddr), - true, - CardTableModRefBS::precleaned_card_val()); + dirtyRegion = _ct->dirty_card_range_after_reset(MemRegion(nextAddr, endAddr), + true, + CardTable::precleaned_card_val()); assert(dirtyRegion.start() >= nextAddr, "returned region inconsistent?"); } lastAddr = dirtyRegion.end(); numDirtyCards = - dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words; + dirtyRegion.word_size()/CardTable::card_size_in_words; if (!dirtyRegion.is_empty()) { stopTimer(); @@ -4053,7 +4049,7 @@ if (stop_point != NULL) { assert((_collectorState == AbortablePreclean && should_abort_preclean()), "Should only be AbortablePreclean."); - _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end())); + _ct->invalidate(MemRegion(stop_point, dirtyRegion.end())); if (should_abort_preclean()) { break; // out of preclean loop } else { @@ -4580,7 +4576,7 @@ SequentialSubTasksDone* pst = sp->conc_par_seq_tasks(); assert(pst->valid(), "Uninitialized use?"); uint nth_task = 0; - const int alignment = CardTableModRefBS::card_size * BitsPerWord; + const int alignment = CardTable::card_size * BitsPerWord; MemRegion span = sp->used_region(); HeapWord* start_addr = span.start(); HeapWord* end_addr = align_up(span.end(), alignment); @@ -4606,7 +4602,7 @@ // precleaned, and setting the corresponding bits in the mod union // table. Since we have been careful to partition at Card and MUT-word // boundaries no synchronization is needed between parallel threads. - _collector->_ct->ct_bs()->dirty_card_iterate(this_span, + _collector->_ct->dirty_card_iterate(this_span, &modUnionClosure); // Having transferred these marks into the modUnionTable, @@ -4917,16 +4913,14 @@ // mod union table. { ModUnionClosure modUnionClosure(&_modUnionTable); - _ct->ct_bs()->dirty_card_iterate( - _cmsGen->used_region(), - &modUnionClosure); + _ct->dirty_card_iterate(_cmsGen->used_region(), + &modUnionClosure); } // Having transferred these marks into the modUnionTable, we just need // to rescan the marked objects on the dirty cards in the modUnionTable. // The initial marking may have been done during an asynchronous // collection so there may be dirty bits in the mod-union table. - const int alignment = - CardTableModRefBS::card_size * BitsPerWord; + const int alignment = CardTable::card_size * BitsPerWord; { // ... 
First handle dirty cards in CMS gen markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace()); @@ -5551,7 +5545,7 @@ // already have the lock assert(_collectorState == Resetting, "just checking"); assert_lock_strong(bitMapLock()); - GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id()); + GCIdMark gc_id_mark(_cmsThread->gc_id()); _markBitMap.clear_all(); _collectorState = Idling; register_gc_end(); @@ -5636,9 +5630,9 @@ } assert(sz > 0, "size must be nonzero"); HeapWord* next_block = addr + sz; - HeapWord* next_card = align_up(next_block, CardTableModRefBS::card_size); - assert(align_down((uintptr_t)addr, CardTableModRefBS::card_size) < - align_down((uintptr_t)next_card, CardTableModRefBS::card_size), + HeapWord* next_card = align_up(next_block, CardTable::card_size); + assert(align_down((uintptr_t)addr, CardTable::card_size) < + align_down((uintptr_t)next_card, CardTable::card_size), "must be different cards"); return next_card; } @@ -6297,7 +6291,7 @@ assert(_markStack->isEmpty(), "would cause duplicates on stack"); assert(_span.contains(addr), "Out of bounds _finger?"); _finger = addr; - _threshold = align_up(_finger, CardTableModRefBS::card_size); + _threshold = align_up(_finger, CardTable::card_size); } // Should revisit to see if this should be restructured for @@ -6324,7 +6318,7 @@ // during the preclean or remark phase. (CMSCleanOnEnter) if (CMSCleanOnEnter) { size_t sz = _collector->block_size_using_printezis_bits(addr); - HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size); + HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size); MemRegion redirty_range = MemRegion(addr, end_card_addr); assert(!redirty_range.is_empty(), "Arithmetical tautology"); // Bump _threshold to end_card_addr; note that @@ -6411,9 +6405,9 @@ // _threshold is always kept card-aligned but _finger isn't // always card-aligned. HeapWord* old_threshold = _threshold; - assert(is_aligned(old_threshold, CardTableModRefBS::card_size), + assert(is_aligned(old_threshold, CardTable::card_size), "_threshold should always be card-aligned"); - _threshold = align_up(_finger, CardTableModRefBS::card_size); + _threshold = align_up(_finger, CardTable::card_size); MemRegion mr(old_threshold, _threshold); assert(!mr.is_empty(), "Control point invariant"); assert(_span.contains(mr), "Should clear within span"); @@ -6523,9 +6517,9 @@ // _threshold is always kept card-aligned but _finger isn't // always card-aligned. HeapWord* old_threshold = _threshold; - assert(is_aligned(old_threshold, CardTableModRefBS::card_size), + assert(is_aligned(old_threshold, CardTable::card_size), "_threshold should always be card-aligned"); - _threshold = align_up(_finger, CardTableModRefBS::card_size); + _threshold = align_up(_finger, CardTable::card_size); MemRegion mr(old_threshold, _threshold); assert(!mr.is_empty(), "Control point invariant"); assert(_span.contains(mr), "Should clear within span"); // _whole_span ?? @@ -6893,7 +6887,7 @@ // are required. 
if (obj->is_objArray()) { size_t sz = obj->size(); - HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size); + HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size); MemRegion redirty_range = MemRegion(addr, end_card_addr); assert(!redirty_range.is_empty(), "Arithmetical tautology"); _mod_union_table->mark_range(redirty_range); @@ -7006,15 +7000,15 @@ } void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) { - assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0, + assert(((size_t)mr.start())%CardTable::card_size_in_words == 0, "mr should be aligned to start at a card boundary"); // We'd like to assert: - // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0, + // assert(mr.word_size()%CardTable::card_size_in_words == 0, // "mr should be a range of cards"); // However, that would be too strong in one case -- the last // partition ends at _unallocated_block which, in general, can be // an arbitrary boundary, not necessarily card aligned. - _num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words; + _num_dirty_cards += mr.word_size()/CardTable::card_size_in_words; _space->object_iterate_mem(mr, &_scan_cl); } @@ -7623,7 +7617,7 @@ // table. if (obj->is_objArray()) { size_t sz = obj->size(); - HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size); + HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size); MemRegion redirty_range = MemRegion(addr, end_card_addr); assert(!redirty_range.is_empty(), "Arithmetical tautology"); _collector->_modUnionTable.mark_range(redirty_range); diff --git a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp --- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp +++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp @@ -77,7 +77,7 @@ // methods are used). This is essentially a wrapper around the BitMap class, // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map, // we have _shifter == 0. and for the mod union table we have -// shifter == CardTableModRefBS::card_shift - LogHeapWordSize.) +// shifter == CardTable::card_shift - LogHeapWordSize.) // XXX 64-bit issues in BitMap? class CMSBitMap VALUE_OBJ_CLASS_SPEC { friend class VMStructs; diff --git a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp --- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp +++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -448,7 +448,7 @@ // This is superfluous except at the end of the space; // we should do better than this XXX MemRegion mr2(mr.start(), align_up(mr.end(), - CardTableModRefBS::card_size /* bytes */)); + CardTable::card_size /* bytes */)); _t->mark_range(mr2); } @@ -457,7 +457,7 @@ // This is superfluous except at the end of the space; // we should do better than this XXX MemRegion mr2(mr.start(), align_up(mr.end(), - CardTableModRefBS::card_size /* bytes */)); + CardTable::card_size /* bytes */)); _t->par_mark_range(mr2); } diff --git a/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp b/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp --- a/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp +++ b/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,7 +36,7 @@ #include "runtime/orderAccess.inline.hpp" #include "runtime/vmThread.hpp" -void CardTableModRefBSForCTRS:: +void CardTableRS:: non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr, OopsInGenClosure* cl, CardTableRS* ct, @@ -82,7 +82,7 @@ } void -CardTableModRefBSForCTRS:: +CardTableRS:: process_stride(Space* sp, MemRegion used, jint stride, int n_strides, @@ -162,7 +162,7 @@ } void -CardTableModRefBSForCTRS:: +CardTableRS:: process_chunk_boundaries(Space* sp, DirtyCardToOopClosure* dcto_cl, MemRegion chunk_mr, @@ -371,7 +371,7 @@ } void -CardTableModRefBSForCTRS:: +CardTableRS:: get_LNC_array_for_space(Space* sp, jbyte**& lowest_non_clean, uintptr_t& lowest_non_clean_base_chunk_index, diff --git a/src/hotspot/share/gc/cms/parNewGeneration.cpp b/src/hotspot/share/gc/cms/parNewGeneration.cpp --- a/src/hotspot/share/gc/cms/parNewGeneration.cpp +++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp @@ -889,7 +889,7 @@ _gc_timer->register_gc_start(); - AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); + AdaptiveSizePolicy* size_policy = gch->size_policy(); WorkGang* workers = gch->workers(); assert(workers != NULL, "Need workgang for parallel work"); uint active_workers = @@ -1490,4 +1490,3 @@ SharedRestorePreservedMarksTaskExecutor task_executor(CMSHeap::heap()->workers()); _preserved_marks_set.restore(&task_executor); } - diff --git a/src/hotspot/share/gc/g1/collectionSetChooser.cpp b/src/hotspot/share/gc/g1/collectionSetChooser.cpp --- a/src/hotspot/share/gc/g1/collectionSetChooser.cpp +++ b/src/hotspot/share/gc/g1/collectionSetChooser.cpp @@ -138,7 +138,7 @@ G1PrintRegionLivenessInfoClosure cl("Post-Sorting"); for (uint i = 0; i < _end; ++i) { HeapRegion* r = regions_at(i); - cl.doHeapRegion(r); + cl.do_heap_region(r); } } verify(); @@ -220,7 +220,7 @@ _g1h(G1CollectedHeap::heap()), _cset_updater(hrSorted, true /* parallel */, chunk_size) { } - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { // Do we have any marking information for this region? 
if (r->is_marked()) { // We will skip any region that's currently used as an old GC diff --git a/src/hotspot/share/gc/g1/concurrentMarkThread.cpp b/src/hotspot/share/gc/g1/concurrentMarkThread.cpp --- a/src/hotspot/share/gc/g1/concurrentMarkThread.cpp +++ b/src/hotspot/share/gc/g1/concurrentMarkThread.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -268,8 +268,6 @@ cm()->concurrent_cycle_start(); - assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC."); - GCTraceConcTime(Info, gc) tt("Concurrent Cycle"); { ResourceMark rm; diff --git a/src/hotspot/share/gc/g1/g1AllocationContext.hpp b/src/hotspot/share/gc/g1/g1AllocationContext.hpp --- a/src/hotspot/share/gc/g1/g1AllocationContext.hpp +++ b/src/hotspot/share/gc/g1/g1AllocationContext.hpp @@ -41,12 +41,4 @@ } }; -class AllocationContextStats: public StackObj { -public: - inline void clear() { } - inline void update(bool full_gc) { } - inline void update_after_mark() { } - inline bool available() { return false; } -}; - #endif // SHARE_VM_GC_G1_G1ALLOCATIONCONTEXT_HPP diff --git a/src/hotspot/share/gc/g1/g1Allocator.cpp b/src/hotspot/share/gc/g1/g1Allocator.cpp --- a/src/hotspot/share/gc/g1/g1Allocator.cpp +++ b/src/hotspot/share/gc/g1/g1Allocator.cpp @@ -134,9 +134,6 @@ _old_is_full = true; } -G1PLAB::G1PLAB(size_t gclab_word_size) : - PLAB(gclab_word_size), _retired(true) { } - size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) { // Return the remaining space in the cur alloc region, but not less than // the min TLAB size. @@ -253,7 +250,7 @@ if ((required_in_plab <= plab_word_size) && may_throw_away_buffer(required_in_plab, plab_word_size)) { - G1PLAB* alloc_buf = alloc_buffer(dest, context); + PLAB* alloc_buf = alloc_buffer(dest, context); alloc_buf->retire(); size_t actual_plab_size = 0; @@ -304,7 +301,7 @@ void G1DefaultPLABAllocator::flush_and_retire_stats() { for (uint state = 0; state < InCSetState::Num; state++) { - G1PLAB* const buf = _alloc_buffers[state]; + PLAB* const buf = _alloc_buffers[state]; if (buf != NULL) { G1EvacStats* stats = _g1h->alloc_buffer_stats(state); buf->flush_and_retire_stats(stats); @@ -318,7 +315,7 @@ wasted = 0; undo_wasted = 0; for (uint state = 0; state < InCSetState::Num; state++) { - G1PLAB * const buf = _alloc_buffers[state]; + PLAB * const buf = _alloc_buffers[state]; if (buf != NULL) { wasted += buf->waste(); undo_wasted += buf->undo_waste(); diff --git a/src/hotspot/share/gc/g1/g1Allocator.hpp b/src/hotspot/share/gc/g1/g1Allocator.hpp --- a/src/hotspot/share/gc/g1/g1Allocator.hpp +++ b/src/hotspot/share/gc/g1/g1Allocator.hpp @@ -178,39 +178,6 @@ } }; -class G1PLAB: public PLAB { -private: - bool _retired; - -public: - G1PLAB(size_t gclab_word_size); - virtual ~G1PLAB() { - guarantee(_retired, "Allocation buffer has not been retired"); - } - - // The amount of space in words wasted within the PLAB including - // waste due to refills and alignment. 
- size_t wasted() const { return _wasted; } - - virtual void set_buf(HeapWord* buf, size_t word_size) { - PLAB::set_buf(buf, word_size); - _retired = false; - } - - virtual void retire() { - if (_retired) { - return; - } - PLAB::retire(); - _retired = true; - } - - virtual void flush_and_retire_stats(PLABStats* stats) { - PLAB::flush_and_retire_stats(stats); - _retired = true; - } -}; - // Manages the PLABs used during garbage collection. Interface for allocation from PLABs. // Needs to handle multiple contexts, extra alignment in any "survivor" area and some // statistics. @@ -231,7 +198,7 @@ size_t _direct_allocated[InCSetState::Num]; virtual void flush_and_retire_stats() = 0; - virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0; + virtual PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0; // Calculate the survivor space object alignment in bytes. Returns that or 0 if // there are no restrictions on survivor alignment. @@ -292,14 +259,14 @@ // The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor // and old generation allocation. class G1DefaultPLABAllocator : public G1PLABAllocator { - G1PLAB _surviving_alloc_buffer; - G1PLAB _tenured_alloc_buffer; - G1PLAB* _alloc_buffers[InCSetState::Num]; + PLAB _surviving_alloc_buffer; + PLAB _tenured_alloc_buffer; + PLAB* _alloc_buffers[InCSetState::Num]; public: G1DefaultPLABAllocator(G1Allocator* _allocator); - virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) { + virtual PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) { assert(dest.is_valid(), "Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()); assert(_alloc_buffers[dest.value()] != NULL, diff --git a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp --- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp +++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp @@ -47,7 +47,7 @@ inline HeapWord* G1PLABAllocator::plab_allocate(InCSetState dest, size_t word_sz, AllocationContext_t context) { - G1PLAB* buffer = alloc_buffer(dest, context); + PLAB* buffer = alloc_buffer(dest, context); if (_survivor_alignment_bytes == 0 || !dest.is_young()) { return buffer->allocate(word_sz); } else { diff --git a/src/hotspot/share/gc/g1/g1Arguments.cpp b/src/hotspot/share/gc/g1/g1Arguments.cpp --- a/src/hotspot/share/gc/g1/g1Arguments.cpp +++ b/src/hotspot/share/gc/g1/g1Arguments.cpp @@ -40,9 +40,6 @@ void G1Arguments::initialize_flags() { GCArguments::initialize_flags(); assert(UseG1GC, "Error"); -#if defined(COMPILER1) || INCLUDE_JVMCI - FastTLABRefill = false; -#endif FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads()); if (ParallelGCThreads == 0) { assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0."); diff --git a/src/hotspot/share/gc/g1/g1CardCounts.cpp b/src/hotspot/share/gc/g1/g1CardCounts.cpp --- a/src/hotspot/share/gc/g1/g1CardCounts.cpp +++ b/src/hotspot/share/gc/g1/g1CardCounts.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,12 +40,12 @@ size_t G1CardCounts::compute_size(size_t mem_region_size_in_words) { // We keep card counts for every card, so the size of the card counts table must // be the same as the card table. - return G1SATBCardTableLoggingModRefBS::compute_size(mem_region_size_in_words); + return G1CardTable::compute_size(mem_region_size_in_words); } size_t G1CardCounts::heap_map_factor() { // See G1CardCounts::compute_size() why we reuse the card table value. - return G1SATBCardTableLoggingModRefBS::heap_map_factor(); + return G1CardTable::heap_map_factor(); } void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) { @@ -72,8 +72,8 @@ // threshold limit is no more than this. guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity"); - _ct_bs = _g1h->g1_barrier_set(); - _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start()); + _ct = _g1h->card_table(); + _ct_bot = _ct->byte_for_const(_g1h->reserved_region().start()); _card_counts = (jubyte*) mapper->reserved().start(); _reserved_max_card_num = mapper->reserved().byte_size(); @@ -116,17 +116,17 @@ void G1CardCounts::clear_range(MemRegion mr) { if (has_count_table()) { - const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start()); + const jbyte* from_card_ptr = _ct->byte_for_const(mr.start()); // We use the last address in the range as the range could represent the // last region in the heap. In which case trying to find the card will be an // OOB access to the card table. - const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last()); + const jbyte* last_card_ptr = _ct->byte_for_const(mr.last()); #ifdef ASSERT - HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr); + HeapWord* start_addr = _ct->addr_for(from_card_ptr); assert(start_addr == mr.start(), "MemRegion start must be aligned to a card."); - HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr); - assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card."); + HeapWord* last_addr = _ct->addr_for(last_card_ptr); + assert((last_addr + G1CardTable::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card."); #endif // ASSERT // Clear the counts for the (exclusive) card range. @@ -144,7 +144,7 @@ HeapRegionClosure(), _card_counts(card_counts) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { _card_counts->clear_region(r); return false; } diff --git a/src/hotspot/share/gc/g1/g1CardCounts.hpp b/src/hotspot/share/gc/g1/g1CardCounts.hpp --- a/src/hotspot/share/gc/g1/g1CardCounts.hpp +++ b/src/hotspot/share/gc/g1/g1CardCounts.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_G1_G1CARDCOUNTS_HPP #define SHARE_VM_GC_G1_G1CARDCOUNTS_HPP +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1RegionToSpaceMapper.hpp" #include "memory/allocation.hpp" #include "memory/virtualspace.hpp" @@ -56,6 +57,7 @@ G1CardCountsMappingChangedListener _listener; G1CollectedHeap* _g1h; + G1CardTable* _ct; // The table of counts jubyte* _card_counts; @@ -66,9 +68,6 @@ // CardTable bottom. 
const jbyte* _ct_bot; - // Barrier set - CardTableModRefBS* _ct_bs; - // Returns true if the card counts table has been reserved. bool has_reserved_count_table() { return _card_counts != NULL; } diff --git a/src/hotspot/share/gc/g1/g1CardLiveData.cpp b/src/hotspot/share/gc/g1/g1CardLiveData.cpp --- a/src/hotspot/share/gc/g1/g1CardLiveData.cpp +++ b/src/hotspot/share/gc/g1/g1CardLiveData.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,10 +68,10 @@ assert(max_capacity % num_max_regions == 0, "Given capacity must be evenly divisible by region size."); size_t region_size = max_capacity / num_max_regions; - assert(region_size % (G1SATBCardTableModRefBS::card_size * BitsPerWord) == 0, + assert(region_size % (G1CardTable::card_size * BitsPerWord) == 0, "Region size must be evenly divisible by area covered by a single word."); _max_capacity = max_capacity; - _cards_per_region = region_size / G1SATBCardTableModRefBS::card_size; + _cards_per_region = region_size / G1CardTable::card_size; _live_regions_size_in_bits = live_region_bitmap_size_in_bits(); _live_regions = allocate_large_bitmap(_live_regions_size_in_bits); @@ -85,11 +85,11 @@ } size_t G1CardLiveData::live_region_bitmap_size_in_bits() const { - return _max_capacity / (_cards_per_region << G1SATBCardTableModRefBS::card_shift); + return _max_capacity / (_cards_per_region << G1CardTable::card_shift); } size_t G1CardLiveData::live_card_bitmap_size_in_bits() const { - return _max_capacity >> G1SATBCardTableModRefBS::card_shift; + return _max_capacity >> G1CardTable::card_shift; } // Helper class that provides functionality to generate the Live Data Count @@ -132,7 +132,7 @@ void clear_card_bitmap_range(HeapWord* start, HeapWord* end) { BitMap::idx_t start_idx = card_live_bitmap_index_for(start); - BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size)); + BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size)); _card_bm.clear_range(start_idx, end_idx); } @@ -140,7 +140,7 @@ // Mark the card liveness bitmap for the object spanning from start to end. void mark_card_bitmap_range(HeapWord* start, HeapWord* end) { BitMap::idx_t start_idx = card_live_bitmap_index_for(start); - BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size)); + BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size)); assert((end_idx - start_idx) > 0, "Trying to mark zero sized range."); @@ -168,7 +168,7 @@ // by the card shift -- address 0 corresponds to card number 0. One // must subtract the card num of the bottom of the heap to obtain a // card table index. - BitMap::idx_t card_num = uintptr_t(addr) >> CardTableModRefBS::card_shift; + BitMap::idx_t card_num = uintptr_t(addr) >> G1CardTable::card_shift; return card_num - _heap_card_bias; } @@ -262,7 +262,7 @@ // Calculate the card number for the bottom of the heap. Used // in biasing indexes into the accounting card bitmaps. 
_heap_card_bias = - uintptr_t(base_address) >> CardTableModRefBS::card_shift; + uintptr_t(base_address) >> G1CardTable::card_shift; } }; @@ -285,7 +285,7 @@ _mark_bitmap(mark_bitmap), _cm(cm) { } - bool doHeapRegion(HeapRegion* hr) { + bool do_heap_region(HeapRegion* hr) { size_t marked_bytes = _helper.mark_marked_during_marking(_mark_bitmap, hr); if (marked_bytes > 0) { hr->add_to_marked_bytes(marked_bytes); @@ -352,7 +352,7 @@ _helper(live_data, g1h->reserved_region().start()), _gc_timestamp_at_create(live_data->gc_timestamp_at_create()) { } - bool doHeapRegion(HeapRegion* hr) { + bool do_heap_region(HeapRegion* hr) { if (has_been_reclaimed(hr)) { _helper.reset_live_data(hr); } @@ -478,7 +478,7 @@ int failures() const { return _failures; } - bool doHeapRegion(HeapRegion* hr) { + bool do_heap_region(HeapRegion* hr) { int failures = 0; // Walk the marking bitmap for this region and set the corresponding bits diff --git a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp b/src/hotspot/share/gc/g1/g1CardTable.cpp copy from src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp copy to src/hotspot/share/gc/g1/g1CardTable.cpp --- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp +++ b/src/hotspot/share/gc/g1/g1CardTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,64 +23,14 @@ */ #include "precompiled.hpp" +#include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" -#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp" -#include "gc/g1/heapRegion.hpp" -#include "gc/g1/satbMarkQueue.hpp" #include "gc/shared/memset_with_concurrent_readers.hpp" #include "logging/log.hpp" -#include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" -#include "runtime/mutexLocker.hpp" #include "runtime/orderAccess.inline.hpp" -#include "runtime/thread.inline.hpp" -G1SATBCardTableModRefBS::G1SATBCardTableModRefBS( - MemRegion whole_heap, - const BarrierSet::FakeRtti& fake_rtti) : - CardTableModRefBS(whole_heap, fake_rtti.add_tag(BarrierSet::G1SATBCT)) -{ } - -void G1SATBCardTableModRefBS::enqueue(oop pre_val) { - // Nulls should have been already filtered. 
-  assert(oopDesc::is_oop(pre_val, true), "Error");
-
-  if (!JavaThread::satb_mark_queue_set().is_active()) return;
-  Thread* thr = Thread::current();
-  if (thr->is_Java_thread()) {
-    JavaThread* jt = (JavaThread*)thr;
-    jt->satb_mark_queue().enqueue(pre_val);
-  } else {
-    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
-    JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
-  }
-}
-
-template <class T> void
-G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
-  if (!JavaThread::satb_mark_queue_set().is_active()) return;
-  T* elem_ptr = dst;
-  for (int i = 0; i < count; i++, elem_ptr++) {
-    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
-    if (!oopDesc::is_null(heap_oop)) {
-      enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
-    }
-  }
-}
-
-void G1SATBCardTableModRefBS::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
-  if (!dest_uninitialized) {
-    write_ref_array_pre_work(dst, count);
-  }
-}
-
-void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
-  if (!dest_uninitialized) {
-    write_ref_array_pre_work(dst, count);
-  }
-}
-
-bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
+bool G1CardTable::mark_card_deferred(size_t card_index) {
   jbyte val = _byte_map[card_index];
   // It's already processed
   if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
@@ -102,7 +52,7 @@
   return true;
 }

-void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
+void G1CardTable::g1_mark_as_young(const MemRegion& mr) {
   jbyte *const first = byte_for(mr.start());
   jbyte *const last = byte_after(mr.last());

@@ -110,27 +60,18 @@
 }

 #ifndef PRODUCT
-void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
+void G1CardTable::verify_g1_young_region(MemRegion mr) {
   verify_region(mr, g1_young_gen, true);
 }
 #endif

-void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
+void G1CardTableChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
   // Default value for a clean card on the card table is -1. So we cannot take advantage of the zero_filled parameter.
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords); _card_table->clear(mr); } -G1SATBCardTableLoggingModRefBS:: -G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) : - G1SATBCardTableModRefBS(whole_heap, BarrierSet::FakeRtti(G1SATBCTLogging)), - _dcqs(JavaThread::dirty_card_queue_set()), - _listener() -{ - _listener.set_card_table(this); -} - -void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) { +void G1CardTable::initialize(G1RegionToSpaceMapper* mapper) { mapper->set_mapping_changed_listener(&_listener); _byte_map_size = mapper->reserved().byte_size(); @@ -145,76 +86,17 @@ _covered[0] = _whole_heap; _byte_map = (jbyte*) mapper->reserved().start(); - byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); + _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map"); assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map"); - log_trace(gc, barrier)("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: "); + log_trace(gc, barrier)("G1CardTable::G1CardTable: "); log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT, p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index])); - log_trace(gc, barrier)(" byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base)); + log_trace(gc, barrier)(" _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base)); } -void G1SATBCardTableLoggingModRefBS::write_ref_field_post_slow(volatile jbyte* byte) { - // In the slow path, we know a card is not young - assert(*byte != g1_young_gen, "slow path invoked without filtering"); - OrderAccess::storeload(); - if (*byte != dirty_card) { - *byte = dirty_card; - Thread* thr = Thread::current(); - if (thr->is_Java_thread()) { - JavaThread* jt = (JavaThread*)thr; - jt->dirty_card_queue().enqueue(byte); - } else { - MutexLockerEx x(Shared_DirtyCardQ_lock, - Mutex::_no_safepoint_check_flag); - _dcqs.shared_dirty_card_queue()->enqueue(byte); - } - } +bool G1CardTable::is_in_young(oop obj) const { + volatile jbyte* p = byte_for(obj); + return *p == G1CardTable::g1_young_card_val(); } - -void -G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) { - if (mr.is_empty()) { - return; - } - volatile jbyte* byte = byte_for(mr.start()); - jbyte* last_byte = byte_for(mr.last()); - Thread* thr = Thread::current(); - // skip all consecutive young cards - for (; byte <= last_byte && *byte == g1_young_gen; byte++); - - if (byte <= last_byte) { - OrderAccess::storeload(); - // Enqueue if necessary. 
- if (thr->is_Java_thread()) { - JavaThread* jt = (JavaThread*)thr; - for (; byte <= last_byte; byte++) { - if (*byte == g1_young_gen) { - continue; - } - if (*byte != dirty_card) { - *byte = dirty_card; - jt->dirty_card_queue().enqueue(byte); - } - } - } else { - MutexLockerEx x(Shared_DirtyCardQ_lock, - Mutex::_no_safepoint_check_flag); - for (; byte <= last_byte; byte++) { - if (*byte == g1_young_gen) { - continue; - } - if (*byte != dirty_card) { - *byte = dirty_card; - _dcqs.shared_dirty_card_queue()->enqueue(byte); - } - } - } - } -} - -void G1SATBCardTableModRefBS::keep_alive_barrier(oop obj) { - G1SATBCardTableModRefBS::enqueue(obj); -} - diff --git a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp b/src/hotspot/share/gc/g1/g1CardTable.hpp copy from src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp copy to src/hotspot/share/gc/g1/g1CardTable.hpp --- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp +++ b/src/hotspot/share/gc/g1/g1CardTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,46 +22,47 @@ * */ -#ifndef SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_HPP -#define SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_HPP +#ifndef SHARE_VM_GC_G1_G1CARDTABLE_HPP +#define SHARE_VM_GC_G1_G1CARDTABLE_HPP #include "gc/g1/g1RegionToSpaceMapper.hpp" -#include "gc/shared/cardTableModRefBS.hpp" -#include "memory/memRegion.hpp" -#include "oops/oop.hpp" +#include "gc/shared/cardTable.hpp" +#include "oops/oopsHierarchy.hpp" #include "utilities/macros.hpp" -class DirtyCardQueueSet; -class G1SATBCardTableLoggingModRefBS; +class G1CardTable; +class G1RegionToSpaceMapper; -// This barrier is specialized to use a logging barrier to support -// snapshot-at-the-beginning marking. +class G1CardTableChangedListener : public G1MappingChangedListener { + private: + G1CardTable* _card_table; + public: + G1CardTableChangedListener() : _card_table(NULL) { } -class G1SATBCardTableModRefBS: public CardTableModRefBS { + void set_card_table(G1CardTable* card_table) { _card_table = card_table; } + + virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled); +}; + +class G1CardTable: public CardTable { friend class VMStructs; -protected: + friend class G1CardTableChangedListener; + + G1CardTableChangedListener _listener; + enum G1CardValues { g1_young_gen = CT_MR_BS_last_reserved << 1 }; - G1SATBCardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti); - ~G1SATBCardTableModRefBS() { } +public: + G1CardTable(MemRegion whole_heap): CardTable(whole_heap, /* scanned concurrently */ true), _listener() { + _listener.set_card_table(this); + } + bool is_card_dirty(size_t card_index) { + return _byte_map[card_index] == dirty_card_val(); + } -public: - static int g1_young_card_val() { return g1_young_gen; } - - // Add "pre_val" to a set of objects that may have been disconnected from the - // pre-marking object graph. 
-  static void enqueue(oop pre_val);
-
-  static void enqueue_if_weak_or_archive(DecoratorSet decorators, oop value);
-
-  template <class T> void write_ref_array_pre_work(T* dst, int count);
-  virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
-  virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
-
-  template <DecoratorSet decorators, typename T>
-  void write_ref_field_pre(T* field);
+  static jbyte g1_young_card_val() { return g1_young_gen; }

 /* Claimed and deferred bits are used together in G1 during the evacuation
@@ -93,99 +94,20 @@
     return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
   }

-  virtual void keep_alive_barrier(oop obj);
-};
-
-template<>
-struct BarrierSet::GetName<G1SATBCardTableModRefBS> {
-  static const BarrierSet::Name value = BarrierSet::G1SATBCT;
-};
-
-template<>
-struct BarrierSet::GetType<BarrierSet::G1SATBCT> {
-  typedef G1SATBCardTableModRefBS type;
-};
-
-class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
- private:
-  G1SATBCardTableLoggingModRefBS* _card_table;
- public:
-  G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }
-
-  void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }
-
-  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
-};
-
-// Adds card-table logging to the post-barrier.
-// Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
-class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
-  friend class G1SATBCardTableLoggingModRefBSChangedListener;
- private:
-  G1SATBCardTableLoggingModRefBSChangedListener _listener;
-  DirtyCardQueueSet& _dcqs;
-
- public:
   static size_t compute_size(size_t mem_region_size_in_words) {
     size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
     return ReservedSpace::allocation_align_size_up(number_of_slots);
   }

   // Returns how many bytes of the heap a single byte of the Card Table corresponds to.
-  static size_t heap_map_factor() {
-    return CardTableModRefBS::card_size;
-  }
+  static size_t heap_map_factor() { return card_size; }

-  G1SATBCardTableLoggingModRefBS(MemRegion whole_heap);
-
-  virtual void initialize() { }
-  virtual void initialize(G1RegionToSpaceMapper* mapper);
+  void initialize() {}
+  void initialize(G1RegionToSpaceMapper* mapper);

   virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }

-  // NB: if you do a whole-heap invalidation, the "usual invariant" defined
-  // above no longer applies.
-  void invalidate(MemRegion mr);
-
-  void write_region_work(MemRegion mr)    { invalidate(mr); }
-  void write_ref_array_work(MemRegion mr) { invalidate(mr); }
-
-  template <DecoratorSet decorators, typename T>
-  void write_ref_field_post(T* field, oop new_val);
-  void write_ref_field_post_slow(volatile jbyte* byte);
-
-  // Callbacks for runtime accesses.
-  template <DecoratorSet decorators, typename BarrierSetT = G1SATBCardTableLoggingModRefBS>
-  class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {
-    typedef ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> ModRef;
-    typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
-
-  public:
-    // Needed for loads on non-heap weak references
-    template <typename T>
-    static oop oop_load_not_in_heap(T* addr);
-
-    // Needed for non-heap stores
-    template <typename T>
-    static void oop_store_not_in_heap(T* addr, oop new_value);
-
-    // Needed for weak references
-    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
-
-    // Defensive: will catch weak oops at addresses in heap
-    template <typename T>
-    static oop oop_load_in_heap(T* addr);
-  };
+  virtual bool is_in_young(oop obj) const;
 };

-template<>
-struct BarrierSet::GetName<G1SATBCardTableLoggingModRefBS> {
-  static const BarrierSet::Name value = BarrierSet::G1SATBCTLogging;
-};
-
-template<>
-struct BarrierSet::GetType<BarrierSet::G1SATBCTLogging> {
-  typedef G1SATBCardTableLoggingModRefBS type;
-};
-
-#endif // SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_HPP
+#endif // SHARE_VM_GC_G1_G1CARDTABLE_HPP
diff --git a/src/hotspot/share/gc/g1/g1CardTable.inline.hpp b/src/hotspot/share/gc/g1/g1CardTable.inline.hpp
new file mode 100644
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1CardTable.inline.hpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#ifndef SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP +#define SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP + +#include "gc/g1/g1CardTable.hpp" + +void G1CardTable::set_card_claimed(size_t card_index) { + jbyte val = _byte_map[card_index]; + if (val == clean_card_val()) { + val = (jbyte)claimed_card_val(); + } else { + val |= (jbyte)claimed_card_val(); + } + _byte_map[card_index] = val; +} + +#endif // SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -52,6 +52,7 @@ #include "gc/g1/g1RemSet.hpp" #include "gc/g1/g1RootClosures.hpp" #include "gc/g1/g1RootProcessor.hpp" +#include "gc/g1/g1SATBCardTableModRefBS.hpp" #include "gc/g1/g1StringDedup.hpp" #include "gc/g1/g1YCTypes.hpp" #include "gc/g1/g1YoungRemSetSamplingThread.hpp" @@ -59,6 +60,7 @@ #include "gc/g1/heapRegionRemSet.hpp" #include "gc/g1/heapRegionSet.inline.hpp" #include "gc/g1/vm_operations_g1.hpp" +#include "gc/shared/adaptiveSizePolicy.hpp" #include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcId.hpp" #include "gc/shared/gcLocker.inline.hpp" @@ -102,10 +104,10 @@ private: size_t _num_dirtied; G1CollectedHeap* _g1h; - G1SATBCardTableLoggingModRefBS* _g1_bs; + G1CardTable* _g1_ct; HeapRegion* region_for_card(jbyte* card_ptr) const { - return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr)); + return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr)); } bool will_become_free(HeapRegion* hr) const { @@ -116,14 +118,14 @@ public: RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(), - _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { } + _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { } bool do_card_ptr(jbyte* card_ptr, uint worker_i) { HeapRegion* hr = region_for_card(card_ptr); // Should only dirty cards in regions that won't be freed. if (!will_become_free(hr)) { - *card_ptr = CardTableModRefBS::dirty_card_val(); + *card_ptr = G1CardTable::dirty_card_val(); _num_dirtied++; } @@ -1010,7 +1012,7 @@ private: G1HRPrinter* _hr_printer; public: - bool doHeapRegion(HeapRegion* hr) { + bool do_heap_region(HeapRegion* hr) { assert(!hr->is_young(), "not expecting to find young regions"); _hr_printer->post_compaction(hr); return false; @@ -1168,7 +1170,7 @@ } const bool do_clear_all_soft_refs = clear_all_soft_refs || - collector_policy()->should_clear_all_soft_refs(); + soft_ref_policy()->should_clear_all_soft_refs(); G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs); GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true); @@ -1343,7 +1345,7 @@ return result; } - assert(!collector_policy()->should_clear_all_soft_refs(), + assert(!soft_ref_policy()->should_clear_all_soft_refs(), "Flag should have been handled and cleared prior to this point"); // What else? We might try synchronous finalization later. If the total @@ -1463,6 +1465,8 @@ CollectedHeap(), _young_gen_sampling_thread(NULL), _collector_policy(collector_policy), + _soft_ref_policy(), + _card_table(NULL), _memory_manager("G1 Young Generation", "end of minor GC"), _full_gc_memory_manager("G1 Old Generation", "end of major GC"), _eden_pool(NULL), @@ -1573,7 +1577,6 @@ } jint G1CollectedHeap::initialize() { - CollectedHeap::pre_initialize(); os::enable_vtime(); // Necessary to satisfy locking discipline assertions. 
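Aside (illustration only, not part of the patch): the branch in the new G1CardTable::set_card_claimed above exists because, with the usual HotSpot card values from cardTableModRefBS.hpp (clean_card is -1, i.e. all bits set, and the claimed bit is 2 -- assumed values here), OR-ing the claimed bit into a clean card would be a no-op. A minimal standalone sketch of that arithmetic:

    #include <cstdio>

    typedef signed char jbyte;

    static const jbyte clean_card   = -1; // all bits set (assumed value)
    static const jbyte claimed_card = 2;  // the claimed bit (assumed value)

    static jbyte set_card_claimed(jbyte val) {
      if (val == clean_card) {
        val = claimed_card;   // must assign: -1 | 2 == -1 would lose the claim
      } else {
        val |= claimed_card;  // keep dirty/deferred bits, add the claimed bit
      }
      return val;
    }

    int main() {
      printf("%d\n", set_card_claimed(clean_card)); // 2: clean card becomes claimed
      printf("%d\n", set_card_claimed(0));          // 2: dirty (0) card gains the bit
      return 0;
    }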
@@ -1615,11 +1618,13 @@ initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size())); // Create the barrier set for the entire reserved region. - G1SATBCardTableLoggingModRefBS* bs - = new G1SATBCardTableLoggingModRefBS(reserved_region()); + G1CardTable* ct = new G1CardTable(reserved_region()); + ct->initialize(); + G1SATBCardTableLoggingModRefBS* bs = new G1SATBCardTableLoggingModRefBS(ct); bs->initialize(); assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity"); set_barrier_set(bs); + _card_table = ct; // Create the hot card cache. _hot_card_cache = new G1HotCardCache(this); @@ -1650,8 +1655,8 @@ G1RegionToSpaceMapper* cardtable_storage = create_aux_memory_mapper("Card Table", - G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize), - G1SATBCardTableLoggingModRefBS::heap_map_factor()); + G1CardTable::compute_size(g1_rs.size() / HeapWordSize), + G1CardTable::heap_map_factor()); G1RegionToSpaceMapper* card_counts_storage = create_aux_memory_mapper("Card Counts Table", @@ -1665,7 +1670,7 @@ create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor()); _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage); - g1_barrier_set()->initialize(cardtable_storage); + _card_table->initialize(cardtable_storage); // Do later initialization work for concurrent refinement. _hot_card_cache->initialize(card_counts_storage); @@ -1675,7 +1680,7 @@ guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); // Also create a G1 rem set. - _g1_rem_set = new G1RemSet(this, g1_barrier_set(), _hot_card_cache); + _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache); _g1_rem_set->initialize(max_capacity(), max_regions()); size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; @@ -1894,6 +1899,10 @@ return _collector_policy; } +SoftRefPolicy* G1CollectedHeap::soft_ref_policy() { + return &_soft_ref_policy; +} + size_t G1CollectedHeap::capacity() const { return _hrm.length() * HeapRegion::GrainBytes; } @@ -1917,7 +1926,7 @@ CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) : _gc_time_stamp(gc_time_stamp), _failures(false) { } - virtual bool doHeapRegion(HeapRegion* hr) { + virtual bool do_heap_region(HeapRegion* hr) { unsigned region_gc_time_stamp = hr->get_gc_time_stamp(); if (_gc_time_stamp != region_gc_time_stamp) { log_error(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr), @@ -1969,7 +1978,7 @@ size_t _used; public: SumUsedClosure() : _used(0) {} - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { _used += r->used(); return false; } @@ -1990,7 +1999,6 @@ switch (cause) { case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent; case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent; - case GCCause::_update_allocation_context_stats_inc: return true; case GCCause::_wb_conc_mark: return true; default : return false; } @@ -2188,7 +2196,7 @@ ObjectClosure* _cl; public: IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { if (!r->is_continues_humongous()) { r->object_iterate(_cl); } @@ -2303,7 +2311,7 @@ outputStream* _st; public: PrintRegionClosure(outputStream* st) : _st(st) {} - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { r->print_on(_st); return false; } @@ -2422,7 +2430,7 @@ size_t _occupied_sum; public: - 
-  bool doHeapRegion(HeapRegion* r) {
+  bool do_heap_region(HeapRegion* r) {
     HeapRegionRemSet* hrrs = r->rem_set();
     size_t occupied = hrrs->occupied();
     _occupied_sum += occupied;
@@ -2543,8 +2551,6 @@
   resize_all_tlabs();
   g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);

-  allocation_context_stats().update(full);
-
   MemoryService::track_memory_usage();

   // We have just completed a GC. Update the soft reference
   // policy with the new heap occupancy
@@ -2669,7 +2675,7 @@
     _dcq(&JavaThread::dirty_card_queue_set()) {
   }

-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
     if (!r->is_starts_humongous()) {
       return false;
     }
@@ -2689,17 +2695,17 @@
     if (!r->rem_set()->is_empty()) {
       guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
                 "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
-      G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
+      G1CardTable* ct = g1h->card_table();
       HeapRegionRemSetIterator hrrs(r->rem_set());
       size_t card_index;
       while (hrrs.has_next(card_index)) {
-        jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
+        jbyte* card_ptr = (jbyte*)ct->byte_for_index(card_index);
         // The remembered set might contain references to already freed
         // regions. Filter out such entries to avoid failing card table
         // verification.
-        if (g1h->is_in_closed_subset(bs->addr_for(card_ptr))) {
-          if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
-            *card_ptr = CardTableModRefBS::dirty_card_val();
+        if (g1h->is_in_closed_subset(ct->addr_for(card_ptr))) {
+          if (*card_ptr != G1CardTable::dirty_card_val()) {
+            *card_ptr = G1CardTable::dirty_card_val();
             _dcq.enqueue(card_ptr);
           }
         }
@@ -2745,7 +2751,7 @@
 class VerifyRegionRemSetClosure : public HeapRegionClosure {
 public:
-  bool doHeapRegion(HeapRegion* hr) {
+  bool do_heap_region(HeapRegion* hr) {
     if (!hr->is_archive() && !hr->is_continues_humongous()) {
       hr->verify_rem_set();
     }
@@ -2815,7 +2821,7 @@
 public:
   G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }

-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
     _hr_printer->cset(r);
     return false;
   }
@@ -4505,7 +4511,7 @@
     _local_free_list("Local Region List for CSet Freeing") {
   }

-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
     G1CollectedHeap* g1h = G1CollectedHeap::heap();

     assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
@@ -4628,7 +4634,7 @@
 public:
   G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }

-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
     _work_items[_cur_idx++] = WorkItem(r);
     return false;
   }
@@ -4762,7 +4768,7 @@
     _free_region_list(free_region_list), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) {
   }

-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
     if (!r->is_starts_humongous()) {
       return false;
     }
@@ -4897,7 +4903,7 @@
 class G1AbandonCollectionSetClosure : public HeapRegionClosure {
 public:
-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
     assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
     G1CollectedHeap::heap()->clear_in_cset(r);
     r->set_young_index_in_cset(-1);
@@ -4967,7 +4973,7 @@
   bool _success;
 public:
   NoYoungRegionsClosure() : _success(true) { }
-  bool doHeapRegion(HeapRegion* r) {
+  bool do_heap_region(HeapRegion* r) {
     if (r->is_young()) {
       log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
                             p2i(r->bottom()), p2i(r->end()));
@@ -4997,7 +5003,7 @@
 public:
   TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }

-  bool doHeapRegion(HeapRegion* r) {
+  bool do_heap_region(HeapRegion* r) {
     if (r->is_old()) {
       _old_set->remove(r);
     } else if(r->is_young()) {
@@ -5065,7 +5071,7 @@
     }
   }

-  bool doHeapRegion(HeapRegion* r) {
+  bool do_heap_region(HeapRegion* r) {
     if (r->is_empty()) {
       // Add free regions to the free list
       r->set_free();
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -28,6 +28,7 @@
 #include "gc/g1/evacuationInfo.hpp"
 #include "gc/g1/g1AllocationContext.hpp"
 #include "gc/g1/g1BiasedArray.hpp"
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
@@ -49,6 +50,7 @@
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/plab.hpp"
 #include "gc/shared/preservedMarks.hpp"
+#include "gc/shared/softRefPolicy.hpp"
 #include "memory/memRegion.hpp"
 #include "services/memoryManager.hpp"
 #include "utilities/stack.hpp"
@@ -149,6 +151,9 @@
   WorkGang* _workers;
   G1CollectorPolicy* _collector_policy;
+  G1CardTable* _card_table;
+
+  SoftRefPolicy _soft_ref_policy;

   GCMemoryManager _memory_manager;
   GCMemoryManager _full_gc_memory_manager;
@@ -222,9 +227,6 @@
   // Class that handles archive allocation ranges.
   G1ArchiveAllocator* _archive_allocator;

-  // Statistics for each allocation context
-  AllocationContextStats _allocation_context_stats;
-
   // GC allocation statistics policy for survivors.
   G1EvacStats _survivor_evac_stats;
@@ -277,8 +279,7 @@
   // (b) cause == _g1_humongous_allocation
   // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
   // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
-  // (e) cause == _update_allocation_context_stats_inc
-  // (f) cause == _wb_conc_mark
+  // (e) cause == _wb_conc_mark
   bool should_do_concurrent_full_gc(GCCause::Cause cause);

   // indicates whether we are in young or mixed GC mode
@@ -580,8 +581,6 @@
   // Determines PLAB size for a given destination.
   inline size_t desired_plab_sz(InCSetState dest);

-  inline AllocationContextStats& allocation_context_stats();
-
   // Do anything common to GC's.
   void gc_prologue(bool full);
   void gc_epilogue(bool full);
@@ -998,8 +997,7 @@

   virtual CollectorPolicy* collector_policy() const;

-  // Adaptive size policy. No such thing for g1.
-  virtual AdaptiveSizePolicy* size_policy() { return NULL; }
+  virtual SoftRefPolicy* soft_ref_policy();

   virtual GrowableArray<GCMemoryManager*> memory_managers();
   virtual GrowableArray<MemoryPool*> memory_pools();
@@ -1130,11 +1128,6 @@
   // "CollectedHeap" supports.
   virtual void collect(GCCause::Cause cause);

-  virtual bool copy_allocation_context_stats(const jint* contexts,
-                                             jlong* totals,
-                                             jbyte* accuracy,
-                                             jint len);
-
   // True iff an evacuation has failed in the most-recent collection.
   bool evacuation_failed() { return _evacuation_failed; }
@@ -1187,6 +1180,10 @@

   G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }

+  G1CardTable* card_table() const {
+    return _card_table;
+  }
+
   // Iteration functions.

   // Iterate over all objects, calling "cl.do_object" on each.
@@ -1197,7 +1194,7 @@
   }

   // Iterate over heap regions, in address order, terminating the
-  // iteration early if the "doHeapRegion" method returns "true".
+  // iteration early if the "do_heap_region" method returns "true".
   void heap_region_iterate(HeapRegionClosure* blk) const;

   // Return the region with the given index. It assumes the index is valid.
@@ -1272,36 +1269,8 @@
   size_t max_tlab_size() const;
   size_t unsafe_max_tlab_alloc(Thread* ignored) const;

-  // Can a compiler initialize a new object without store barriers?
-  // This permission only extends from the creation of a new object
-  // via a TLAB up to the first subsequent safepoint. If such permission
-  // is granted for this heap type, the compiler promises to call
-  // defer_store_barrier() below on any slow path allocation of
-  // a new object for which such initializing store barriers will
-  // have been elided. G1, like CMS, allows this, but should be
-  // ready to provide a compensating write barrier as necessary
-  // if that storage came out of a non-young region. The efficiency
-  // of this implementation depends crucially on being able to
-  // answer very efficiently in constant time whether a piece of
-  // storage in the heap comes from a young region or not.
-  // See ReduceInitialCardMarks.
-  virtual bool can_elide_tlab_store_barriers() const {
-    return true;
-  }
-
-  virtual bool card_mark_must_follow_store() const {
-    return true;
-  }
-
   inline bool is_in_young(const oop obj);

-  // We don't need barriers for initializing stores to objects
-  // in the young gen: for the SATB pre-barrier, there is no
-  // pre-value that needs to be remembered; for the remembered-set
-  // update logging post-barrier, we don't maintain remembered set
-  // information for young gen objects.
-  virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
-
   // Returns "true" iff the given word_size is "very large".
   static bool is_humongous(size_t word_size) {
     // Note this has to be strictly greater-than as the TLABs
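// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): what a heap-region closure
// looks like against the renamed iteration API used throughout this change.
// doHeapRegion() becomes do_heap_region(), and complete()/incomplete() become
// is_complete()/set_incomplete(); returning true from do_heap_region() still
// terminates the iteration early. The closure below is hypothetical.
class CountFreeRegionsClosure : public HeapRegionClosure {
  uint _free;
public:
  CountFreeRegionsClosure() : HeapRegionClosure(), _free(0) { }

  virtual bool do_heap_region(HeapRegion* r) {
    if (r->is_free()) {
      _free++;
    }
    return false; // false == keep visiting the remaining regions
  }

  uint free_regions() const { return _free; }
};
// Typical use, assuming g1h is the G1CollectedHeap:
//   CountFreeRegionsClosure cl;
//   g1h->heap_region_iterate(&cl);
//   assert(cl.is_complete(), "no region returned true, so iteration completed");
// ---------------------------------------------------------------------------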
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -57,10 +57,6 @@

 // Inline functions for G1CollectedHeap

-inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
-  return _allocation_context_stats;
-}
-
 // Return the region with the given index. It assumes the index is valid.
 inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
@@ -127,7 +123,7 @@
   assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

   MemRegion mr(start, end);
-  g1_barrier_set()->g1_mark_as_young(mr);
+  card_table()->g1_mark_as_young(mr);
 }

 inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
@@ -241,15 +237,6 @@
   return heap_region_containing(obj)->is_young();
 }

-// We don't need barriers for initializing stores to objects
-// in the young gen: for the SATB pre-barrier, there is no
-// pre-value that needs to be remembered; for the remembered-set
-// update logging post-barrier, we don't maintain remembered set
-// information for young gen objects.
-inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
-  return is_in_young(new_obj);
-}
-
 inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
   if (obj == NULL) {
     return false;
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap_ext.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap_ext.cpp
--- a/src/hotspot/share/gc/g1/g1CollectedHeap_ext.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap_ext.cpp
@@ -30,13 +30,6 @@

 class STWGCTimer;

-bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
-                                                    jlong* totals,
-                                                    jbyte* accuracy,
-                                                    jint len) {
-  return false;
-}
-
 G1Policy* G1CollectedHeap::create_g1_policy(STWGCTimer* gc_timer) {
   return new G1DefaultPolicy(gc_timer);
 }
diff --git a/src/hotspot/share/gc/g1/g1CollectionSet.cpp b/src/hotspot/share/gc/g1/g1CollectionSet.cpp
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp
@@ -186,9 +186,9 @@
   do {
     HeapRegion* r = G1CollectedHeap::heap()->region_at(_collection_set_regions[cur_pos]);
-    bool result = cl->doHeapRegion(r);
+    bool result = cl->do_heap_region(r);
     if (result) {
-      cl->incomplete();
+      cl->set_incomplete();
       return;
     }
     cur_pos++;
@@ -292,7 +292,7 @@
 public:
   G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }
-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
     guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());

     SurvRateGroup* group = r->surv_rate_group();
@@ -332,7 +332,7 @@
 public:
   G1PrintCollectionSetClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }

-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
     assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
     _st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
                   HR_FORMAT_PARAMS(r),
@@ -524,7 +524,7 @@
     FREE_C_HEAP_ARRAY(int, _heap_region_indices);
   }

-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
     const int idx = r->young_index_in_cset();
     assert(idx > -1, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -38,6 +38,7 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -591,7 +592,7 @@
   G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
   }

-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
     size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

     HeapWord* cur = r->bottom();
@@ -638,7 +639,7 @@
   }

   bool is_complete() {
-    return _cl.complete();
+    return _cl.is_complete();
   }
 };
@@ -694,7 +695,7 @@

   CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
   }

-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
     // This closure can be called concurrently to the mutator, so we must make sure
     // that the result of the getNextMarkedWordAddress() call is compared to the
     // value passed to it as limit to detect any found bits.
@@ -707,12 +708,12 @@
 bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
   CheckBitmapClearHRClosure cl(_next_mark_bitmap);
   _g1h->heap_region_iterate(&cl);
-  return cl.complete();
+  return cl.is_complete();
 }

 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
-  bool doHeapRegion(HeapRegion* r) {
+  bool do_heap_region(HeapRegion* r) {
     r->note_start_of_marking();
     return false;
   }
@@ -1094,7 +1095,7 @@
   const uint old_regions_removed() { return _old_regions_removed; }
   const uint humongous_regions_removed() { return _humongous_regions_removed; }

-  bool doHeapRegion(HeapRegion *hr) {
+  bool do_heap_region(HeapRegion *hr) {
     _g1->reset_gc_time_stamps(hr);
     hr->note_end_of_marking();
@@ -1135,7 +1136,7 @@
     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, &hrrs_cleanup_task);
     _g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id);
-    assert(g1_note_end.complete(), "Shouldn't have yielded!");
+    assert(g1_note_end.is_complete(), "Shouldn't have yielded!");

     // Now update the lists
     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
@@ -1275,7 +1276,6 @@
   // We reclaimed old regions so we should calculate the sizes to make
   // sure we update the old gen/space data.
   g1h->g1mm()->update_sizes();
-  g1h->allocation_context_stats().update_after_mark();
 }

 void G1ConcurrentMark::complete_cleanup() {
@@ -2922,7 +2922,7 @@
          "(bytes)", "(bytes)");
 }

-bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
+bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
   const char* type = r->get_type_str();
   HeapWord* bottom = r->bottom();
   HeapWord* end = r->end();
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -848,7 +848,7 @@
   // The header and footer are printed in the constructor and
   // destructor respectively.
   G1PrintRegionLivenessInfoClosure(const char* phase_name);
-  virtual bool doHeapRegion(HeapRegion* r);
+  virtual bool do_heap_region(HeapRegion* r);
   ~G1PrintRegionLivenessInfoClosure();
 };
diff --git a/src/hotspot/share/gc/g1/g1EvacFailure.cpp b/src/hotspot/share/gc/g1/g1EvacFailure.cpp
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -38,12 +38,12 @@
 class UpdateRSetDeferred : public ExtendedOopClosure {
 private:
   G1CollectedHeap* _g1;
-  DirtyCardQueue *_dcq;
-  G1SATBCardTableModRefBS* _ct_bs;
+  DirtyCardQueue* _dcq;
+  G1CardTable* _ct;

 public:
   UpdateRSetDeferred(DirtyCardQueue* dcq) :
-    _g1(G1CollectedHeap::heap()), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {}
+    _g1(G1CollectedHeap::heap()), _ct(_g1->card_table()), _dcq(dcq) {}

   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
@@ -59,9 +59,9 @@
     if (HeapRegion::is_in_same_region(p, oopDesc::decode_heap_oop(o))) {
       return;
     }
-    size_t card_index = _ct_bs->index_for(p);
-    if (_ct_bs->mark_card_deferred(card_index)) {
-      _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
+    size_t card_index = _ct->index_for(p);
+    if (_ct->mark_card_deferred(card_index)) {
+      _dcq->enqueue((jbyte*)_ct->byte_for_index(card_index));
     }
   }
 };
@@ -220,7 +220,7 @@
     return rspc.marked_bytes();
   }

-  bool doHeapRegion(HeapRegion *hr) {
+  bool do_heap_region(HeapRegion *hr) {
     assert(!hr->is_pinned(), "Unexpected pinned region at index %u", hr->hrm_index());
     assert(hr->in_collection_set(), "bad CS");
diff --git a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp
--- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp
@@ -56,7 +56,7 @@
     _bitmap(bitmap),
     _worker_id(worker_id) { }

-  bool doHeapRegion(HeapRegion* r) {
+  bool do_heap_region(HeapRegion* r) {
     G1AdjustAndRebuildClosure cl(_worker_id);
     if (r->is_humongous()) {
       oop obj = oop(r->humongous_start_region()->bottom());
diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
--- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
@@ -40,7 +40,7 @@
   G1ResetHumongousClosure(G1CMBitMap* bitmap) :
     _bitmap(bitmap) { }

-  bool doHeapRegion(HeapRegion* current) {
+  bool do_heap_region(HeapRegion* current) {
     if (current->is_humongous()) {
       if (current->is_starts_humongous()) {
         oop obj = oop(current->bottom());
diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@
 #include "logging/log.hpp"
 #include "utilities/ticks.inline.hpp"

-bool G1FullGCPrepareTask::G1CalculatePointersClosure::doHeapRegion(HeapRegion* hr) {
+bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
   if (hr->is_humongous()) {
     oop obj = oop(hr->humongous_start_region()->bottom());
     if (_bitmap->is_marked(obj)) {
@@ -112,7 +112,7 @@
   hr->reset_gc_time_stamp();
   hr->rem_set()->clear();

-  _g1h->g1_barrier_set()->clear(MemRegion(hr->bottom(), hr->end()));
+  _g1h->card_table()->clear(MemRegion(hr->bottom(), hr->end()));

   if (_g1h->g1_hot_card_cache()->use_cache()) {
     _g1h->g1_hot_card_cache()->reset_card_counts(hr);
diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp
--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp
@@ -67,7 +67,7 @@
                                  G1FullGCCompactionPoint* cp);
     void update_sets();

-    bool doHeapRegion(HeapRegion* hr);
+    bool do_heap_region(HeapRegion* hr);
     bool freed_regions();
   };
diff --git a/src/hotspot/share/gc/g1/g1FullGCScope.cpp b/src/hotspot/share/gc/g1/g1FullGCScope.cpp
--- a/src/hotspot/share/gc/g1/g1FullGCScope.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCScope.cpp
@@ -35,7 +35,7 @@
     _tracer(),
     _active(),
     _cpu_time(),
-    _soft_refs(clear_soft, _g1h->collector_policy()),
+    _soft_refs(clear_soft, _g1h->soft_ref_policy()),
     _memory_stats(memory_manager, _g1h->gc_cause()),
     _collector_stats(_g1h->g1mm()->full_collection_counters()),
     _heap_transition(_g1h) {
diff --git a/src/hotspot/share/gc/g1/g1HeapTransition.cpp b/src/hotspot/share/gc/g1/g1HeapTransition.cpp
--- a/src/hotspot/share/gc/g1/g1HeapTransition.cpp
+++ b/src/hotspot/share/gc/g1/g1HeapTransition.cpp
@@ -58,7 +58,7 @@
 class DetailedUsageClosure: public HeapRegionClosure {
 public:
   DetailedUsage _usage;
-  bool doHeapRegion(HeapRegion* r) {
+  bool do_heap_region(HeapRegion* r) {
     if (r->is_old()) {
       _usage._old_used += r->used();
       _usage._old_region_count++;
diff --git a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -273,7 +273,7 @@
   G1CollectedHeap* _g1h;
 public:
   VerifyArchivePointerRegionClosure(G1CollectedHeap* g1h) { }
-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
    if (r->is_archive()) {
       VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false);
       r->object_iterate(&verify_oop_pointers);
@@ -306,7 +306,7 @@
     return _failures;
   }

-  bool doHeapRegion(HeapRegion* r) {
+  bool do_heap_region(HeapRegion* r) {
     // For archive regions, verify there are no heap pointers to
     // non-pinned regions. For all others, verify liveness info.
     if (r->is_closed_archive()) {
@@ -498,7 +498,7 @@
     _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
     _old_count(), _humongous_count(), _free_count(){ }

-  bool doHeapRegion(HeapRegion* hr) {
+  bool do_heap_region(HeapRegion* hr) {
     if (hr->is_young()) {
       // TODO
     } else if (hr->is_humongous()) {
@@ -604,11 +604,10 @@
 #ifndef PRODUCT
 class G1VerifyCardTableCleanup: public HeapRegionClosure {
   G1HeapVerifier* _verifier;
-  G1SATBCardTableModRefBS* _ct_bs;
 public:
-  G1VerifyCardTableCleanup(G1HeapVerifier* verifier, G1SATBCardTableModRefBS* ct_bs)
-    : _verifier(verifier), _ct_bs(ct_bs) { }
-  virtual bool doHeapRegion(HeapRegion* r) {
+  G1VerifyCardTableCleanup(G1HeapVerifier* verifier)
+    : _verifier(verifier) { }
+  virtual bool do_heap_region(HeapRegion* r) {
     if (r->is_survivor()) {
       _verifier->verify_dirty_region(r);
     } else {
@@ -620,16 +619,16 @@

 void G1HeapVerifier::verify_card_table_cleanup() {
   if (G1VerifyCTCleanup || VerifyAfterGC) {
-    G1VerifyCardTableCleanup cleanup_verifier(this, _g1h->g1_barrier_set());
+    G1VerifyCardTableCleanup cleanup_verifier(this);
     _g1h->heap_region_iterate(&cleanup_verifier);
   }
 }

 void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) {
   // All of the region should be clean.
-  G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
+  G1CardTable* ct = _g1h->card_table();
   MemRegion mr(hr->bottom(), hr->end());
-  ct_bs->verify_not_dirty_region(mr);
+  ct->verify_not_dirty_region(mr);
 }

 void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
@@ -640,12 +639,12 @@
   // not dirty that area (one less thing to have to do while holding
   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
   // is dirty.
-  G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
+  G1CardTable* ct = _g1h->card_table();
   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
   if (hr->is_young()) {
-    ct_bs->verify_g1_young_region(mr);
+    ct->verify_g1_young_region(mr);
   } else {
-    ct_bs->verify_dirty_region(mr);
+    ct->verify_dirty_region(mr);
   }
 }
@@ -654,7 +653,7 @@
   G1HeapVerifier* _verifier;
 public:
   G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { }
-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
     _verifier->verify_dirty_region(r);
     return false;
   }
@@ -721,7 +720,7 @@
   bool failures() { return _failures; }

-  virtual bool doHeapRegion(HeapRegion* hr) {
+  virtual bool do_heap_region(HeapRegion* hr) {
     bool result = _verifier->verify_bitmaps(_caller, hr);
     if (!result) {
       _failures = true;
@@ -744,7 +743,7 @@
 public:
   G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }

-  virtual bool doHeapRegion(HeapRegion* hr) {
+  virtual bool do_heap_region(HeapRegion* hr) {
     uint i = hr->hrm_index();
     InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
     if (hr->is_humongous()) {
diff --git a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -235,13 +235,7 @@
   size_t const _page_size;
 public:
   G1PretouchTask(char* start_address, char* end_address, size_t page_size) :
-    AbstractGangTask("G1 PreTouch",
-                     Universe::is_fully_initialized() &&
-                     Thread::current()->is_Named_thread() ? GCId::current_raw() :
-                                                            // During VM initialization there is
-                                                            // no GC cycle that this task can be
-                                                            // associated with.
-                                                            GCId::undefined()),
+    AbstractGangTask("G1 PreTouch"),
     _cur_addr(start_address),
     _start_addr(start_address),
     _end_addr(end_address),
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
   : _g1h(g1h),
     _refs(g1h->task_queue(worker_id)),
     _dcq(&g1h->dirty_card_queue_set()),
-    _ct_bs(g1h->g1_barrier_set()),
+    _ct(g1h->card_table()),
     _closures(NULL),
     _hash_seed(17),
     _worker_id(worker_id),
@@ -206,7 +206,7 @@
                                                   oop const old, size_t word_sz, uint age,
                                                   HeapWord * const obj_ptr,
                                                   const AllocationContext_t context) const {
-  G1PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
+  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
   if (alloc_buf->contains(obj_ptr)) {
     _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
                                                              dest_state.value() == InCSetState::Old,
@@ -390,7 +390,6 @@
     return forward_ptr;
   }
 }
-
 G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length) :
     _g1h(g1h),
     _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
   G1CollectedHeap* _g1h;
   RefToScanQueue* _refs;
   DirtyCardQueue _dcq;
-  G1SATBCardTableModRefBS* _ct_bs;
+  G1CardTable* _ct;
   G1EvacuationRootClosures* _closures;

   G1PLABAllocator* _plab_allocator;
@@ -72,7 +72,7 @@
 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))

   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
-  G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
+  G1CardTable* ct()                              { return _ct; }

   InCSetState dest(InCSetState original) const {
     assert(original.is_valid(),
@@ -104,10 +104,10 @@
     // If the field originates from the to-space, we don't need to include it
     // in the remembered set updates.
     if (!from->is_young()) {
-      size_t card_index = ctbs()->index_for(p);
+      size_t card_index = ct()->index_for(p);
       // If the card hasn't been added to the buffer, do it.
-      if (ctbs()->mark_card_deferred(card_index)) {
-        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
+      if (ct()->mark_card_deferred(card_index)) {
+        dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
       }
     }
   }
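// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the deferred-card path from the
// g1ParScanThreadState hunk above, spelled out against the new G1CardTable
// entry points. Fields whose holder region is young are skipped entirely (G1
// keeps no remembered-set data for young regions), and mark_card_deferred()
// filters duplicates so a card is enqueued at most once per "cache miss".
// Function and parameter names here are hypothetical.
template <class T>
inline void update_rs_sketch(G1CardTable* ct, DirtyCardQueue& dcq,
                             HeapRegion* from, T* p) {
  if (from->is_young()) {
    return;                                  // young sources need no RS update
  }
  size_t card_index = ct->index_for(p);      // card covering the field address
  if (ct->mark_card_deferred(card_index)) {  // first marker wins the enqueue
    dcq.enqueue((jbyte*)ct->byte_for_index(card_index));
  }
}
// ---------------------------------------------------------------------------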
diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc/g1/g1CardTable.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
 #include "gc/g1/g1FromCardCache.hpp"
@@ -74,7 +75,7 @@
   static size_t chunk_size() { return M; }

   void work(uint worker_id) {
-    G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
+    G1CardTable* ct = _g1h->card_table();

     while (_cur_dirty_regions < _num_dirty_regions) {
       size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
@@ -83,7 +84,7 @@
       for (size_t i = next; i < max; i++) {
         HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
         if (!r->is_survivor()) {
-          ct_bs->clear(MemRegion(r->bottom(), r->end()));
+          ct->clear(MemRegion(r->bottom(), r->end()));
         }
       }
     }
@@ -127,7 +128,7 @@
 public:
   G1ResetScanTopClosure(HeapWord** scan_top) : _scan_top(scan_top) { }

-  virtual bool doHeapRegion(HeapRegion* r) {
+  virtual bool do_heap_region(HeapRegion* r) {
     uint hrm_index = r->hrm_index();
     if (!r->in_collection_set() && r->is_old_or_humongous()) {
       _scan_top[hrm_index] = r->top();
@@ -204,7 +205,7 @@
     if (_iter_states[region] != Unclaimed) {
       return false;
     }
-    jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_states[region]), Unclaimed);
+    G1RemsetIterState res = Atomic::cmpxchg(Claimed, &_iter_states[region], Unclaimed);
     return (res == Unclaimed);
   }
@@ -214,7 +215,7 @@
     if (iter_is_complete(region)) {
       return false;
     }
-    jint res = Atomic::cmpxchg(Complete, (jint*)(&_iter_states[region]), Claimed);
+    G1RemsetIterState res = Atomic::cmpxchg(Complete, &_iter_states[region], Claimed);
     return (res == Claimed);
   }
@@ -280,12 +281,12 @@
 };

 G1RemSet::G1RemSet(G1CollectedHeap* g1,
-                   CardTableModRefBS* ct_bs,
+                   G1CardTable* ct,
                    G1HotCardCache* hot_card_cache) :
   _g1(g1),
   _scan_state(new G1RemSetScanState()),
   _num_conc_refined_cards(0),
-  _ct_bs(ct_bs),
+  _ct(ct),
   _g1p(_g1->g1_policy()),
   _hot_card_cache(hot_card_cache),
   _prev_period_summary() {
@@ -328,7 +329,7 @@
     _worker_i(worker_i) {
   _g1h = G1CollectedHeap::heap();
   _bot = _g1h->bot();
-  _ct_bs = _g1h->g1_barrier_set();
+  _ct = _g1h->card_table();
 }

 void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
@@ -345,11 +346,11 @@
 }

 void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_idx_for_card){
-  _ct_bs->set_card_claimed(card_index);
+  _ct->set_card_claimed(card_index);
   _scan_state->add_dirty_region(region_idx_for_card);
 }

-bool G1ScanRSForRegionClosure::doHeapRegion(HeapRegion* r) {
+bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
   assert(r->in_collection_set(), "should only be called on elements of CS.");
   uint region_idx = r->hrm_index();
@@ -381,7 +382,7 @@
     _cards_claimed++;

     // If the card is dirty, then G1 will scan it during Update RS.
-    if (_ct_bs->is_card_claimed(card_index) || _ct_bs->is_card_dirty(card_index)) {
+    if (_ct->is_card_claimed(card_index) || _ct->is_card_dirty(card_index)) {
      continue;
     }
@@ -522,7 +523,7 @@
     _g1h(G1CollectedHeap::heap()),
     _live_data(live_data) { }

-  bool doHeapRegion(HeapRegion* r) {
+  bool do_heap_region(HeapRegion* r) {
     if (!r->is_continues_humongous()) {
       r->rem_set()->scrub(_live_data);
     }
@@ -535,15 +536,15 @@
   _g1->heap_region_par_iterate_from_worker_offset(&scrub_cl, hrclaimer, worker_num);
 }

-inline void check_card_ptr(jbyte* card_ptr, CardTableModRefBS* ct_bs) {
+inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
 #ifdef ASSERT
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
-  assert(g1->is_in_exact(ct_bs->addr_for(card_ptr)),
+  assert(g1->is_in_exact(ct->addr_for(card_ptr)),
          "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
          p2i(card_ptr),
-         ct_bs->index_for(ct_bs->addr_for(card_ptr)),
-         p2i(ct_bs->addr_for(card_ptr)),
-         g1->addr_to_region(ct_bs->addr_for(card_ptr)));
+         ct->index_for(ct->addr_for(card_ptr)),
+         p2i(ct->addr_for(card_ptr)),
+         g1->addr_to_region(ct->addr_for(card_ptr)));
 #endif
 }
@@ -551,15 +552,15 @@
                                         uint worker_i) {
   assert(!_g1->is_gc_active(), "Only call concurrently");

-  check_card_ptr(card_ptr, _ct_bs);
+  check_card_ptr(card_ptr, _ct);

   // If the card is no longer dirty, nothing to do.
-  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
+  if (*card_ptr != G1CardTable::dirty_card_val()) {
     return;
   }

   // Construct the region representing the card.
-  HeapWord* start = _ct_bs->addr_for(card_ptr);
+  HeapWord* start = _ct->addr_for(card_ptr);
   // And find the region containing it.
   HeapRegion* r = _g1->heap_region_containing(start);
@@ -586,6 +587,20 @@
     return;
   }

+  // While we are processing RSet buffers during the collection, we
+  // actually don't want to scan any cards on the collection set,
+  // since we don't want to update remembered sets with entries that
+  // point into the collection set, given that live objects from the
+  // collection set are about to move and such entries will be stale
+  // very soon. This change also deals with a reliability issue which
+  // involves scanning a card in the collection set and coming across
+  // an array that was being chunked and looking malformed. Note,
+  // however, that if evacuation fails, we have to scan any objects
+  // that were not moved and create any missing entries.
+  if (r->in_collection_set()) {
+    return;
+  }
+
   // The result from the hot card cache insert call is either:
   //   * pointer to the current card
   //     (implying that the current card is not 'hot'),
@@ -605,12 +620,13 @@
     return;
   } else if (card_ptr != orig_card_ptr) {
     // Original card was inserted and an old card was evicted.
-    start = _ct_bs->addr_for(card_ptr);
+    start = _ct->addr_for(card_ptr);
     r = _g1->heap_region_containing(start);

     // Check whether the region formerly in the cache should be
     // ignored, as discussed earlier for the original card. The
-    // region could have been freed while in the cache.
+    // region could have been freed while in the cache. The cset is
+    // not relevant here, since we're in concurrent phase.
     if (!r->is_old_or_humongous()) {
       return;
     }
@@ -639,7 +655,7 @@
   // Okay to clean and process the card now. There are still some
   // stale card cases that may be detected by iteration and dealt with
   // as iteration failure.
-  *const_cast<jbyte*>(card_ptr) = CardTableModRefBS::clean_card_val();
+  *const_cast<jbyte*>(card_ptr) = G1CardTable::clean_card_val();

   // This fence serves two purposes. First, the card must be cleaned
   // before processing the contents. Second, we can't proceed with
@@ -651,7 +667,7 @@
   // Don't use addr_for(card_ptr + 1) which can ask for
   // a card beyond the heap.
-  HeapWord* end = start + CardTableModRefBS::card_size_in_words;
+  HeapWord* end = start + G1CardTable::card_size_in_words;
   MemRegion dirty_region(start, MIN2(scan_limit, end));
   assert(!dirty_region.is_empty(), "sanity");
@@ -668,8 +684,8 @@
   if (!card_processed) {
     // The card might have gotten re-dirtied and re-enqueued while we
     // worked. (In fact, it's pretty likely.)
-    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
-      *card_ptr = CardTableModRefBS::dirty_card_val();
+    if (*card_ptr != G1CardTable::dirty_card_val()) {
+      *card_ptr = G1CardTable::dirty_card_val();
       MutexLockerEx x(Shared_DirtyCardQ_lock,
                       Mutex::_no_safepoint_check_flag);
       DirtyCardQueue* sdcq =
@@ -685,20 +701,20 @@
                                 G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
   assert(_g1->is_gc_active(), "Only call during GC");

-  check_card_ptr(card_ptr, _ct_bs);
+  check_card_ptr(card_ptr, _ct);

   // If the card is no longer dirty, nothing to do. This covers cards that were already
   // scanned as parts of the remembered sets.
-  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
+  if (*card_ptr != G1CardTable::dirty_card_val()) {
     return false;
   }

   // We claim lazily (so races are possible but they're benign), which reduces the
   // number of potential duplicate scans (multiple threads may enqueue the same card twice).
-  *card_ptr = CardTableModRefBS::clean_card_val() | CardTableModRefBS::claimed_card_val();
+  *card_ptr = G1CardTable::clean_card_val() | G1CardTable::claimed_card_val();

   // Construct the region representing the card.
-  HeapWord* card_start = _ct_bs->addr_for(card_ptr);
+  HeapWord* card_start = _ct->addr_for(card_ptr);
   // And find the region containing it.
   uint const card_region_idx = _g1->addr_to_region(card_start);
@@ -711,7 +727,7 @@
   // Don't use addr_for(card_ptr + 1) which can ask for
   // a card beyond the heap.
-  HeapWord* card_end = card_start + CardTableModRefBS::card_size_in_words;
+  HeapWord* card_end = card_start + G1CardTable::card_size_in_words;
   MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
   assert(!dirty_region.is_empty(), "sanity");
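// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the ordering skeleton of
// refine_card_concurrently() above. The card is reset to clean *before* its
// heap range is scanned, with a StoreLoad fence in between, so a mutator that
// concurrently re-dirties the card is guaranteed to be seen either by this
// scan or by a later refinement of the re-enqueued card. scan_range_sketch()
// is a hypothetical stand-in for the real oop iteration.
static bool scan_range_sketch(MemRegion mr); // assumed helper, not real code

static bool refine_one_card_sketch(G1CardTable* ct, jbyte* card_ptr,
                                   HeapWord* scan_limit) {
  *card_ptr = G1CardTable::clean_card_val(); // claim the card's contents
  OrderAccess::storeload();                  // clean must precede the heap reads

  HeapWord* start = ct->addr_for(card_ptr);
  // Don't use addr_for(card_ptr + 1); it could name a card beyond the heap.
  HeapWord* end = start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(start, MIN2(scan_limit, end));

  bool card_processed = scan_range_sketch(dirty_region);
  if (!card_processed) {
    // Lost a race (e.g. a stale card): make the card dirty again so it is
    // re-examined later instead of being silently dropped.
    *card_ptr = G1CardTable::dirty_card_val();
  }
  return card_processed;
}
// ---------------------------------------------------------------------------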
diff --git a/src/hotspot/share/gc/g1/g1RemSet.hpp b/src/hotspot/share/gc/g1/g1RemSet.hpp
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 #include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/g1/g1CardLiveData.hpp"
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1RemSetSummary.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "memory/allocation.hpp"
@@ -72,7 +73,7 @@
   G1CollectedHeap* _g1;
   size_t _num_conc_refined_cards; // Number of cards refined concurrently to the mutator.

-  CardTableModRefBS* _ct_bs;
+  G1CardTable* _ct;
   G1Policy* _g1p;
   G1HotCardCache* _hot_card_cache;
@@ -93,7 +94,7 @@
   void cleanupHRRS();

   G1RemSet(G1CollectedHeap* g1,
-           CardTableModRefBS* ct_bs,
+           G1CardTable* ct,
            G1HotCardCache* hot_card_cache);
   ~G1RemSet();
@@ -162,7 +163,7 @@
   CodeBlobClosure* _code_root_cl;

   G1BlockOffsetTable* _bot;
-  G1SATBCardTableModRefBS *_ct_bs;
+  G1CardTable *_ct;

   double _strong_code_root_scan_time_sec;
   uint   _worker_i;
@@ -176,7 +177,7 @@
                           CodeBlobClosure* code_root_cl,
                           uint worker_i);

-  bool doHeapRegion(HeapRegion* r);
+  bool do_heap_region(HeapRegion* r);

   double strong_code_root_scan_time_sec() {
     return _strong_code_root_scan_time_sec;
diff --git a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp
--- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp
+++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp
@@ -252,7 +252,7 @@
     _max_rs_mem_sz(0), _max_code_root_mem_sz(0)
   {}

-  bool doHeapRegion(HeapRegion* r) {
+  bool do_heap_region(HeapRegion* r) {
     HeapRegionRemSet* hrrs = r->rem_set();

     // HeapRegionRemSet::mem_size() includes the
diff --git a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -23,22 +23,20 @@
 */

 #include "precompiled.hpp"
+#include "gc/g1/g1CardTable.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/satbMarkQueue.hpp"
-#include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"

 G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(
-  MemRegion whole_heap,
+  G1CardTable* card_table,
   const BarrierSet::FakeRtti& fake_rtti) :
-  CardTableModRefBS(whole_heap, fake_rtti.add_tag(BarrierSet::G1SATBCT))
+  CardTableModRefBS(card_table, fake_rtti.add_tag(BarrierSet::G1SATBCT))
 { }

 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
@@ -80,87 +78,17 @@
   }
 }

-bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
-  jbyte val = _byte_map[card_index];
-  // It's already processed
-  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
-    return false;
-  }
-
-  // Cached bit can be installed either on a clean card or on a claimed card.
-  jbyte new_val = val;
-  if (val == clean_card_val()) {
-    new_val = (jbyte)deferred_card_val();
-  } else {
-    if (val & claimed_card_val()) {
-      new_val = val | (jbyte)deferred_card_val();
-    }
-  }
-  if (new_val != val) {
-    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
-  }
-  return true;
-}
-
-void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
-  jbyte *const first = byte_for(mr.start());
-  jbyte *const last = byte_after(mr.last());
-
-  memset_with_concurrent_readers(first, g1_young_gen, last - first);
-}
-
-#ifndef PRODUCT
-void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
-  verify_region(mr, g1_young_gen, true);
-}
-#endif
-
-void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
-  // Default value for a clean card on the card table is -1. So we cannot take advantage of the zero_filled parameter.
-  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
-  _card_table->clear(mr);
-}
-
 G1SATBCardTableLoggingModRefBS::
-G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) :
-  G1SATBCardTableModRefBS(whole_heap, BarrierSet::FakeRtti(G1SATBCTLogging)),
-  _dcqs(JavaThread::dirty_card_queue_set()),
-  _listener()
-{
-  _listener.set_card_table(this);
-}
-
-void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
-  mapper->set_mapping_changed_listener(&_listener);
-
-  _byte_map_size = mapper->reserved().byte_size();
-
-  _guard_index = cards_required(_whole_heap.word_size()) - 1;
-  _last_valid_index = _guard_index - 1;
-
-  HeapWord* low_bound = _whole_heap.start();
-  HeapWord* high_bound = _whole_heap.end();
-
-  _cur_covered_regions = 1;
-  _covered[0] = _whole_heap;
-
-  _byte_map = (jbyte*) mapper->reserved().start();
-  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
-  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
-  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
-
-  log_trace(gc, barrier)("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
-  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
-                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
-  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
-}
+G1SATBCardTableLoggingModRefBS(G1CardTable* card_table) :
+  G1SATBCardTableModRefBS(card_table, BarrierSet::FakeRtti(G1SATBCTLogging)),
+  _dcqs(JavaThread::dirty_card_queue_set()) {}

 void G1SATBCardTableLoggingModRefBS::write_ref_field_post_slow(volatile jbyte* byte) {
   // In the slow path, we know a card is not young
-  assert(*byte != g1_young_gen, "slow path invoked without filtering");
+  assert(*byte != G1CardTable::g1_young_card_val(), "slow path invoked without filtering");
   OrderAccess::storeload();
-  if (*byte != dirty_card) {
-    *byte = dirty_card;
+  if (*byte != G1CardTable::dirty_card_val()) {
+    *byte = G1CardTable::dirty_card_val();
     Thread* thr = Thread::current();
     if (thr->is_Java_thread()) {
       JavaThread* jt = (JavaThread*)thr;
@@ -173,16 +101,15 @@
   }
 }

-void
-G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
+void G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
   if (mr.is_empty()) {
     return;
   }
-  volatile jbyte* byte = byte_for(mr.start());
-  jbyte* last_byte = byte_for(mr.last());
+  volatile jbyte* byte = _card_table->byte_for(mr.start());
+  jbyte* last_byte = _card_table->byte_for(mr.last());
   Thread* thr = Thread::current();
   // skip all consecutive young cards
-  for (; byte <= last_byte && *byte == g1_young_gen; byte++);
+  for (; byte <= last_byte && *byte == G1CardTable::g1_young_card_val(); byte++);

   if (byte <= last_byte) {
     OrderAccess::storeload();
@@ -190,11 +117,11 @@
     if (thr->is_Java_thread()) {
       JavaThread* jt = (JavaThread*)thr;
       for (; byte <= last_byte; byte++) {
-        if (*byte == g1_young_gen) {
+        if (*byte == G1CardTable::g1_young_card_val()) {
           continue;
         }
-        if (*byte != dirty_card) {
-          *byte = dirty_card;
+        if (*byte != G1CardTable::dirty_card_val()) {
+          *byte = G1CardTable::dirty_card_val();
           jt->dirty_card_queue().enqueue(byte);
         }
       }
@@ -202,11 +129,11 @@
       MutexLockerEx x(Shared_DirtyCardQ_lock,
                       Mutex::_no_safepoint_check_flag);
       for (; byte <= last_byte; byte++) {
-        if (*byte == g1_young_gen) {
+        if (*byte == G1CardTable::g1_young_card_val()) {
           continue;
         }
-        if (*byte != dirty_card) {
-          *byte = dirty_card;
+        if (*byte != G1CardTable::dirty_card_val()) {
+          *byte = G1CardTable::dirty_card_val();
           _dcqs.shared_dirty_card_queue()->enqueue(byte);
         }
       }
@@ -214,7 +141,39 @@
   }
 }

-void G1SATBCardTableModRefBS::keep_alive_barrier(oop obj) {
-  G1SATBCardTableModRefBS::enqueue(obj);
+void G1SATBCardTableLoggingModRefBS::on_thread_attach(JavaThread* thread) {
+  // This method initializes the SATB and dirty card queues before a
+  // JavaThread is added to the Java thread list. Right now, we don't
+  // have to do anything to the dirty card queue (it should have been
+  // activated when the thread was created), but we have to activate
+  // the SATB queue if the thread is created while a marking cycle is
+  // in progress. The activation / de-activation of the SATB queues at
+  // the beginning / end of a marking cycle is done during safepoints
+  // so we have to make sure this method is called outside one to be
+  // able to safely read the active field of the SATB queue set. Right
+  // now, it is called just before the thread is added to the Java
+  // thread list in the Threads::add() method. That method is holding
+  // the Threads_lock which ensures we are outside a safepoint. We
+  // cannot do the obvious and set the active field of the SATB queue
+  // when the thread is created given that, in some cases, safepoints
+  // might happen between the JavaThread constructor being called and the
+  // thread being added to the Java thread list (an example of this is
+  // when the structure for the DestroyJavaVM thread is created).
+  assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
+  assert(!thread->satb_mark_queue().is_active(), "SATB queue should not be active");
+  assert(thread->satb_mark_queue().is_empty(), "SATB queue should be empty");
+  assert(thread->dirty_card_queue().is_active(), "Dirty card queue should be active");
+
+  // If we are creating the thread during a marking cycle, we should
+  // set the active field of the SATB queue to true.
+  if (thread->satb_mark_queue_set().is_active()) {
+    thread->satb_mark_queue().set_active(true);
+  }
 }

+void G1SATBCardTableLoggingModRefBS::on_thread_detach(JavaThread* thread) {
+  // Flush any deferred card marks, SATB buffers and dirty card queue buffers
+  CardTableModRefBS::on_thread_detach(thread);
+  thread->satb_mark_queue().flush();
+  thread->dirty_card_queue().flush();
+}
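// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the card-value state machine
// that mark_card_deferred(), removed above and moved onto G1CardTable,
// implements. Per the accompanying (also removed) header comment, the
// deferred bit may only be installed over a clean or a claimed card, and the
// CAS is deliberately not retried: a lost race merely costs one duplicate
// entry in the update buffer. byte_map is assumed to be the card table's
// backing array.
static bool mark_card_deferred_sketch(jbyte* byte_map, size_t card_index) {
  jbyte val = byte_map[card_index];
  // Already deferred: nothing more to do.
  if ((val & (G1CardTable::clean_card_mask_val() | G1CardTable::deferred_card_val())) ==
      G1CardTable::deferred_card_val()) {
    return false;
  }
  jbyte new_val = val;
  if (val == G1CardTable::clean_card_val()) {
    new_val = (jbyte)G1CardTable::deferred_card_val();        // clean -> deferred
  } else if (val & G1CardTable::claimed_card_val()) {
    new_val = val | (jbyte)G1CardTable::deferred_card_val();  // claimed -> claimed|deferred
  }
  if (new_val != val) {
    // Best effort only: failure is benign, so there is no retry loop.
    Atomic::cmpxchg(new_val, &byte_map[card_index], val);
  }
  return true;
}
// ---------------------------------------------------------------------------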
diff --git a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@

 class DirtyCardQueueSet;
 class G1SATBCardTableLoggingModRefBS;
+class CardTable;
+class G1CardTable;

 // This barrier is specialized to use a logging barrier to support
 // snapshot-at-the-beginning marking.
@@ -40,16 +42,10 @@
 class G1SATBCardTableModRefBS: public CardTableModRefBS {
   friend class VMStructs;
 protected:
-  enum G1CardValues {
-    g1_young_gen = CT_MR_BS_last_reserved << 1
-  };
-
-  G1SATBCardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
+  G1SATBCardTableModRefBS(G1CardTable* table, const BarrierSet::FakeRtti& fake_rtti);
   ~G1SATBCardTableModRefBS() { }

 public:
-  static int g1_young_card_val() { return g1_young_gen; }
-
   // Add "pre_val" to a set of objects that may have been disconnected from the
   // pre-marking object graph.
   static void enqueue(oop pre_val);
@@ -62,38 +58,6 @@

   template <DecoratorSet decorators, typename T>
   void write_ref_field_pre(T* field);
-
-/*
-   Claimed and deferred bits are used together in G1 during the evacuation
-   pause. These bits can have the following state transitions:
-   1. The claimed bit can be put over any other card state. Except that
-      the "dirty -> dirty and claimed" transition is checked for in
-      G1 code and is not used.
-   2. Deferred bit can be set only if the previous state of the card
-      was either clean or claimed. mark_card_deferred() is wait-free.
-      We do not care if the operation is be successful because if
-      it does not it will only result in duplicate entry in the update
-      buffer because of the "cache-miss". So it's not worth spinning.
- */
-
-  bool is_card_claimed(size_t card_index) {
-    jbyte val = _byte_map[card_index];
-    return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
-  }
-
-  inline void set_card_claimed(size_t card_index);
-
-  void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
-  void g1_mark_as_young(const MemRegion& mr);
-
-  bool mark_card_deferred(size_t card_index);
-
-  bool is_card_deferred(size_t card_index) {
-    jbyte val = _byte_map[card_index];
-    return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
-  }
-
-  virtual void keep_alive_barrier(oop obj);
 };

 template<>
@@ -106,54 +70,29 @@
   typedef G1SATBCardTableModRefBS type;
 };

-class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
- private:
-  G1SATBCardTableLoggingModRefBS* _card_table;
- public:
-  G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }
-
-  void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }
-
-  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
-};
-
 // Adds card-table logging to the post-barrier.
 // Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
 class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
-  friend class G1SATBCardTableLoggingModRefBSChangedListener;
  private:
-  G1SATBCardTableLoggingModRefBSChangedListener _listener;
   DirtyCardQueueSet& _dcqs;

 public:
-  static size_t compute_size(size_t mem_region_size_in_words) {
-    size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
-    return ReservedSpace::allocation_align_size_up(number_of_slots);
-  }
-
-  // Returns how many bytes of the heap a single byte of the Card Table corresponds to.
-  static size_t heap_map_factor() {
-    return CardTableModRefBS::card_size;
-  }
-
-  G1SATBCardTableLoggingModRefBS(MemRegion whole_heap);
-
-  virtual void initialize() { }
-  virtual void initialize(G1RegionToSpaceMapper* mapper);
-
-  virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
+  G1SATBCardTableLoggingModRefBS(G1CardTable* card_table);

   // NB: if you do a whole-heap invalidation, the "usual invariant" defined
   // above no longer applies.
   void invalidate(MemRegion mr);

-  void write_region_work(MemRegion mr)    { invalidate(mr); }
+  void write_region(MemRegion mr)         { invalidate(mr); }
   void write_ref_array_work(MemRegion mr) { invalidate(mr); }

   template <DecoratorSet decorators, typename T>
   void write_ref_field_post(T* field, oop new_val);
   void write_ref_field_post_slow(volatile jbyte* byte);

+  virtual void on_thread_attach(JavaThread* thread);
+  virtual void on_thread_detach(JavaThread* thread);
+
   // Callbacks for runtime accesses.
   template <DecoratorSet decorators, typename BarrierSetT = G1SATBCardTableLoggingModRefBS>
   class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {
diff --git a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp
@@ -25,12 +25,13 @@
 #ifndef SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
 #define SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP

+#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/shared/accessBarrierSupport.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"

 template <DecoratorSet decorators, typename T>
 inline void G1SATBCardTableModRefBS::write_ref_field_pre(T* field) {
-  if (HasDecorator<decorators, ARRAYCOPY_DEST_NOT_INITIALIZED>::value ||
+  if (HasDecorator<decorators, AS_DEST_NOT_INITIALIZED>::value ||
       HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
     return;
   }
@@ -43,23 +44,13 @@
 template <DecoratorSet decorators, typename T>
 inline void G1SATBCardTableLoggingModRefBS::write_ref_field_post(T* field, oop new_val) {
-  volatile jbyte* byte = byte_for(field);
-  if (*byte != g1_young_gen) {
+  volatile jbyte* byte = _card_table->byte_for(field);
+  if (*byte != G1CardTable::g1_young_card_val()) {
     // Take a slow path for cards in old
     write_ref_field_post_slow(byte);
   }
 }

-void G1SATBCardTableModRefBS::set_card_claimed(size_t card_index) {
-  jbyte val = _byte_map[card_index];
-  if (val == clean_card_val()) {
-    val = (jbyte)claimed_card_val();
-  } else {
-    val |= (jbyte)claimed_card_val();
-  }
-  _byte_map[card_index] = val;
-}
-
 inline void G1SATBCardTableModRefBS::enqueue_if_weak_or_archive(DecoratorSet decorators, oop value) {
   assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
   // Archive roots need to be enqueued since they add subgraphs to the
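// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the G1 post-write-barrier as it
// reads after the split, with the card lookup going through the stashed
// G1CardTable rather than the barrier set's own byte map. Young-region cards
// are pre-marked with g1_young_card_val(), which lets the common case skip
// both the StoreLoad fence and the dirty-card enqueue. The free-function form
// and the queue parameter are assumptions made for illustration.
static void post_barrier_sketch(G1CardTable* ct, DirtyCardQueue& queue,
                                void* field) {
  volatile jbyte* byte = ct->byte_for(field);
  if (*byte == G1CardTable::g1_young_card_val()) {
    return;                   // fast path: young-gen stores need no refinement
  }
  OrderAccess::storeload();   // order the reference store before the re-check
  if (*byte != G1CardTable::dirty_card_val()) {
    *byte = G1CardTable::dirty_card_val();
    queue.enqueue((jbyte*)byte); // hand the card to concurrent refinement
  }
}
// ---------------------------------------------------------------------------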
All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -100,7 +100,7 @@ guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity"); guarantee(CardsPerRegion == 0, "we should only set it once"); - CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift; + CardsPerRegion = GrainBytes >> G1CardTable::card_shift; if (G1HeapRegionSize != GrainBytes) { FLAG_SET_ERGO(size_t, G1HeapRegionSize, GrainBytes); @@ -139,9 +139,8 @@ assert(capacity() == HeapRegion::GrainBytes, "should be back to normal"); HeapRegionRemSet* hrrs = rem_set(); hrrs->clear(); - CardTableModRefBS* ct_bs = - barrier_set_cast(G1CollectedHeap::heap()->barrier_set()); - ct_bs->clear(MemRegion(bottom(), end())); + G1CardTable* ct = G1CollectedHeap::heap()->card_table(); + ct->clear(MemRegion(bottom(), end())); } void HeapRegion::calc_gc_efficiency() { @@ -463,7 +462,7 @@ class G1VerificationClosure : public OopClosure { protected: G1CollectedHeap* _g1h; - CardTableModRefBS* _bs; + G1CardTable *_ct; oop _containing_obj; bool _failures; int _n_failures; @@ -473,7 +472,7 @@ // _vo == UseNextMarking -> use "next" marking information, // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS. G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) : - _g1h(g1h), _bs(barrier_set_cast(g1h->barrier_set())), + _g1h(g1h), _ct(g1h->card_table()), _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) { } @@ -576,9 +575,9 @@ if (from != NULL && to != NULL && from != to && !to->is_pinned()) { - jbyte cv_obj = *_bs->byte_for_const(_containing_obj); - jbyte cv_field = *_bs->byte_for_const(p); - const jbyte dirty = CardTableModRefBS::dirty_card_val(); + jbyte cv_obj = *_ct->byte_for_const(_containing_obj); + jbyte cv_field = *_ct->byte_for_const(p); + const jbyte dirty = G1CardTable::dirty_card_val(); bool is_bad = !(from->is_young() || to->rem_set()->contains_reference(p) @@ -834,7 +833,6 @@ CompactibleSpace::clear(mangle_space); reset_bot(); } - #ifndef PRODUCT void G1ContiguousSpace::mangle_unused_area() { mangle_unused_area_complete(); diff --git a/src/hotspot/share/gc/g1/heapRegion.hpp b/src/hotspot/share/gc/g1/heapRegion.hpp --- a/src/hotspot/share/gc/g1/heapRegion.hpp +++ b/src/hotspot/share/gc/g1/heapRegion.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,7 @@ #include "gc/g1/heapRegionType.hpp" #include "gc/g1/survRateGroup.hpp" #include "gc/shared/ageTable.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/spaceDecorator.hpp" #include "utilities/macros.hpp" @@ -719,23 +720,23 @@ }; // HeapRegionClosure is used for iterating over regions. -// Terminates the iteration when the "doHeapRegion" method returns "true". +// Terminates the iteration when the "do_heap_region" method returns "true". 
class HeapRegionClosure : public StackObj { friend class HeapRegionManager; friend class G1CollectionSet; - bool _complete; - void incomplete() { _complete = false; } + bool _is_complete; + void set_incomplete() { _is_complete = false; } public: - HeapRegionClosure(): _complete(true) {} + HeapRegionClosure(): _is_complete(true) {} // Typically called on each region until it returns true. - virtual bool doHeapRegion(HeapRegion* r) = 0; + virtual bool do_heap_region(HeapRegion* r) = 0; // True after iteration if the closure was applied to all heap regions // and returned "false" in all cases. - bool complete() { return _complete; } + bool is_complete() { return _is_complete; } }; #endif // SHARE_VM_GC_G1_HEAPREGION_HPP diff --git a/src/hotspot/share/gc/g1/heapRegionManager.cpp b/src/hotspot/share/gc/g1/heapRegionManager.cpp --- a/src/hotspot/share/gc/g1/heapRegionManager.cpp +++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp @@ -242,9 +242,9 @@ continue; } guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i); - bool res = blk->doHeapRegion(at(i)); + bool res = blk->do_heap_region(at(i)); if (res) { - blk->incomplete(); + blk->set_incomplete(); return; } } @@ -353,7 +353,7 @@ if (!hrclaimer->claim_region(index)) { continue; } - bool res = blk->doHeapRegion(r); + bool res = blk->do_heap_region(r); if (res) { return; } diff --git a/src/hotspot/share/gc/g1/heapRegionManager.hpp b/src/hotspot/share/gc/g1/heapRegionManager.hpp --- a/src/hotspot/share/gc/g1/heapRegionManager.hpp +++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp @@ -236,8 +236,8 @@ // and not free, and return the number of regions newly committed in commit_count. bool allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers); - // Apply blk->doHeapRegion() on all committed regions in address order, - // terminating the iteration early if doHeapRegion() returns true. + // Apply blk->do_heap_region() on all committed regions in address order, + // terminating the iteration early if do_heap_region() returns true. void iterate(HeapRegionClosure* blk) const; void par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const; diff --git a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp --- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp +++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
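The do_heap_region()/is_complete() protocol above is easiest to see end to end in a small self-contained sketch. Everything below (the region struct, the sampling budget, the iterate() driver) is a stand-in written for illustration, not HotSpot code; only the protocol itself is taken from the patch.

#include <cstddef>
#include <iostream>
#include <vector>

struct HeapRegion { std::size_t rs_occupied; };   // stand-in region

class HeapRegionClosure {
  bool _is_complete = true;
public:
  virtual ~HeapRegionClosure() = default;
  void set_incomplete() { _is_complete = false; }
  // Return true to terminate the iteration early.
  virtual bool do_heap_region(HeapRegion* r) = 0;
  bool is_complete() const { return _is_complete; }
};

// Stops once it has sampled a fixed budget of remembered-set entries.
class SampleClosure : public HeapRegionClosure {
  std::size_t _sampled = 0;
public:
  bool do_heap_region(HeapRegion* r) override {
    _sampled += r->rs_occupied;
    return _sampled > 1000;                       // true == stop early
  }
  std::size_t sampled() const { return _sampled; }
};

// Mirrors the HeapRegionManager::iterate() loop in the hunk above:
// a true return flags the closure as incomplete and ends the walk.
void iterate(std::vector<HeapRegion>& regions, HeapRegionClosure* blk) {
  for (HeapRegion& r : regions) {
    if (blk->do_heap_region(&r)) {
      blk->set_incomplete();
      return;
    }
  }
}

int main() {
  std::vector<HeapRegion> heap(8, HeapRegion{200});
  SampleClosure cl;
  iterate(heap, &cl);
  std::cout << "complete=" << cl.is_complete()    // 0: stopped early
            << " sampled=" << cl.sampled() << "\n";
}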
* * This code is free software; you can redistribute it and/or modify it @@ -103,7 +103,7 @@ if (loc_hr->is_in_reserved(from)) { size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom()); CardIdx_t from_card = (CardIdx_t) - hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize); + hw_offset >> (G1CardTable::card_shift - LogHeapWordSize); assert((size_t)from_card < HeapRegion::CardsPerRegion, "Must be in range."); @@ -170,7 +170,7 @@ bool contains_reference(OopOrNarrowOopStar from) const { assert(hr()->is_in_reserved(from), "Precondition."); size_t card_ind = pointer_delta(from, hr()->bottom(), - CardTableModRefBS::card_size); + G1CardTable::card_size); return _bm.at(card_ind); } @@ -354,7 +354,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) { uint cur_hrm_ind = _hr->hrm_index(); - int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift); + int from_card = (int)(uintptr_t(from) >> G1CardTable::card_shift); if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) { assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from)); @@ -382,7 +382,7 @@ uintptr_t from_hr_bot_card_index = uintptr_t(from_hr->bottom()) - >> CardTableModRefBS::card_shift; + >> G1CardTable::card_shift; CardIdx_t card_index = from_card - from_hr_bot_card_index; assert((size_t)card_index < HeapRegion::CardsPerRegion, "Must be in range."); @@ -671,9 +671,9 @@ } else { uintptr_t from_card = - (uintptr_t(from) >> CardTableModRefBS::card_shift); + (uintptr_t(from) >> G1CardTable::card_shift); uintptr_t hr_bot_card_index = - uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift; + uintptr_t(hr->bottom()) >> G1CardTable::card_shift; assert(from_card >= hr_bot_card_index, "Inv"); CardIdx_t card_index = from_card - hr_bot_card_index; assert((size_t)card_index < HeapRegion::CardsPerRegion, diff --git a/src/hotspot/share/gc/g1/satbMarkQueue.cpp b/src/hotspot/share/gc/g1/satbMarkQueue.cpp --- a/src/hotspot/share/gc/g1/satbMarkQueue.cpp +++ b/src/hotspot/share/gc/g1/satbMarkQueue.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,8 @@ // them with their active field set to false. If a thread is // created during a cycle and its SATB queue needs to be activated // before the thread starts running, we'll need to set its active - // field to true. This is done in JavaThread::initialize_queues(). + // field to true. This is done in G1SATBCardTableLoggingModRefBS:: + // on_thread_attach(). PtrQueue(qset, permanent, false /* active */) { } diff --git a/src/hotspot/share/gc/g1/sparsePRT.cpp b/src/hotspot/share/gc/g1/sparsePRT.cpp --- a/src/hotspot/share/gc/g1/sparsePRT.cpp +++ b/src/hotspot/share/gc/g1/sparsePRT.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,7 @@ // Check that the card array element type can represent all cards in the region. // Choose a large SparsePRTEntry::card_elem_t (e.g. CardIdx_t) if required. 
assert(((size_t)1 << (sizeof(SparsePRTEntry::card_elem_t) * BitsPerByte)) * - G1SATBCardTableModRefBS::card_size >= HeapRegionBounds::max_size(), "precondition"); + G1CardTable::card_size >= HeapRegionBounds::max_size(), "precondition"); assert(G1RSetSparseRegionEntries > 0, "precondition"); _region_ind = region_ind; _next_index = RSHashTable::NullEntry; diff --git a/src/hotspot/share/gc/parallel/asPSYoungGen.cpp b/src/hotspot/share/gc/parallel/asPSYoungGen.cpp --- a/src/hotspot/share/gc/parallel/asPSYoungGen.cpp +++ b/src/hotspot/share/gc/parallel/asPSYoungGen.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -509,7 +509,7 @@ } MemRegion cmr((HeapWord*)virtual_space()->low(), (HeapWord*)virtual_space()->high()); - ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr); + ParallelScavengeHeap::heap()->barrier_set()->card_table()->resize_covered_region(cmr); space_invariants(); } diff --git a/src/hotspot/share/gc/parallel/generationSizer.hpp b/src/hotspot/share/gc/parallel/generationSizer.hpp --- a/src/hotspot/share/gc/parallel/generationSizer.hpp +++ b/src/hotspot/share/gc/parallel/generationSizer.hpp @@ -41,11 +41,5 @@ void initialize_alignments(); void initialize_flags(); void initialize_size_info(); - - public: - // We don't have associated counters and complain if this is invoked. - void initialize_gc_policy_counters() { - ShouldNotReachHere(); - } }; #endif // SHARE_VM_GC_PARALLEL_GENERATIONSIZER_HPP diff --git a/src/hotspot/share/gc/parallel/objectStartArray.cpp b/src/hotspot/share/gc/parallel/objectStartArray.cpp --- a/src/hotspot/share/gc/parallel/objectStartArray.cpp +++ b/src/hotspot/share/gc/parallel/objectStartArray.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ void ObjectStartArray::initialize(MemRegion reserved_region) { // We're based on the assumption that we use the same // size blocks as the card table. - assert((int)block_size == (int)CardTableModRefBS::card_size, "Sanity"); + assert((int)block_size == (int)CardTable::card_size, "Sanity"); assert((int)block_size <= 512, "block_size must be less than or equal to 512"); // Calculate how much space must be reserved diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
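The card_shift conversions in the heapRegionRemSet.cpp and sparsePRT.cpp hunks above are plain shift arithmetic, shown here as a self-contained sketch. The constants mirror HotSpot's usual values (512-byte cards, 8-byte HeapWords), but the region addresses are invented for the example.

#include <cstdint>
#include <cstdio>

static const int card_shift      = 9;   // 512-byte cards: 1 << 9
static const int LogHeapWordSize = 3;   // 8-byte heap words

int main() {
  std::uintptr_t region_bottom = 0x100000;             // made-up region base
  std::uintptr_t from          = region_bottom + 5000; // made-up interior slot

  // Byte-address form, as in OtherRegionsTable::add_reference():
  std::uintptr_t from_card   = from >> card_shift;
  std::uintptr_t bottom_card = region_bottom >> card_shift;
  std::printf("card index, byte form: %ju\n",
              (std::uintmax_t)(from_card - bottom_card));

  // HeapWord-offset form, as in the PerRegionTable hunk: an offset that
  // is already in words only needs card_shift - LogHeapWordSize bits.
  std::uintptr_t hw_offset = (from - region_bottom) >> LogHeapWordSize;
  std::printf("card index, word form: %ju\n",
              (std::uintmax_t)(hw_offset >> (card_shift - LogHeapWordSize)));
}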
* * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,6 @@ #include "code/codeCache.hpp" #include "gc/parallel/adjoiningGenerations.hpp" #include "gc/parallel/adjoiningVirtualSpaces.hpp" -#include "gc/parallel/cardTableExtension.hpp" #include "gc/parallel/gcTaskManager.hpp" #include "gc/parallel/generationSizer.hpp" #include "gc/parallel/objectStartArray.inline.hpp" @@ -57,8 +56,6 @@ GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL; jint ParallelScavengeHeap::initialize() { - CollectedHeap::pre_initialize(); - const size_t heap_size = _collector_policy->max_heap_byte_size(); ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment()); @@ -72,7 +69,9 @@ initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size())); - CardTableExtension* const barrier_set = new CardTableExtension(reserved_region()); + PSCardTable* card_table = new PSCardTable(reserved_region()); + card_table->initialize(); + CardTableModRefBS* const barrier_set = new CardTableModRefBS(card_table); barrier_set->initialize(); set_barrier_set(barrier_set); @@ -333,7 +332,7 @@ // excesses). Fill op.result() with a filler object so that the // heap remains parsable. const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded(); - const bool softrefs_clear = collector_policy()->all_soft_refs_clear(); + const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear(); if (limit_exceeded && softrefs_clear) { *gc_overhead_limit_was_exceeded = true; @@ -490,13 +489,6 @@ CollectedHeap::resize_all_tlabs(); } -bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) { - // We don't need barriers for stores to objects in the - // young gen and, a fortiori, for initializing stores to - // objects therein. - return is_in_young(new_obj); -} - // This method is used by System.gc() and JVMTI. void ParallelScavengeHeap::collect(GCCause::Cause cause) { assert(!Heap_lock->owned_by_self(), @@ -634,6 +626,14 @@ return (ParallelScavengeHeap*)heap; } +CardTableModRefBS* ParallelScavengeHeap::barrier_set() { + return barrier_set_cast(CollectedHeap::barrier_set()); +} + +PSCardTable* ParallelScavengeHeap::card_table() { + return static_cast(barrier_set()->card_table()); +} + // Before delegating the resize to the young generation, // the reserved space for the young and old generations // may be changed to accommodate the desired resize. @@ -719,4 +719,3 @@ memory_pools.append(_old_pool); return memory_pools; } - diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
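The initialize() hunk above changes Parallel GC from subclassing the barrier set (CardTableExtension) to composing it: the heap builds a PSCardTable, hands it to a generic CardTableModRefBS, and later recovers the concrete type through card_table(). A minimal stand-alone sketch of that wiring, with all types reduced to stubs and ownership/initialize() details elided:

#include <cassert>

class CardTable {
public:
  virtual ~CardTable() = default;
};

class PSCardTable : public CardTable {};     // concrete, GC-specific table

class CardTableModRefBS {                    // barrier set wraps the table
  CardTable* const _card_table;
public:
  explicit CardTableModRefBS(CardTable* ct) : _card_table(ct) {}
  CardTable* card_table() const { return _card_table; }
};

class ParallelScavengeHeap {
  CardTableModRefBS* _barrier_set = nullptr;
public:
  void initialize() {
    PSCardTable* ct = new PSCardTable();     // leak tolerated in the sketch
    _barrier_set = new CardTableModRefBS(ct);
  }
  CardTableModRefBS* barrier_set() const { return _barrier_set; }
  // The heap installed a PSCardTable itself, so the downcast is safe,
  // matching the static_cast in ParallelScavengeHeap::card_table() above.
  PSCardTable* card_table() const {
    return static_cast<PSCardTable*>(_barrier_set->card_table());
  }
};

int main() {
  ParallelScavengeHeap heap;
  heap.initialize();
  assert(heap.card_table() != nullptr);
}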
* * This code is free software; you can redistribute it and/or modify it @@ -30,10 +30,12 @@ #include "gc/parallel/psGCAdaptivePolicyCounters.hpp" #include "gc/parallel/psOldGen.hpp" #include "gc/parallel/psYoungGen.hpp" +#include "gc/shared/cardTableModRefBS.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/collectorPolicy.hpp" #include "gc/shared/gcPolicyCounters.hpp" #include "gc/shared/gcWhen.hpp" +#include "gc/shared/softRefPolicy.hpp" #include "gc/shared/strongRootsScope.hpp" #include "memory/metaspace.hpp" #include "utilities/growableArray.hpp" @@ -45,6 +47,7 @@ class MemoryManager; class MemoryPool; class PSAdaptiveSizePolicy; +class PSCardTable; class PSHeapSummary; class ParallelScavengeHeap : public CollectedHeap { @@ -59,6 +62,8 @@ GenerationSizer* _collector_policy; + SoftRefPolicy _soft_ref_policy; + // Collection of generations that are adjacent in the // space reserved for the heap. AdjoiningGenerations* _gens; @@ -106,6 +111,8 @@ virtual CollectorPolicy* collector_policy() const { return _collector_policy; } + virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; } + virtual GrowableArray memory_managers(); virtual GrowableArray memory_pools(); @@ -120,6 +127,9 @@ static GCTaskManager* const gc_task_manager() { return _gc_task_manager; } + CardTableModRefBS* barrier_set(); + PSCardTable* card_table(); + AdjoiningGenerations* gens() { return _gens; } // Returns JNI_OK on success @@ -205,21 +215,6 @@ size_t tlab_used(Thread* thr) const; size_t unsafe_max_tlab_alloc(Thread* thr) const; - // Can a compiler initialize a new object without store barriers? - // This permission only extends from the creation of a new object - // via a TLAB up to the first subsequent safepoint. - virtual bool can_elide_tlab_store_barriers() const { - return true; - } - - virtual bool card_mark_must_follow_store() const { - return false; - } - - // Return true if we don't we need a store barrier for - // initializing stores to an object at this address. - virtual bool can_elide_initializing_store_barrier(oop new_obj); - void object_iterate(ObjectClosure* cl); void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); } diff --git a/src/hotspot/share/gc/parallel/cardTableExtension.cpp b/src/hotspot/share/gc/parallel/psCardTable.cpp rename from src/hotspot/share/gc/parallel/cardTableExtension.cpp rename to src/hotspot/share/gc/parallel/psCardTable.cpp --- a/src/hotspot/share/gc/parallel/cardTableExtension.cpp +++ b/src/hotspot/share/gc/parallel/psCardTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,10 +23,10 @@ */ #include "precompiled.hpp" -#include "gc/parallel/cardTableExtension.hpp" #include "gc/parallel/gcTaskManager.hpp" #include "gc/parallel/objectStartArray.inline.hpp" -#include "gc/parallel/parallelScavengeHeap.hpp" +#include "gc/parallel/parallelScavengeHeap.inline.hpp" +#include "gc/parallel/psCardTable.hpp" #include "gc/parallel/psPromotionManager.inline.hpp" #include "gc/parallel/psScavenge.hpp" #include "gc/parallel/psTasks.hpp" @@ -39,9 +39,9 @@ // may be either dirty or newgen. 
class CheckForUnmarkedOops : public OopClosure { private: - PSYoungGen* _young_gen; - CardTableExtension* _card_table; - HeapWord* _unmarked_addr; + PSYoungGen* _young_gen; + PSCardTable* _card_table; + HeapWord* _unmarked_addr; protected: template void do_oop_work(T* p) { @@ -56,7 +56,7 @@ } public: - CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) : + CheckForUnmarkedOops(PSYoungGen* young_gen, PSCardTable* card_table) : _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { } virtual void do_oop(oop* p) { CheckForUnmarkedOops::do_oop_work(p); } @@ -71,16 +71,14 @@ // precise or imprecise, dirty or newgen. class CheckForUnmarkedObjects : public ObjectClosure { private: - PSYoungGen* _young_gen; - CardTableExtension* _card_table; + PSYoungGen* _young_gen; + PSCardTable* _card_table; public: CheckForUnmarkedObjects() { ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); _young_gen = heap->young_gen(); - _card_table = barrier_set_cast(heap->barrier_set()); - // No point in asserting barrier set type here. Need to make CardTableExtension - // a unique barrier set type. + _card_table = heap->card_table(); } // Card marks are not precise. The current system can leave us with @@ -99,8 +97,8 @@ // Checks for precise marking of oops as newgen. class CheckForPreciseMarks : public OopClosure { private: - PSYoungGen* _young_gen; - CardTableExtension* _card_table; + PSYoungGen* _young_gen; + PSCardTable* _card_table; protected: template void do_oop_work(T* p) { @@ -112,7 +110,7 @@ } public: - CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) : + CheckForPreciseMarks(PSYoungGen* young_gen, PSCardTable* card_table) : _young_gen(young_gen), _card_table(card_table) { } virtual void do_oop(oop* p) { CheckForPreciseMarks::do_oop_work(p); } @@ -128,12 +126,12 @@ // when the space is empty, fix the calculation of // end_card to allow sp_top == sp->bottom(). -void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array, - MutableSpace* sp, - HeapWord* space_top, - PSPromotionManager* pm, - uint stripe_number, - uint stripe_total) { +void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array, + MutableSpace* sp, + HeapWord* space_top, + PSPromotionManager* pm, + uint stripe_number, + uint stripe_total) { int ssize = 128; // Naked constant! Work unit = 64k. int dirty_card_count = 0; @@ -320,7 +318,7 @@ } // This should be called before a scavenge. -void CardTableExtension::verify_all_young_refs_imprecise() { +void PSCardTable::verify_all_young_refs_imprecise() { CheckForUnmarkedObjects check; ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); @@ -330,26 +328,21 @@ } // This should be called immediately after a scavenge, before mutators resume. 
-void CardTableExtension::verify_all_young_refs_precise() { +void PSCardTable::verify_all_young_refs_precise() { ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); PSOldGen* old_gen = heap->old_gen(); - CheckForPreciseMarks check( - heap->young_gen(), - barrier_set_cast(heap->barrier_set())); + CheckForPreciseMarks check(heap->young_gen(), this); old_gen->oop_iterate_no_header(&check); verify_all_young_refs_precise_helper(old_gen->object_space()->used_region()); } -void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) { - CardTableExtension* card_table = - barrier_set_cast(ParallelScavengeHeap::heap()->barrier_set()); - - jbyte* bot = card_table->byte_for(mr.start()); - jbyte* top = card_table->byte_for(mr.end()); - while(bot <= top) { +void PSCardTable::verify_all_young_refs_precise_helper(MemRegion mr) { + jbyte* bot = byte_for(mr.start()); + jbyte* top = byte_for(mr.end()); + while (bot <= top) { assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark"); if (*bot == verify_card) *bot = youngergen_card; @@ -357,7 +350,7 @@ } } -bool CardTableExtension::addr_is_marked_imprecise(void *addr) { +bool PSCardTable::addr_is_marked_imprecise(void *addr) { jbyte* p = byte_for(addr); jbyte val = *p; @@ -376,7 +369,7 @@ } // Also includes verify_card -bool CardTableExtension::addr_is_marked_precise(void *addr) { +bool PSCardTable::addr_is_marked_precise(void *addr) { jbyte* p = byte_for(addr); jbyte val = *p; @@ -404,8 +397,7 @@ // The method resize_covered_region_by_end() is analogous to // CardTableModRefBS::resize_covered_region() but // for regions that grow or shrink at the low end. -void CardTableExtension::resize_covered_region(MemRegion new_region) { - +void PSCardTable::resize_covered_region(MemRegion new_region) { for (int i = 0; i < _cur_covered_regions; i++) { if (_covered[i].start() == new_region.start()) { // Found a covered region with the same start as the @@ -439,13 +431,13 @@ resize_covered_region_by_start(new_region); } -void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) { - CardTableModRefBS::resize_covered_region(new_region); +void PSCardTable::resize_covered_region_by_start(MemRegion new_region) { + CardTable::resize_covered_region(new_region); debug_only(verify_guard();) } -void CardTableExtension::resize_covered_region_by_end(int changed_region, - MemRegion new_region) { +void PSCardTable::resize_covered_region_by_end(int changed_region, + MemRegion new_region) { assert(SafepointSynchronize::is_at_safepoint(), "Only expect an expansion at the low end at a GC"); debug_only(verify_guard();) @@ -484,8 +476,8 @@ debug_only(verify_guard();) } -bool CardTableExtension::resize_commit_uncommit(int changed_region, - MemRegion new_region) { +bool PSCardTable::resize_commit_uncommit(int changed_region, + MemRegion new_region) { bool result = false; // Commit new or uncommit old pages, if necessary. 
MemRegion cur_committed = _committed[changed_region]; @@ -506,13 +498,12 @@ #ifdef ASSERT ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); assert(cur_committed.start() == align_up(cur_committed.start(), os::vm_page_size()), - "Starts should have proper alignment"); + "Starts should have proper alignment"); #endif jbyte* new_start = byte_for(new_region.start()); // Round down because this is for the start address - HeapWord* new_start_aligned = - (HeapWord*)align_down((uintptr_t)new_start, os::vm_page_size()); + HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size()); // The guard page is always committed and should not be committed over. // This method is used in cases where the generation is growing toward // lower addresses but the guard region is still at the end of the @@ -579,21 +570,20 @@ return result; } -void CardTableExtension::resize_update_committed_table(int changed_region, - MemRegion new_region) { +void PSCardTable::resize_update_committed_table(int changed_region, + MemRegion new_region) { jbyte* new_start = byte_for(new_region.start()); // Set the new start of the committed region - HeapWord* new_start_aligned = - (HeapWord*)align_down(new_start, os::vm_page_size()); + HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size()); MemRegion new_committed = MemRegion(new_start_aligned, - _committed[changed_region].end()); + _committed[changed_region].end()); _committed[changed_region] = new_committed; _committed[changed_region].set_start(new_start_aligned); } -void CardTableExtension::resize_update_card_table_entries(int changed_region, - MemRegion new_region) { +void PSCardTable::resize_update_card_table_entries(int changed_region, + MemRegion new_region) { debug_only(verify_guard();) MemRegion original_covered = _covered[changed_region]; // Initialize the card entries. Only consider the @@ -610,8 +600,8 @@ while (entry < end) { *entry++ = clean_card; } } -void CardTableExtension::resize_update_covered_table(int changed_region, - MemRegion new_region) { +void PSCardTable::resize_update_covered_table(int changed_region, + MemRegion new_region) { // Update the covered region _covered[changed_region].set_start(new_region.start()); _covered[changed_region].set_word_size(new_region.word_size()); @@ -665,7 +655,7 @@ // ------------- // ^ returns this -HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const { +HeapWord* PSCardTable::lowest_prev_committed_start(int ind) const { assert(_cur_covered_regions >= 0, "Expecting at least on region"); HeapWord* min_start = _committed[ind].start(); for (int j = 0; j < ind; j++) { @@ -677,3 +667,7 @@ } return min_start; } + +bool PSCardTable::is_in_young(oop obj) const { + return ParallelScavengeHeap::heap()->is_in_young(obj); +} diff --git a/src/hotspot/share/gc/parallel/cardTableExtension.hpp b/src/hotspot/share/gc/parallel/psCardTable.hpp rename from src/hotspot/share/gc/parallel/cardTableExtension.hpp rename to src/hotspot/share/gc/parallel/psCardTable.hpp --- a/src/hotspot/share/gc/parallel/cardTableExtension.hpp +++ b/src/hotspot/share/gc/parallel/psCardTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
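One reading of the is_in_young() hook just added at the end of psCardTable.cpp (and of the "ReduceInitialCardMarks support" comment in the header below): with ReduceInitialCardMarks, compiled code elides card marks for initializing stores, so a slow-path allocation only needs its cards dirtied when the object did not land in the young gen. This sketch is an interpretation with stand-in types, not the HotSpot code path:

#include <iostream>

struct Obj {};

struct CardTableStub {
  bool young;                                   // pretend heap layout
  bool is_in_young(Obj*) const { return young; }
  void dirty_cards_for(Obj*)   { std::cout << "card marked\n"; }
};

void on_slowpath_allocation_exit(CardTableStub& ct, Obj* new_obj) {
  // Young objects need no card marks for their initializing stores;
  // anything else must be covered conservatively.
  if (!ct.is_in_young(new_obj)) {
    ct.dirty_cards_for(new_obj);
  }
}

int main() {
  Obj o;
  CardTableStub young_ct{true};
  CardTableStub old_ct{false};
  on_slowpath_allocation_exit(young_ct, &o);    // silent
  on_slowpath_allocation_exit(old_ct, &o);      // prints "card marked"
}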
* * This code is free software; you can redistribute it and/or modify it @@ -22,17 +22,18 @@ * */ -#ifndef SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP -#define SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP +#ifndef SHARE_VM_GC_PARALLEL_PSCARDTABLE_HPP +#define SHARE_VM_GC_PARALLEL_PSCARDTABLE_HPP -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTable.hpp" +#include "oops/oop.hpp" class MutableSpace; class ObjectStartArray; class PSPromotionManager; class GCTaskQueue; -class CardTableExtension : public CardTableModRefBS { +class PSCardTable: public CardTable { private: // Support methods for resizing the card table. // resize_commit_uncommit() returns true if the pages were committed or @@ -43,21 +44,18 @@ void resize_update_committed_table(int changed_region, MemRegion new_region); void resize_update_covered_table(int changed_region, MemRegion new_region); - protected: + void verify_all_young_refs_precise_helper(MemRegion mr); - static void verify_all_young_refs_precise_helper(MemRegion mr); + enum ExtendedCardValue { + youngergen_card = CT_MR_BS_last_reserved + 1, + verify_card = CT_MR_BS_last_reserved + 5 + }; public: - enum ExtendedCardValue { - youngergen_card = CardTableModRefBS::CT_MR_BS_last_reserved + 1, - verify_card = CardTableModRefBS::CT_MR_BS_last_reserved + 5 - }; + PSCardTable(MemRegion whole_heap) : CardTable(whole_heap, /* scanned_concurrently */ false) {} - CardTableExtension(MemRegion whole_heap) : - CardTableModRefBS( - whole_heap, - BarrierSet::FakeRtti(BarrierSet::CardTableExtension)) - { } + static jbyte youngergen_card_val() { return youngergen_card; } + static jbyte verify_card_val() { return verify_card; } // Scavenge support void scavenge_contents_parallel(ObjectStartArray* start_array, @@ -67,10 +65,6 @@ uint stripe_number, uint stripe_total); - // Verification - static void verify_all_young_refs_imprecise(); - static void verify_all_young_refs_precise(); - bool addr_is_marked_imprecise(void *addr); bool addr_is_marked_precise(void *addr); @@ -88,6 +82,9 @@ *byte = youngergen_card; } + // ReduceInitialCardMarks support + bool is_in_young(oop obj) const; + // Adaptive size policy support // Allows adjustment of the base and size of the covered regions void resize_covered_region(MemRegion new_region); @@ -102,22 +99,14 @@ HeapWord* lowest_prev_committed_start(int ind) const; #ifdef ASSERT - bool is_valid_card_address(jbyte* addr) { return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size); } +#endif // ASSERT -#endif // ASSERT + // Verification + void verify_all_young_refs_imprecise(); + void verify_all_young_refs_precise(); }; -template<> -struct BarrierSet::GetName { - static const BarrierSet::Name value = BarrierSet::CardTableExtension; -}; - -template<> -struct BarrierSet::GetType { - typedef ::CardTableExtension type; -}; - -#endif // SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP +#endif // SHARE_VM_GC_PARALLEL_PSCARDTABLE diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.cpp b/src/hotspot/share/gc/parallel/psCompactionManager.cpp --- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp +++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
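PSCardTable's ExtendedCardValue enum above layers GC-specific marks on top of the values every card table shares, by reserving numbers past CT_MR_BS_last_reserved. The bases below are assumed for illustration; only the reservation pattern and the clean/verify check are taken from the patch:

#include <cstdio>

typedef signed char jbyte;

enum SharedCardValue : jbyte {
  clean_card = -1,
  dirty_card = 0,
  CT_MR_BS_last_reserved = 2    // assumed base for GC-specific values
};

enum ExtendedCardValue : jbyte {
  youngergen_card = CT_MR_BS_last_reserved + 1,
  verify_card     = CT_MR_BS_last_reserved + 5
};

int main() {
  jbyte cards[4] = { clean_card, youngergen_card, verify_card, dirty_card };
  for (jbyte v : cards) {
    // Mirrors verify_all_young_refs_precise_helper() above: after
    // verification only clean and verify marks are acceptable.
    std::printf("card=%d acceptable=%d\n",
                v, v == clean_card || v == verify_card);
  }
}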
* * This code is free software; you can redistribute it and/or modify it @@ -181,7 +181,7 @@ template static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) { - T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); + T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj); T heap_oop = oopDesc::load_heap_oop(referent_addr); log_develop_trace(gc, ref)("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj)); if (!oopDesc::is_null(heap_oop)) { @@ -198,12 +198,12 @@ cm->mark_and_push(referent_addr); } } - T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); + T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj); // Treat discovered as normal oop, if ref is not "active", // i.e. if next is non-NULL. T next_oop = oopDesc::load_heap_oop(next_addr); if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active" - T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); + T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj); log_develop_trace(gc, ref)(" Process discovered as normal " PTR_FORMAT, p2i(discovered_addr)); cm->mark_and_push(discovered_addr); } diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp --- a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp +++ b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp @@ -29,7 +29,8 @@ #include "gc/parallel/psCompactionManager.hpp" #include "gc/parallel/psParallelCompact.inline.hpp" #include "gc/shared/taskqueue.inline.hpp" -#include "oops/objArrayOop.hpp" +#include "oops/arrayOop.inline.hpp" +#include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -117,7 +118,7 @@ const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride); const size_t end_index = beg_index + stride; - T* const base = (T*)obj->base(); + T* const base = (T*)obj->base_raw(); T* const beg = base + beg_index; T* const end = base + end_index; diff --git a/src/hotspot/share/gc/parallel/psMarkSweep.cpp b/src/hotspot/share/gc/parallel/psMarkSweep.cpp --- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp +++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -98,7 +98,7 @@ } const bool clear_all_soft_refs = - heap->collector_policy()->should_clear_all_soft_refs(); + heap->soft_ref_policy()->should_clear_all_soft_refs(); uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount; UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count); @@ -126,7 +126,7 @@ // The scope of casr should end after code that can change // CollectorPolicy::_should_clear_all_soft_refs. 
- ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy()); + ClearedAllSoftRefs casr(clear_all_softrefs, heap->soft_ref_policy()); PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); @@ -236,12 +236,12 @@ young_gen->to_space()->is_empty(); young_gen_empty = eden_empty && survivors_empty; - ModRefBarrierSet* modBS = barrier_set_cast(heap->barrier_set()); + PSCardTable* card_table = heap->card_table(); MemRegion old_mr = heap->old_gen()->reserved(); if (young_gen_empty) { - modBS->clear(MemRegion(old_mr.start(), old_mr.end())); + card_table->clear(MemRegion(old_mr.start(), old_mr.end())); } else { - modBS->invalidate(MemRegion(old_mr.start(), old_mr.end())); + card_table->invalidate(MemRegion(old_mr.start(), old_mr.end())); } // Delete metaspaces for unloaded class loaders and clean up loader_data graph @@ -320,7 +320,7 @@ max_eden_size, true /* full gc*/, gc_cause, - heap->collector_policy()); + heap->soft_ref_policy()); size_policy->decay_supplemental_growth(true /* full gc*/); diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp --- a/src/hotspot/share/gc/parallel/psOldGen.cpp +++ b/src/hotspot/share/gc/parallel/psOldGen.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "gc/parallel/objectStartArray.inline.hpp" #include "gc/parallel/parallelScavengeHeap.hpp" #include "gc/parallel/psAdaptiveSizePolicy.hpp" +#include "gc/parallel/psCardTable.hpp" #include "gc/parallel/psMarkSweepDecorator.hpp" #include "gc/parallel/psOldGen.hpp" #include "gc/shared/cardTableModRefBS.hpp" @@ -111,11 +112,8 @@ } ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); - BarrierSet* bs = heap->barrier_set(); - - bs->resize_covered_region(cmr); - - CardTableModRefBS* ct = barrier_set_cast(bs); + PSCardTable* ct = heap->card_table(); + ct->resize_covered_region(cmr); // Verify that the start and end of this generation is the start of a card. // If this wasn't true, a single card could span more than one generation, @@ -386,7 +384,7 @@ size_t new_word_size = new_memregion.word_size(); start_array()->set_covered_region(new_memregion); - ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion); + ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion); // ALWAYS do this last!! object_space()->initialize(new_memregion, diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
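The young_gen_empty branch in the psMarkSweep.cpp hunk above encodes a simple invariant: old->young cards only matter while the young gen holds objects, so an empty young gen lets the collector wipe the old gen's cards outright, while a non-empty one forces every old card to be treated as dirty. A self-contained sketch of that decision, with made-up card values:

#include <cstddef>
#include <cstdio>
#include <vector>

typedef signed char jbyte;
static const jbyte clean_card = -1;
static const jbyte dirty_card = 0;

struct CardTableStub {
  std::vector<jbyte> cards;
  explicit CardTableStub(std::size_t n) : cards(n, dirty_card) {}
  void clear()      { cards.assign(cards.size(), clean_card); }
  void invalidate() { cards.assign(cards.size(), dirty_card); }
};

int main() {
  CardTableStub old_gen_cards(8);
  bool young_gen_empty = true;        // pretend eden and survivors drained
  if (young_gen_empty) {
    old_gen_cards.clear();            // no old->young pointers can exist
  } else {
    old_gen_cards.invalidate();       // rescan everything next scavenge
  }
  std::printf("first card: %d\n", old_gen_cards.cards[0]);  // -1 (clean)
}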
* * This code is free software; you can redistribute it and/or modify it @@ -1017,12 +1017,12 @@ bool young_gen_empty = eden_empty && from_space->is_empty() && to_space->is_empty(); - ModRefBarrierSet* modBS = barrier_set_cast(heap->barrier_set()); + PSCardTable* ct = heap->card_table(); MemRegion old_mr = heap->old_gen()->reserved(); if (young_gen_empty) { - modBS->clear(MemRegion(old_mr.start(), old_mr.end())); + ct->clear(MemRegion(old_mr.start(), old_mr.end())); } else { - modBS->invalidate(MemRegion(old_mr.start(), old_mr.end())); + ct->invalidate(MemRegion(old_mr.start(), old_mr.end())); } // Delete metaspaces for unloaded class loaders and clean up loader_data graph @@ -1707,7 +1707,7 @@ } const bool clear_all_soft_refs = - heap->collector_policy()->should_clear_all_soft_refs(); + heap->soft_ref_policy()->should_clear_all_soft_refs(); PSParallelCompact::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction); @@ -1741,7 +1741,7 @@ // The scope of casr should end after code that can change // CollectorPolicy::_should_clear_all_soft_refs. ClearedAllSoftRefs casr(maximum_heap_compaction, - heap->collector_policy()); + heap->soft_ref_policy()); if (ZapUnusedHeapArea) { // Save information needed to minimize mangling @@ -1869,7 +1869,7 @@ max_eden_size, true /* full gc*/, gc_cause, - heap->collector_policy()); + heap->soft_ref_policy()); size_policy->decay_supplemental_growth(true /* full gc*/); @@ -3087,11 +3087,11 @@ template static void oop_pc_update_pointers_specialized(oop obj, ParCompactionManager* cm) { - T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); + T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj); PSParallelCompact::adjust_pointer(referent_addr, cm); - T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); + T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj); PSParallelCompact::adjust_pointer(next_addr, cm); - T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); + T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj); PSParallelCompact::adjust_pointer(discovered_addr, cm); debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj, referent_addr, next_addr, discovered_addr);) diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.hpp b/src/hotspot/share/gc/parallel/psParallelCompact.hpp --- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
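oop_pc_update_pointers_specialized() above walks the three hidden reference-object fields (referent, next, discovered) and adjusts each like an ordinary pointer slot; the *_addr_raw() accessors it now uses return raw in-object field addresses. A stand-in sketch of that shape, with invented types and relocation rule:

#include <cstdio>

struct FakeOop { int id; };

struct FakeReference {
  FakeOop* referent;
  FakeOop* next;
  FakeOop* discovered;
  FakeOop** referent_addr_raw()   { return &referent; }
  FakeOop** next_addr_raw()       { return &next; }
  FakeOop** discovered_addr_raw() { return &discovered; }
};

static FakeOop relocated{42};

// Stand-in for PSParallelCompact::adjust_pointer(): point the slot at
// the object's post-compaction location (here, one fixed target).
void adjust_pointer(FakeOop** slot) {
  if (*slot != nullptr) {
    *slot = &relocated;
  }
}

int main() {
  FakeOop a{1};
  FakeReference ref{&a, &a, nullptr};
  adjust_pointer(ref.referent_addr_raw());
  adjust_pointer(ref.next_addr_raw());
  adjust_pointer(ref.discovered_addr_raw());         // nullptr: untouched
  std::printf("referent id=%d\n", ref.referent->id); // 42
}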
* * This code is free software; you can redistribute it and/or modify it @@ -538,7 +538,7 @@ { assert(_dc_and_los < dc_claimed, "already claimed"); assert(_dc_and_los >= dc_one, "count would go negative"); - Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los); + Atomic::add(dc_mask, &_dc_and_los); } inline HeapWord* ParallelCompactData::RegionData::data_location() const @@ -578,7 +578,7 @@ inline void ParallelCompactData::RegionData::add_live_obj(size_t words) { assert(words <= (size_t)los_mask - live_obj_size(), "overflow"); - Atomic::add((int) words, (volatile int*) &_dc_and_los); + Atomic::add(static_cast(words), &_dc_and_los); } inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr) diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.cpp b/src/hotspot/share/gc/parallel/psPromotionManager.cpp --- a/src/hotspot/share/gc/parallel/psPromotionManager.cpp +++ b/src/hotspot/share/gc/parallel/psPromotionManager.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,9 +38,11 @@ #include "memory/memRegion.hpp" #include "memory/padded.inline.hpp" #include "memory/resourceArea.hpp" +#include "oops/arrayOop.inline.hpp" #include "oops/instanceKlass.inline.hpp" #include "oops/instanceMirrorKlass.inline.hpp" #include "oops/objArrayKlass.inline.hpp" +#include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" PaddedEnd* PSPromotionManager::_manager_array = NULL; @@ -434,7 +436,7 @@ template static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, PSPromotionManager* pm) { - T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); + T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj); if (PSScavenge::should_scavenge(referent_addr)) { ReferenceProcessor* rp = PSScavenge::reference_processor(); if (rp->discover_reference(obj, klass->reference_type())) { @@ -448,10 +450,10 @@ } // Treat discovered as normal oop, if ref is not "active", // i.e. if next is non-NULL. - T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); + T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj); T next_oop = oopDesc::load_heap_oop(next_addr); if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active" - T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); + T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj); log_develop_trace(gc, ref)(" Process discovered as normal " PTR_FORMAT, p2i(discovered_addr)); if (PSScavenge::should_scavenge(discovered_addr)) { pm->claim_or_forward_depth(discovered_addr); diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp --- a/src/hotspot/share/gc/parallel/psScavenge.cpp +++ b/src/hotspot/share/gc/parallel/psScavenge.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,6 @@ #include "precompiled.hpp" #include "classfile/stringTable.hpp" #include "code/codeCache.hpp" -#include "gc/parallel/cardTableExtension.hpp" #include "gc/parallel/gcTaskManager.hpp" #include "gc/parallel/parallelScavengeHeap.hpp" #include "gc/parallel/psAdaptiveSizePolicy.hpp" @@ -60,7 +59,7 @@ HeapWord* PSScavenge::_to_space_top_before_gc = NULL; int PSScavenge::_consecutive_skipped_scavenges = 0; ReferenceProcessor* PSScavenge::_ref_processor = NULL; -CardTableExtension* PSScavenge::_card_table = NULL; +PSCardTable* PSScavenge::_card_table = NULL; bool PSScavenge::_survivor_overflow = false; uint PSScavenge::_tenuring_threshold = 0; HeapWord* PSScavenge::_young_generation_boundary = NULL; @@ -228,8 +227,8 @@ if (need_full_gc) { GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy); - CollectorPolicy* cp = heap->collector_policy(); - const bool clear_all_softrefs = cp->should_clear_all_soft_refs(); + SoftRefPolicy* srp = heap->soft_ref_policy(); + const bool clear_all_softrefs = srp->should_clear_all_soft_refs(); if (UseParallelOldGC) { full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs); @@ -322,7 +321,7 @@ // Verify no unmarked old->young roots if (VerifyRememberedSets) { - CardTableExtension::verify_all_young_refs_imprecise(); + heap->card_table()->verify_all_young_refs_imprecise(); } assert(young_gen->to_space()->is_empty(), @@ -569,7 +568,7 @@ max_eden_size, false /* not full gc*/, gc_cause, - heap->collector_policy()); + heap->soft_ref_policy()); size_policy->decay_supplemental_growth(false /* not full gc*/); } @@ -617,8 +616,8 @@ if (VerifyRememberedSets) { // Precise verification will give false positives. Until this is fixed, // use imprecise verification. - // CardTableExtension::verify_all_young_refs_precise(); - CardTableExtension::verify_all_young_refs_imprecise(); + // heap->card_table()->verify_all_young_refs_precise(); + heap->card_table()->verify_all_young_refs_imprecise(); } if (log_is_enabled(Debug, gc, heap, exit)) { @@ -778,7 +777,7 @@ NULL); // header provides liveness info // Cache the cardtable - _card_table = barrier_set_cast(heap->barrier_set()); + _card_table = heap->card_table(); _counters = new CollectorCounters("PSScavenge", 0); } diff --git a/src/hotspot/share/gc/parallel/psScavenge.hpp b/src/hotspot/share/gc/parallel/psScavenge.hpp --- a/src/hotspot/share/gc/parallel/psScavenge.hpp +++ b/src/hotspot/share/gc/parallel/psScavenge.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_VM_GC_PARALLEL_PSSCAVENGE_HPP #define SHARE_VM_GC_PARALLEL_PSSCAVENGE_HPP -#include "gc/parallel/cardTableExtension.hpp" +#include "gc/parallel/psCardTable.hpp" #include "gc/parallel/psVirtualspace.hpp" #include "gc/shared/collectorCounters.hpp" #include "gc/shared/gcTrace.hpp" @@ -67,7 +67,7 @@ // Flags/counters static ReferenceProcessor* _ref_processor; // Reference processor for scavenging. static PSIsAliveClosure _is_alive_closure; // Closure used for reference processing - static CardTableExtension* _card_table; // We cache the card table for fast access. + static PSCardTable* _card_table; // We cache the card table for fast access. 
static bool _survivor_overflow; // Overflow this collection static uint _tenuring_threshold; // tenuring threshold for next scavenge static elapsedTimer _accumulated_time; // total time spent on scavenge @@ -89,7 +89,7 @@ static inline void save_to_space_top_before_gc(); // Private accessors - static CardTableExtension* const card_table() { assert(_card_table != NULL, "Sanity"); return _card_table; } + static PSCardTable* const card_table() { assert(_card_table != NULL, "Sanity"); return _card_table; } static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; } public: diff --git a/src/hotspot/share/gc/parallel/psScavenge.inline.hpp b/src/hotspot/share/gc/parallel/psScavenge.inline.hpp --- a/src/hotspot/share/gc/parallel/psScavenge.inline.hpp +++ b/src/hotspot/share/gc/parallel/psScavenge.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,6 @@ #ifndef SHARE_VM_GC_PARALLEL_PSSCAVENGE_INLINE_HPP #define SHARE_VM_GC_PARALLEL_PSSCAVENGE_INLINE_HPP -#include "gc/parallel/cardTableExtension.hpp" #include "gc/parallel/parallelScavengeHeap.hpp" #include "gc/parallel/psPromotionManager.inline.hpp" #include "gc/parallel/psScavenge.hpp" diff --git a/src/hotspot/share/gc/parallel/psTasks.cpp b/src/hotspot/share/gc/parallel/psTasks.cpp --- a/src/hotspot/share/gc/parallel/psTasks.cpp +++ b/src/hotspot/share/gc/parallel/psTasks.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,9 +26,9 @@ #include "aot/aotLoader.hpp" #include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" -#include "gc/parallel/cardTableExtension.hpp" #include "gc/parallel/gcTaskManager.hpp" #include "gc/parallel/psMarkSweep.hpp" +#include "gc/parallel/psCardTable.hpp" #include "gc/parallel/psPromotionManager.hpp" #include "gc/parallel/psPromotionManager.inline.hpp" #include "gc/parallel/psScavenge.inline.hpp" @@ -176,8 +176,7 @@ { PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which); - CardTableExtension* card_table = - barrier_set_cast(ParallelScavengeHeap::heap()->barrier_set()); + PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table(); card_table->scavenge_contents_parallel(_old_gen->start_array(), _old_gen->object_space(), diff --git a/src/hotspot/share/gc/parallel/psTasks.hpp b/src/hotspot/share/gc/parallel/psTasks.hpp --- a/src/hotspot/share/gc/parallel/psTasks.hpp +++ b/src/hotspot/share/gc/parallel/psTasks.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -148,7 +148,7 @@ // will be covered. In this example if 4 tasks have been created to cover // all the stripes and there are only 3 threads, one of the threads will // get the tasks with the 4th stripe. 
However, there is a dependence in -// CardTableExtension::scavenge_contents_parallel() on the number +// PSCardTable::scavenge_contents_parallel() on the number // of tasks created. In scavenge_contents_parallel the distance // to the next stripe is calculated based on the number of tasks. // If the stripe width is ssize, a task's next stripe is at diff --git a/src/hotspot/share/gc/parallel/psYoungGen.cpp b/src/hotspot/share/gc/parallel/psYoungGen.cpp --- a/src/hotspot/share/gc/parallel/psYoungGen.cpp +++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,7 +64,7 @@ MemRegion cmr((HeapWord*)virtual_space()->low(), (HeapWord*)virtual_space()->high()); - ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr); + ParallelScavengeHeap::heap()->card_table()->resize_covered_region(cmr); if (ZapUnusedHeapArea) { // Mangle newly committed space immediately because it @@ -870,7 +870,7 @@ MemRegion cmr((HeapWord*)virtual_space()->low(), (HeapWord*)virtual_space()->high()); - ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr); + ParallelScavengeHeap::heap()->card_table()->resize_covered_region(cmr); space_invariants(); } diff --git a/src/hotspot/share/gc/serial/defNewGeneration.cpp b/src/hotspot/share/gc/serial/defNewGeneration.cpp --- a/src/hotspot/share/gc/serial/defNewGeneration.cpp +++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "gc/serial/defNewGeneration.inline.hpp" +#include "gc/shared/adaptiveSizePolicy.hpp" #include "gc/shared/ageTable.inline.hpp" #include "gc/shared/cardTableRS.hpp" #include "gc/shared/collectorCounters.hpp" @@ -188,7 +189,7 @@ (HeapWord*)_virtual_space.high()); GenCollectedHeap* gch = GenCollectedHeap::heap(); - gch->barrier_set()->resize_covered_region(cmr); + gch->rem_set()->resize_covered_region(cmr); _eden_space = new ContiguousSpace(); _from_space = new ContiguousSpace(); @@ -453,7 +454,7 @@ SpaceDecorator::DontMangle); MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high()); - gch->barrier_set()->resize_covered_region(cmr); + gch->rem_set()->resize_covered_region(cmr); log_debug(gc, ergo, heap)( "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]", @@ -564,7 +565,7 @@ _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size); if (UsePerfData) { - GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->gen_policy()->counters(); + GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->counters(); gc_counters->tenuring_threshold()->set_value(_tenuring_threshold); gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize); } @@ -616,9 +617,6 @@ assert(gch->no_allocs_since_save_marks(), "save marks have not been newly set."); - // Not very pretty. 
- CollectorPolicy* cp = gch->collector_policy(); - FastScanClosure fsc_with_no_gc_barrier(this, false); FastScanClosure fsc_with_gc_barrier(this, true); @@ -636,7 +634,7 @@ { // DefNew needs to run with n_threads == 0, to make sure the serial // version of the card table scanning code is used. - // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel. + // See: CardTableRS::non_clean_card_iterate_possibly_parallel. StrongRootsScope srs(0); gch->young_process_roots(&srs, @@ -688,7 +686,7 @@ // A successful scavenge should restart the GC time limit count which is // for full GC's. - AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); + AdaptiveSizePolicy* size_policy = gch->size_policy(); size_policy->reset_gc_overhead_limit_count(); assert(!gch->incremental_collection_failed(), "Should be clear"); } else { @@ -953,7 +951,7 @@ // update the generation and space performance counters update_counters(); - gch->gen_policy()->counters()->update_counters(); + gch->counters()->update_counters(); } void DefNewGeneration::record_spaces_top() { diff --git a/src/hotspot/share/gc/serial/genMarkSweep.cpp b/src/hotspot/share/gc/serial/genMarkSweep.cpp --- a/src/hotspot/share/gc/serial/genMarkSweep.cpp +++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp @@ -60,7 +60,7 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); #ifdef ASSERT - if (gch->collector_policy()->should_clear_all_soft_refs()) { + if (gch->soft_ref_policy()->should_clear_all_soft_refs()) { assert(clear_all_softrefs, "Policy should have been checked earlier"); } #endif diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp b/src/hotspot/share/gc/serial/serialHeap.cpp --- a/src/hotspot/share/gc/serial/serialHeap.cpp +++ b/src/hotspot/share/gc/serial/serialHeap.cpp @@ -29,7 +29,13 @@ #include "services/memoryManager.hpp" SerialHeap::SerialHeap(GenCollectorPolicy* policy) : - GenCollectedHeap(policy), _eden_pool(NULL), _survivor_pool(NULL), _old_pool(NULL) { + GenCollectedHeap(policy, + Generation::DefNew, + Generation::MarkSweepCompact, + "Copy:MSC"), + _eden_pool(NULL), + _survivor_pool(NULL), + _old_pool(NULL) { _young_manager = new GCMemoryManager("Copy", "end of minor GC"); _old_manager = new GCMemoryManager("MarkSweepCompact", "end of major GC"); } diff --git a/src/hotspot/share/gc/serial/serialHeap.hpp b/src/hotspot/share/gc/serial/serialHeap.hpp --- a/src/hotspot/share/gc/serial/serialHeap.hpp +++ b/src/hotspot/share/gc/serial/serialHeap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -61,10 +61,6 @@ virtual bool is_in_closed_subset(const void* p) const { return is_in(p); } - - virtual bool card_mark_must_follow_store() const { - return false; - } }; #endif // SHARE_VM_GC_CMS_CMSHEAP_HPP diff --git a/src/hotspot/share/gc/shared/adaptiveSizePolicy.cpp b/src/hotspot/share/gc/shared/adaptiveSizePolicy.cpp --- a/src/hotspot/share/gc/shared/adaptiveSizePolicy.cpp +++ b/src/hotspot/share/gc/shared/adaptiveSizePolicy.cpp @@ -27,10 +27,12 @@ #include "gc/shared/collectorPolicy.hpp" #include "gc/shared/gcCause.hpp" #include "gc/shared/gcUtil.inline.hpp" +#include "gc/shared/softRefPolicy.hpp" #include "gc/shared/workgroup.hpp" #include "logging/log.hpp" #include "runtime/timer.hpp" #include "utilities/ostream.hpp" + elapsedTimer AdaptiveSizePolicy::_minor_timer; elapsedTimer AdaptiveSizePolicy::_major_timer; bool AdaptiveSizePolicy::_debug_perturbation = false; @@ -409,7 +411,7 @@ size_t max_eden_size, bool is_full_gc, GCCause::Cause gc_cause, - CollectorPolicy* collector_policy) { + SoftRefPolicy* soft_ref_policy) { // Ignore explicit GC's. Exiting here does not set the flag and // does not reset the count. Updating of the averages for system @@ -506,7 +508,7 @@ // The clearing will be done on the next GC. bool near_limit = gc_overhead_limit_near(); if (near_limit) { - collector_policy->set_should_clear_all_soft_refs(true); + soft_ref_policy->set_should_clear_all_soft_refs(true); log_trace(gc, ergo)("Nearing GC overhead limit, will be clearing all SoftReference"); } } diff --git a/src/hotspot/share/gc/shared/adaptiveSizePolicy.hpp b/src/hotspot/share/gc/shared/adaptiveSizePolicy.hpp --- a/src/hotspot/share/gc/shared/adaptiveSizePolicy.hpp +++ b/src/hotspot/share/gc/shared/adaptiveSizePolicy.hpp @@ -37,7 +37,7 @@ // Forward decls class elapsedTimer; -class CollectorPolicy; +class SoftRefPolicy; class AdaptiveSizePolicy : public CHeapObj { friend class GCAdaptivePolicyCounters; @@ -486,7 +486,7 @@ size_t max_eden_size, bool is_full_gc, GCCause::Cause gc_cause, - CollectorPolicy* collector_policy); + SoftRefPolicy* soft_ref_policy); static bool should_update_promo_stats(GCCause::Cause cause) { return ((GCCause::is_user_requested_gc(cause) && diff --git a/src/hotspot/share/gc/shared/barrierSet.cpp b/src/hotspot/share/gc/shared/barrierSet.cpp --- a/src/hotspot/share/gc/shared/barrierSet.cpp +++ b/src/hotspot/share/gc/shared/barrierSet.cpp @@ -41,6 +41,12 @@ } // count is number of array elements being written +void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) { + // simply delegate to instance method + Universe::heap()->barrier_set()->write_ref_array(start, count); +} + +// count is number of array elements being written void BarrierSet::write_ref_array(HeapWord* start, size_t count) { assert(count <= (size_t)max_intx, "count too large"); HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize)); @@ -63,12 +69,6 @@ write_ref_array_work(MemRegion(aligned_start, aligned_end)); } -// count is number of array elements being written -void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) { - // simply delegate to instance method - Universe::heap()->barrier_set()->write_ref_array(start, count); -} - bool BarrierSet::obj_equals(oop obj1, oop obj2) { return oopDesc::unsafe_equals(obj1, obj2); } @@ -85,4 +85,5 @@ void BarrierSet::verify_safe_oop(narrowOop p) { // Do nothing } + #endif diff --git a/src/hotspot/share/gc/shared/barrierSet.hpp 
b/src/hotspot/share/gc/shared/barrierSet.hpp --- a/src/hotspot/share/gc/shared/barrierSet.hpp +++ b/src/hotspot/share/gc/shared/barrierSet.hpp @@ -33,6 +33,8 @@ #include "asm/register.hpp" #include "utilities/fakeRttiSupport.hpp" +class JavaThread; + // This class provides the interface between a barrier implementation and // the rest of the system. @@ -110,28 +112,20 @@ static void static_write_ref_array_pre(HeapWord* start, size_t count); static void static_write_ref_array_post(HeapWord* start, size_t count); + // Support for optimizing compilers to call the barrier set on slow path allocations + // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks. + // The allocation is safe to use iff it returns true. If not, the slow-path allocation + // is redone until it succeeds. This can e.g. prevent allocations from the slow path + // to be in old. + virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {} + virtual void on_thread_attach(JavaThread* thread) {} + virtual void on_thread_detach(JavaThread* thread) {} + virtual void make_parsable(JavaThread* thread) {} + protected: virtual void write_ref_array_work(MemRegion mr) = 0; public: - // (For efficiency reasons, this operation is specialized for certain - // barrier types. Semantically, it should be thought of as a call to the - // virtual "_work" function below, which must implement the barrier.) - void write_region(MemRegion mr); - -protected: - virtual void write_region_work(MemRegion mr) = 0; - -public: - // Inform the BarrierSet that the the covered heap region that starts - // with "base" has been changed to have the given size (possibly from 0, - // for initialization.) - virtual void resize_covered_region(MemRegion new_region) = 0; - - // If the barrier set imposes any alignment restrictions on boundaries - // within the heap, this function tells whether they are met. - virtual bool is_aligned(HeapWord* addr) = 0; - // Print a description of the memory for the barrier set virtual void print_on(outputStream* st) const = 0; @@ -317,6 +311,10 @@ static void clone_in_heap(oop src, oop dst, size_t size) { Raw::clone(src, dst, size); } + + static oop resolve(oop obj) { + return Raw::resolve(obj); + } }; }; diff --git a/src/hotspot/share/gc/shared/barrierSet.inline.hpp b/src/hotspot/share/gc/shared/barrierSet.inline.hpp --- a/src/hotspot/share/gc/shared/barrierSet.inline.hpp +++ b/src/hotspot/share/gc/shared/barrierSet.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,8 +29,4 @@ #include "gc/shared/barrierSetConfig.inline.hpp" #include "utilities/align.hpp" -inline void BarrierSet::write_region(MemRegion mr) { - write_region_work(mr); -} - #endif // SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP diff --git a/src/hotspot/share/gc/shared/barrierSetConfig.hpp b/src/hotspot/share/gc/shared/barrierSetConfig.hpp --- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp +++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,34 +29,47 @@ #if INCLUDE_ALL_GCS #define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \ - f(CardTableExtension) \ f(G1SATBCTLogging) \ f(Shenandoah) #else #define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f) #endif +#if INCLUDE_ALL_GCS +#define FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \ + f(G1SATBCT) +#else +#define FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f) +#endif + // Do something for each concrete barrier set part of the build. #define FOR_EACH_CONCRETE_BARRIER_SET_DO(f) \ - f(CardTableForRS) \ + f(CardTableModRef) \ FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f) +#define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \ + f(ModRef) \ + FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f) + // Do something for each known barrier set. #define FOR_EACH_BARRIER_SET_DO(f) \ - f(ModRef) \ - f(CardTableModRef) \ - f(CardTableForRS) \ - f(CardTableExtension) \ - f(G1SATBCT) \ - f(G1SATBCTLogging) \ - f(Shenandoah) + FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \ + FOR_EACH_CONCRETE_BARRIER_SET_DO(f) // To enable runtime-resolution of GC barriers on primitives, please // define SUPPORT_BARRIER_ON_PRIMITIVES. #ifdef SUPPORT_BARRIER_ON_PRIMITIVES -#define BT_BUILDTIME_DECORATORS INTERNAL_BT_BARRIER_ON_PRIMITIVES +#define ACCESS_PRIMITIVE_SUPPORT INTERNAL_BT_BARRIER_ON_PRIMITIVES #else -#define BT_BUILDTIME_DECORATORS INTERNAL_EMPTY +#define ACCESS_PRIMITIVE_SUPPORT INTERNAL_EMPTY #endif +#ifdef SUPPORT_NOT_TO_SPACE_INVARIANT +#define ACCESS_TO_SPACE_INVARIANT_SUPPORT INTERNAL_EMPTY +#else +#define ACCESS_TO_SPACE_INVARIANT_SUPPORT INTERNAL_BT_TO_SPACE_INVARIANT +#endif + +#define BT_BUILDTIME_DECORATORS (ACCESS_PRIMITIVE_SUPPORT | ACCESS_TO_SPACE_INVARIANT_SUPPORT) + #endif // SHARE_VM_GC_SHARED_BARRIERSETCONFIG_HPP diff --git a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp --- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp +++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,10 +29,8 @@ #include "gc/shared/modRefBarrierSet.inline.hpp" #include "gc/shared/cardTableModRefBS.inline.hpp" -#include "gc/shared/cardTableModRefBSForCTRS.hpp" #if INCLUDE_ALL_GCS -#include "gc/parallel/cardTableExtension.hpp" // Parallel support #include "gc/g1/g1SATBCardTableModRefBS.inline.hpp" // G1 support #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp" // Shenandoah support #endif diff --git a/src/hotspot/share/gc/shared/cardGeneration.cpp b/src/hotspot/share/gc/shared/cardGeneration.cpp --- a/src/hotspot/share/gc/shared/cardGeneration.cpp +++ b/src/hotspot/share/gc/shared/cardGeneration.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -79,7 +79,7 @@ heap_word_size(_virtual_space.committed_size()); MemRegion mr(space()->bottom(), new_word_size); // Expand card table - GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr); + GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr); // Expand shared block offset array _bts->resize(new_word_size); @@ -166,7 +166,7 @@ _bts->resize(new_word_size); MemRegion mr(space()->bottom(), new_word_size); // Shrink the card table - GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr); + GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr); size_t new_mem_size = _virtual_space.committed_size(); size_t old_mem_size = new_mem_size + size; diff --git a/src/hotspot/share/gc/shared/cardTableModRefBS.cpp b/src/hotspot/share/gc/shared/cardTable.cpp copy from src/hotspot/share/gc/shared/cardTableModRefBS.cpp copy to src/hotspot/share/gc/shared/cardTable.cpp --- a/src/hotspot/share/gc/shared/cardTableModRefBS.cpp +++ b/src/hotspot/share/gc/shared/cardTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,23 +23,17 @@ */ #include "precompiled.hpp" -#include "gc/shared/cardTableModRefBS.inline.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/collectedHeap.hpp" -#include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/space.inline.hpp" #include "logging/log.hpp" #include "memory/virtualspace.hpp" -#include "oops/oop.inline.hpp" +#include "runtime/java.hpp" +#include "runtime/os.hpp" #include "services/memTracker.hpp" #include "utilities/align.hpp" -#include "utilities/macros.hpp" -// This kind of "BarrierSet" allows a "CollectedHeap" to detect and -// enumerate ref fields that have been modified (since the last -// enumeration.) 
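For orientation while reading the CardTable split below: the essence of the move is that the card-marking arithmetic (the byte map and its biased base pointer) becomes a plain CHeapObj, CardTable, while CardTableModRefBS shrinks to a barrier-set wrapper that delegates to it. A minimal standalone sketch of that mapping, assuming the card_shift of 9 used by this patch (simplified free function, not the actual HotSpot class):

#include <cstdint>

static const int card_shift = 9;   // one card byte covers 512 heap bytes

// The base is biased by the heap's low bound so an address can be shifted
// and used as a direct index, exactly as the hunk below describes:
//   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
inline std::int8_t* byte_for(std::int8_t* byte_map_base, const void* p) {
  return &byte_map_base[reinterpret_cast<std::uintptr_t>(p) >> card_shift];
}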
- -size_t CardTableModRefBS::compute_byte_map_size() -{ +size_t CardTable::compute_byte_map_size() { assert(_guard_index == cards_required(_whole_heap.word_size()) - 1, "uninitialized, check declaration order"); assert(_page_size != 0, "uninitialized, check declaration order"); @@ -47,10 +41,8 @@ return align_up(_guard_index + 1, MAX2(_page_size, granularity)); } -CardTableModRefBS::CardTableModRefBS( - MemRegion whole_heap, - const BarrierSet::FakeRtti& fake_rtti) : - ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)), +CardTable::CardTable(MemRegion whole_heap, bool conc_scan) : + _scanned_concurrently(conc_scan), _whole_heap(whole_heap), _guard_index(0), _guard_region(), @@ -61,7 +53,7 @@ _committed(NULL), _cur_covered_regions(0), _byte_map(NULL), - byte_map_base(NULL) + _byte_map_base(NULL) { assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary"); assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary"); @@ -74,7 +66,18 @@ } } -void CardTableModRefBS::initialize() { +CardTable::~CardTable() { + if (_covered) { + delete[] _covered; + _covered = NULL; + } + if (_committed) { + delete[] _committed; + _committed = NULL; + } +} + +void CardTable::initialize() { _guard_index = cards_required(_whole_heap.word_size()) - 1; _last_valid_index = _guard_index - 1; @@ -103,39 +106,28 @@ } // The assembler store_check code will do an unsigned shift of the oop, - // then add it to byte_map_base, i.e. + // then add it to _byte_map_base, i.e. // - // _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift) + // _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift) _byte_map = (jbyte*) heap_rs.base(); - byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); + _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map"); assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map"); jbyte* guard_card = &_byte_map[_guard_index]; - uintptr_t guard_page = align_down((uintptr_t)guard_card, _page_size); - _guard_region = MemRegion((HeapWord*)guard_page, _page_size); + HeapWord* guard_page = align_down((HeapWord*)guard_card, _page_size); + _guard_region = MemRegion(guard_page, _page_size); os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size, !ExecMem, "card table last card"); *guard_card = last_card; - log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: "); + log_trace(gc, barrier)("CardTable::CardTable: "); log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT, p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index])); - log_trace(gc, barrier)(" byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base)); + log_trace(gc, barrier)(" _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base)); } -CardTableModRefBS::~CardTableModRefBS() { - if (_covered) { - delete[] _covered; - _covered = NULL; - } - if (_committed) { - delete[] _committed; - _committed = NULL; - } -} - -int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) { +int CardTable::find_covering_region_by_base(HeapWord* base) { int i; for (i = 0; i < _cur_covered_regions; i++) { if (_covered[i].start() == base) return i; @@ -154,13 +146,13 @@ _covered[res].set_start(base); _covered[res].set_word_size(0); jbyte* ct_start = byte_for(base); - uintptr_t ct_start_aligned = align_down((uintptr_t)ct_start, _page_size); - 
_committed[res].set_start((HeapWord*)ct_start_aligned); + HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size); + _committed[res].set_start(ct_start_aligned); _committed[res].set_word_size(0); return res; } -int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) { +int CardTable::find_covering_region_containing(HeapWord* addr) { for (int i = 0; i < _cur_covered_regions; i++) { if (_covered[i].contains(addr)) { return i; @@ -170,7 +162,7 @@ return -1; } -HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const { +HeapWord* CardTable::largest_prev_committed_end(int ind) const { HeapWord* max_end = NULL; for (int j = 0; j < ind; j++) { HeapWord* this_end = _committed[j].end(); @@ -179,8 +171,7 @@ return max_end; } -MemRegion CardTableModRefBS::committed_unique_to_self(int self, - MemRegion mr) const { +MemRegion CardTable::committed_unique_to_self(int self, MemRegion mr) const { MemRegion result = mr; for (int r = 0; r < _cur_covered_regions; r += 1) { if (r != self) { @@ -192,7 +183,7 @@ return result; } -void CardTableModRefBS::resize_covered_region(MemRegion new_region) { +void CardTable::resize_covered_region(MemRegion new_region) { // We don't change the start of a region, only the end. assert(_whole_heap.contains(new_region), "attempt to cover area not in reserved area"); @@ -213,9 +204,9 @@ cur_committed.set_end(max_prev_end); } // Align the end up to a page size (starts are already aligned). - jbyte* const new_end = byte_after(new_region.last()); - HeapWord* new_end_aligned = (HeapWord*) align_up(new_end, _page_size); - assert((void*)new_end_aligned >= (void*) new_end, "align up, but less"); + HeapWord* new_end = (HeapWord*) byte_after(new_region.last()); + HeapWord* new_end_aligned = align_up(new_end, _page_size); + assert(new_end_aligned >= new_end, "align up, but less"); // Check the other regions (excludes "ind") to ensure that // the new_end_aligned does not intrude onto the committed // space of another region. @@ -345,7 +336,7 @@ // In any case, the covered size changes. _covered[ind].set_word_size(new_region.word_size()); - log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: "); + log_trace(gc, barrier)("CardTable::resize_covered_region: "); log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT, ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last())); log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT, @@ -363,8 +354,7 @@ // Note that these versions are precise! The scanning code has to handle the // fact that the write barrier may be either precise or imprecise. 
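The precise/imprecise remark above deserves a concrete illustration: an imprecise post-barrier dirties the card covering the object's header, while a precise one dirties the card covering the field that was actually written, so scanning code can only rely on the weaker guarantee. A hedged sketch of the two flavors (hypothetical free functions; the constants mirror card_shift and dirty_card from this patch):

#include <cstdint>

static const int kCardShift = 9;
static const std::int8_t kDirtyCard = 0;

inline std::int8_t* card_for(std::int8_t* byte_map_base, const void* p) {
  return &byte_map_base[reinterpret_cast<std::uintptr_t>(p) >> kCardShift];
}

// Imprecise: dirty the card covering the object's header; the scanner must
// then walk the whole object to find the updated reference field.
inline void post_barrier_imprecise(std::int8_t* base, const void* obj_start) {
  *card_for(base, obj_start) = kDirtyCard;
}

// Precise: dirty exactly the card covering the written field (as done for
// object arrays); only that card needs rescanning.
inline void post_barrier_precise(std::int8_t* base, const void* field_addr) {
  *card_for(base, field_addr) = kDirtyCard;
}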
- -void CardTableModRefBS::dirty_MemRegion(MemRegion mr) { +void CardTable::dirty_MemRegion(MemRegion mr) { assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); jbyte* cur = byte_for(mr.start()); @@ -375,16 +365,7 @@ } } -void CardTableModRefBS::invalidate(MemRegion mr) { - assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); - assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); - for (int i = 0; i < _cur_covered_regions; i++) { - MemRegion mri = mr.intersection(_covered[i]); - if (!mri.is_empty()) dirty_MemRegion(mri); - } -} - -void CardTableModRefBS::clear_MemRegion(MemRegion mr) { +void CardTable::clear_MemRegion(MemRegion mr) { // Be conservative: only clean cards entirely contained within the // region. jbyte* cur; @@ -398,14 +379,14 @@ memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte))); } -void CardTableModRefBS::clear(MemRegion mr) { +void CardTable::clear(MemRegion mr) { for (int i = 0; i < _cur_covered_regions; i++) { MemRegion mri = mr.intersection(_covered[i]); if (!mri.is_empty()) clear_MemRegion(mri); } } -void CardTableModRefBS::dirty(MemRegion mr) { +void CardTable::dirty(MemRegion mr) { jbyte* first = byte_for(mr.start()); jbyte* last = byte_after(mr.last()); memset(first, dirty_card, last-first); @@ -413,8 +394,7 @@ // Unlike several other card table methods, dirty_card_iterate() // iterates over dirty cards ranges in increasing address order. -void CardTableModRefBS::dirty_card_iterate(MemRegion mr, - MemRegionClosure* cl) { +void CardTable::dirty_card_iterate(MemRegion mr, MemRegionClosure* cl) { for (int i = 0; i < _cur_covered_regions; i++) { MemRegion mri = mr.intersection(_covered[i]); if (!mri.is_empty()) { @@ -438,9 +418,9 @@ } } -MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr, - bool reset, - int reset_val) { +MemRegion CardTable::dirty_card_range_after_reset(MemRegion mr, + bool reset, + int reset_val) { for (int i = 0; i < _cur_covered_regions; i++) { MemRegion mri = mr.intersection(_covered[i]); if (!mri.is_empty()) { @@ -470,22 +450,31 @@ return MemRegion(mr.end(), mr.end()); } -uintx CardTableModRefBS::ct_max_alignment_constraint() { +uintx CardTable::ct_max_alignment_constraint() { return card_size * os::vm_page_size(); } -void CardTableModRefBS::verify_guard() { +void CardTable::verify_guard() { // For product build verification guarantee(_byte_map[_guard_index] == last_card, "card table guard has been modified"); } -void CardTableModRefBS::verify() { +void CardTable::invalidate(MemRegion mr) { + assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); + assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); + for (int i = 0; i < _cur_covered_regions; i++) { + MemRegion mri = mr.intersection(_covered[i]); + if (!mri.is_empty()) dirty_MemRegion(mri); + } +} + +void CardTable::verify() { verify_guard(); } #ifndef PRODUCT -void CardTableModRefBS::verify_region(MemRegion mr, +void CardTable::verify_region(MemRegion mr, jbyte val, bool val_equals) { jbyte* start = byte_for(mr.start()); jbyte* end = byte_for(mr.last()); @@ -508,16 +497,16 @@ guarantee(!failures, "there should not have been any failures"); } -void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) { +void CardTable::verify_not_dirty_region(MemRegion mr) { verify_region(mr, dirty_card, false /* val_equals */); } -void CardTableModRefBS::verify_dirty_region(MemRegion mr) { 
+void CardTable::verify_dirty_region(MemRegion mr) { verify_region(mr, dirty_card, true /* val_equals */); } #endif -void CardTableModRefBS::print_on(outputStream* st) const { - st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT, - p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base)); +void CardTable::print_on(outputStream* st) const { + st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] _byte_map_base: " INTPTR_FORMAT, + p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(_byte_map_base)); } diff --git a/src/hotspot/share/gc/shared/cardTableModRefBS.hpp b/src/hotspot/share/gc/shared/cardTable.hpp copy from src/hotspot/share/gc/shared/cardTableModRefBS.hpp copy to src/hotspot/share/gc/shared/cardTable.hpp --- a/src/hotspot/share/gc/shared/cardTableModRefBS.hpp +++ b/src/hotspot/share/gc/shared/cardTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,47 +22,20 @@ * */ -#ifndef SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP -#define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP +#ifndef SHARE_VM_GC_SHARED_CARDTABLE_HPP +#define SHARE_VM_GC_SHARED_CARDTABLE_HPP -#include "gc/shared/modRefBarrierSet.hpp" +#include "memory/allocation.hpp" +#include "memory/memRegion.hpp" +#include "oops/oopsHierarchy.hpp" #include "utilities/align.hpp" -// This kind of "BarrierSet" allows a "CollectedHeap" to detect and -// enumerate ref fields that have been modified (since the last -// enumeration.) - -// As it currently stands, this barrier is *imprecise*: when a ref field in -// an object "o" is modified, the card table entry for the card containing -// the head of "o" is dirtied, not necessarily the card containing the -// modified field itself. For object arrays, however, the barrier *is* -// precise; only the card containing the modified element is dirtied. -// Closures used to scan dirty cards should take these -// considerations into account. - -class CardTableModRefBS: public ModRefBarrierSet { - // Some classes get to look at some private stuff. +class CardTable: public CHeapObj { friend class VMStructs; - protected: - - enum CardValues { - clean_card = -1, - // The mask contains zeros in places for all other values. - clean_card_mask = clean_card - 31, - - dirty_card = 0, - precleaned_card = 1, - claimed_card = 2, - deferred_card = 4, - last_card = 8, - CT_MR_BS_last_reserved = 16 - }; - - // a word's worth (row) of clean card values - static const intptr_t clean_card_row = (intptr_t)(-1); - +protected: // The declaration order of these const fields is important; see the // constructor before changing. + const bool _scanned_concurrently; const MemRegion _whole_heap; // the region covered by the card table size_t _guard_index; // index of very last element in the card // table; it is set to a guard value @@ -71,13 +44,7 @@ const size_t _page_size; // page size used when mapping _byte_map size_t _byte_map_size; // in bytes jbyte* _byte_map; // the card marking array - - // Some barrier sets create tables whose elements correspond to parts of - // the heap; the CardTableModRefBS is an example. Such barrier sets will - // normally reserve space for such tables, and commit parts of the table - // "covering" parts of the heap that are committed. 
At most one covered - // region per generation is needed. - static const int _max_covered_regions = 2; + jbyte* _byte_map_base; int _cur_covered_regions; @@ -108,9 +75,6 @@ // instead of starting at a given base address. int find_covering_region_containing(HeapWord* addr); - // Resize one of the regions covered by the remembered set. - virtual void resize_covered_region(MemRegion new_region); - // Returns the leftmost end of a committed region corresponding to a // covered region before covered region "ind", or else "NULL" if "ind" is // the first covered region. @@ -122,13 +86,76 @@ // against uncommitting the guard region. MemRegion committed_unique_to_self(int self, MemRegion mr) const; + // Some barrier sets create tables whose elements correspond to parts of + // the heap; the CardTableModRefBS is an example. Such barrier sets will + // normally reserve space for such tables, and commit parts of the table + // "covering" parts of the heap that are committed. At most one covered + // region per generation is needed. + static const int _max_covered_regions = 2; + + enum CardValues { + clean_card = -1, + // The mask contains zeros in places for all other values. + clean_card_mask = clean_card - 31, + + dirty_card = 0, + precleaned_card = 1, + claimed_card = 2, + deferred_card = 4, + last_card = 8, + CT_MR_BS_last_reserved = 16 + }; + + // a word's worth (row) of clean card values + static const intptr_t clean_card_row = (intptr_t)(-1); + +public: + CardTable(MemRegion whole_heap, bool conc_scan); + virtual ~CardTable(); + virtual void initialize(); + + // The kinds of precision a CardTableModRefBS may offer. + enum PrecisionStyle { + Precise, + ObjHeadPreciseArray + }; + + // Tells what style of precision this card table offers. + PrecisionStyle precision() { + return ObjHeadPreciseArray; // Only one supported for now. + } + + // *** Barrier set functions. + + // Initialization utilities; covered_words is the size of the covered region + // in, um, words. + inline size_t cards_required(size_t covered_words) { + // Add one for a guard card, used to detect errors. + const size_t words = align_up(covered_words, card_size_in_words); + return words / card_size_in_words + 1; + } + + // Dirty the bytes corresponding to "mr" (not all of which must be + // covered.) + void dirty_MemRegion(MemRegion mr); + + // Clear (to clean_card) the bytes entirely contained within "mr" (not + // all of which must be covered.) + void clear_MemRegion(MemRegion mr); + + // Return true if "p" is at the start of a card. + bool is_card_aligned(HeapWord* p) { + jbyte* pcard = byte_for(p); + return (addr_for(pcard) == p); + } + // Mapping from address to card marking array entry jbyte* byte_for(const void* p) const { assert(_whole_heap.contains(p), "Attempt to access p = " PTR_FORMAT " out of bounds of " " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")", p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end())); - jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift]; + jbyte* result = &_byte_map_base[uintptr_t(p) >> card_shift]; assert(result >= _byte_map && result < _byte_map + _byte_map_size, "out of bounds accessor for card marking array"); return result; @@ -141,131 +168,10 @@ return byte_for(p) + 1; } - // Dirty the bytes corresponding to "mr" (not all of which must be - // covered.) - void dirty_MemRegion(MemRegion mr); - - // Clear (to clean_card) the bytes entirely contained within "mr" (not - // all of which must be covered.) 
- void clear_MemRegion(MemRegion mr); - - public: - // Constants - enum SomePublicConstants { - card_shift = 9, - card_size = 1 << card_shift, - card_size_in_words = card_size / sizeof(HeapWord) - }; - - static int clean_card_val() { return clean_card; } - static int clean_card_mask_val() { return clean_card_mask; } - static int dirty_card_val() { return dirty_card; } - static int claimed_card_val() { return claimed_card; } - static int precleaned_card_val() { return precleaned_card; } - static int deferred_card_val() { return deferred_card; } - - virtual void initialize(); - - // *** Barrier set functions. - - // Initialization utilities; covered_words is the size of the covered region - // in, um, words. - inline size_t cards_required(size_t covered_words) { - // Add one for a guard card, used to detect errors. - const size_t words = align_up(covered_words, card_size_in_words); - return words / card_size_in_words + 1; - } - - protected: - CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti); - ~CardTableModRefBS(); - - protected: - void write_region_work(MemRegion mr) { - dirty_MemRegion(mr); - } - - protected: - void write_ref_array_work(MemRegion mr) { - dirty_MemRegion(mr); - } - - public: - bool is_aligned(HeapWord* addr) { - return is_card_aligned(addr); - } - - // *** Card-table-barrier-specific things. - - // Record a reference update. Note that these versions are precise! - // The scanning code has to handle the fact that the write barrier may be - // either precise or imprecise. We make non-virtual inline variants of - // these functions here for performance. - template - void write_ref_field_post(T* field, oop newVal); - - // These are used by G1, when it uses the card table as a temporary data - // structure for card claiming. - bool is_card_dirty(size_t card_index) { - return _byte_map[card_index] == dirty_card_val(); - } - - void mark_card_dirty(size_t card_index) { - _byte_map[card_index] = dirty_card_val(); - } - - bool is_card_clean(size_t card_index) { - return _byte_map[card_index] == clean_card_val(); - } - - // Card marking array base (adjusted for heap low boundary) - // This would be the 0th element of _byte_map, if the heap started at 0x0. - // But since the heap starts at some higher address, this points to somewhere - // before the beginning of the actual _byte_map. - jbyte* byte_map_base; - - // Return true if "p" is at the start of a card. - bool is_card_aligned(HeapWord* p) { - jbyte* pcard = byte_for(p); - return (addr_for(pcard) == p); - } - - HeapWord* align_to_card_boundary(HeapWord* p) { - jbyte* pcard = byte_for(p + card_size_in_words - 1); - return addr_for(pcard); - } - - // The kinds of precision a CardTableModRefBS may offer. - enum PrecisionStyle { - Precise, - ObjHeadPreciseArray - }; - - // Tells what style of precision this card table offers. - PrecisionStyle precision() { - return ObjHeadPreciseArray; // Only one supported for now. - } - - // ModRefBS functions. virtual void invalidate(MemRegion mr); void clear(MemRegion mr); void dirty(MemRegion mr); - // *** Card-table-RemSet-specific things. - - static uintx ct_max_alignment_constraint(); - - // Apply closure "cl" to the dirty cards containing some part of - // MemRegion "mr". - void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl); - - // Return the MemRegion corresponding to the first maximal run - // of dirty cards lying completely within MemRegion mr. - // If reset is "true", then sets those card table entries to the given - // value. 
- MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset, - int reset_val); - // Provide read-only access to the card table array. const jbyte* byte_for_const(const void* p) const { return byte_for(p); @@ -280,7 +186,7 @@ "out of bounds access to card marking array. p: " PTR_FORMAT " _byte_map: " PTR_FORMAT " _byte_map + _byte_map_size: " PTR_FORMAT, p2i(p), p2i(_byte_map), p2i(_byte_map + _byte_map_size)); - size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte)); + size_t delta = pointer_delta(p, _byte_map_base, sizeof(jbyte)); HeapWord* result = (HeapWord*) (delta << card_shift); assert(_whole_heap.contains(result), "Returning result = " PTR_FORMAT " out of bounds of " @@ -302,7 +208,49 @@ return _byte_map + card_index; } - // Print a description of the memory for the barrier set + // Resize one of the regions covered by the remembered set. + virtual void resize_covered_region(MemRegion new_region); + + // *** Card-table-RemSet-specific things. + + static uintx ct_max_alignment_constraint(); + + // Apply closure "cl" to the dirty cards containing some part of + // MemRegion "mr". + void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl); + + // Return the MemRegion corresponding to the first maximal run + // of dirty cards lying completely within MemRegion mr. + // If reset is "true", then sets those card table entries to the given + // value. + MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset, + int reset_val); + + // Constants + enum SomePublicConstants { + card_shift = 9, + card_size = 1 << card_shift, + card_size_in_words = card_size / sizeof(HeapWord) + }; + + static jbyte clean_card_val() { return clean_card; } + static jbyte clean_card_mask_val() { return clean_card_mask; } + static jbyte dirty_card_val() { return dirty_card; } + static jbyte claimed_card_val() { return claimed_card; } + static jbyte precleaned_card_val() { return precleaned_card; } + static jbyte deferred_card_val() { return deferred_card; } + static intptr_t clean_card_row_val() { return clean_card_row; } + + // Card marking array base (adjusted for heap low boundary) + // This would be the 0th element of _byte_map, if the heap started at 0x0. + // But since the heap starts at some higher address, this points to somewhere + // before the beginning of the actual _byte_map. + jbyte* byte_map_base() const { return _byte_map_base; } + bool scanned_concurrently() const { return _scanned_concurrently; } + + virtual bool is_in_young(oop obj) const = 0; + + // Print a description of the memory for the card table virtual void print_on(outputStream* st) const; void verify(); @@ -313,19 +261,6 @@ void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN; void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN; void verify_dirty_region(MemRegion mr) PRODUCT_RETURN; - - template - class AccessBarrier: public ModRefBarrierSet::AccessBarrier {}; }; -template<> -struct BarrierSet::GetName { - static const BarrierSet::Name value = BarrierSet::CardTableModRef; -}; - -template<> -struct BarrierSet::GetType { - typedef CardTableModRefBS type; -}; - -#endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP +#endif // SHARE_VM_GC_SHARED_CARDTABLE_HPP diff --git a/src/hotspot/share/gc/shared/cardTableModRefBS.cpp b/src/hotspot/share/gc/shared/cardTableModRefBS.cpp --- a/src/hotspot/share/gc/shared/cardTableModRefBS.cpp +++ b/src/hotspot/share/gc/shared/cardTableModRefBS.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ #include "logging/log.hpp" #include "memory/virtualspace.hpp" #include "oops/oop.inline.hpp" +#include "runtime/thread.hpp" #include "services/memTracker.hpp" #include "utilities/align.hpp" #include "utilities/macros.hpp" @@ -38,486 +39,149 @@ // enumerate ref fields that have been modified (since the last // enumeration.) -size_t CardTableModRefBS::compute_byte_map_size() -{ - assert(_guard_index == cards_required(_whole_heap.word_size()) - 1, - "uninitialized, check declaration order"); - assert(_page_size != 0, "uninitialized, check declaration order"); - const size_t granularity = os::vm_allocation_granularity(); - return align_up(_guard_index + 1, MAX2(_page_size, granularity)); -} - CardTableModRefBS::CardTableModRefBS( - MemRegion whole_heap, + CardTable* card_table, const BarrierSet::FakeRtti& fake_rtti) : ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)), - _whole_heap(whole_heap), - _guard_index(0), - _guard_region(), - _last_valid_index(0), - _page_size(os::vm_page_size()), - _byte_map_size(0), - _covered(NULL), - _committed(NULL), - _cur_covered_regions(0), - _byte_map(NULL), - byte_map_base(NULL) -{ - assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary"); - assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary"); + _defer_initial_card_mark(false), + _card_table(card_table) +{} - assert(card_size <= 512, "card_size must be less than 512"); // why? - - _covered = new MemRegion[_max_covered_regions]; - if (_covered == NULL) { - vm_exit_during_initialization("Could not allocate card table covered region set."); - } -} +CardTableModRefBS::CardTableModRefBS(CardTable* card_table) : + ModRefBarrierSet(BarrierSet::FakeRtti(BarrierSet::CardTableModRef)), + _defer_initial_card_mark(false), + _card_table(card_table) +{} void CardTableModRefBS::initialize() { - _guard_index = cards_required(_whole_heap.word_size()) - 1; - _last_valid_index = _guard_index - 1; - - _byte_map_size = compute_byte_map_size(); - - HeapWord* low_bound = _whole_heap.start(); - HeapWord* high_bound = _whole_heap.end(); - - _cur_covered_regions = 0; - _committed = new MemRegion[_max_covered_regions]; - if (_committed == NULL) { - vm_exit_during_initialization("Could not allocate card table committed region set."); - } - - const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 : - MAX2(_page_size, (size_t) os::vm_allocation_granularity()); - ReservedSpace heap_rs(_byte_map_size, rs_align, false); - - MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC); - - os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1, - _page_size, heap_rs.base(), heap_rs.size()); - if (!heap_rs.is_reserved()) { - vm_exit_during_initialization("Could not reserve enough space for the " - "card marking array"); - } - - // The assembler store_check code will do an unsigned shift of the oop, - // then add it to byte_map_base, i.e. 
- // - // _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift) - _byte_map = (jbyte*) heap_rs.base(); - byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); - assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map"); - assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map"); - - jbyte* guard_card = &_byte_map[_guard_index]; - uintptr_t guard_page = align_down((uintptr_t)guard_card, _page_size); - _guard_region = MemRegion((HeapWord*)guard_page, _page_size); - os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size, - !ExecMem, "card table last card"); - *guard_card = last_card; - - log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: "); - log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT, - p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index])); - log_trace(gc, barrier)(" byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base)); + initialize_deferred_card_mark_barriers(); } CardTableModRefBS::~CardTableModRefBS() { - if (_covered) { - delete[] _covered; - _covered = NULL; - } - if (_committed) { - delete[] _committed; - _committed = NULL; - } + delete _card_table; } -int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) { - int i; - for (i = 0; i < _cur_covered_regions; i++) { - if (_covered[i].start() == base) return i; - if (_covered[i].start() > base) break; - } - // If we didn't find it, create a new one. - assert(_cur_covered_regions < _max_covered_regions, - "too many covered regions"); - // Move the ones above up, to maintain sorted order. - for (int j = _cur_covered_regions; j > i; j--) { - _covered[j] = _covered[j-1]; - _committed[j] = _committed[j-1]; - } - int res = i; - _cur_covered_regions++; - _covered[res].set_start(base); - _covered[res].set_word_size(0); - jbyte* ct_start = byte_for(base); - uintptr_t ct_start_aligned = align_down((uintptr_t)ct_start, _page_size); - _committed[res].set_start((HeapWord*)ct_start_aligned); - _committed[res].set_word_size(0); - return res; -} - -int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) { - for (int i = 0; i < _cur_covered_regions; i++) { - if (_covered[i].contains(addr)) { - return i; - } - } - assert(0, "address outside of heap?"); - return -1; -} - -HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const { - HeapWord* max_end = NULL; - for (int j = 0; j < ind; j++) { - HeapWord* this_end = _committed[j].end(); - if (this_end > max_end) max_end = this_end; - } - return max_end; -} - -MemRegion CardTableModRefBS::committed_unique_to_self(int self, - MemRegion mr) const { - MemRegion result = mr; - for (int r = 0; r < _cur_covered_regions; r += 1) { - if (r != self) { - result = result.minus(_committed[r]); - } - } - // Never include the guard page. - result = result.minus(_guard_region); - return result; -} - -void CardTableModRefBS::resize_covered_region(MemRegion new_region) { - // We don't change the start of a region, only the end. 
- assert(_whole_heap.contains(new_region), - "attempt to cover area not in reserved area"); - debug_only(verify_guard();) - // collided is true if the expansion would push into another committed region - debug_only(bool collided = false;) - int const ind = find_covering_region_by_base(new_region.start()); - MemRegion const old_region = _covered[ind]; - assert(old_region.start() == new_region.start(), "just checking"); - if (new_region.word_size() != old_region.word_size()) { - // Commit new or uncommit old pages, if necessary. - MemRegion cur_committed = _committed[ind]; - // Extend the end of this _committed region - // to cover the end of any lower _committed regions. - // This forms overlapping regions, but never interior regions. - HeapWord* const max_prev_end = largest_prev_committed_end(ind); - if (max_prev_end > cur_committed.end()) { - cur_committed.set_end(max_prev_end); - } - // Align the end up to a page size (starts are already aligned). - jbyte* const new_end = byte_after(new_region.last()); - HeapWord* new_end_aligned = (HeapWord*) align_up(new_end, _page_size); - assert((void*)new_end_aligned >= (void*) new_end, "align up, but less"); - // Check the other regions (excludes "ind") to ensure that - // the new_end_aligned does not intrude onto the committed - // space of another region. - int ri = 0; - for (ri = ind + 1; ri < _cur_covered_regions; ri++) { - if (new_end_aligned > _committed[ri].start()) { - assert(new_end_aligned <= _committed[ri].end(), - "An earlier committed region can't cover a later committed region"); - // Any region containing the new end - // should start at or beyond the region found (ind) - // for the new end (committed regions are not expected to - // be proper subsets of other committed regions). - assert(_committed[ri].start() >= _committed[ind].start(), - "New end of committed region is inconsistent"); - new_end_aligned = _committed[ri].start(); - // new_end_aligned can be equal to the start of its - // committed region (i.e., of "ind") if a second - // region following "ind" also start at the same location - // as "ind". - assert(new_end_aligned >= _committed[ind].start(), - "New end of committed region is before start"); - debug_only(collided = true;) - // Should only collide with 1 region - break; - } - } -#ifdef ASSERT - for (++ri; ri < _cur_covered_regions; ri++) { - assert(!_committed[ri].contains(new_end_aligned), - "New end of committed region is in a second committed region"); - } -#endif - // The guard page is always committed and should not be committed over. - // "guarded" is used for assertion checking below and recalls the fact - // that the would-be end of the new committed region would have - // penetrated the guard page. - HeapWord* new_end_for_commit = new_end_aligned; - - DEBUG_ONLY(bool guarded = false;) - if (new_end_for_commit > _guard_region.start()) { - new_end_for_commit = _guard_region.start(); - DEBUG_ONLY(guarded = true;) - } - - if (new_end_for_commit > cur_committed.end()) { - // Must commit new pages. - MemRegion const new_committed = - MemRegion(cur_committed.end(), new_end_for_commit); - - assert(!new_committed.is_empty(), "Region should not be empty here"); - os::commit_memory_or_exit((char*)new_committed.start(), - new_committed.byte_size(), _page_size, - !ExecMem, "card table expansion"); - // Use new_end_aligned (as opposed to new_end_for_commit) because - // the cur_committed region may include the guard region. - } else if (new_end_aligned < cur_committed.end()) { - // Must uncommit pages. 
- MemRegion const uncommit_region = - committed_unique_to_self(ind, MemRegion(new_end_aligned, - cur_committed.end())); - if (!uncommit_region.is_empty()) { - // It is not safe to uncommit cards if the boundary between - // the generations is moving. A shrink can uncommit cards - // owned by generation A but being used by generation B. - if (!UseAdaptiveGCBoundary) { - if (!os::uncommit_memory((char*)uncommit_region.start(), - uncommit_region.byte_size())) { - assert(false, "Card table contraction failed"); - // The call failed so don't change the end of the - // committed region. This is better than taking the - // VM down. - new_end_aligned = _committed[ind].end(); - } - } else { - new_end_aligned = _committed[ind].end(); - } - } - } - // In any case, we can reset the end of the current committed entry. - _committed[ind].set_end(new_end_aligned); - -#ifdef ASSERT - // Check that the last card in the new region is committed according - // to the tables. - bool covered = false; - for (int cr = 0; cr < _cur_covered_regions; cr++) { - if (_committed[cr].contains(new_end - 1)) { - covered = true; - break; - } - } - assert(covered, "Card for end of new region not committed"); -#endif - - // The default of 0 is not necessarily clean cards. - jbyte* entry; - if (old_region.last() < _whole_heap.start()) { - entry = byte_for(_whole_heap.start()); - } else { - entry = byte_after(old_region.last()); - } - assert(index_for(new_region.last()) < _guard_index, - "The guard card will be overwritten"); - // This line commented out cleans the newly expanded region and - // not the aligned up expanded region. - // jbyte* const end = byte_after(new_region.last()); - jbyte* const end = (jbyte*) new_end_for_commit; - assert((end >= byte_after(new_region.last())) || collided || guarded, - "Expect to be beyond new region unless impacting another region"); - // do nothing if we resized downward. -#ifdef ASSERT - for (int ri = 0; ri < _cur_covered_regions; ri++) { - if (ri != ind) { - // The end of the new committed region should not - // be in any existing region unless it matches - // the start of the next region. - assert(!_committed[ri].contains(end) || - (_committed[ri].start() == (HeapWord*) end), - "Overlapping committed regions"); - } - } -#endif - if (entry < end) { - memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte))); - } - } - // In any case, the covered size changes. - _covered[ind].set_word_size(new_region.word_size()); - - log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: "); - log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT, - ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last())); - log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT, - ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last())); - log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT, - p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last()))); - log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT, - p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last()))); - - // Touch the last card of the covered region to show that it - // is committed (or SEGV). - debug_only((void) (*byte_for(_covered[ind].last()));) - debug_only(verify_guard();) -} - -// Note that these versions are precise! 
The scanning code has to handle the -// fact that the write barrier may be either precise or imprecise. - -void CardTableModRefBS::dirty_MemRegion(MemRegion mr) { - assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); - assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); - jbyte* cur = byte_for(mr.start()); - jbyte* last = byte_after(mr.last()); - while (cur < last) { - *cur = dirty_card; - cur++; - } +void CardTableModRefBS::write_ref_array_work(MemRegion mr) { + _card_table->dirty_MemRegion(mr); } void CardTableModRefBS::invalidate(MemRegion mr) { - assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); - assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); - for (int i = 0; i < _cur_covered_regions; i++) { - MemRegion mri = mr.intersection(_covered[i]); - if (!mri.is_empty()) dirty_MemRegion(mri); - } + _card_table->invalidate(mr); } -void CardTableModRefBS::clear_MemRegion(MemRegion mr) { - // Be conservative: only clean cards entirely contained within the - // region. - jbyte* cur; - if (mr.start() == _whole_heap.start()) { - cur = byte_for(mr.start()); - } else { - assert(mr.start() > _whole_heap.start(), "mr is not covered."); - cur = byte_after(mr.start() - 1); - } - jbyte* last = byte_after(mr.last()); - memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte))); +void CardTableModRefBS::print_on(outputStream* st) const { + _card_table->print_on(st); } -void CardTableModRefBS::clear(MemRegion mr) { - for (int i = 0; i < _cur_covered_regions; i++) { - MemRegion mri = mr.intersection(_covered[i]); - if (!mri.is_empty()) clear_MemRegion(mri); +// Helper for ReduceInitialCardMarks. For performance, +// compiled code may elide card-marks for initializing stores +// to a newly allocated object along the fast-path. We +// compensate for such elided card-marks as follows: +// (a) Generational, non-concurrent collectors, such as +// GenCollectedHeap(ParNew,DefNew,Tenured) and +// ParallelScavengeHeap(ParallelGC, ParallelOldGC) +// need the card-mark if and only if the region is +// in the old gen, and do not care if the card-mark +// succeeds or precedes the initializing stores themselves, +// so long as the card-mark is completed before the next +// scavenge. For all these cases, we can do a card mark +// at the point at which we do a slow path allocation +// in the old gen, i.e. in this call. +// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires +// in addition that the card-mark for an old gen allocated +// object strictly follow any associated initializing stores. +// In these cases, the memRegion remembered below is +// used to card-mark the entire region either just before the next +// slow-path allocation by this thread or just before the next scavenge or +// CMS-associated safepoint, whichever of these events happens first. +// (The implicit assumption is that the object has been fully +// initialized by this point, a fact that we assert when doing the +// card-mark.) +// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a +// G1 concurrent marking is in progress an SATB (pre-write-)barrier +// is used to remember the pre-value of any store. Initializing +// stores will not need this barrier, so we need not worry about +// compensating for the missing pre-barrier here. 
Turning now +// to the post-barrier, we note that G1 needs a RS update barrier +// which simply enqueues a (sequence of) dirty cards which may +// optionally be refined by the concurrent update threads. Note +// that this barrier need only be applied to a non-young write, +// but, like in CMS, because of the presence of concurrent refinement +// (much like CMS' precleaning), must strictly follow the oop-store. +// Thus, using the same protocol for maintaining the intended +// invariants turns out, serendipitously, to be the same for both +// G1 and CMS. +// +// For any future collector, this code should be reexamined with +// that specific collector in mind, and the documentation above suitably +// extended and updated. +void CardTableModRefBS::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) { + if (!ReduceInitialCardMarks) { + return; } -} - -void CardTableModRefBS::dirty(MemRegion mr) { - jbyte* first = byte_for(mr.start()); - jbyte* last = byte_after(mr.last()); - memset(first, dirty_card, last-first); -} - -// Unlike several other card table methods, dirty_card_iterate() -// iterates over dirty cards ranges in increasing address order. -void CardTableModRefBS::dirty_card_iterate(MemRegion mr, - MemRegionClosure* cl) { - for (int i = 0; i < _cur_covered_regions; i++) { - MemRegion mri = mr.intersection(_covered[i]); - if (!mri.is_empty()) { - jbyte *cur_entry, *next_entry, *limit; - for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last()); - cur_entry <= limit; - cur_entry = next_entry) { - next_entry = cur_entry + 1; - if (*cur_entry == dirty_card) { - size_t dirty_cards; - // Accumulate maximal dirty card range, starting at cur_entry - for (dirty_cards = 1; - next_entry <= limit && *next_entry == dirty_card; - dirty_cards++, next_entry++); - MemRegion cur_cards(addr_for(cur_entry), - dirty_cards*card_size_in_words); - cl->do_MemRegion(cur_cards); - } - } + // If a previous card-mark was deferred, flush it now. + flush_deferred_card_mark_barrier(thread); + if (new_obj->is_typeArray() || _card_table->is_in_young(new_obj)) { + // Arrays of non-references don't need a post-barrier. + // The deferred_card_mark region should be empty + // following the flush above.
+ assert(thread->deferred_card_mark().is_empty(), "Error"); + } else { + MemRegion mr((HeapWord*)new_obj, new_obj->size()); + assert(!mr.is_empty(), "Error"); + if (_defer_initial_card_mark) { + // Defer the card mark + thread->set_deferred_card_mark(mr); + } else { + // Do the card mark + invalidate(mr); } } } -MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr, - bool reset, - int reset_val) { - for (int i = 0; i < _cur_covered_regions; i++) { - MemRegion mri = mr.intersection(_covered[i]); - if (!mri.is_empty()) { - jbyte* cur_entry, *next_entry, *limit; - for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last()); - cur_entry <= limit; - cur_entry = next_entry) { - next_entry = cur_entry + 1; - if (*cur_entry == dirty_card) { - size_t dirty_cards; - // Accumulate maximal dirty card range, starting at cur_entry - for (dirty_cards = 1; - next_entry <= limit && *next_entry == dirty_card; - dirty_cards++, next_entry++); - MemRegion cur_cards(addr_for(cur_entry), - dirty_cards*card_size_in_words); - if (reset) { - for (size_t i = 0; i < dirty_cards; i++) { - cur_entry[i] = reset_val; - } - } - return cur_cards; - } - } - } - } - return MemRegion(mr.end(), mr.end()); +void CardTableModRefBS::initialize_deferred_card_mark_barriers() { + // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used); + // otherwise remains unused. +#if defined(COMPILER2) || INCLUDE_JVMCI + _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers() + && (DeferInitialCardMark || card_mark_must_follow_store()); +#else + assert(_defer_initial_card_mark == false, "Who would set it?"); +#endif } -uintx CardTableModRefBS::ct_max_alignment_constraint() { - return card_size * os::vm_page_size(); +void CardTableModRefBS::flush_deferred_card_mark_barrier(JavaThread* thread) { +#if defined(COMPILER2) || INCLUDE_JVMCI + MemRegion deferred = thread->deferred_card_mark(); + if (!deferred.is_empty()) { + assert(_defer_initial_card_mark, "Otherwise should be empty"); + { + // Verify that the storage points to a parsable object in heap + DEBUG_ONLY(oop old_obj = oop(deferred.start());) + assert(!_card_table->is_in_young(old_obj), + "Else should have been filtered in on_slowpath_allocation_exit()"); + assert(oopDesc::is_oop(old_obj, true), "Not an oop"); + assert(deferred.word_size() == (size_t)(old_obj->size()), + "Mismatch: multiple objects?"); + } + write_region(deferred); + // "Clear" the deferred_card_mark field + thread->set_deferred_card_mark(MemRegion()); + } + assert(thread->deferred_card_mark().is_empty(), "invariant"); +#else + assert(!_defer_initial_card_mark, "Should be false"); + assert(thread->deferred_card_mark().is_empty(), "Should be empty"); +#endif } -void CardTableModRefBS::verify_guard() { - // For product build verification - guarantee(_byte_map[_guard_index] == last_card, - "card table guard has been modified"); +void CardTableModRefBS::on_thread_detach(JavaThread* thread) { + // The deferred store barriers must all have been flushed to the + // card-table (or other remembered set structure) before GC starts + // processing the card-table (or other remembered set). 
+ flush_deferred_card_mark_barrier(thread); } -void CardTableModRefBS::verify() { - verify_guard(); +bool CardTableModRefBS::card_mark_must_follow_store() const { + return _card_table->scanned_concurrently(); } - -#ifndef PRODUCT -void CardTableModRefBS::verify_region(MemRegion mr, - jbyte val, bool val_equals) { - jbyte* start = byte_for(mr.start()); - jbyte* end = byte_for(mr.last()); - bool failures = false; - for (jbyte* curr = start; curr <= end; ++curr) { - jbyte curr_val = *curr; - bool failed = (val_equals) ? (curr_val != val) : (curr_val == val); - if (failed) { - if (!failures) { - log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end)); - log_error(gc, verify)("== %sexpecting value: %d", (val_equals) ? "" : "not ", val); - failures = true; - } - log_error(gc, verify)("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d", - p2i(curr), p2i(addr_for(curr)), - p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)), - (int) curr_val); - } - } - guarantee(!failures, "there should not have been any failures"); -} - -void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) { - verify_region(mr, dirty_card, false /* val_equals */); -} - -void CardTableModRefBS::verify_dirty_region(MemRegion mr) { - verify_region(mr, dirty_card, true /* val_equals */); -} -#endif - -void CardTableModRefBS::print_on(outputStream* st) const { - st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT, - p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base)); -} diff --git a/src/hotspot/share/gc/shared/cardTableModRefBS.hpp b/src/hotspot/share/gc/shared/cardTableModRefBS.hpp --- a/src/hotspot/share/gc/shared/cardTableModRefBS.hpp +++ b/src/hotspot/share/gc/shared/cardTableModRefBS.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,8 @@ #include "gc/shared/modRefBarrierSet.hpp" #include "utilities/align.hpp" +class CardTable; + // This kind of "BarrierSet" allows a "CollectedHeap" to detect and // enumerate ref fields that have been modified (since the last // enumeration.) @@ -45,158 +47,29 @@ friend class VMStructs; protected: - enum CardValues { - clean_card = -1, - // The mask contains zeros in places for all other values. - clean_card_mask = clean_card - 31, + // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 + // or INCLUDE_JVMCI is being used + bool _defer_initial_card_mark; + CardTable* _card_table; - dirty_card = 0, - precleaned_card = 1, - claimed_card = 2, - deferred_card = 4, - last_card = 8, - CT_MR_BS_last_reserved = 16 - }; - - // a word's worth (row) of clean card values - static const intptr_t clean_card_row = (intptr_t)(-1); - - // The declaration order of these const fields is important; see the - // constructor before changing. 
- const MemRegion _whole_heap; // the region covered by the card table - size_t _guard_index; // index of very last element in the card - // table; it is set to a guard value - // (last_card) and should never be modified - size_t _last_valid_index; // index of the last valid element - const size_t _page_size; // page size used when mapping _byte_map - size_t _byte_map_size; // in bytes - jbyte* _byte_map; // the card marking array - - // Some barrier sets create tables whose elements correspond to parts of - // the heap; the CardTableModRefBS is an example. Such barrier sets will - // normally reserve space for such tables, and commit parts of the table - // "covering" parts of the heap that are committed. At most one covered - // region per generation is needed. - static const int _max_covered_regions = 2; - - int _cur_covered_regions; - - // The covered regions should be in address order. - MemRegion* _covered; - // The committed regions correspond one-to-one to the covered regions. - // They represent the card-table memory that has been committed to service - // the corresponding covered region. It may be that committed region for - // one covered region corresponds to a larger region because of page-size - // roundings. Thus, a committed region for one covered region may - // actually extend onto the card-table space for the next covered region. - MemRegion* _committed; - - // The last card is a guard card, and we commit the page for it so - // we can use the card for verification purposes. We make sure we never - // uncommit the MemRegion for that page. - MemRegion _guard_region; - - inline size_t compute_byte_map_size(); - - // Finds and return the index of the region, if any, to which the given - // region would be contiguous. If none exists, assign a new region and - // returns its index. Requires that no more than the maximum number of - // covered regions defined in the constructor are ever in use. - int find_covering_region_by_base(HeapWord* base); - - // Same as above, but finds the region containing the given address - // instead of starting at a given base address. - int find_covering_region_containing(HeapWord* addr); - - // Resize one of the regions covered by the remembered set. - virtual void resize_covered_region(MemRegion new_region); - - // Returns the leftmost end of a committed region corresponding to a - // covered region before covered region "ind", or else "NULL" if "ind" is - // the first covered region. - HeapWord* largest_prev_committed_end(int ind) const; - - // Returns the part of the region mr that doesn't intersect with - // any committed region other than self. Used to prevent uncommitting - // regions that are also committed by other regions. Also protects - // against uncommitting the guard region. - MemRegion committed_unique_to_self(int self, MemRegion mr) const; - - // Mapping from address to card marking array entry - jbyte* byte_for(const void* p) const { - assert(_whole_heap.contains(p), - "Attempt to access p = " PTR_FORMAT " out of bounds of " - " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")", - p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end())); - jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift]; - assert(result >= _byte_map && result < _byte_map + _byte_map_size, - "out of bounds accessor for card marking array"); - return result; - } - - // The card table byte one after the card marking array - // entry for argument address. Typically used for higher bounds - // for loops iterating through the card table. 
-  jbyte* byte_after(const void* p) const {
-    return byte_for(p) + 1;
-  }
-
-  // Dirty the bytes corresponding to "mr" (not all of which must be
-  // covered.)
-  void dirty_MemRegion(MemRegion mr);
-
-  // Clear (to clean_card) the bytes entirely contained within "mr" (not
-  // all of which must be covered.)
-  void clear_MemRegion(MemRegion mr);
+  CardTableModRefBS(CardTable* card_table, const BarrierSet::FakeRtti& fake_rtti);

 public:
-  // Constants
-  enum SomePublicConstants {
-    card_shift                 = 9,
-    card_size                  = 1 << card_shift,
-    card_size_in_words         = card_size / sizeof(HeapWord)
-  };
+  CardTableModRefBS(CardTable* card_table);
+  ~CardTableModRefBS();

-  static int clean_card_val()      { return clean_card; }
-  static int clean_card_mask_val() { return clean_card_mask; }
-  static int dirty_card_val()      { return dirty_card; }
-  static int claimed_card_val()    { return claimed_card; }
-  static int precleaned_card_val() { return precleaned_card; }
-  static int deferred_card_val()   { return deferred_card; }
+  CardTable* card_table() const { return _card_table; }

   virtual void initialize();

-  // *** Barrier set functions.
-
-  // Initialization utilities; covered_words is the size of the covered region
-  // in, um, words.
-  inline size_t cards_required(size_t covered_words) {
-    // Add one for a guard card, used to detect errors.
-    const size_t words = align_up(covered_words, card_size_in_words);
-    return words / card_size_in_words + 1;
+  void write_region(MemRegion mr) {
+    invalidate(mr);
   }

 protected:
-  CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
-  ~CardTableModRefBS();
-
- protected:
-  void write_region_work(MemRegion mr) {
-    dirty_MemRegion(mr);
-  }
-
- protected:
-  void write_ref_array_work(MemRegion mr) {
-    dirty_MemRegion(mr);
-  }
+  void write_ref_array_work(MemRegion mr);

 public:
-  bool is_aligned(HeapWord* addr) {
-    return is_card_aligned(addr);
-  }
-
-  // *** Card-table-barrier-specific things.
-
   // Record a reference update. Note that these versions are precise!
   // The scanning code has to handle the fact that the write barrier may be
   // either precise or imprecise. We make non-virtual inline variants of
@@ -204,116 +77,51 @@
   template <DecoratorSet decorators, typename T>
   void write_ref_field_post(T* field, oop newVal);

-  // These are used by G1, when it uses the card table as a temporary data
-  // structure for card claiming.
-  bool is_card_dirty(size_t card_index) {
-    return _byte_map[card_index] == dirty_card_val();
+  virtual void invalidate(MemRegion mr);
+
+  // ReduceInitialCardMarks
+  void initialize_deferred_card_mark_barriers();
+
+  // If the CollectedHeap was asked to defer a store barrier above,
+  // this informs it to flush such a deferred store barrier to the
+  // remembered set.
+  void flush_deferred_card_mark_barrier(JavaThread* thread);
+
+  // Can a compiler initialize a new object without store barriers?
+  // This permission only extends from the creation of a new object
+  // via a TLAB up to the first subsequent safepoint. If such permission
+  // is granted for this heap type, the compiler promises to call
+  // on_slowpath_allocation_exit() below on any slow path allocation of
+  // a new object for which such initializing store barriers will
+  // have been elided. G1, like CMS, allows this, but should be
+  // ready to provide a compensating write barrier as necessary
+  // if that storage came out of a non-young region.
The efficiency + // of this implementation depends crucially on being able to + // answer very efficiently in constant time whether a piece of + // storage in the heap comes from a young region or not. + // See ReduceInitialCardMarks. + virtual bool can_elide_tlab_store_barriers() const { + return true; } - void mark_card_dirty(size_t card_index) { - _byte_map[card_index] = dirty_card_val(); - } + // If a compiler is eliding store barriers for TLAB-allocated objects, + // we will be informed of a slow-path allocation by a call + // to on_slowpath_allocation_exit() below. Such a call precedes the + // initialization of the object itself, and no post-store-barriers will + // be issued. Some heap types require that the barrier strictly follows + // the initializing stores. (This is currently implemented by deferring the + // barrier until the next slow-path allocation or gc-related safepoint.) + // This interface answers whether a particular barrier type needs the card + // mark to be thus strictly sequenced after the stores. + virtual bool card_mark_must_follow_store() const; - bool is_card_clean(size_t card_index) { - return _byte_map[card_index] == clean_card_val(); - } + virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj); + virtual void on_thread_detach(JavaThread* thread); - // Card marking array base (adjusted for heap low boundary) - // This would be the 0th element of _byte_map, if the heap started at 0x0. - // But since the heap starts at some higher address, this points to somewhere - // before the beginning of the actual _byte_map. - jbyte* byte_map_base; + virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); } - // Return true if "p" is at the start of a card. - bool is_card_aligned(HeapWord* p) { - jbyte* pcard = byte_for(p); - return (addr_for(pcard) == p); - } - - HeapWord* align_to_card_boundary(HeapWord* p) { - jbyte* pcard = byte_for(p + card_size_in_words - 1); - return addr_for(pcard); - } - - // The kinds of precision a CardTableModRefBS may offer. - enum PrecisionStyle { - Precise, - ObjHeadPreciseArray - }; - - // Tells what style of precision this card table offers. - PrecisionStyle precision() { - return ObjHeadPreciseArray; // Only one supported for now. - } - - // ModRefBS functions. - virtual void invalidate(MemRegion mr); - void clear(MemRegion mr); - void dirty(MemRegion mr); - - // *** Card-table-RemSet-specific things. - - static uintx ct_max_alignment_constraint(); - - // Apply closure "cl" to the dirty cards containing some part of - // MemRegion "mr". - void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl); - - // Return the MemRegion corresponding to the first maximal run - // of dirty cards lying completely within MemRegion mr. - // If reset is "true", then sets those card table entries to the given - // value. - MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset, - int reset_val); - - // Provide read-only access to the card table array. - const jbyte* byte_for_const(const void* p) const { - return byte_for(p); - } - const jbyte* byte_after_const(const void* p) const { - return byte_after(p); - } - - // Mapping from card marking array entry to address of first word - HeapWord* addr_for(const jbyte* p) const { - assert(p >= _byte_map && p < _byte_map + _byte_map_size, - "out of bounds access to card marking array. 
p: " PTR_FORMAT - " _byte_map: " PTR_FORMAT " _byte_map + _byte_map_size: " PTR_FORMAT, - p2i(p), p2i(_byte_map), p2i(_byte_map + _byte_map_size)); - size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte)); - HeapWord* result = (HeapWord*) (delta << card_shift); - assert(_whole_heap.contains(result), - "Returning result = " PTR_FORMAT " out of bounds of " - " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")", - p2i(result), p2i(_whole_heap.start()), p2i(_whole_heap.end())); - return result; - } - - // Mapping from address to card marking array index. - size_t index_for(void* p) { - assert(_whole_heap.contains(p), - "Attempt to access p = " PTR_FORMAT " out of bounds of " - " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")", - p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end())); - return byte_for(p) - _byte_map; - } - - const jbyte* byte_for_index(const size_t card_index) const { - return _byte_map + card_index; - } - - // Print a description of the memory for the barrier set virtual void print_on(outputStream* st) const; - void verify(); - void verify_guard(); - - // val_equals -> it will check that all cards covered by mr equal val - // !val_equals -> it will check that all cards covered by mr do not equal val - void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN; - void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN; - void verify_dirty_region(MemRegion mr) PRODUCT_RETURN; - template class AccessBarrier: public ModRefBarrierSet::AccessBarrier {}; }; diff --git a/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp b/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp --- a/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp +++ b/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,17 +26,18 @@ #define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_INLINE_HPP #include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTable.hpp" #include "runtime/orderAccess.inline.hpp" template inline void CardTableModRefBS::write_ref_field_post(T* field, oop newVal) { - volatile jbyte* byte = byte_for(field); + volatile jbyte* byte = _card_table->byte_for(field); if (UseConcMarkSweepGC) { // Perform a releasing store if using CMS so that it may // scan and clear the cards concurrently during pre-cleaning. - OrderAccess::release_store(byte, jbyte(dirty_card)); + OrderAccess::release_store(byte, CardTable::dirty_card_val()); } else { - *byte = dirty_card; + *byte = CardTable::dirty_card_val(); } } diff --git a/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.cpp b/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.cpp deleted file mode 100644 --- a/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.cpp +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "gc/shared/cardTableModRefBS.inline.hpp" -#include "gc/shared/cardTableRS.hpp" -#include "memory/allocation.inline.hpp" -#include "gc/shared/space.inline.hpp" - -CardTableModRefBSForCTRS::CardTableModRefBSForCTRS(MemRegion whole_heap) : - CardTableModRefBS( - whole_heap, - BarrierSet::FakeRtti(BarrierSet::CardTableForRS)), - // LNC functionality - _lowest_non_clean(NULL), - _lowest_non_clean_chunk_size(NULL), - _lowest_non_clean_base_chunk_index(NULL), - _last_LNC_resizing_collection(NULL) -{ } - -void CardTableModRefBSForCTRS::initialize() { - CardTableModRefBS::initialize(); - _lowest_non_clean = - NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC); - _lowest_non_clean_chunk_size = - NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC); - _lowest_non_clean_base_chunk_index = - NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC); - _last_LNC_resizing_collection = - NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC); - if (_lowest_non_clean == NULL - || _lowest_non_clean_chunk_size == NULL - || _lowest_non_clean_base_chunk_index == NULL - || _last_LNC_resizing_collection == NULL) - vm_exit_during_initialization("couldn't allocate an LNC array."); - for (int i = 0; i < _max_covered_regions; i++) { - _lowest_non_clean[i] = NULL; - _lowest_non_clean_chunk_size[i] = 0; - _last_LNC_resizing_collection[i] = -1; - } -} - -CardTableModRefBSForCTRS::~CardTableModRefBSForCTRS() { - if (_lowest_non_clean) { - FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean); - _lowest_non_clean = NULL; - } - if (_lowest_non_clean_chunk_size) { - FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size); - _lowest_non_clean_chunk_size = NULL; - } - if (_lowest_non_clean_base_chunk_index) { - FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index); - _lowest_non_clean_base_chunk_index = NULL; - } - if (_last_LNC_resizing_collection) { - FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection); - _last_LNC_resizing_collection = NULL; - } -} - -bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) { - return - card_is_dirty_wrt_gen_iter(cv) || - _rs->is_prev_nonclean_card_val(cv); -} - -bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) { - return - cv != clean_card && - (card_is_dirty_wrt_gen_iter(cv) || - CardTableRS::youngergen_may_have_been_dirty(cv)); -} - -void CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel( - Space* sp, - MemRegion mr, - OopsInGenClosure* cl, - CardTableRS* ct, - uint n_threads) -{ - if (!mr.is_empty()) { - if (n_threads > 0) { -#if INCLUDE_ALL_GCS - non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads); -#else // INCLUDE_ALL_GCS - fatal("Parallel gc not supported here."); -#endif // INCLUDE_ALL_GCS - } else { - // clear_cl finds contiguous dirty ranges of cards to process and 
clear. - - // This is the single-threaded version used by DefNew. - const bool parallel = false; - - DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel); - ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel); - - clear_cl.do_MemRegion(mr); - } - } -} - diff --git a/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.hpp b/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.hpp --- a/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.hpp +++ b/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,6 +43,12 @@ void set_CTRS(CardTableRS* rs) { _rs = rs; } + virtual bool card_mark_must_follow_store() const { + return UseConcMarkSweepGC; + } + + virtual bool is_in_young(oop obj) const; + private: CardTableRS* _rs; diff --git a/src/hotspot/share/gc/shared/cardTableRS.cpp b/src/hotspot/share/gc/shared/cardTableRS.cpp --- a/src/hotspot/share/gc/shared/cardTableRS.cpp +++ b/src/hotspot/share/gc/shared/cardTableRS.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,41 +75,6 @@ } -CardTableRS::CardTableRS(MemRegion whole_heap) : - _bs(NULL), - _cur_youngergen_card_val(youngergenP1_card) -{ - _ct_bs = new CardTableModRefBSForCTRS(whole_heap); - _ct_bs->initialize(); - set_bs(_ct_bs); - // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations() - // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet. - uint max_gens = 2; - _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1, - mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL); - if (_last_cur_val_in_gen == NULL) { - vm_exit_during_initialization("Could not create last_cur_val_in_gen array."); - } - for (uint i = 0; i < max_gens + 1; i++) { - _last_cur_val_in_gen[i] = clean_card_val(); - } - _ct_bs->set_CTRS(this); -} - -CardTableRS::~CardTableRS() { - if (_ct_bs) { - delete _ct_bs; - _ct_bs = NULL; - } - if (_last_cur_val_in_gen) { - FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen); - } -} - -void CardTableRS::resize_covered_region(MemRegion new_region) { - _ct_bs->resize_covered_region(new_region); -} - jbyte CardTableRS::find_unused_youngergenP_card_value() { for (jbyte v = youngergenP1_card; v < cur_youngergen_and_prev_nonclean_card; @@ -247,7 +212,7 @@ // fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary if (is_word_aligned(cur_entry)) { jbyte* cur_row = cur_entry - BytesPerWord; - while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row()) { + while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row_val()) { cur_row -= BytesPerWord; } cur_entry = cur_row + BytesPerWord; @@ -283,7 +248,7 @@ // cur-younger-gen ==> cur_younger_gen // cur_youngergen_and_prev_nonclean_card ==> no change. 
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) { - volatile jbyte* entry = _ct_bs->byte_for(field); + volatile jbyte* entry = byte_for(field); do { jbyte entry_val = *entry; // We put this first because it's probably the most common case. @@ -341,7 +306,7 @@ ShouldNotReachHere(); } #endif - _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads); + non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads); } void CardTableRS::clear_into_younger(Generation* old_gen) { @@ -642,5 +607,115 @@ // generational heaps. VerifyCTGenClosure blk(this); GenCollectedHeap::heap()->generation_iterate(&blk, false); - _ct_bs->verify(); + CardTable::verify(); } + +CardTableRS::CardTableRS(MemRegion whole_heap) : + CardTable(whole_heap, /* scanned concurrently */ UseConcMarkSweepGC && CMSPrecleaningEnabled), + _cur_youngergen_card_val(youngergenP1_card), + // LNC functionality + _lowest_non_clean(NULL), + _lowest_non_clean_chunk_size(NULL), + _lowest_non_clean_base_chunk_index(NULL), + _last_LNC_resizing_collection(NULL) +{ + // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations() + // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet. + uint max_gens = 2; + _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1, + mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL); + if (_last_cur_val_in_gen == NULL) { + vm_exit_during_initialization("Could not create last_cur_val_in_gen array."); + } + for (uint i = 0; i < max_gens + 1; i++) { + _last_cur_val_in_gen[i] = clean_card_val(); + } +} + +CardTableRS::~CardTableRS() { + if (_last_cur_val_in_gen) { + FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen); + _last_cur_val_in_gen = NULL; + } + if (_lowest_non_clean) { + FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean); + _lowest_non_clean = NULL; + } + if (_lowest_non_clean_chunk_size) { + FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size); + _lowest_non_clean_chunk_size = NULL; + } + if (_lowest_non_clean_base_chunk_index) { + FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index); + _lowest_non_clean_base_chunk_index = NULL; + } + if (_last_LNC_resizing_collection) { + FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection); + _last_LNC_resizing_collection = NULL; + } +} + +void CardTableRS::initialize() { + CardTable::initialize(); + _lowest_non_clean = + NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC); + _lowest_non_clean_chunk_size = + NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC); + _lowest_non_clean_base_chunk_index = + NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC); + _last_LNC_resizing_collection = + NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC); + if (_lowest_non_clean == NULL + || _lowest_non_clean_chunk_size == NULL + || _lowest_non_clean_base_chunk_index == NULL + || _last_LNC_resizing_collection == NULL) + vm_exit_during_initialization("couldn't allocate an LNC array."); + for (int i = 0; i < _max_covered_regions; i++) { + _lowest_non_clean[i] = NULL; + _lowest_non_clean_chunk_size[i] = 0; + _last_LNC_resizing_collection[i] = -1; + } +} + +bool CardTableRS::card_will_be_scanned(jbyte cv) { + return card_is_dirty_wrt_gen_iter(cv) || is_prev_nonclean_card_val(cv); +} + +bool CardTableRS::card_may_have_been_dirty(jbyte cv) { + return + cv != clean_card && + (card_is_dirty_wrt_gen_iter(cv) || + CardTableRS::youngergen_may_have_been_dirty(cv)); +} + +void CardTableRS::non_clean_card_iterate_possibly_parallel( + Space* sp, + MemRegion mr, + 
OopsInGenClosure* cl,
+                                                     CardTableRS* ct,
+                                                     uint n_threads)
+{
+  if (!mr.is_empty()) {
+    if (n_threads > 0) {
+#if INCLUDE_ALL_GCS
+      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
+#else  // INCLUDE_ALL_GCS
+      fatal("Parallel gc not supported here.");
+#endif // INCLUDE_ALL_GCS
+    } else {
+      // clear_cl finds contiguous dirty ranges of cards to process and clear.
+
+      // This is the single-threaded version used by DefNew.
+      const bool parallel = false;
+
+      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
+      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
+
+      clear_cl.do_MemRegion(mr);
+    }
+  }
+}
+
+bool CardTableRS::is_in_young(oop obj) const {
+  return GenCollectedHeap::heap()->is_in_young(obj);
+}
diff --git a/src/hotspot/share/gc/shared/cardTableRS.hpp b/src/hotspot/share/gc/shared/cardTableRS.hpp
--- a/src/hotspot/share/gc/shared/cardTableRS.hpp
+++ b/src/hotspot/share/gc/shared/cardTableRS.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,11 @@
 #ifndef SHARE_VM_GC_SHARED_CARDTABLERS_HPP
 #define SHARE_VM_GC_SHARED_CARDTABLERS_HPP

-#include "gc/shared/cardTableModRefBSForCTRS.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "memory/memRegion.hpp"
+#include "oops/oop.hpp"

+class DirtyCardToOopClosure;
 class Generation;
 class Space;
 class OopsInGenClosure;
@@ -46,44 +48,28 @@
 // This RemSet uses a card table both as shared data structure
 // for a mod ref barrier set and for the rem set information.

-class CardTableRS: public CHeapObj<mtGC> {
+class CardTableRS: public CardTable {
   friend class VMStructs;
   // Below are private classes used in impl.
   friend class VerifyCTSpaceClosure;
   friend class ClearNoncleanCardWrapper;

-  static jbyte clean_card_val() {
-    return CardTableModRefBSForCTRS::clean_card;
-  }
-
-  static intptr_t clean_card_row() {
-    return CardTableModRefBSForCTRS::clean_card_row;
-  }
-
-  static bool
-  card_is_dirty_wrt_gen_iter(jbyte cv) {
-    return CardTableModRefBSForCTRS::card_is_dirty_wrt_gen_iter(cv);
-  }
-
   CLDRemSet _cld_rem_set;
-  BarrierSet* _bs;
-
-  CardTableModRefBSForCTRS* _ct_bs;

   void verify_space(Space* s, HeapWord* gen_start);

   enum ExtendedCardValue {
-    youngergen_card   = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 1,
+    youngergen_card   = CT_MR_BS_last_reserved + 1,
     // These are for parallel collection.
     // There are three P (parallel) youngergen card values.  In general, this
     // needs to be more than the number of generations (including the perm
     // gen) that might have younger_refs_do invoked on them separately.  So
     // if we add more gens, we have to add more values.
- youngergenP1_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 2, - youngergenP2_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 3, - youngergenP3_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 4, + youngergenP1_card = CT_MR_BS_last_reserved + 2, + youngergenP2_card = CT_MR_BS_last_reserved + 3, + youngergenP3_card = CT_MR_BS_last_reserved + 4, cur_youngergen_and_prev_nonclean_card = - CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 5 + CT_MR_BS_last_reserved + 5 }; // An array that contains, for each generation, the card table value last @@ -116,16 +102,8 @@ CardTableRS(MemRegion whole_heap); ~CardTableRS(); - // Return the barrier set associated with "this." - BarrierSet* bs() { return _bs; } - - // Set the barrier set. - void set_bs(BarrierSet* bs) { _bs = bs; } - CLDRemSet* cld_rem_set() { return &_cld_rem_set; } - CardTableModRefBSForCTRS* ct_bs() { return _ct_bs; } - void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads); // Override. @@ -137,7 +115,7 @@ void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads); void inline_write_ref_field_gc(void* field, oop new_val) { - jbyte* byte = _ct_bs->byte_for(field); + jbyte* byte = byte_for(field); *byte = youngergen_card; } void write_ref_field_gc_work(void* field, oop new_val) { @@ -149,30 +127,17 @@ // a younger card in the current collection. virtual void write_ref_field_gc_par(void* field, oop new_val); - void resize_covered_region(MemRegion new_region); - bool is_aligned(HeapWord* addr) { - return _ct_bs->is_card_aligned(addr); + return is_card_aligned(addr); } void verify(); + void initialize(); - void clear(MemRegion mr) { _ct_bs->clear(mr); } void clear_into_younger(Generation* old_gen); - void invalidate(MemRegion mr) { - _ct_bs->invalidate(mr); - } void invalidate_or_clear(Generation* old_gen); - static uintx ct_max_alignment_constraint() { - return CardTableModRefBSForCTRS::ct_max_alignment_constraint(); - } - - jbyte* byte_for(void* p) { return _ct_bs->byte_for(p); } - jbyte* byte_after(void* p) { return _ct_bs->byte_after(p); } - HeapWord* addr_for(jbyte* p) { return _ct_bs->addr_for(p); } - bool is_prev_nonclean_card_val(jbyte v) { return youngergen_card <= v && @@ -184,6 +149,94 @@ return cv == CardTableRS::cur_youngergen_and_prev_nonclean_card; } + // *** Support for parallel card scanning. + + // dirty and precleaned are equivalent wrt younger_refs_iter. + static bool card_is_dirty_wrt_gen_iter(jbyte cv) { + return cv == dirty_card || cv == precleaned_card; + } + + // Returns "true" iff the value "cv" will cause the card containing it + // to be scanned in the current traversal. May be overridden by + // subtypes. + bool card_will_be_scanned(jbyte cv); + + // Returns "true" iff the value "cv" may have represented a dirty card at + // some point. + bool card_may_have_been_dirty(jbyte cv); + + // Iterate over the portion of the card-table which covers the given + // region mr in the given space and apply cl to any dirty sub-regions + // of mr. Clears the dirty cards as they are processed. + void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr, + OopsInGenClosure* cl, CardTableRS* ct, + uint n_threads); + + // Work method used to implement non_clean_card_iterate_possibly_parallel() + // above in the parallel case. + void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr, + OopsInGenClosure* cl, CardTableRS* ct, + uint n_threads); + + // This is an array, one element per covered region of the card table. 
+  // Each entry is itself an array, with one element per chunk in the
+  // covered region.  Each entry of these arrays is the lowest non-clean
+  // card of the corresponding chunk containing part of an object from the
+  // previous chunk, or else NULL.
+  typedef jbyte*  CardPtr;
+  typedef CardPtr* CardArr;
+  CardArr* _lowest_non_clean;
+  size_t*  _lowest_non_clean_chunk_size;
+  uintptr_t* _lowest_non_clean_base_chunk_index;
+  volatile int* _last_LNC_resizing_collection;
+
+  // Initializes "lowest_non_clean" to point to the array for the region
+  // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
+  // index corresponding to the first element of that array.
+  // Ensures that these arrays are of sufficient size, allocating if necessary.
+  // May be called by several threads concurrently.
+  void get_LNC_array_for_space(Space* sp,
+                               jbyte**& lowest_non_clean,
+                               uintptr_t& lowest_non_clean_base_chunk_index,
+                               size_t& lowest_non_clean_chunk_size);
+
+  // Returns the number of chunks necessary to cover "mr".
+  size_t chunks_to_cover(MemRegion mr) {
+    return (size_t)(addr_to_chunk_index(mr.last()) -
+                    addr_to_chunk_index(mr.start()) + 1);
+  }
+
+  // Returns the index of the chunk in a stride which
+  // covers the given address.
+  uintptr_t addr_to_chunk_index(const void* addr) {
+    uintptr_t card = (uintptr_t) byte_for(addr);
+    return card / ParGCCardsPerStrideChunk;
+  }
+
+  // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
+  // to the cards in the stride (of n_strides) within the given space.
+  void process_stride(Space* sp,
+                      MemRegion used,
+                      jint stride, int n_strides,
+                      OopsInGenClosure* cl,
+                      CardTableRS* ct,
+                      jbyte** lowest_non_clean,
+                      uintptr_t lowest_non_clean_base_chunk_index,
+                      size_t lowest_non_clean_chunk_size);
+
+  // Makes sure that chunk boundaries are handled appropriately, by
+  // adjusting the min_done of dcto_cl, and by using a special card-table
+  // value to indicate how min_done should be set.
+  void process_chunk_boundaries(Space* sp,
+                                DirtyCardToOopClosure* dcto_cl,
+                                MemRegion chunk_mr,
+                                MemRegion used,
+                                jbyte** lowest_non_clean,
+                                uintptr_t lowest_non_clean_base_chunk_index,
+                                size_t lowest_non_clean_chunk_size);
+
+  virtual bool is_in_young(oop obj) const;
+
 };

 class ClearNoncleanCardWrapper: public MemRegionClosure {
diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "gc/shared/barrierSet.inline.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
+#include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
@@ -41,9 +42,11 @@
 #include "runtime/init.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
+#include "runtime/vmThread.hpp"
 #include "services/heapDumper.hpp"
 #include "utilities/align.hpp"

+class ClassLoaderData;

 #ifdef ASSERT
 int CollectedHeap::_fire_out_of_memory_count = 0;
@@ -185,8 +188,7 @@
   _total_collections(0),
   _total_full_collections(0),
   _gc_cause(GCCause::_no_gc),
-  _gc_lastcause(GCCause::_no_gc),
-  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
+  _gc_lastcause(GCCause::_no_gc)
 {
   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   const size_t elements_per_word = HeapWordSize / sizeof(jint);
@@ -242,22 +244,85 @@
   }
 }

+MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
+                                                            size_t word_size,
+                                                            Metaspace::MetadataType mdtype) {
+  uint loop_count = 0;
+  uint gc_count = 0;
+  uint full_gc_count = 0;
+
+  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");
+
+  do {
+    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
+    if (result != NULL) {
+      return result;
+    }
+
+    if (GCLocker::is_active_and_needs_gc()) {
+      // If the GCLocker is active, just expand and allocate.
+      // If that does not succeed, wait if this thread is not
+      // in a critical section itself.
+      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
+      if (result != NULL) {
+        return result;
+      }
+      JavaThread* jthr = JavaThread::current();
+      if (!jthr->in_critical()) {
+        // Wait for JNI critical section to be exited
+        GCLocker::stall_until_clear();
+        // The GC invoked by the last thread leaving the critical
+        // section will be a young collection and a full collection
+        // is (currently) needed for unloading classes so continue
+        // to the next iteration to get a full GC.
+        continue;
+      } else {
+        if (CheckJNICalls) {
+          fatal("Possible deadlock due to allocating while"
+                " in jni critical section");
+        }
+        return NULL;
+      }
+    }
+
+    {  // Need lock to get self consistent gc_count's
+      MutexLocker ml(Heap_lock);
+      gc_count      = Universe::heap()->total_collections();
+      full_gc_count = Universe::heap()->total_full_collections();
+    }
+
+    // Generate a VM operation
+    VM_CollectForMetadataAllocation op(loader_data,
+                                       word_size,
+                                       mdtype,
+                                       gc_count,
+                                       full_gc_count,
+                                       GCCause::_metadata_GC_threshold);
+    VMThread::execute(&op);
+
+    // If GC was locked out, try again. Check before checking success because the
+    // prologue could have succeeded and the GC could still have been locked out.
+ if (op.gc_locked()) { + continue; + } + + if (op.prologue_succeeded()) { + return op.result(); + } + loop_count++; + if ((QueuedAllocationWarningCount > 0) && + (loop_count % QueuedAllocationWarningCount == 0)) { + log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times," + " size=" SIZE_FORMAT, loop_count, word_size); + } + } while (true); // Until a GC is done +} + void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) { _barrier_set = barrier_set; BarrierSet::set_bs(barrier_set); } -void CollectedHeap::pre_initialize() { - // Used for ReduceInitialCardMarks (when COMPILER2 is used); - // otherwise remains unused. -#if COMPILER2_OR_JVMCI - _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers() - && (DeferInitialCardMark || card_mark_must_follow_store()); -#else - assert(_defer_initial_card_mark == false, "Who would set it?"); -#endif -} - #ifndef PRODUCT void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) { if (CheckMemoryInitialization && ZapUnusedHeapArea) { @@ -341,28 +406,6 @@ return Universe::heap()->tlab_post_allocation_setup(obj); } -void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) { - MemRegion deferred = thread->deferred_card_mark(); - if (!deferred.is_empty()) { - assert(_defer_initial_card_mark, "Otherwise should be empty"); - { - // Verify that the storage points to a parsable object in heap - DEBUG_ONLY(oop old_obj = oop(deferred.start());) - assert(is_in(old_obj), "Not in allocated heap"); - assert(!can_elide_initializing_store_barrier(old_obj), - "Else should have been filtered in new_store_pre_barrier()"); - assert(oopDesc::is_oop(old_obj, true), "Not an oop"); - assert(deferred.word_size() == (size_t)(old_obj->size()), - "Mismatch: multiple objects?"); - } - BarrierSet* bs = barrier_set(); - bs->write_region(deferred); - // "Clear" the deferred_card_mark field - thread->set_deferred_card_mark(MemRegion()); - } - assert(thread->deferred_card_mark().is_empty(), "invariant"); -} - size_t CollectedHeap::max_tlab_size() const { // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE]. // This restriction could be removed by enabling filling with multiple arrays. @@ -378,72 +421,6 @@ return align_down(max_int_size, MinObjAlignment); } -// Helper for ReduceInitialCardMarks. For performance, -// compiled code may elide card-marks for initializing stores -// to a newly allocated object along the fast-path. We -// compensate for such elided card-marks as follows: -// (a) Generational, non-concurrent collectors, such as -// GenCollectedHeap(ParNew,DefNew,Tenured) and -// ParallelScavengeHeap(ParallelGC, ParallelOldGC) -// need the card-mark if and only if the region is -// in the old gen, and do not care if the card-mark -// succeeds or precedes the initializing stores themselves, -// so long as the card-mark is completed before the next -// scavenge. For all these cases, we can do a card mark -// at the point at which we do a slow path allocation -// in the old gen, i.e. in this call. -// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires -// in addition that the card-mark for an old gen allocated -// object strictly follow any associated initializing stores. 
-// In these cases, the memRegion remembered below is -// used to card-mark the entire region either just before the next -// slow-path allocation by this thread or just before the next scavenge or -// CMS-associated safepoint, whichever of these events happens first. -// (The implicit assumption is that the object has been fully -// initialized by this point, a fact that we assert when doing the -// card-mark.) -// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a -// G1 concurrent marking is in progress an SATB (pre-write-)barrier -// is used to remember the pre-value of any store. Initializing -// stores will not need this barrier, so we need not worry about -// compensating for the missing pre-barrier here. Turning now -// to the post-barrier, we note that G1 needs a RS update barrier -// which simply enqueues a (sequence of) dirty cards which may -// optionally be refined by the concurrent update threads. Note -// that this barrier need only be applied to a non-young write, -// but, like in CMS, because of the presence of concurrent refinement -// (much like CMS' precleaning), must strictly follow the oop-store. -// Thus, using the same protocol for maintaining the intended -// invariants turns out, serendepitously, to be the same for both -// G1 and CMS. -// -// For any future collector, this code should be reexamined with -// that specific collector in mind, and the documentation above suitably -// extended and updated. -oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) { - // If a previous card-mark was deferred, flush it now. - flush_deferred_store_barrier(thread); - if (can_elide_initializing_store_barrier(new_obj) || - new_obj->is_typeArray()) { - // Arrays of non-references don't need a pre-barrier. - // The deferred_card_mark region should be empty - // following the flush above. - assert(thread->deferred_card_mark().is_empty(), "Error"); - } else { - MemRegion mr((HeapWord*)new_obj, new_obj->size()); - assert(!mr.is_empty(), "Error"); - if (_defer_initial_card_mark) { - // Defer the card mark - thread->set_deferred_card_mark(mr); - } else { - // Do the card mark - BarrierSet* bs = barrier_set(); - bs->write_region(mr); - } - } - return new_obj; -} - size_t CollectedHeap::filler_array_hdr_size() { return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long } @@ -546,24 +523,16 @@ " otherwise concurrent mutator activity may make heap " " unparsable again"); const bool use_tlab = UseTLAB; - const bool deferred = _defer_initial_card_mark; // The main thread starts allocating via a TLAB even before it // has added itself to the threads list at vm boot-up. JavaThreadIteratorWithHandle jtiwh; assert(!use_tlab || jtiwh.length() > 0, "Attempt to fill tlabs before main thread has been added" " to threads list is doomed to failure!"); + BarrierSet *bs = barrier_set(); for (; JavaThread *thread = jtiwh.next(); ) { if (use_tlab) thread->tlab().make_parsable(retire_tlabs); -#if COMPILER2_OR_JVMCI - // The deferred store barriers must all have been flushed to the - // card-table (or other remembered set structure) before GC starts - // processing the card-table (or other remembered set). 
- if (deferred) flush_deferred_store_barrier(thread); -#else - assert(!deferred, "Should be false"); - assert(thread->deferred_card_mark().is_empty(), "Should be empty"); -#endif + bs->make_parsable(thread); } } diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,6 +50,7 @@ class GCMemoryManager; class MemoryPool; class MetaspaceSummary; +class SoftRefPolicy; class Thread; class ThreadClosure; class VirtualSpaceSummary; @@ -102,10 +103,6 @@ GCHeapLog* _gc_heap_log; - // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 - // or INCLUDE_JVMCI is being used - bool _defer_initial_card_mark; - MemRegion _reserved; protected: @@ -130,13 +127,6 @@ // Constructor CollectedHeap(); - // Do common initializations that must follow instance construction, - // for example, those needing virtual calls. - // This code could perhaps be moved into initialize() but would - // be slightly more awkward because we want the latter to be a - // pure virtual. - void pre_initialize(); - // Create a new tlab. All TLAB allocations must go through this. virtual HeapWord* allocate_new_tlab(size_t size); @@ -418,45 +408,6 @@ return 0; } - // Can a compiler initialize a new object without store barriers? - // This permission only extends from the creation of a new object - // via a TLAB up to the first subsequent safepoint. If such permission - // is granted for this heap type, the compiler promises to call - // defer_store_barrier() below on any slow path allocation of - // a new object for which such initializing store barriers will - // have been elided. - virtual bool can_elide_tlab_store_barriers() const = 0; - - // If a compiler is eliding store barriers for TLAB-allocated objects, - // there is probably a corresponding slow path which can produce - // an object allocated anywhere. The compiler's runtime support - // promises to call this function on such a slow-path-allocated - // object before performing initializations that have elided - // store barriers. Returns new_obj, or maybe a safer copy thereof. - virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj); - - // Answers whether an initializing store to a new object currently - // allocated at the given address doesn't need a store - // barrier. Returns "true" if it doesn't need an initializing - // store barrier; answers "false" if it does. - virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0; - - // If a compiler is eliding store barriers for TLAB-allocated objects, - // we will be informed of a slow-path allocation by a call - // to new_store_pre_barrier() above. Such a call precedes the - // initialization of the object itself, and no post-store-barriers will - // be issued. Some heap types require that the barrier strictly follows - // the initializing stores. (This is currently implemented by deferring the - // barrier until the next slow-path allocation or gc-related safepoint.) - // This interface answers whether a particular heap type needs the card - // mark to be thus strictly sequenced after the stores. 
-  virtual bool card_mark_must_follow_store() const = 0;
-
-  // If the CollectedHeap was asked to defer a store barrier above,
-  // this informs it to flush such a deferred store barrier to the
-  // remembered set.
-  virtual void flush_deferred_store_barrier(JavaThread* thread);
-
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc". This probably implies as full a collection as the
   // "CollectedHeap" supports.
@@ -471,6 +422,10 @@
   // the context of the vm thread.
   virtual void collect_as_vm_thread(GCCause::Cause cause);

+  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
+                                                       size_t size,
+                                                       Metaspace::MetadataType mdtype);
+
   // Returns the barrier set for this heap
   BarrierSet* barrier_set() { return _barrier_set; }
   void set_barrier_set(BarrierSet* barrier_set);
@@ -498,6 +453,9 @@
   // Return the CollectorPolicy for the heap
   virtual CollectorPolicy* collector_policy() const = 0;

+  // Return the SoftRefPolicy for the heap.
+  virtual SoftRefPolicy* soft_ref_policy() = 0;
+
   virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
   virtual GrowableArray<MemoryPool*> memory_pools() = 0;

@@ -552,7 +510,7 @@
   void pre_full_gc_dump(GCTimer* timer);
   void post_full_gc_dump(GCTimer* timer);

-  VirtualSpaceSummary create_heap_space_summary();
+  virtual VirtualSpaceSummary create_heap_space_summary();
   GCHeapSummary create_heap_summary();

   MetaspaceSummary create_metaspace_summary();
@@ -670,20 +628,6 @@
     return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
   }
 #endif
-
- public:
-  // Copy the current allocation context statistics for the specified contexts.
-  // For each context in contexts, set the corresponding entries in the totals
-  // and accuracy arrays to the current values held by the statistics. Each
-  // array should be of length len.
-  // Returns true if there are more stats available.
-  virtual bool copy_allocation_context_stats(const jint* contexts,
-                                             jlong* totals,
-                                             jbyte* accuracy,
-                                             jint len) {
-    return false;
-  }
-
 };

 // Class to set and reset the GC cause for a CollectedHeap.
@@ -693,16 +637,12 @@
   GCCause::Cause _previous_cause;
  public:
   GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
-    assert(SafepointSynchronize::is_at_safepoint(),
-           "This method manipulates heap state without locking");
     _heap = heap;
     _previous_cause = _heap->gc_cause();
     _heap->set_gc_cause(cause);
   }

   ~GCCauseSetter() {
-    assert(SafepointSynchronize::is_at_safepoint(),
-           "This method manipulates heap state without locking");
     _heap->set_gc_cause(_previous_cause);
   }
 };
diff --git a/src/hotspot/share/gc/shared/collectorPolicy.cpp b/src/hotspot/share/gc/shared/collectorPolicy.cpp
--- a/src/hotspot/share/gc/shared/collectorPolicy.cpp
+++ b/src/hotspot/share/gc/shared/collectorPolicy.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -50,9 +50,7 @@ _heap_alignment(0), _initial_heap_byte_size(InitialHeapSize), _max_heap_byte_size(MaxHeapSize), - _min_heap_byte_size(Arguments::min_heap_size()), - _should_clear_all_soft_refs(false), - _all_soft_refs_clear(false) + _min_heap_byte_size(Arguments::min_heap_size()) {} #ifdef ASSERT @@ -145,20 +143,6 @@ DEBUG_ONLY(CollectorPolicy::assert_size_info();) } -bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) { - bool result = _should_clear_all_soft_refs; - set_should_clear_all_soft_refs(false); - return result; -} - -CardTableRS* CollectorPolicy::create_rem_set(MemRegion whole_heap) { - return new CardTableRS(whole_heap); -} - -void CollectorPolicy::cleared_all_soft_refs() { - _all_soft_refs_clear = true; -} - size_t CollectorPolicy::compute_heap_alignment() { // The card marking array and the offset arrays for old generations are // committed in os pages as well. Make sure they are entirely full (to @@ -186,10 +170,7 @@ _min_old_size(0), _initial_old_size(0), _max_old_size(0), - _gen_alignment(0), - _young_gen_spec(NULL), - _old_gen_spec(NULL), - _size_policy(NULL) + _gen_alignment(0) {} size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) { @@ -202,29 +183,6 @@ return desired_size < max_minus ? desired_size : max_minus; } - -void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size, - size_t init_promo_size, - size_t init_survivor_size) { - const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0; - _size_policy = new AdaptiveSizePolicy(init_eden_size, - init_promo_size, - init_survivor_size, - max_gc_pause_sec, - GCTimeRatio); -} - -void GenCollectorPolicy::cleared_all_soft_refs() { - // If near gc overhear limit, continue to clear SoftRefs. SoftRefs may - // have been cleared in the last collection but if the gc overhear - // limit continues to be near, SoftRefs should still be cleared. - if (size_policy() != NULL) { - _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near(); - } - - CollectorPolicy::cleared_all_soft_refs(); -} - size_t GenCollectorPolicy::young_gen_size_lower_bound() { // The young generation must be aligned and have room for eden + two survivors return align_up(3 * _space_alignment, _gen_alignment); @@ -580,322 +538,6 @@ DEBUG_ONLY(GenCollectorPolicy::assert_size_info();) } -HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size, - bool is_tlab, - bool* gc_overhead_limit_was_exceeded) { - GenCollectedHeap *gch = GenCollectedHeap::heap(); - - debug_only(gch->check_for_valid_allocation_state()); - assert(gch->no_gc_in_progress(), "Allocation during gc not allowed"); - - // In general gc_overhead_limit_was_exceeded should be false so - // set it so here and reset it to true only if the gc time - // limit is being exceeded as checked below. - *gc_overhead_limit_was_exceeded = false; - - HeapWord* result = NULL; - - // Loop until the allocation is satisfied, or unsatisfied after GC. - for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) { - HandleMark hm; // Discard any handles allocated in each iteration. - - // First allocation attempt is lock-free. 
- Generation *young = gch->young_gen(); - assert(young->supports_inline_contig_alloc(), - "Otherwise, must do alloc within heap lock"); - if (young->should_allocate(size, is_tlab)) { - result = young->par_allocate(size, is_tlab); - if (result != NULL) { - assert(gch->is_in_reserved(result), "result not in heap"); - return result; - } - } - uint gc_count_before; // Read inside the Heap_lock locked region. - { - MutexLocker ml(Heap_lock); - log_trace(gc, alloc)("GenCollectorPolicy::mem_allocate_work: attempting locked slow path allocation"); - // Note that only large objects get a shot at being - // allocated in later generations. - bool first_only = ! should_try_older_generation_allocation(size); - - result = gch->attempt_allocation(size, is_tlab, first_only); - if (result != NULL) { - assert(gch->is_in_reserved(result), "result not in heap"); - return result; - } - - if (GCLocker::is_active_and_needs_gc()) { - if (is_tlab) { - return NULL; // Caller will retry allocating individual object. - } - if (!gch->is_maximal_no_gc()) { - // Try and expand heap to satisfy request. - result = expand_heap_and_allocate(size, is_tlab); - // Result could be null if we are out of space. - if (result != NULL) { - return result; - } - } - - if (gclocker_stalled_count > GCLockerRetryAllocationCount) { - return NULL; // We didn't get to do a GC and we didn't get any memory. - } - - // If this thread is not in a jni critical section, we stall - // the requestor until the critical section has cleared and - // GC allowed. When the critical section clears, a GC is - // initiated by the last thread exiting the critical section; so - // we retry the allocation sequence from the beginning of the loop, - // rather than causing more, now probably unnecessary, GC attempts. - JavaThread* jthr = JavaThread::current(); - if (!jthr->in_critical()) { - MutexUnlocker mul(Heap_lock); - // Wait for JNI critical section to be exited - GCLocker::stall_until_clear(); - gclocker_stalled_count += 1; - continue; - } else { - if (CheckJNICalls) { - fatal("Possible deadlock due to allocating while" - " in jni critical section"); - } - return NULL; - } - } - - // Read the gc count while the heap lock is held. - gc_count_before = gch->total_collections(); - } - - VM_GenCollectForAllocation op(size, is_tlab, gc_count_before); - VMThread::execute(&op); - if (op.prologue_succeeded()) { - result = op.result(); - if (op.gc_locked()) { - assert(result == NULL, "must be NULL if gc_locked() is true"); - continue; // Retry and/or stall as necessary. - } - - // Allocation has failed and a collection - // has been done. If the gc time limit was exceeded the - // this time, return NULL so that an out-of-memory - // will be thrown. Clear gc_overhead_limit_exceeded - // so that the overhead exceeded does not persist. - - const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded(); - const bool softrefs_clear = all_soft_refs_clear(); - - if (limit_exceeded && softrefs_clear) { - *gc_overhead_limit_was_exceeded = true; - size_policy()->set_gc_overhead_limit_exceeded(false); - if (op.result() != NULL) { - CollectedHeap::fill_with_object(op.result(), size); - } - return NULL; - } - assert(result == NULL || gch->is_in_reserved(result), - "result not in heap"); - return result; - } - - // Give a warning if we seem to be looping forever. 
- if ((QueuedAllocationWarningCount > 0) && - (try_count % QueuedAllocationWarningCount == 0)) { - log_warning(gc, ergo)("GenCollectorPolicy::mem_allocate_work retries %d times," - " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : ""); - } - } -} - -HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size, - bool is_tlab) { - GenCollectedHeap *gch = GenCollectedHeap::heap(); - HeapWord* result = NULL; - Generation *old = gch->old_gen(); - if (old->should_allocate(size, is_tlab)) { - result = old->expand_and_allocate(size, is_tlab); - } - if (result == NULL) { - Generation *young = gch->young_gen(); - if (young->should_allocate(size, is_tlab)) { - result = young->expand_and_allocate(size, is_tlab); - } - } - assert(result == NULL || gch->is_in_reserved(result), "result not in heap"); - return result; -} - -HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size, - bool is_tlab) { - GenCollectedHeap *gch = GenCollectedHeap::heap(); - GCCauseSetter x(gch, GCCause::_allocation_failure); - HeapWord* result = NULL; - - assert(size != 0, "Precondition violated"); - if (GCLocker::is_active_and_needs_gc()) { - // GC locker is active; instead of a collection we will attempt - // to expand the heap, if there's room for expansion. - if (!gch->is_maximal_no_gc()) { - result = expand_heap_and_allocate(size, is_tlab); - } - return result; // Could be null if we are out of space. - } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) { - // Do an incremental collection. - gch->do_collection(false, // full - false, // clear_all_soft_refs - size, // size - is_tlab, // is_tlab - GenCollectedHeap::OldGen); // max_generation - } else { - log_trace(gc)(" :: Trying full because partial may fail :: "); - // Try a full collection; see delta for bug id 6266275 - // for the original code and why this has been simplified - // with from-space allocation criteria modified and - // such allocation moved out of the safepoint path. - gch->do_collection(true, // full - false, // clear_all_soft_refs - size, // size - is_tlab, // is_tlab - GenCollectedHeap::OldGen); // max_generation - } - - result = gch->attempt_allocation(size, is_tlab, false /*first_only*/); - - if (result != NULL) { - assert(gch->is_in_reserved(result), "result not in heap"); - return result; - } - - // OK, collection failed, try expansion. - result = expand_heap_and_allocate(size, is_tlab); - if (result != NULL) { - return result; - } - - // If we reach this point, we're really out of memory. Try every trick - // we can to reclaim memory. Force collection of soft references. Force - // a complete compaction of the heap. Any additional methods for finding - // free memory should be here, especially if they are expensive. If this - // attempt fails, an OOM exception will be thrown. - { - UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted - - gch->do_collection(true, // full - true, // clear_all_soft_refs - size, // size - is_tlab, // is_tlab - GenCollectedHeap::OldGen); // max_generation - } - - result = gch->attempt_allocation(size, is_tlab, false /* first_only */); - if (result != NULL) { - assert(gch->is_in_reserved(result), "result not in heap"); - return result; - } - - assert(!should_clear_all_soft_refs(), - "Flag should have been handled and cleared prior to this point"); - - // What else? We might try synchronous finalization later. 
If the total - // space available is large enough for the allocation, then a more - // complete compaction phase than we've tried so far might be - // appropriate. - return NULL; -} - -MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation( - ClassLoaderData* loader_data, - size_t word_size, - Metaspace::MetadataType mdtype) { - uint loop_count = 0; - uint gc_count = 0; - uint full_gc_count = 0; - - assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock"); - - do { - MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); - if (result != NULL) { - return result; - } - - if (GCLocker::is_active_and_needs_gc()) { - // If the GCLocker is active, just expand and allocate. - // If that does not succeed, wait if this thread is not - // in a critical section itself. - result = - loader_data->metaspace_non_null()->expand_and_allocate(word_size, - mdtype); - if (result != NULL) { - return result; - } - JavaThread* jthr = JavaThread::current(); - if (!jthr->in_critical()) { - // Wait for JNI critical section to be exited - GCLocker::stall_until_clear(); - // The GC invoked by the last thread leaving the critical - // section will be a young collection and a full collection - // is (currently) needed for unloading classes so continue - // to the next iteration to get a full GC. - continue; - } else { - if (CheckJNICalls) { - fatal("Possible deadlock due to allocating while" - " in jni critical section"); - } - return NULL; - } - } - - { // Need lock to get self consistent gc_count's - MutexLocker ml(Heap_lock); - gc_count = Universe::heap()->total_collections(); - full_gc_count = Universe::heap()->total_full_collections(); - } - - // Generate a VM operation - VM_CollectForMetadataAllocation op(loader_data, - word_size, - mdtype, - gc_count, - full_gc_count, - GCCause::_metadata_GC_threshold); - VMThread::execute(&op); - - // If GC was locked out, try again. Check before checking success because the - // prologue could have succeeded and the GC still have been locked out. - if (op.gc_locked()) { - continue; - } - - if (op.prologue_succeeded()) { - return op.result(); - } - loop_count++; - if ((QueuedAllocationWarningCount > 0) && - (loop_count % QueuedAllocationWarningCount == 0)) { - log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times," - " size=" SIZE_FORMAT, loop_count, word_size); - } - } while (true); // Until a GC is done -} - -// Return true if any of the following is true: -// . the allocation won't fit into the current young gen heap -// . gc locker is occupied (jni critical section) -// . 
heap memory is tight -- the most recent previous collection
-//     was a full collection because a partial collection (would
-//     have) failed and is likely to fail again
-bool GenCollectorPolicy::should_try_older_generation_allocation(
-                                                  size_t word_size) const {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  size_t young_capacity = gch->young_gen()->capacity_before_gc();
-  return    (word_size > heap_word_size(young_capacity))
-         || GCLocker::is_active_and_needs_gc()
-         || gch->incremental_collection_failed();
-}
-
 //
 // MarkSweepPolicy methods
 //
@@ -904,14 +546,3 @@
   _space_alignment = _gen_alignment = (size_t)Generation::GenGrain;
   _heap_alignment = compute_heap_alignment();
 }
-
-void MarkSweepPolicy::initialize_generations() {
-  _young_gen_spec = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size, _gen_alignment);
-  _old_gen_spec = new GenerationSpec(Generation::MarkSweepCompact, _initial_old_size, _max_old_size, _gen_alignment);
-}
-
-void MarkSweepPolicy::initialize_gc_policy_counters() {
-  // Initialize the policy counters - 2 collectors, 2 generations.
-  _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 2);
-}
-
diff --git a/src/hotspot/share/gc/shared/collectorPolicy.hpp b/src/hotspot/share/gc/shared/collectorPolicy.hpp
--- a/src/hotspot/share/gc/shared/collectorPolicy.hpp
+++ b/src/hotspot/share/gc/shared/collectorPolicy.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,6 @@
 class G1CollectorPolicy;
 #endif // INCLUDE_ALL_GCS
 
-class GCPolicyCounters;
 class MarkSweepPolicy;
 
 class CollectorPolicy : public CHeapObj<mtGC> {
 protected:
@@ -72,21 +71,10 @@
   size_t _space_alignment;
   size_t _heap_alignment;
 
-  // Set to true when policy wants soft refs cleared.
-  // Reset to false by gc after it clears all soft refs.
-  bool _should_clear_all_soft_refs;
-
-  // Set to true by the GC if the just-completed gc cleared all
-  // softrefs. This is set to true whenever a gc clears all softrefs, and
-  // set to false each time gc returns to the mutator. For example, in the
-  // ParallelScavengeHeap case the latter would be done toward the end of
-  // mem_allocate() where it returns op.result()
-  bool _all_soft_refs_clear;
-
   CollectorPolicy();
 
 public:
-  virtual void initialize_all() {
+  void initialize_all() {
     initialize_alignments();
     initialize_flags();
     initialize_size_info();
@@ -101,58 +89,6 @@
   size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
   size_t max_heap_byte_size()     { return _max_heap_byte_size; }
   size_t min_heap_byte_size()     { return _min_heap_byte_size; }
-
-  bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
-  void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
-  // Returns the current value of _should_clear_all_soft_refs.
-  // _should_clear_all_soft_refs is set to false as a side effect.
-  bool use_should_clear_all_soft_refs(bool v);
-  bool all_soft_refs_clear() { return _all_soft_refs_clear; }
-  void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }
-
-  // Called by the GC after Soft Refs have been cleared to indicate
-  // that the request in _should_clear_all_soft_refs has been fulfilled.
-  virtual void cleared_all_soft_refs();
-
-  // Identification methods.
- virtual GenCollectorPolicy* as_generation_policy() { return NULL; } - virtual MarkSweepPolicy* as_mark_sweep_policy() { return NULL; } -#if INCLUDE_ALL_GCS - virtual ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return NULL; } -#endif // INCLUDE_ALL_GCS - // Note that these are not virtual. - bool is_generation_policy() { return as_generation_policy() != NULL; } - bool is_mark_sweep_policy() { return as_mark_sweep_policy() != NULL; } -#if INCLUDE_ALL_GCS - bool is_concurrent_mark_sweep_policy() { return as_concurrent_mark_sweep_policy() != NULL; } -#else // INCLUDE_ALL_GCS - bool is_concurrent_mark_sweep_policy() { return false; } -#endif // INCLUDE_ALL_GCS - - - virtual CardTableRS* create_rem_set(MemRegion reserved); - - MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, - size_t size, - Metaspace::MetadataType mdtype); -}; - -class ClearedAllSoftRefs : public StackObj { - bool _clear_all_soft_refs; - CollectorPolicy* _collector_policy; - public: - ClearedAllSoftRefs(bool clear_all_soft_refs, - CollectorPolicy* collector_policy) : - _clear_all_soft_refs(clear_all_soft_refs), - _collector_policy(collector_policy) {} - - ~ClearedAllSoftRefs() { - if (_clear_all_soft_refs) { - _collector_policy->cleared_all_soft_refs(); - } - } - - bool should_clear() { return _clear_all_soft_refs; } }; class GenCollectorPolicy : public CollectorPolicy { @@ -171,27 +107,12 @@ // time. When using large pages they can differ. size_t _gen_alignment; - GenerationSpec* _young_gen_spec; - GenerationSpec* _old_gen_spec; - - GCPolicyCounters* _gc_policy_counters; - - // The sizing of the heap is controlled by a sizing policy. - AdaptiveSizePolicy* _size_policy; - - // Return true if an allocation should be attempted in the older generation - // if it fails in the younger generation. Return false, otherwise. - virtual bool should_try_older_generation_allocation(size_t word_size) const; - void initialize_flags(); void initialize_size_info(); DEBUG_ONLY(void assert_flags();) DEBUG_ONLY(void assert_size_info();) - // Try to allocate space by expanding the heap. - virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab); - // Compute max heap alignment. size_t compute_max_alignment(); @@ -215,63 +136,17 @@ size_t initial_old_size() { return _initial_old_size; } size_t max_old_size() { return _max_old_size; } - GenerationSpec* young_gen_spec() const { - assert(_young_gen_spec != NULL, "_young_gen_spec should have been initialized"); - return _young_gen_spec; - } - - GenerationSpec* old_gen_spec() const { - assert(_old_gen_spec != NULL, "_old_gen_spec should have been initialized"); - return _old_gen_spec; - } - - // Performance Counter support - GCPolicyCounters* counters() { return _gc_policy_counters; } - - // Create the jstat counters for the GC policy. 
- virtual void initialize_gc_policy_counters() = 0; - - virtual GenCollectorPolicy* as_generation_policy() { return this; } - - virtual void initialize_generations() { }; - - virtual void initialize_all() { - CollectorPolicy::initialize_all(); - initialize_generations(); - } - size_t young_gen_size_lower_bound(); size_t old_gen_size_lower_bound(); - - HeapWord* mem_allocate_work(size_t size, - bool is_tlab, - bool* gc_overhead_limit_was_exceeded); - - HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab); - - // Adaptive size policy - AdaptiveSizePolicy* size_policy() { return _size_policy; } - - virtual void initialize_size_policy(size_t init_eden_size, - size_t init_promo_size, - size_t init_survivor_size); - - virtual void cleared_all_soft_refs(); - }; class MarkSweepPolicy : public GenCollectorPolicy { protected: void initialize_alignments(); - void initialize_generations(); public: MarkSweepPolicy() {} - - MarkSweepPolicy* as_mark_sweep_policy() { return this; } - - void initialize_gc_policy_counters(); }; #endif // SHARE_VM_GC_SHARED_COLLECTORPOLICY_HPP diff --git a/src/hotspot/share/gc/shared/gcCause.cpp b/src/hotspot/share/gc/shared/gcCause.cpp --- a/src/hotspot/share/gc/shared/gcCause.cpp +++ b/src/hotspot/share/gc/shared/gcCause.cpp @@ -60,10 +60,6 @@ case _wb_full_gc: return "WhiteBox Initiated Full GC"; - case _update_allocation_context_stats_inc: - case _update_allocation_context_stats_full: - return "Update Allocation Context Stats"; - case _no_gc: return "No GC"; diff --git a/src/hotspot/share/gc/shared/gcCause.hpp b/src/hotspot/share/gc/shared/gcCause.hpp --- a/src/hotspot/share/gc/shared/gcCause.hpp +++ b/src/hotspot/share/gc/shared/gcCause.hpp @@ -52,8 +52,6 @@ _wb_young_gc, _wb_conc_mark, _wb_full_gc, - _update_allocation_context_stats_inc, - _update_allocation_context_stats_full, /* implementation independent, but reserved for GC use */ _no_gc, diff --git a/src/hotspot/share/gc/shared/gcId.cpp b/src/hotspot/share/gc/shared/gcId.cpp --- a/src/hotspot/share/gc/shared/gcId.cpp +++ b/src/hotspot/share/gc/shared/gcId.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,27 +35,28 @@ return (NamedThread*)Thread::current(); } -const uint GCId::create() { +uint GCId::create() { return _next_id++; } -const uint GCId::peek() { +uint GCId::peek() { return _next_id; } -const uint GCId::current() { - assert(currentNamedthread()->gc_id() != undefined(), "Using undefined GC id."); - return current_raw(); +uint GCId::current() { + const uint gc_id = currentNamedthread()->gc_id(); + assert(gc_id != undefined(), "Using undefined GC id."); + return gc_id; } -const uint GCId::current_raw() { - return currentNamedthread()->gc_id(); +uint GCId::current_or_undefined() { + return Thread::current()->is_Named_thread() ? currentNamedthread()->gc_id() : undefined(); } size_t GCId::print_prefix(char* buf, size_t len) { Thread* thread = Thread::current_or_null(); - if (thread != NULL && thread->is_Named_thread()) { - uint gc_id = current_raw(); + if (thread != NULL) { + uint gc_id = current_or_undefined(); if (gc_id != undefined()) { int ret = jio_snprintf(buf, len, "GC(%u) ", gc_id); assert(ret > 0, "Failed to print prefix. 
Log buffer too small?"); @@ -65,28 +66,14 @@ return 0; } -GCIdMark::GCIdMark() : _gc_id(GCId::create()) { - currentNamedthread()->set_gc_id(_gc_id); +GCIdMark::GCIdMark() : _previous_gc_id(currentNamedthread()->gc_id()) { + currentNamedthread()->set_gc_id(GCId::create()); } -GCIdMark::GCIdMark(uint gc_id) : _gc_id(gc_id) { - currentNamedthread()->set_gc_id(_gc_id); +GCIdMark::GCIdMark(uint gc_id) : _previous_gc_id(currentNamedthread()->gc_id()) { + currentNamedthread()->set_gc_id(gc_id); } GCIdMark::~GCIdMark() { - currentNamedthread()->set_gc_id(GCId::undefined()); -} - -GCIdMarkAndRestore::GCIdMarkAndRestore() : _gc_id(GCId::create()) { - _previous_gc_id = GCId::current_raw(); - currentNamedthread()->set_gc_id(_gc_id); -} - -GCIdMarkAndRestore::GCIdMarkAndRestore(uint gc_id) : _gc_id(gc_id) { - _previous_gc_id = GCId::current_raw(); - currentNamedthread()->set_gc_id(_gc_id); -} - -GCIdMarkAndRestore::~GCIdMarkAndRestore() { currentNamedthread()->set_gc_id(_previous_gc_id); } diff --git a/src/hotspot/share/gc/shared/gcId.hpp b/src/hotspot/share/gc/shared/gcId.hpp --- a/src/hotspot/share/gc/shared/gcId.hpp +++ b/src/hotspot/share/gc/shared/gcId.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,38 +28,32 @@ #include "memory/allocation.hpp" class GCId : public AllStatic { +private: friend class GCIdMark; - friend class GCIdMarkAndRestore; + static uint _next_id; static const uint UNDEFINED = (uint)-1; - static const uint create(); + static uint create(); - public: +public: // Returns the currently active GC id. Asserts that there is an active GC id. - static const uint current(); + static uint current(); // Same as current() but can return undefined() if no GC id is currently active - static const uint current_raw(); + static uint current_or_undefined(); // Returns the next expected GCId. - static const uint peek(); - static const uint undefined() { return UNDEFINED; } + static uint peek(); + static uint undefined() { return UNDEFINED; } static size_t print_prefix(char* buf, size_t len); }; class GCIdMark : public StackObj { - uint _gc_id; - public: +private: + const uint _previous_gc_id; + +public: GCIdMark(); GCIdMark(uint gc_id); ~GCIdMark(); }; -class GCIdMarkAndRestore : public StackObj { - uint _gc_id; - uint _previous_gc_id; - public: - GCIdMarkAndRestore(); - GCIdMarkAndRestore(uint gc_id); - ~GCIdMarkAndRestore(); -}; - #endif // SHARE_VM_GC_SHARED_GCID_HPP diff --git a/src/hotspot/share/gc/shared/gcTimer.cpp b/src/hotspot/share/gc/shared/gcTimer.cpp --- a/src/hotspot/share/gc/shared/gcTimer.cpp +++ b/src/hotspot/share/gc/shared/gcTimer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -68,14 +68,14 @@ GCTimer::register_gc_end(time); } -void ConcurrentGCTimer::register_gc_pause_start(const char* name) { +void ConcurrentGCTimer::register_gc_pause_start(const char* name, const Ticks& time) { assert(!_is_concurrent_phase_active, "A pause phase can't be started while a concurrent phase is active."); - GCTimer::register_gc_pause_start(name); + GCTimer::register_gc_pause_start(name, time); } -void ConcurrentGCTimer::register_gc_pause_end() { +void ConcurrentGCTimer::register_gc_pause_end(const Ticks& time) { assert(!_is_concurrent_phase_active, "A pause phase can't be ended while a concurrent phase is active."); - GCTimer::register_gc_pause_end(); + GCTimer::register_gc_pause_end(time); } void ConcurrentGCTimer::register_gc_concurrent_start(const char* name, const Ticks& time) { diff --git a/src/hotspot/share/gc/shared/gcTimer.hpp b/src/hotspot/share/gc/shared/gcTimer.hpp --- a/src/hotspot/share/gc/shared/gcTimer.hpp +++ b/src/hotspot/share/gc/shared/gcTimer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -166,8 +166,8 @@ public: ConcurrentGCTimer(): GCTimer(), _is_concurrent_phase_active(false) {}; - void register_gc_pause_start(const char* name); - void register_gc_pause_end(); + void register_gc_pause_start(const char* name, const Ticks& time = Ticks::now()); + void register_gc_pause_end(const Ticks& time = Ticks::now()); void register_gc_concurrent_start(const char* name, const Ticks& time = Ticks::now()); void register_gc_concurrent_end(const Ticks& time = Ticks::now()); diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp --- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,10 +30,14 @@ #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" +#include "gc/shared/adaptiveSizePolicy.hpp" +#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableRS.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/collectorCounters.hpp" #include "gc/shared/gcId.hpp" #include "gc/shared/gcLocker.inline.hpp" +#include "gc/shared/gcPolicyCounters.hpp" #include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/genCollectedHeap.hpp" @@ -60,19 +64,28 @@ #include "utilities/stack.inline.hpp" #include "utilities/vmError.hpp" -GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) : +GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy, + Generation::Name young, + Generation::Name old, + const char* policy_counters_name) : CollectedHeap(), _rem_set(NULL), + _young_gen_spec(new GenerationSpec(young, + policy->initial_young_size(), + policy->max_young_size(), + policy->gen_alignment())), + _old_gen_spec(new GenerationSpec(old, + policy->initial_old_size(), + policy->max_old_size(), + policy->gen_alignment())), _gen_policy(policy), + _soft_ref_gen_policy(), + _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)), _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)), - _full_collections_completed(0) -{ - assert(policy != NULL, "Sanity check"); + _full_collections_completed(0) { } jint GenCollectedHeap::initialize() { - CollectedHeap::pre_initialize(); - // While there are no constraints in the GC code that HeapWordSize // be any particular value, there are multiple other areas in the // system which believe this to be true (e.g. oop->object_size in some @@ -97,32 +110,43 @@ initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size())); - _rem_set = collector_policy()->create_rem_set(reserved_region()); - set_barrier_set(rem_set()->bs()); + _rem_set = new CardTableRS(reserved_region()); + _rem_set->initialize(); + CardTableModRefBS *bs = new CardTableModRefBS(_rem_set); + bs->initialize(); + set_barrier_set(bs); - ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false); - _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set()); - heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size()); + ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size(), false, false); + _young_gen = _young_gen_spec->init(young_rs, rem_set()); + heap_rs = heap_rs.last_part(_young_gen_spec->max_size()); - ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false); - _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set()); + ReservedSpace old_rs = heap_rs.first_part(_old_gen_spec->max_size(), false, false); + _old_gen = _old_gen_spec->init(old_rs, rem_set()); clear_incremental_collection_failed(); return JNI_OK; } +void GenCollectedHeap::initialize_size_policy(size_t init_eden_size, + size_t init_promo_size, + size_t init_survivor_size) { + const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0; + _size_policy = new AdaptiveSizePolicy(init_eden_size, + init_promo_size, + init_survivor_size, + max_gc_pause_sec, + GCTimeRatio); +} + char* GenCollectedHeap::allocate(size_t alignment, ReservedSpace* heap_rs){ // Now figure out the total size. const size_t pageSize = UseLargePages ? 
os::large_page_size() : os::vm_page_size(); assert(alignment % pageSize == 0, "Must be"); - GenerationSpec* young_spec = gen_policy()->young_gen_spec(); - GenerationSpec* old_spec = gen_policy()->old_gen_spec(); - // Check for overflow. - size_t total_reserved = young_spec->max_size() + old_spec->max_size(); - if (total_reserved < young_spec->max_size()) { + size_t total_reserved = _young_gen_spec->max_size() + _old_gen_spec->max_size(); + if (total_reserved < _young_gen_spec->max_size()) { vm_exit_during_initialization("The size of the object heap + VM data exceeds " "the maximum representable size"); } @@ -148,10 +172,9 @@ check_gen_kinds(); DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen; - _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(), - _old_gen->capacity(), - def_new_gen->from()->capacity()); - _gen_policy->initialize_gc_policy_counters(); + initialize_size_policy(def_new_gen->eden()->capacity(), + _old_gen->capacity(), + def_new_gen->from()->capacity()); } void GenCollectedHeap::ref_processing_init() { @@ -159,6 +182,14 @@ _old_gen->ref_processor_init(); } +GenerationSpec* GenCollectedHeap::young_gen_spec() const { + return _young_gen_spec; +} + +GenerationSpec* GenCollectedHeap::old_gen_spec() const { + return _old_gen_spec; +} + size_t GenCollectedHeap::capacity() const { return _young_gen->capacity() + _old_gen->capacity(); } @@ -204,6 +235,157 @@ return _full_collections_completed; } +// Return true if any of the following is true: +// . the allocation won't fit into the current young gen heap +// . gc locker is occupied (jni critical section) +// . heap memory is tight -- the most recent previous collection +// was a full collection because a partial collection (would +// have) failed and is likely to fail again +bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const { + size_t young_capacity = young_gen()->capacity_before_gc(); + return (word_size > heap_word_size(young_capacity)) + || GCLocker::is_active_and_needs_gc() + || incremental_collection_failed(); +} + +HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) { + HeapWord* result = NULL; + if (old_gen()->should_allocate(size, is_tlab)) { + result = old_gen()->expand_and_allocate(size, is_tlab); + } + if (result == NULL) { + if (young_gen()->should_allocate(size, is_tlab)) { + result = young_gen()->expand_and_allocate(size, is_tlab); + } + } + assert(result == NULL || is_in_reserved(result), "result not in heap"); + return result; +} + +HeapWord* GenCollectedHeap::mem_allocate_work(size_t size, + bool is_tlab, + bool* gc_overhead_limit_was_exceeded) { + debug_only(check_for_valid_allocation_state()); + assert(no_gc_in_progress(), "Allocation during gc not allowed"); + + // In general gc_overhead_limit_was_exceeded should be false so + // set it so here and reset it to true only if the gc time + // limit is being exceeded as checked below. + *gc_overhead_limit_was_exceeded = false; + + HeapWord* result = NULL; + + // Loop until the allocation is satisfied, or unsatisfied after GC. + for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) { + HandleMark hm; // Discard any handles allocated in each iteration. + + // First allocation attempt is lock-free. 
+    Generation *young = young_gen();
+    assert(young->supports_inline_contig_alloc(),
+           "Otherwise, must do alloc within heap lock");
+    if (young->should_allocate(size, is_tlab)) {
+      result = young->par_allocate(size, is_tlab);
+      if (result != NULL) {
+        assert(is_in_reserved(result), "result not in heap");
+        return result;
+      }
+    }
+    uint gc_count_before;  // Read inside the Heap_lock locked region.
+    {
+      MutexLocker ml(Heap_lock);
+      log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation");
+      // Note that only large objects get a shot at being
+      // allocated in later generations.
+      bool first_only = !should_try_older_generation_allocation(size);
+
+      result = attempt_allocation(size, is_tlab, first_only);
+      if (result != NULL) {
+        assert(is_in_reserved(result), "result not in heap");
+        return result;
+      }
+
+      if (GCLocker::is_active_and_needs_gc()) {
+        if (is_tlab) {
+          return NULL;  // Caller will retry allocating individual object.
+        }
+        if (!is_maximal_no_gc()) {
+          // Try and expand heap to satisfy request.
+          result = expand_heap_and_allocate(size, is_tlab);
+          // Result could be null if we are out of space.
+          if (result != NULL) {
+            return result;
+          }
+        }
+
+        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
+          return NULL;  // We didn't get to do a GC and we didn't get any memory.
+        }
+
+        // If this thread is not in a jni critical section, we stall
+        // the requestor until the critical section has cleared and
+        // GC allowed. When the critical section clears, a GC is
+        // initiated by the last thread exiting the critical section; so
+        // we retry the allocation sequence from the beginning of the loop,
+        // rather than causing more, now probably unnecessary, GC attempts.
+        JavaThread* jthr = JavaThread::current();
+        if (!jthr->in_critical()) {
+          MutexUnlocker mul(Heap_lock);
+          // Wait for JNI critical section to be exited
+          GCLocker::stall_until_clear();
+          gclocker_stalled_count += 1;
+          continue;
+        } else {
+          if (CheckJNICalls) {
+            fatal("Possible deadlock due to allocating while"
+                  " in jni critical section");
+          }
+          return NULL;
+        }
+      }
+
+      // Read the gc count while the heap lock is held.
+      gc_count_before = total_collections();
+    }
+
+    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
+    VMThread::execute(&op);
+    if (op.prologue_succeeded()) {
+      result = op.result();
+      if (op.gc_locked()) {
+        assert(result == NULL, "must be NULL if gc_locked() is true");
+        continue;  // Retry and/or stall as necessary.
+      }
+
+      // Allocation has failed and a collection
+      // has been done. If the gc time limit was exceeded
+      // this time, return NULL so that an out-of-memory
+      // error will be thrown. Clear gc_overhead_limit_exceeded
+      // so that the overhead exceeded state does not persist.
+
+      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
+      const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();
+
+      if (limit_exceeded && softrefs_clear) {
+        *gc_overhead_limit_was_exceeded = true;
+        size_policy()->set_gc_overhead_limit_exceeded(false);
+        if (op.result() != NULL) {
+          CollectedHeap::fill_with_object(op.result(), size);
+        }
+        return NULL;
+      }
+      assert(result == NULL || is_in_reserved(result),
+             "result not in heap");
+      return result;
+    }
+
+    // Give a warning if we seem to be looping forever.
+ if ((QueuedAllocationWarningCount > 0) && + (try_count % QueuedAllocationWarningCount == 0)) { + log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times," + " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : ""); + } + } +} #ifndef PRODUCT // Override of memory state checking method in CollectedHeap: @@ -255,9 +437,9 @@ HeapWord* GenCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) { - return gen_policy()->mem_allocate_work(size, - false /* is_tlab */, - gc_overhead_limit_was_exceeded); + return mem_allocate_work(size, + false /* is_tlab */, + gc_overhead_limit_was_exceeded); } bool GenCollectedHeap::must_clear_all_soft_refs() { @@ -369,12 +551,12 @@ return; // GC is disabled (e.g. JNI GetXXXCritical operation) } - GCIdMarkAndRestore gc_id_mark; + GCIdMark gc_id_mark; const bool do_clear_all_soft_refs = clear_all_soft_refs || - collector_policy()->should_clear_all_soft_refs(); + soft_ref_policy()->should_clear_all_soft_refs(); - ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); + ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy()); const size_t metadata_prev_used = MetaspaceAux::used_bytes(); @@ -444,7 +626,7 @@ if (do_young_collection) { // We did a young GC. Need a new GC id for the old GC. - GCIdMarkAndRestore gc_id_mark; + GCIdMark gc_id_mark; GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause(), true); collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs, true); } else { @@ -505,7 +687,79 @@ } HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) { - return gen_policy()->satisfy_failed_allocation(size, is_tlab); + GCCauseSetter x(this, GCCause::_allocation_failure); + HeapWord* result = NULL; + + assert(size != 0, "Precondition violated"); + if (GCLocker::is_active_and_needs_gc()) { + // GC locker is active; instead of a collection we will attempt + // to expand the heap, if there's room for expansion. + if (!is_maximal_no_gc()) { + result = expand_heap_and_allocate(size, is_tlab); + } + return result; // Could be null if we are out of space. + } else if (!incremental_collection_will_fail(false /* don't consult_young */)) { + // Do an incremental collection. + do_collection(false, // full + false, // clear_all_soft_refs + size, // size + is_tlab, // is_tlab + GenCollectedHeap::OldGen); // max_generation + } else { + log_trace(gc)(" :: Trying full because partial may fail :: "); + // Try a full collection; see delta for bug id 6266275 + // for the original code and why this has been simplified + // with from-space allocation criteria modified and + // such allocation moved out of the safepoint path. + do_collection(true, // full + false, // clear_all_soft_refs + size, // size + is_tlab, // is_tlab + GenCollectedHeap::OldGen); // max_generation + } + + result = attempt_allocation(size, is_tlab, false /*first_only*/); + + if (result != NULL) { + assert(is_in_reserved(result), "result not in heap"); + return result; + } + + // OK, collection failed, try expansion. + result = expand_heap_and_allocate(size, is_tlab); + if (result != NULL) { + return result; + } + + // If we reach this point, we're really out of memory. Try every trick + // we can to reclaim memory. Force collection of soft references. Force + // a complete compaction of the heap. Any additional methods for finding + // free memory should be here, especially if they are expensive. 
If this + // attempt fails, an OOM exception will be thrown. + { + UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted + + do_collection(true, // full + true, // clear_all_soft_refs + size, // size + is_tlab, // is_tlab + GenCollectedHeap::OldGen); // max_generation + } + + result = attempt_allocation(size, is_tlab, false /* first_only */); + if (result != NULL) { + assert(is_in_reserved(result), "result not in heap"); + return result; + } + + assert(!soft_ref_policy()->should_clear_all_soft_refs(), + "Flag should have been handled and cleared prior to this point"); + + // What else? We might try synchronous finalization later. If the total + // space available is large enough for the allocation, then a more + // complete compaction phase than we've tried so far might be + // appropriate. + return NULL; } #ifdef ASSERT @@ -888,9 +1142,9 @@ HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) { bool gc_overhead_limit_was_exceeded; - return gen_policy()->mem_allocate_work(size /* size */, - true /* is_tlab */, - &gc_overhead_limit_was_exceeded); + return mem_allocate_work(size /* size */, + true /* is_tlab */, + &gc_overhead_limit_was_exceeded); } // Requires "*prev_ptr" to be non-NULL. Deletes and a block of minimal size diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.hpp b/src/hotspot/share/gc/shared/genCollectedHeap.hpp --- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,11 +25,14 @@ #ifndef SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP #define SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP -#include "gc/shared/adaptiveSizePolicy.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/collectorPolicy.hpp" #include "gc/shared/generation.hpp" +#include "gc/shared/softRefGenPolicy.hpp" +class AdaptiveSizePolicy; +class GCPolicyCounters; +class GenerationSpec; class StrongRootsScope; class SubTasksDone; class WorkGang; @@ -64,12 +67,22 @@ Generation* _young_gen; Generation* _old_gen; + GenerationSpec* _young_gen_spec; + GenerationSpec* _old_gen_spec; + // The singleton CardTable Remembered Set. CardTableRS* _rem_set; // The generational collector policy. GenCollectorPolicy* _gen_policy; + SoftRefGenPolicy _soft_ref_gen_policy; + + // The sizing of the heap is controlled by a sizing policy. + AdaptiveSizePolicy* _size_policy; + + GCPolicyCounters* _gc_policy_counters; + // Indicates that the most recent previous incremental collection failed. // The flag is cleared when an action is taken that might clear the // condition that caused that incremental collection to fail. @@ -143,7 +156,10 @@ // we absolutely __must__ clear soft refs? bool must_clear_all_soft_refs(); - GenCollectedHeap(GenCollectorPolicy *policy); + GenCollectedHeap(GenCollectorPolicy *policy, + Generation::Name young, + Generation::Name old, + const char* policy_counters_name); virtual void check_gen_kinds() = 0; @@ -152,6 +168,10 @@ // Returns JNI_OK on success virtual jint initialize(); + void initialize_size_policy(size_t init_eden_size, + size_t init_promo_size, + size_t init_survivor_size); + // Does operations required after initialization has been done. 
void post_initialize(); @@ -161,16 +181,24 @@ bool is_young_gen(const Generation* gen) const { return gen == _young_gen; } bool is_old_gen(const Generation* gen) const { return gen == _old_gen; } + GenerationSpec* young_gen_spec() const; + GenerationSpec* old_gen_spec() const; + // The generational collector policy. GenCollectorPolicy* gen_policy() const { return _gen_policy; } virtual CollectorPolicy* collector_policy() const { return gen_policy(); } + virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_gen_policy; } + // Adaptive size policy virtual AdaptiveSizePolicy* size_policy() { - return gen_policy()->size_policy(); + return _size_policy; } + // Performance Counter support + GCPolicyCounters* counters() { return _gc_policy_counters; } + // Return the (conservative) maximum heap alignment static size_t conservative_max_heap_alignment() { return Generation::GenGrain; @@ -270,22 +298,6 @@ virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; virtual HeapWord* allocate_new_tlab(size_t size); - // Can a compiler initialize a new object without store barriers? - // This permission only extends from the creation of a new object - // via a TLAB up to the first subsequent safepoint. - virtual bool can_elide_tlab_store_barriers() const { - return true; - } - - // We don't need barriers for stores to objects in the - // young gen and, a fortiori, for initializing stores to - // objects therein. This applies to DefNew+Tenured and ParNew+CMS - // only and may need to be re-examined in case other - // kinds of collectors are implemented in the future. - virtual bool can_elide_initializing_store_barrier(oop new_obj) { - return is_in_young(new_obj); - } - // The "requestor" generation is performing some garbage collection // action for which it would be useful to have scratch space. The // requestor promises to allocate no more than "max_alloc_words" in any @@ -472,6 +484,17 @@ private: + // Return true if an allocation should be attempted in the older generation + // if it fails in the younger generation. Return false, otherwise. + bool should_try_older_generation_allocation(size_t word_size) const; + + // Try to allocate space by expanding the heap. + HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab); + + HeapWord* mem_allocate_work(size_t size, + bool is_tlab, + bool* gc_overhead_limit_was_exceeded); + // Override void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) PRODUCT_RETURN; diff --git a/src/hotspot/share/gc/shared/generation.cpp b/src/hotspot/share/gc/shared/generation.cpp --- a/src/hotspot/share/gc/shared/generation.cpp +++ b/src/hotspot/share/gc/shared/generation.cpp @@ -63,9 +63,9 @@ size_t Generation::initial_size() { GenCollectedHeap* gch = GenCollectedHeap::heap(); if (gch->is_young_gen(this)) { - return gch->gen_policy()->young_gen_spec()->init_size(); + return gch->young_gen_spec()->init_size(); } - return gch->gen_policy()->old_gen_spec()->init_size(); + return gch->old_gen_spec()->init_size(); } size_t Generation::max_capacity() const { diff --git a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp --- a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp +++ b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -45,10 +45,7 @@ // Causes all refs in "mr" to be assumed to be modified. virtual void invalidate(MemRegion mr) = 0; - - // The caller guarantees that "mr" contains no references. (Perhaps it's - // objects have been moved elsewhere.) - virtual void clear(MemRegion mr) = 0; + virtual void write_region(MemRegion mr) = 0; // The ModRef abstraction introduces pre and post barriers template diff --git a/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp b/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp --- a/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp +++ b/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -73,7 +73,7 @@ if (!HasDecorator::value) { // Optimized covariant case bs->write_ref_array_pre(dst, (int)length, - HasDecorator::value); + HasDecorator::value); Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length); bs->write_ref_array((HeapWord*)dst, length); } else { diff --git a/src/hotspot/share/gc/shared/oopStorageParState.hpp b/src/hotspot/share/gc/shared/oopStorageParState.hpp new file mode 100644 --- /dev/null +++ b/src/hotspot/share/gc/shared/oopStorageParState.hpp @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHARED_OOPSTORAGEPARSTATE_HPP +#define SHARE_GC_SHARED_OOPSTORAGEPARSTATE_HPP + +#include "gc/shared/oopStorage.hpp" +#include "memory/allocation.hpp" +#include "utilities/macros.hpp" + +#if INCLUDE_ALL_GCS + +////////////////////////////////////////////////////////////////////////////// +// Support for parallel and optionally concurrent state iteration. +// +// Parallel iteration is for the exclusive use of the GC. Other iteration +// clients must use serial iteration. +// +// Concurrent Iteration +// +// Iteration involves the _active_list, which contains all of the blocks owned +// by a storage object. This is a doubly-linked list, linked through +// dedicated fields in the blocks. +// +// At most one concurrent ParState can exist at a time for a given storage +// object. 
+//
+// A concurrent ParState sets the associated storage's
+// _concurrent_iteration_active flag true when the state is constructed, and
+// sets it false when the state is destroyed. These assignments are made with
+// _active_mutex locked. Meanwhile, empty block deletion is not done while
+// _concurrent_iteration_active is true. The flag check and the dependent
+// removal of a block from the _active_list is performed with _active_mutex
+// locked. This prevents concurrent iteration and empty block deletion from
+// interfering with each other.
+//
+// Both allocate() and delete_empty_blocks_concurrent() lock the
+// _allocate_mutex while performing their respective list manipulations,
+// preventing them from interfering with each other.
+//
+// When allocate() creates a new block, it is added to the front of the
+// _active_list. Then _active_head is set to the new block. When concurrent
+// iteration is started (by a parallel worker thread calling the state's
+// iterate() function), the current _active_head is used as the initial block
+// for the iteration, with iteration proceeding down the list headed by that
+// block.
+//
+// As a result, the list over which concurrent iteration operates is stable.
+// However, once the iteration is started, later allocations may add blocks to
+// the front of the list that won't be examined by the iteration. And while
+// the list is stable, concurrent allocate() and release() operations may
+// change the set of allocated entries in a block at any time during the
+// iteration.
+//
+// As a result, a concurrent iteration handler must accept that some
+// allocations and releases that occur after the iteration started will not be
+// seen by the iteration. Further, some may overlap examination by the
+// iteration. To help with this, allocate() and release() have an invariant
+// that an entry's value must be NULL when it is not in use.
+//
+// An in-progress delete_empty_blocks_concurrent() operation can contend with
+// the start of a concurrent iteration over the _active_mutex. Since both are
+// under GC control, that potential contention can be eliminated by never
+// scheduling both operations to run at the same time.
+//
+// ParState<concurrent, is_const>
+//   concurrent must be true if iteration is concurrent with the
+//   mutator, false if iteration is at a safepoint.
+//
+//   is_const must be true if the iteration is over a constant storage
+//   object, false if the iteration may modify the storage object.
+//
+// ParState([const] OopStorage* storage)
+//   Construct an object for managing an iteration over storage. For a
+//   concurrent ParState, empty block deletion for the associated storage
+//   is inhibited for the life of the ParState. There can be no more
+//   than one live concurrent ParState at a time for a given storage object.
+//
+// template<typename F> void iterate(F f)
+//   Repeatedly claims a block from the associated storage that has
+//   not been processed by this iteration (possibly by other threads),
+//   and applies f to each entry in the claimed block. Assume p is of
+//   type const oop* or oop*, according to is_const. Then f(p) must be
+//   a valid expression whose value is ignored. Concurrent uses must
+//   be prepared for an entry's value to change at any time, due to
+//   mutator activity.
+//
+// template<typename Closure> void oops_do(Closure* cl)
+//   Wrapper around iterate, providing an adaptation layer allowing
+//   the use of OopClosures and similar objects for iteration. Assume
+//   p is of type const oop* or oop*, according to is_const. Then
+//   cl->do_oop(p) must be a valid expression whose value is ignored.
+//   Concurrent uses must be prepared for the entry's value to change
+//   at any time, due to mutator activity.
+//
+// Optional operations, provided only if !concurrent && !is_const.
+// These are not provided when is_const, because the storage object
+// may be modified by the iteration infrastructure, even if the
+// provided closure doesn't modify the storage object. These are not
+// provided when concurrent because any pre-filtering behavior by the
+// iteration infrastructure is inappropriate for concurrent iteration;
+// modifications of the storage by the mutator could result in the
+// pre-filtering being applied (successfully or not) to objects that
+// are unrelated to what the closure finds in the entry.
+//
+// template<typename Closure> void weak_oops_do(Closure* cl)
+// template<typename IsAliveClosure, typename Closure>
+// void weak_oops_do(IsAliveClosure* is_alive, Closure* cl)
+//   Wrappers around iterate, providing an adaptation layer allowing
+//   the use of is-alive closures and OopClosures for iteration.
+//   Assume p is of type oop*. Then
+//
+//   - cl->do_oop(p) must be a valid expression whose value is ignored.
+//
+//   - is_alive->do_object_b(*p) must be a valid expression whose value
+//     is convertible to bool.
+//
+//   If *p == NULL then neither is_alive nor cl will be invoked for p.
+//   If is_alive->do_object_b(*p) is false, then cl will not be
+//     invoked on p.
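For orientation, a usage sketch in the style of the notes above; storage, oop_closure, and is_alive are illustrative stand-ins for whatever the collector supplies, and this is editorial commentary, not part of the patch:

  //   Safepoint iteration, one state shared by all GC worker threads:
  //     OopStorage::ParState<false, false> state(storage);
  //     state.oops_do(&oop_closure);                  // each worker calls this
  //     state.weak_oops_do(&is_alive, &oop_closure);  // or the filtered variant
  //
  //   Concurrent with the mutator (pre-filtering variants not provided):
  //     OopStorage::ParState<true, false> cstate(storage);
  //     cstate.oops_do(&oop_closure);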
+
+class OopStorage::BasicParState VALUE_OBJ_CLASS_SPEC {
+  OopStorage* _storage;
+  void* volatile _next_block;
+  bool _concurrent;
+
+  // Noncopyable.
+  BasicParState(const BasicParState&);
+  BasicParState& operator=(const BasicParState&);
+
+  void update_iteration_state(bool value);
+  void ensure_iteration_started();
+  Block* claim_next_block();
+
+  // Wrapper for iteration handler; ignore handler result and return true.
+  template<typename F> class AlwaysTrueFn;
+
+public:
+  BasicParState(OopStorage* storage, bool concurrent);
+  ~BasicParState();
+
+  template<bool is_const, typename F> void iterate(F f);
+};
+
+template<bool concurrent, bool is_const>
+class OopStorage::ParState VALUE_OBJ_CLASS_SPEC {
+  BasicParState _basic_state;
+
+public:
+  ParState(const OopStorage* storage) :
+    // For simplicity, always recorded as non-const.
+    _basic_state(const_cast<OopStorage*>(storage), concurrent)
+  {}
+
+  template<typename F> void iterate(F f);
+  template<typename Closure> void oops_do(Closure* cl);
+};
+
+template<>
+class OopStorage::ParState<false, false> VALUE_OBJ_CLASS_SPEC {
+  BasicParState _basic_state;
+
+public:
+  ParState(OopStorage* storage) :
+    _basic_state(storage, false)
+  {}
+
+  template<typename F> void iterate(F f);
+  template<typename Closure> void oops_do(Closure* cl);
+  template<typename Closure> void weak_oops_do(Closure* cl);
+  template<typename IsAliveClosure, typename Closure>
+  void weak_oops_do(IsAliveClosure* is_alive, Closure* cl);
+};
+
+#endif // INCLUDE_ALL_GCS
+
+#endif // SHARE_GC_SHARED_OOPSTORAGEPARSTATE_HPP
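The header above carries only declarations of the member templates; their definitions move to oopStorageParState.inline.hpp in the next hunk. Two bits of syntax make that split work: stacked template clauses on out-of-line member-template definitions, and the ".template" disambiguator at the call site. A standalone, compilable illustration with invented names (generic C++, not HotSpot code):

  #include <cstdio>

  struct Basic {
    template<bool is_const, typename F> void iterate(F f) { f(is_const); }
  };

  template<bool concurrent, bool is_const>
  struct Par {
    Basic _basic;
    template<typename F> void iterate(F f);  // declaration only, as in the .hpp
  };

  template<bool concurrent, bool is_const>  // enclosing class's parameters first,
  template<typename F>                      // then the member template's own
  inline void Par<concurrent, is_const>::iterate(F f) {
    _basic.template iterate<is_const>(f);   // ".template" disambiguates the call
  }

  int main() {
    Par<true, false> p;
    p.iterate([](bool c) { std::printf("is_const=%d\n", c); });
    return 0;
  }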
diff --git a/src/hotspot/share/gc/shared/oopStorageParState.inline.hpp b/src/hotspot/share/gc/shared/oopStorageParState.inline.hpp
--- a/src/hotspot/share/gc/shared/oopStorageParState.inline.hpp
+++ b/src/hotspot/share/gc/shared/oopStorageParState.inline.hpp
@@ -26,152 +26,13 @@
 #define SHARE_GC_SHARED_OOPSTORAGEPARSTATE_INLINE_HPP
 
 #include "gc/shared/oopStorage.inline.hpp"
+#include "gc/shared/oopStorageParState.hpp"
 #include "memory/allocation.hpp"
 #include "metaprogramming/conditional.hpp"
 #include "utilities/macros.hpp"
 
 #if INCLUDE_ALL_GCS
 
-//////////////////////////////////////////////////////////////////////////////
-// Support for parallel and optionally concurrent state iteration.
-//
-// Parallel iteration is for the exclusive use of the GC. Other iteration
-// clients must use serial iteration.
-//
-// Concurrent Iteration
-//
-// Iteration involves the _active_list, which contains all of the blocks owned
-// by a storage object. This is a doubly-linked list, linked through
-// dedicated fields in the blocks.
-//
-// At most one concurrent ParState can exist at a time for a given storage
-// object.
-//
-// A concurrent ParState sets the associated storage's
-// _concurrent_iteration_active flag true when the state is constructed, and
-// sets it false when the state is destroyed. These assignments are made with
-// _active_mutex locked. Meanwhile, empty block deletion is not done while
-// _concurrent_iteration_active is true. The flag check and the dependent
-// removal of a block from the _active_list is performed with _active_mutex
-// locked. This prevents concurrent iteration and empty block deletion from
-// interfering with each other.
-//
-// Both allocate() and delete_empty_blocks_concurrent() lock the
-// _allocate_mutex while performing their respective list manipulations,
-// preventing them from interfering with each other.
-//
-// When allocate() creates a new block, it is added to the front of the
-// _active_list. Then _active_head is set to the new block. When concurrent
-// iteration is started (by a parallel worker thread calling the state's
-// iterate() function), the current _active_head is used as the initial block
-// for the iteration, with iteration proceeding down the list headed by that
-// block.
-//
-// As a result, the list over which concurrent iteration operates is stable.
-// However, once the iteration is started, later allocations may add blocks to
-// the front of the list that won't be examined by the iteration. And while
-// the list is stable, concurrent allocate() and release() operations may
-// change the set of allocated entries in a block at any time during the
-// iteration.
-//
-// As a result, a concurrent iteration handler must accept that some
-// allocations and releases that occur after the iteration started will not be
-// seen by the iteration. Further, some may overlap examination by the
-// iteration. To help with this, allocate() and release() have an invariant
-// that an entry's value must be NULL when it is not in use.
-//
-// An in-progress delete_empty_blocks_concurrent() operation can contend with
-// the start of a concurrent iteration over the _active_mutex. Since both are
-// under GC control, that potential contention can be eliminated by never
-// scheduling both operations to run at the same time.
-//
-// ParState<concurrent, is_const>
-//   concurrent must be true if iteration is concurrent with the
-//   mutator, false if iteration is at a safepoint.
-//
-//   is_const must be true if the iteration is over a constant storage
-//   object, false if the iteration may modify the storage object.
-//
-// ParState([const] OopStorage* storage)
-//   Construct an object for managing an iteration over storage. For a
-//   concurrent ParState, empty block deletion for the associated storage
-//   is inhibited for the life of the ParState. There can be no more
-//   than one live concurrent ParState at a time for a given storage object.
-//
-// template<typename F> void iterate(F f)
-//   Repeatedly claims a block from the associated storage that has
-//   not been processed by this iteration (possibly by other threads),
-//   and applies f to each entry in the claimed block. Assume p is of
-//   type const oop* or oop*, according to is_const. Then f(p) must be
-//   a valid expression whose value is ignored. Concurrent uses must
-//   be prepared for an entry's value to change at any time, due to
-//   mutator activity.
-//
-// template<typename Closure> void oops_do(Closure* cl)
-//   Wrapper around iterate, providing an adaptation layer allowing
-//   the use of OopClosures and similar objects for iteration. Assume
-//   p is of type const oop* or oop*, according to is_const. Then
-//   cl->do_oop(p) must be a valid expression whose value is ignored.
-//   Concurrent uses must be prepared for the entry's value to change
-//   at any time, due to mutator activity.
-//
-// Optional operations, provided only if !concurrent && !is_const.
-// These are not provided when is_const, because the storage object
-// may be modified by the iteration infrastructure, even if the
-// provided closure doesn't modify the storage object. These are not
-// provided when concurrent because any pre-filtering behavior by the
-// iteration infrastructure is inappropriate for concurrent iteration;
-// modifications of the storage by the mutator could result in the
-// pre-filtering being applied (successfully or not) to objects that
-// are unrelated to what the closure finds in the entry.
-//
-// template<typename Closure> void weak_oops_do(Closure* cl)
-// template<typename IsAliveClosure, typename Closure>
-// void weak_oops_do(IsAliveClosure* is_alive, Closure* cl)
-//   Wrappers around iterate, providing an adaptation layer allowing
-//   the use of is-alive closures and OopClosures for iteration.
-//   Assume p is of type oop*. Then
-//
-//   - cl->do_oop(p) must be a valid expression whose value is ignored.
-//
-//   - is_alive->do_object_b(*p) must be a valid expression whose value
-//     is convertible to bool.
-//
-//   If *p == NULL then neither is_alive nor cl will be invoked for p.
-//   If is_alive->do_object_b(*p) is false, then cl will not be
-//     invoked on p.
-
-class OopStorage::BasicParState VALUE_OBJ_CLASS_SPEC {
-  OopStorage* _storage;
-  void* volatile _next_block;
-  bool _concurrent;
-
-  // Noncopyable.
-  BasicParState(const BasicParState&);
-  BasicParState& operator=(const BasicParState&);
-
-  void update_iteration_state(bool value);
-  void ensure_iteration_started();
-  Block* claim_next_block();
-
-  // Wrapper for iteration handler; ignore handler result and return true.
-  template<typename F> class AlwaysTrueFn;
-
-public:
-  BasicParState(OopStorage* storage, bool concurrent);
-  ~BasicParState();
-
-  template<bool is_const, typename F> void iterate(F f) {
-    // Wrap f in ATF so we can use Block::iterate.
-    AlwaysTrueFn<F> atf_f(f);
-    ensure_iteration_started();
-    typename Conditional<is_const, const Block*, Block*>::type block;
-    while ((block = claim_next_block()) != NULL) {
-      block->iterate(atf_f);
-    }
-  }
-};
-
 template<typename F>
 class OopStorage::BasicParState::AlwaysTrueFn VALUE_OBJ_CLASS_SPEC {
   F _f;
@@ -183,57 +44,49 @@
   bool operator()(OopPtr ptr) const { _f(ptr); return true; }
 };
 
+template<bool is_const, typename F>
+inline void OopStorage::BasicParState::iterate(F f) {
+  // Wrap f in ATF so we can use Block::iterate.
+  AlwaysTrueFn<F> atf_f(f);
+  ensure_iteration_started();
+  typename Conditional<is_const, const Block*, Block*>::type block;
+  while ((block = claim_next_block()) != NULL) {
+    block->iterate(atf_f);
+  }
+}
+
 template<bool concurrent, bool is_const>
-class OopStorage::ParState VALUE_OBJ_CLASS_SPEC {
-  BasicParState _basic_state;
+template<typename F>
+inline void OopStorage::ParState<concurrent, is_const>::iterate(F f) {
+  _basic_state.template iterate<is_const>(f);
+}
 
-public:
-  ParState(const OopStorage* storage) :
-    // For simplicity, always recorded as non-const.
-    _basic_state(const_cast<OopStorage*>(storage), concurrent)
-  {}
+template<bool concurrent, bool is_const>
+template<typename Closure>
+inline void OopStorage::ParState<concurrent, is_const>::oops_do(Closure* cl) {
+  this->iterate(oop_fn(cl));
+}
 
-  template<typename F>
-  void iterate(F f) {
-    _basic_state.template iterate<is_const>(f);
-  }
+template<typename F>
+inline void OopStorage::ParState<false, false>::iterate(F f) {
+  _basic_state.template iterate<false>(f);
+}
 
-  template<typename Closure>
-  void oops_do(Closure* cl) {
-    this->iterate(oop_fn(cl));
-  }
-};
+template<typename Closure>
+inline void OopStorage::ParState<false, false>::oops_do(Closure* cl) {
+  this->iterate(oop_fn(cl));
+}
 
-template<>
-class OopStorage::ParState<false, false> VALUE_OBJ_CLASS_SPEC {
-  BasicParState _basic_state;
+template<typename Closure>
+inline void OopStorage::ParState<false, false>::weak_oops_do(Closure* cl) {
+  this->iterate(skip_null_fn(oop_fn(cl)));
+}
 
-public:
-  ParState(OopStorage* storage) :
-    _basic_state(storage, false)
-  {}
-
-  template<typename F>
-  void iterate(F f) {
-    _basic_state.template iterate<false>(f);
-  }
-
-  template<typename Closure>
-  void oops_do(Closure* cl) {
-    this->iterate(oop_fn(cl));
-  }
-
-  template<typename Closure>
-  void weak_oops_do(Closure* cl) {
-    this->iterate(skip_null_fn(oop_fn(cl)));
-  }
-
-  template<typename IsAliveClosure, typename Closure>
-  void weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
-    this->iterate(if_alive_fn(is_alive, oop_fn(cl)));
-  }
-};
+template<typename IsAliveClosure, typename Closure>
+inline void OopStorage::ParState<false, false>::weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
+  this->iterate(if_alive_fn(is_alive, oop_fn(cl)));
+}
 
 #endif // INCLUDE_ALL_GCS
 
-#endif // include guard
+#endif // SHARE_GC_SHARED_OOPSTORAGEPARSTATE_INLINE_HPP
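One detail worth calling out from the definitions above: the "Wrap f in ATF" comment reflects that Block::iterate treats its callback as a predicate and stops once it returns false, so a void-returning handler is wrapped to always return true. A self-contained sketch of that adapter idea, with invented names (not HotSpot code):

  #include <cstdio>

  // Wraps a void-returning handler so a stop-on-false iterator visits
  // every element.
  template<typename F>
  struct AlwaysTrue {
    F _f;
    explicit AlwaysTrue(F f) : _f(f) {}
    template<typename Ptr>
    bool operator()(Ptr p) const { _f(p); return true; }  // never stops early
  };

  // Stand-in for the find-style iteration protocol of Block::iterate.
  template<typename Pred>
  void iterate_until_false(int* first, int* last, Pred pred) {
    for (int* p = first; p != last; ++p) {
      if (!pred(p)) return;  // the protocol AlwaysTrue hides from its client
    }
  }

  int main() {
    int data[3] = {1, 2, 3};
    auto print = [](int* p) { std::printf("%d\n", *p); };
    iterate_until_false(data, data + 3, AlwaysTrue<decltype(print)>(print));
    return 0;
  }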
diff --git a/src/hotspot/share/gc/shared/plab.hpp b/src/hotspot/share/gc/shared/plab.hpp
--- a/src/hotspot/share/gc/shared/plab.hpp
+++ b/src/hotspot/share/gc/shared/plab.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -72,7 +72,6 @@
   // Initializes the buffer to be empty, but with the given "word_sz".
   // Must get initialized with "set_buf" for an allocation to succeed.
   PLAB(size_t word_sz);
-  virtual ~PLAB() {}
 
   static size_t size_required_for_allocation(size_t word_size) { return word_size + AlignmentReserve; }
 
@@ -120,7 +119,7 @@
   }
 
   // Sets the space of the buffer to be [buf, space+word_sz()).
-  virtual void set_buf(HeapWord* buf, size_t new_word_sz) {
+  void set_buf(HeapWord* buf, size_t new_word_sz) {
     assert(new_word_sz > AlignmentReserve, "Too small");
     _word_sz = new_word_sz;
 
@@ -136,11 +135,11 @@
   // Flush allocation statistics into the given PLABStats supporting ergonomic
   // sizing of PLAB's and retire the current buffer. To be called at the end of
   // GC.
-  virtual void flush_and_retire_stats(PLABStats* stats);
+  void flush_and_retire_stats(PLABStats* stats);
 
   // Fills in the unallocated portion of the buffer with a garbage object and updates
   // statistics. To be called during GC.
-  virtual void retire();
+  void retire();
 };
 
 // PLAB book-keeping.
diff --git a/src/hotspot/share/gc/shared/referenceProcessor.cpp b/src/hotspot/share/gc/shared/referenceProcessor.cpp
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -367,12 +367,12 @@ } void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) { - _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref); + _discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_ref); oop discovered = java_lang_ref_Reference::discovered(_ref); assert(_discovered_addr && oopDesc::is_oop_or_null(discovered), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); _next = discovered; - _referent_addr = java_lang_ref_Reference::referent_addr(_ref); + _referent_addr = java_lang_ref_Reference::referent_addr_raw(_ref); _referent = java_lang_ref_Reference::referent(_ref); assert(Universe::heap()->is_in_reserved_or_null(_referent), "Wrong oop found in java.lang.Reference object"); @@ -498,7 +498,7 @@ DiscoveredListIterator iter(refs_list, keep_alive, is_alive); while (iter.has_next()) { iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); - HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj()); + HeapWord* next_addr = java_lang_ref_Reference::next_addr_raw(iter.obj()); oop next = java_lang_ref_Reference::next(iter.obj()); if ((iter.referent() == NULL || iter.is_referent_alive() || next != NULL)) { @@ -1024,7 +1024,7 @@ ResourceMark rm; // Needed for tracing. - HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj); + HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr_raw(obj); const oop discovered = java_lang_ref_Reference::discovered(obj); assert(oopDesc::is_oop_or_null(discovered), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); if (discovered != NULL) { @@ -1191,10 +1191,10 @@ // Keep alive its cohort. iter.make_referent_alive(); if (UseCompressedOops) { - narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj); + narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr_raw(obj); keep_alive->do_oop(next_addr); } else { - oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj); + oop* next_addr = (oop*)java_lang_ref_Reference::next_addr_raw(obj); keep_alive->do_oop(next_addr); } iter.move_to_next(); diff --git a/src/hotspot/share/gc/shared/softRefGenPolicy.cpp b/src/hotspot/share/gc/shared/softRefGenPolicy.cpp new file mode 100644 --- /dev/null +++ b/src/hotspot/share/gc/shared/softRefGenPolicy.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/adaptiveSizePolicy.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
+#include "gc/shared/softRefGenPolicy.hpp"
+
+void SoftRefGenPolicy::cleared_all_soft_refs() {
+  // If near gc overhead limit, continue to clear SoftRefs. SoftRefs may
+  // have been cleared in the last collection but if the gc overhead
+  // limit continues to be near, SoftRefs should still be cleared.
+  AdaptiveSizePolicy* size_policy = GenCollectedHeap::heap()->size_policy();
+  if (size_policy != NULL) {
+    set_should_clear_all_soft_refs(size_policy->gc_overhead_limit_near());
+  }
+
+  SoftRefPolicy::cleared_all_soft_refs();
+}
diff --git a/src/hotspot/share/gc/shared/softRefGenPolicy.hpp b/src/hotspot/share/gc/shared/softRefGenPolicy.hpp
new file mode 100644
--- /dev/null
+++ b/src/hotspot/share/gc/shared/softRefGenPolicy.hpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_SOFTREFGENPOLICY_HPP
+#define SHARE_VM_GC_SHARED_SOFTREFGENPOLICY_HPP
+
+#include "gc/shared/softRefPolicy.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class AdaptiveSizePolicy;
+
+class SoftRefGenPolicy : public SoftRefPolicy {
+public:
+  virtual void cleared_all_soft_refs();
+};
+
+#endif // SHARE_VM_GC_SHARED_SOFTREFGENPOLICY_HPP
diff --git a/src/hotspot/share/gc/shared/softRefPolicy.cpp b/src/hotspot/share/gc/shared/softRefPolicy.cpp
new file mode 100644
--- /dev/null
+++ b/src/hotspot/share/gc/shared/softRefPolicy.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shared/softRefPolicy.hpp" + +SoftRefPolicy::SoftRefPolicy() : + _should_clear_all_soft_refs(false), + _all_soft_refs_clear(false) { +} + +bool SoftRefPolicy::use_should_clear_all_soft_refs(bool v) { + bool result = _should_clear_all_soft_refs; + set_should_clear_all_soft_refs(false); + return result; +} + +void SoftRefPolicy::cleared_all_soft_refs() { + _all_soft_refs_clear = true; +} diff --git a/src/hotspot/share/gc/shared/softRefPolicy.hpp b/src/hotspot/share/gc/shared/softRefPolicy.hpp new file mode 100644 --- /dev/null +++ b/src/hotspot/share/gc/shared/softRefPolicy.hpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHARED_SOFTREFPOLICY_HPP +#define SHARE_VM_GC_SHARED_SOFTREFPOLICY_HPP + +#include "memory/allocation.hpp" + +class SoftRefPolicy { + private: + // Set to true when policy wants soft refs cleared. + // Reset to false by gc after it clears all soft refs. + bool _should_clear_all_soft_refs; + + // Set to true by the GC if the just-completed gc cleared all + // softrefs. This is set to true whenever a gc clears all softrefs, and + // set to false each time gc returns to the mutator. For example, in the + // ParallelScavengeHeap case the latter would be done toward the end of + // mem_allocate() where it returns op.result() + bool _all_soft_refs_clear; + + public: + SoftRefPolicy(); + + bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; } + void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; } + // Returns the current value of _should_clear_all_soft_refs. + // _should_clear_all_soft_refs is set to false as a side effect. + bool use_should_clear_all_soft_refs(bool v); + bool all_soft_refs_clear() { return _all_soft_refs_clear; } + void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; } + + // Called by the GC after Soft Refs have been cleared to indicate + // that the request in _should_clear_all_soft_refs has been fulfilled. 
+ virtual void cleared_all_soft_refs(); +}; + +class ClearedAllSoftRefs : public StackObj { + bool _clear_all_soft_refs; + SoftRefPolicy* _soft_ref_policy; + public: + ClearedAllSoftRefs(bool clear_all_soft_refs, SoftRefPolicy* soft_ref_policy) : + _clear_all_soft_refs(clear_all_soft_refs), + _soft_ref_policy(soft_ref_policy) {} + + ~ClearedAllSoftRefs() { + if (_clear_all_soft_refs) { + _soft_ref_policy->cleared_all_soft_refs(); + } + } + + bool should_clear() { return _clear_all_soft_refs; } +}; + +#endif // SHARE_VM_GC_SHARED_SOFTREFPOLICY_HPP diff --git a/src/hotspot/share/gc/shared/space.cpp b/src/hotspot/share/gc/shared/space.cpp --- a/src/hotspot/share/gc/shared/space.cpp +++ b/src/hotspot/share/gc/shared/space.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -49,7 +49,7 @@ HeapWord* top_obj) { if (top_obj != NULL) { if (_sp->block_is_obj(top_obj)) { - if (_precision == CardTableModRefBS::ObjHeadPreciseArray) { + if (_precision == CardTable::ObjHeadPreciseArray) { if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) { // An arrayOop is starting on the dirty card - since we do exact // store checks for objArrays we are done. @@ -125,11 +125,11 @@ HeapWord* bottom_obj; HeapWord* top_obj; - assert(_precision == CardTableModRefBS::ObjHeadPreciseArray || - _precision == CardTableModRefBS::Precise, + assert(_precision == CardTable::ObjHeadPreciseArray || + _precision == CardTable::Precise, "Only ones we deal with for now."); - assert(_precision != CardTableModRefBS::ObjHeadPreciseArray || + assert(_precision != CardTable::ObjHeadPreciseArray || _cl->idempotent() || _last_bottom == NULL || top <= _last_bottom, "Not decreasing"); @@ -147,7 +147,7 @@ top = get_actual_top(top, top_obj); // If the previous call did some part of this region, don't redo. - if (_precision == CardTableModRefBS::ObjHeadPreciseArray && + if (_precision == CardTable::ObjHeadPreciseArray && _min_done != NULL && _min_done < top) { top = _min_done; @@ -159,7 +159,7 @@ bottom = MIN2(bottom, top); MemRegion extended_mr = MemRegion(bottom, top); assert(bottom <= top && - (_precision != CardTableModRefBS::ObjHeadPreciseArray || + (_precision != CardTable::ObjHeadPreciseArray || _min_done == NULL || top <= _min_done), "overlap!"); @@ -180,7 +180,7 @@ } DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl, - CardTableModRefBS::PrecisionStyle precision, + CardTable::PrecisionStyle precision, HeapWord* boundary, bool parallel) { return new DirtyCardToOopClosure(this, cl, precision, boundary); @@ -189,7 +189,7 @@ HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top, HeapWord* top_obj) { if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) { - if (_precision == CardTableModRefBS::ObjHeadPreciseArray) { + if (_precision == CardTable::ObjHeadPreciseArray) { if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) { // An arrayOop is starting on the dirty card - since we do exact // store checks for objArrays we are done. 
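ClearedAllSoftRefs above is a destructor-driven guard: whichever way the collection exits, the policy callback fires exactly when soft references were in fact requested to be cleared. A minimal sketch of the intended call pattern, with simplified stand-ins (SoftRefPolicyStub, ClearedAllSoftRefsSketch, do_collection) rather than the real HotSpot types:

  #include <cstdio>

  // Simplified stand-in for the SoftRefPolicy interface in the patch.
  struct SoftRefPolicyStub {
    bool _should_clear = false;
    bool _all_clear = false;
    bool use_should_clear_all_soft_refs() {    // reads and resets the request
      bool r = _should_clear; _should_clear = false; return r;
    }
    void cleared_all_soft_refs() { _all_clear = true; }
  };

  struct ClearedAllSoftRefsSketch {
    bool _clear; SoftRefPolicyStub* _policy;
    ClearedAllSoftRefsSketch(bool c, SoftRefPolicyStub* p) : _clear(c), _policy(p) {}
    ~ClearedAllSoftRefsSketch() { if (_clear) _policy->cleared_all_soft_refs(); }
  };

  void do_collection(SoftRefPolicyStub* policy) {
    bool clear = policy->use_should_clear_all_soft_refs();
    ClearedAllSoftRefsSketch guard(clear, policy);  // fires on every exit path
    // ... collection work; soft refs cleared iff 'clear' ...
  }

  int main() {
    SoftRefPolicyStub p;
    p._should_clear = true;   // e.g. set after an allocation failure
    do_collection(&p);
    printf("all_soft_refs_clear=%d\n", p._all_clear);  // prints 1
  }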
@@ -260,7 +260,7 @@ DirtyCardToOopClosure* ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl, - CardTableModRefBS::PrecisionStyle precision, + CardTable::PrecisionStyle precision, HeapWord* boundary, bool parallel) { return new ContiguousSpaceDCTOC(this, cl, precision, boundary); diff --git a/src/hotspot/share/gc/shared/space.hpp b/src/hotspot/share/gc/shared/space.hpp --- a/src/hotspot/share/gc/shared/space.hpp +++ b/src/hotspot/share/gc/shared/space.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #define SHARE_VM_GC_SHARED_SPACE_HPP #include "gc/shared/blockOffsetTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/workgroup.hpp" #include "memory/allocation.hpp" #include "memory/iterator.hpp" @@ -181,7 +181,7 @@ // depending on the type of space in which the closure will // operate. ResourceArea allocated. virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, - CardTableModRefBS::PrecisionStyle precision, + CardTable::PrecisionStyle precision, HeapWord* boundary, bool parallel); @@ -253,7 +253,7 @@ protected: ExtendedOopClosure* _cl; Space* _sp; - CardTableModRefBS::PrecisionStyle _precision; + CardTable::PrecisionStyle _precision; HeapWord* _boundary; // If non-NULL, process only non-NULL oops // pointing below boundary. HeapWord* _min_done; // ObjHeadPreciseArray precision requires @@ -282,7 +282,7 @@ public: DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl, - CardTableModRefBS::PrecisionStyle precision, + CardTable::PrecisionStyle precision, HeapWord* boundary) : _sp(sp), _cl(cl), _precision(precision), _boundary(boundary), _min_done(NULL) { @@ -619,7 +619,7 @@ // Override. DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, - CardTableModRefBS::PrecisionStyle precision, + CardTable::PrecisionStyle precision, HeapWord* boundary, bool parallel); @@ -694,7 +694,7 @@ public: FilteringDCTOC(Space* sp, ExtendedOopClosure* cl, - CardTableModRefBS::PrecisionStyle precision, + CardTable::PrecisionStyle precision, HeapWord* boundary) : DirtyCardToOopClosure(sp, cl, precision, boundary) {} }; @@ -723,7 +723,7 @@ public: ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl, - CardTableModRefBS::PrecisionStyle precision, + CardTable::PrecisionStyle precision, HeapWord* boundary) : FilteringDCTOC(sp, cl, precision, boundary) {} diff --git a/src/hotspot/share/gc/shared/taskqueue.cpp b/src/hotspot/share/gc/shared/taskqueue.cpp --- a/src/hotspot/share/gc/shared/taskqueue.cpp +++ b/src/hotspot/share/gc/shared/taskqueue.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -153,7 +153,7 @@ ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) { assert(_n_threads > 0, "Initialization is incorrect"); assert(_offered_termination < _n_threads, "Invariant"); - Atomic::inc((int *)&_offered_termination); + Atomic::inc(&_offered_termination); uint yield_count = 0; // Number of hard spin loops done since last yield @@ -228,7 +228,7 @@ #endif if ((peek_in_queue_set() && (terminator == NULL || ! terminator->should_force_termination())) || (terminator != NULL && terminator->should_exit_termination())) { - Atomic::dec((int *)&_offered_termination); + Atomic::dec(&_offered_termination); assert(_offered_termination < _n_threads, "Invariant"); return false; } diff --git a/src/hotspot/share/gc/shared/taskqueue.inline.hpp b/src/hotspot/share/gc/shared/taskqueue.inline.hpp --- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp +++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -228,7 +228,7 @@ #if !(defined SPARC || defined IA32 || defined AMD64) OrderAccess::fence(); #endif - uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom); + uint localBot = OrderAccess::load_acquire(&_bottom); uint n_elems = size(localBot, oldAge.top()); if (n_elems == 0) { return false; diff --git a/src/hotspot/share/gc/shared/vmGCOperations.cpp b/src/hotspot/share/gc/shared/vmGCOperations.cpp --- a/src/hotspot/share/gc/shared/vmGCOperations.cpp +++ b/src/hotspot/share/gc/shared/vmGCOperations.cpp @@ -46,7 +46,7 @@ VM_GC_Operation::~VM_GC_Operation() { CollectedHeap* ch = Universe::heap(); - ch->collector_policy()->set_all_soft_refs_clear(false); + ch->soft_ref_policy()->set_all_soft_refs_clear(false); } // The same dtrace probe can't be inserted in two different files, so we diff --git a/src/hotspot/share/gc/shared/workgroup.cpp b/src/hotspot/share/gc/shared/workgroup.cpp --- a/src/hotspot/share/gc/shared/workgroup.cpp +++ b/src/hotspot/share/gc/shared/workgroup.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -157,7 +157,7 @@ // Wait for the coordinator to dispatch a task. _start_semaphore->wait(); - uint num_started = (uint) Atomic::add(1, (volatile jint*)&_started); + uint num_started = Atomic::add(1u, &_started); // Subtract one to get a zero-indexed worker id. uint worker_id = num_started - 1; @@ -168,7 +168,7 @@ void worker_done_with_task() { // Mark that the worker is done with the task. // The worker is not allowed to read the state variables after this line. - uint not_finished = (uint) Atomic::add(-1, (volatile jint*)&_not_finished); + uint not_finished = Atomic::sub(1u, &_not_finished); // The last worker signals to the coordinator that all work is completed. 
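The taskqueue and workgroup hunks above move from cast-based Atomic calls (e.g. Atomic::add(-1, (volatile jint*)&_not_finished)) to the typed overloads (Atomic::sub(1u, &_not_finished)), so operand and destination types are checked by the compiler instead of being forced with casts. One detail worth remembering when porting such code: HotSpot's Atomic::add/sub return the updated value. A standalone illustration with std::atomic, whose fetch_add returns the old value instead:

  #include <atomic>
  #include <cstdio>

  std::atomic<unsigned int> started{0};

  unsigned int worker_start() {
    // HotSpot: uint num_started = Atomic::add(1u, &_started);  // new value
    // std::atomic: fetch_add returns the old value, so add 1 to match.
    unsigned int num_started = started.fetch_add(1u) + 1u;
    return num_started - 1;   // zero-indexed worker id, as in the patch
  }

  int main() {
    printf("worker id %u\n", worker_start());  // 0
    printf("worker id %u\n", worker_start());  // 1
  }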
if (not_finished == 0) {
@@ -439,7 +439,7 @@
 #ifdef ASSERT
   if (!res) {
     assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
-    Atomic::inc((volatile jint*) &_claimed);
+    Atomic::inc(&_claimed);
   }
 #endif
   return res;
 }
diff --git a/src/hotspot/share/gc/shared/workgroup.hpp b/src/hotspot/share/gc/shared/workgroup.hpp
--- a/src/hotspot/share/gc/shared/workgroup.hpp
+++ b/src/hotspot/share/gc/shared/workgroup.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,14 +59,9 @@
   const uint _gc_id;
 public:
-  AbstractGangTask(const char* name) :
+  explicit AbstractGangTask(const char* name) :
     _name(name),
-    _gc_id(GCId::current_raw())
-  {}
-
-  AbstractGangTask(const char* name, const uint gc_id) :
-    _name(name),
-    _gc_id(gc_id)
+    _gc_id(GCId::current_or_undefined())
   {}

   // The abstract work method.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
@@ -202,7 +202,7 @@
   shenandoah_assert_not_in_cset_except (v, o, o == NULL || _heap->cancelled_concgc() || !_heap->is_concurrent_mark_in_progress());
 }

-void ShenandoahBarrierSet::write_region_work(MemRegion mr) {
+void ShenandoahBarrierSet::write_region(MemRegion mr) {
   assert(UseShenandoahGC, "should be enabled");
   if (!ShenandoahCloneBarrier) return;
   if (! need_update_refs_barrier()) return;
@@ -338,3 +338,20 @@
   verify_safe_oop(oopDesc::decode_heap_oop(p));
 }
 #endif
+
+void ShenandoahBarrierSet::on_thread_attach(JavaThread* thread) {
+  assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
+  assert(!thread->satb_mark_queue().is_active(), "SATB queue should not be active");
+  assert(thread->satb_mark_queue().is_empty(), "SATB queue should be empty");
+  if (thread->satb_mark_queue_set().is_active()) {
+    thread->satb_mark_queue().set_active(true);
+  }
+  thread->set_gc_state(JavaThread::gc_state_global());
+}
+
+void ShenandoahBarrierSet::on_thread_detach(JavaThread* thread) {
+  thread->satb_mark_queue().flush();
+  if (UseTLAB && thread->gclab().is_initialized()) {
+    thread->gclab().make_parsable(true);
+  }
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
@@ -71,7 +71,10 @@
   void write_ref_field_pre_work(void* field, oop new_val);
   void write_ref_field_work(void* v, oop o, bool release = false);
-  void write_region_work(MemRegion mr);
+  void write_region(MemRegion mr);
+
+  virtual void on_thread_attach(JavaThread* thread);
+  virtual void on_thread_detach(JavaThread* thread);

   virtual oop read_barrier(oop src);
@@ -279,6 +282,10 @@
       return value;
     }

+    static oop resolve(oop obj) {
+      return barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set())->write_barrier(obj);
+    }
+
   };
 };
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
@@ -81,6 +81,8 @@
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 bool ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+  assert(((T*)(void*) src_obj) <= src && ((HeapWord*) src) < (((HeapWord*)(void*) src_obj) + src_obj->size()), "pointer out of object bounds src_obj: %p, src: %p, end: %p, size: %u", (void*) src_obj, src, (((HeapWord*)(void*) src_obj) + src_obj->size()), src_obj->size());
+  assert(((T*)(void*) dst_obj) <= dst && ((HeapWord*) dst) < (((HeapWord*)(void*) dst_obj) + dst_obj->size()), "pointer out of object bounds dst_obj: %p, dst: %p, end: %p, size: %u", (void*) dst_obj, dst, (((HeapWord*)(void*) dst_obj) + dst_obj->size()), dst_obj->size());
   if (!oopDesc::is_null(src_obj)) {
     size_t src_offset = pointer_delta((void*) src, (void*) src_obj, sizeof(T));
     src_obj = arrayOop(((ShenandoahBarrierSet*) BarrierSet::barrier_set())->read_barrier(src_obj));
@@ -260,6 +262,9 @@
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 bool ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+  assert(((HeapWord*)(void*) src_obj) <= (HeapWord*) src && (HeapWord*) src < (((HeapWord*)(void*) src_obj) + src_obj->size()), "pointer out of object bounds src_obj: %p, src: %p, size: %u", (void*) src_obj, src, src_obj->size());
+  assert(((HeapWord*)(void*) dst_obj) <= (HeapWord*) dst && (HeapWord*) dst < (((HeapWord*)(void*) dst_obj) + dst_obj->size()), "pointer out of object bounds dst_obj: %p, dst: %p, size: %u", (void*) dst_obj, dst, dst_obj->size());
+
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   if (!oopDesc::is_null(src_obj)) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
@@ -774,7 +774,7 @@
   uint nworkers = workers->active_workers();

   // Setup collector policy for softref cleaning.
-  bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
+  bool clear_soft_refs = sh->soft_ref_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
   log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
   rp->setup_policy(clear_soft_refs);
   rp->set_active_mt_degree(nworkers);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentThread.cpp
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentThread.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentThread.cpp
@@ -419,7 +419,7 @@
 }

 void ShenandoahConcurrentThread::handle_alloc_failure() {
-  ShenandoahHeap::heap()->collector_policy()->set_should_clear_all_soft_refs(true);
+  ShenandoahHeap::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(true);

   assert(current()->is_Java_thread(), "expect Java thread here");
   if (try_set_alloc_failure_gc()) {
@@ -441,7 +441,7 @@
   // We ran out of memory during evacuation. Cancel evacuation, and schedule a GC.
ShenandoahHeap* heap = ShenandoahHeap::heap(); - heap->collector_policy()->set_should_clear_all_soft_refs(true); + heap->soft_ref_policy()->set_should_clear_all_soft_refs(true); try_set_alloc_failure_gc(); heap->cancel_concgc(GCCause::_shenandoah_allocation_failure_evac); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -88,12 +88,7 @@ ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions, char* bitmap0_base, char* bitmap1_base, size_t bitmap_size, size_t page_size) : - AbstractGangTask("Shenandoah PreTouch", - Universe::is_fully_initialized() ? GCId::current_raw() : - // During VM initialization there is - // no GC cycle that this task can be - // associated with. - GCId::undefined()), + AbstractGangTask("Shenandoah PreTouch"), _bitmap0_base(bitmap0_base), _bitmap1_base(bitmap1_base), _regions(regions), @@ -127,7 +122,6 @@ }; jint ShenandoahHeap::initialize() { - CollectedHeap::pre_initialize(); BrooksPointer::initial_checks(); @@ -342,6 +336,7 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : CollectedHeap(), _shenandoah_policy(policy), + _soft_ref_policy(), _free_regions(NULL), _collection_set(NULL), _bytes_allocated_since_gc_start(0), @@ -2889,3 +2884,7 @@ void ShenandoahHeap::leave_evacuation() { _oom_evac_handler.leave_evacuation(); } + +SoftRefPolicy* ShenandoahHeap::soft_ref_policy() { + return &_soft_ref_policy; +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -25,6 +25,7 @@ #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP #include "gc/shared/markBitMap.hpp" +#include "gc/shared/softRefPolicy.hpp" #include "gc/shenandoah/shenandoahHeapLock.hpp" #include "gc/shenandoah/shenandoahEvacOOMHandler.hpp" #include "gc/shenandoah/shenandoahSharedVariables.hpp" @@ -207,6 +208,7 @@ ShenandoahSharedBitmap _gc_state; ShenandoahHeapLock _lock; ShenandoahCollectorPolicy* _shenandoah_policy; + SoftRefPolicy _soft_ref_policy; size_t _bitmap_size; size_t _bitmap_regions_per_slice; size_t _bitmap_bytes_per_slice; @@ -321,6 +323,7 @@ void do_full_collection(bool clear_all_soft_refs) /* override */; AdaptiveSizePolicy* size_policy() /* override */; CollectorPolicy* collector_policy() const /* override */; + SoftRefPolicy* soft_ref_policy() /* override */; void ensure_parsability(bool retire_tlabs) /* override */; HeapWord* block_start(const void* addr) const /* override */; size_t block_size(const HeapWord* addr) const /* override */; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahStrDedupTable.cpp b/src/hotspot/share/gc/shenandoah/shenandoahStrDedupTable.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahStrDedupTable.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahStrDedupTable.cpp @@ -29,6 +29,7 @@ #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahStrDedupTable.hpp" #include "memory/allocation.hpp" +#include "oops/arrayOop.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/safepoint.hpp" @@ -519,3 +520,11 @@ } return transferred; } + +bool ShenandoahStrDedupEntry::equals(typeArrayOop value1, typeArrayOop value2) { + return (oopDesc::equals(value1, value2) || + (value1->length() == value2->length() && + (!memcmp(value1->base(T_BYTE), + 
value2->base(T_BYTE), + value1->length() * sizeof(jbyte))))); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahStrDedupTable.hpp b/src/hotspot/share/gc/shenandoah/shenandoahStrDedupTable.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahStrDedupTable.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahStrDedupTable.hpp @@ -93,13 +93,7 @@ } private: - static bool equals(typeArrayOop value1, typeArrayOop value2) { - return (oopDesc::equals(value1, value2) || - (value1->length() == value2->length() && - (!memcmp(value1->base(T_BYTE), - value2->base(T_BYTE), - value1->length() * sizeof(jbyte))))); - } + static bool equals(typeArrayOop value1, typeArrayOop value2); }; /* ShenandoahStringDedupTable: diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp @@ -863,7 +863,7 @@ uint nworkers = workers->active_workers(); // Setup collector policy for softref cleaning. - bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/); + bool clear_soft_refs = sh->soft_ref_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/); log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs)); rp->setup_policy(clear_soft_refs); rp->set_active_mt_degree(nworkers); diff --git a/src/hotspot/share/include/jvm.h b/src/hotspot/share/include/jvm.h --- a/src/hotspot/share/include/jvm.h +++ b/src/hotspot/share/include/jvm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -119,6 +119,9 @@ * java.lang.Runtime */ JNIEXPORT void JNICALL +JVM_BeforeHalt(); + +JNIEXPORT void JNICALL JVM_Halt(jint code); JNIEXPORT void JNICALL diff --git a/src/hotspot/share/interpreter/bytecode.cpp b/src/hotspot/share/interpreter/bytecode.cpp --- a/src/hotspot/share/interpreter/bytecode.cpp +++ b/src/hotspot/share/interpreter/bytecode.cpp @@ -123,6 +123,11 @@ assert(cpcache() != NULL, "do not call this from verifier or rewriter"); } +int Bytecode_invoke::size_of_parameters() const { + ArgumentSizeComputer asc(signature()); + return asc.size() + (has_receiver() ? 1 : 0); +} + Symbol* Bytecode_member_ref::klass() const { return constants()->klass_ref_at_noresolve(index()); diff --git a/src/hotspot/share/interpreter/bytecode.hpp b/src/hotspot/share/interpreter/bytecode.hpp --- a/src/hotspot/share/interpreter/bytecode.hpp +++ b/src/hotspot/share/interpreter/bytecode.hpp @@ -197,7 +197,7 @@ BasicType result_type() const; // returns the result type of the getfield or invoke }; -// Abstraction for invoke_{virtual, static, interface, special} +// Abstraction for invoke_{virtual, static, interface, special, dynamic, handle} class Bytecode_invoke: public Bytecode_member_ref { protected: @@ -231,6 +231,8 @@ bool has_appendix() { return cpcache_entry()->has_appendix(); } + int size_of_parameters() const; + private: // Helper to skip verification. 
Used is_valid() to check if the result is really an invoke inline friend Bytecode_invoke Bytecode_invoke_check(const methodHandle& method, int bci); diff --git a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp --- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp +++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,9 +34,12 @@ #include "jvmci/jvmciJavaClasses.hpp" #include "jvmci/jvmciCompilerToVM.hpp" #include "jvmci/jvmciRuntime.hpp" +#include "oops/arrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/objArrayOop.inline.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/safepointMechanism.inline.hpp" #include "utilities/align.hpp" @@ -95,6 +98,32 @@ } } +objArrayOop CodeInstaller::sites() { + return (objArrayOop) JNIHandles::resolve(_sites_handle); +} + +arrayOop CodeInstaller::code() { + return (arrayOop) JNIHandles::resolve(_code_handle); +} + +arrayOop CodeInstaller::data_section() { + return (arrayOop) JNIHandles::resolve(_data_section_handle); +} + +objArrayOop CodeInstaller::data_section_patches() { + return (objArrayOop) JNIHandles::resolve(_data_section_patches_handle); +} + +#ifndef PRODUCT +objArrayOop CodeInstaller::comments() { + return (objArrayOop) JNIHandles::resolve(_comments_handle); +} +#endif + +oop CodeInstaller::word_kind() { + return JNIHandles::resolve(_word_kind_handle); +} + // creates a HotSpot oop map out of the byte arrays provided by DebugInfo OopMap* CodeInstaller::create_oop_map(Handle debug_info, TRAPS) { Handle reference_map(THREAD, DebugInfo::referenceMap(debug_info)); diff --git a/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp b/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp --- a/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp +++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
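Moving the CodeInstaller accessor bodies out of the header, as above, means jvmciCodeInstaller.hpp no longer has to pull in runtime/jniHandles.inline.hpp; only the .cpp that defines them does. A sketch of the pattern in miniature; Handle, Installer, and resolve here are simplified stand-ins, not the real HotSpot types:

  #include <cstdio>

  // --- what would live in the header: declarations only ---
  struct Handle { int* raw; };
  struct Installer {
    Handle _sites_handle;
    int* sites();            // body out of line, so the header needs no
                             // knowledge of how handles are resolved
  };

  // --- what would live in the .cpp, next to the resolver definition ---
  static int* resolve(Handle h) { return h.raw; }  // stand-in for JNIHandles::resolve
  int* Installer::sites() { return resolve(_sites_handle); }

  int main() {
    int x = 7;
    Installer ci{ Handle{ &x } };
    printf("%d\n", *ci.sites());   // 7
  }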
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -188,15 +188,15 @@
   void pd_relocate_JavaMethod(CodeBuffer &cbuf, Handle method, jint pc_offset, TRAPS);
   void pd_relocate_poll(address pc, jint mark, TRAPS);

-  objArrayOop sites() { return (objArrayOop) JNIHandles::resolve(_sites_handle); }
-  arrayOop code() { return (arrayOop) JNIHandles::resolve(_code_handle); }
-  arrayOop data_section() { return (arrayOop) JNIHandles::resolve(_data_section_handle); }
-  objArrayOop data_section_patches() { return (objArrayOop) JNIHandles::resolve(_data_section_patches_handle); }
+  objArrayOop sites();
+  arrayOop code();
+  arrayOop data_section();
+  objArrayOop data_section_patches();
 #ifndef PRODUCT
-  objArrayOop comments() { return (objArrayOop) JNIHandles::resolve(_comments_handle); }
+  objArrayOop comments();
 #endif
-  oop word_kind() { return (oop) JNIHandles::resolve(_word_kind_handle); }
+  oop word_kind();

 public:
diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
@@ -22,6 +22,7 @@
  */

 #include "precompiled.hpp"
+#include "ci/ciUtilities.hpp"
 #include "classfile/javaClasses.inline.hpp"
 #include "code/codeCache.hpp"
 #include "code/scopeDesc.hpp"
@@ -32,8 +33,10 @@
 #include "oops/fieldStreams.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/objArrayOop.inline.hpp"
+#include "oops/typeArrayOop.inline.hpp"
 #include "runtime/fieldDescriptor.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
 #include "jvmci/jvmciRuntime.hpp"
 #include "compiler/abstractCompiler.hpp"
 #include "compiler/compileBroker.hpp"
@@ -47,6 +50,7 @@
 #include "jvmci/jvmciCodeInstaller.hpp"
 #include "jvmci/vmStructs_jvmci.hpp"
 #include "gc/g1/heapRegion.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/timerTrace.hpp"
@@ -204,10 +208,10 @@
   BarrierSet* bs = Universe::heap()->barrier_set();
   if (bs->is_a(BarrierSet::CardTableModRef)) {
-    jbyte* base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
-    assert(base != 0, "unexpected byte_map_base");
+    jbyte* base = ci_card_table_address();
+    assert(base != NULL, "unexpected byte_map_base");
     cardtable_start_address = base;
-    cardtable_shift = CardTableModRefBS::card_shift;
+    cardtable_shift = CardTable::card_shift;
   } else {
     // No card mark barriers
     cardtable_start_address = 0;
diff --git a/src/hotspot/share/jvmci/jvmciJavaClasses.hpp b/src/hotspot/share/jvmci/jvmciJavaClasses.hpp
--- a/src/hotspot/share/jvmci/jvmciJavaClasses.hpp
+++ b/src/hotspot/share/jvmci/jvmciJavaClasses.hpp
@@ -28,6 +28,7 @@
 #include "oops/access.inline.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/jniHandles.inline.hpp"

 class JVMCIJavaClasses : AllStatic {
 public:
diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
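The cardtable_start_address/cardtable_shift pair exported to JVMCI above encodes the standard card-marking formula: each 2^card_shift-byte slice of heap maps to one byte in the card table. A standalone sketch with HotSpot's usual parameters (card_shift == 9, i.e. 512-byte cards, and dirty-card value 0); the heap base and field address are made up:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const int card_shift = 9;                    // 512-byte cards (HotSpot default)
    const uintptr_t heap_start = 0x100000;       // made-up heap base
    uint8_t cards[64] = {0};                     // card table covering 64 * 512 bytes

    // HotSpot biases byte_map_base so that 'addr >> card_shift' indexes it
    // directly; here the bias subtraction is kept explicit instead.
    uintptr_t field_addr = heap_start + 0x2345;  // some store target
    size_t index = (field_addr >> card_shift) - (heap_start >> card_shift);
    cards[index] = 0;                            // dirty_card_val() is 0
    printf("card index: %zu\n", index);          // 17
  }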
* * This code is free software; you can redistribute it and/or modify it @@ -40,6 +40,7 @@ #include "oops/objArrayOop.inline.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/interfaceSupport.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/reflection.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/threadSMR.hpp" @@ -116,10 +117,7 @@ oop obj = ik->allocate_instance(CHECK); thread->set_vm_result(obj); JRT_BLOCK_END; - - if (ReduceInitialCardMarks) { - new_store_pre_barrier(thread); - } + SharedRuntime::on_slowpath_allocation_exit(thread); JRT_END JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_array(JavaThread* thread, Klass* array_klass, jint length)) @@ -151,29 +149,9 @@ } } JRT_BLOCK_END; - - if (ReduceInitialCardMarks) { - new_store_pre_barrier(thread); - } + SharedRuntime::on_slowpath_allocation_exit(thread); JRT_END -void JVMCIRuntime::new_store_pre_barrier(JavaThread* thread) { - // After any safepoint, just before going back to compiled code, - // we inform the GC that we will be doing initializing writes to - // this object in the future without emitting card-marks, so - // GC may take any compensating steps. - // NOTE: Keep this code consistent with GraphKit::store_barrier. - - oop new_obj = thread->vm_result(); - if (new_obj == NULL) return; - - assert(Universe::heap()->can_elide_tlab_store_barriers(), - "compiler must check this first"); - // GC may decide to give back a safer copy of new_obj. - new_obj = Universe::heap()->new_store_pre_barrier(thread, new_obj); - thread->set_vm_result(new_obj); -} - JRT_ENTRY(void, JVMCIRuntime::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims)) assert(klass->is_klass(), "not a class"); assert(rank >= 1, "rank must be nonzero"); @@ -653,6 +631,11 @@ return Handle(THREAD, (oop)result.get_jobject()); } +Handle JVMCIRuntime::get_HotSpotJVMCIRuntime(TRAPS) { + initialize_JVMCI(CHECK_(Handle())); + return Handle(THREAD, JNIHandles::resolve_non_null(_HotSpotJVMCIRuntime_instance)); +} + void JVMCIRuntime::initialize_HotSpotJVMCIRuntime(TRAPS) { guarantee(!_HotSpotJVMCIRuntime_initialized, "cannot reinitialize HotSpotJVMCIRuntime"); JVMCIRuntime::initialize_well_known_classes(CHECK); diff --git a/src/hotspot/share/jvmci/jvmciRuntime.hpp b/src/hotspot/share/jvmci/jvmciRuntime.hpp --- a/src/hotspot/share/jvmci/jvmciRuntime.hpp +++ b/src/hotspot/share/jvmci/jvmciRuntime.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
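Both JVMCI allocation slow paths above now end in SharedRuntime::on_slowpath_allocation_exit, replacing the JVMCI-private new_store_pre_barrier copy of the ReduceInitialCardMarks logic. The shape of that hook, as a hedged sketch with stand-in names (the real version consults the heap's ReduceInitialCardMarks support and, as the deleted comment noted, the GC may hand back a safer copy of the object):

  #include <cstdio>

  struct ThreadStub { void* vm_result; };

  // Stand-in for the GC's chance to take compensating steps for a new
  // object whose initial card marks the compiled code will elide.
  static void* gc_fixup_new_object(void* obj) { return obj; }

  // One shared exit hook instead of per-compiler copies of the logic.
  static void on_slowpath_allocation_exit_sketch(ThreadStub* t) {
    if (t->vm_result == nullptr) return;
    t->vm_result = gc_fixup_new_object(t->vm_result);
  }

  int main() {
    int obj = 42;
    ThreadStub t{ &obj };
    on_slowpath_allocation_exit_sketch(&t);
    printf("%d\n", *(int*)t.vm_result);  // 42
  }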
* * This code is free software; you can redistribute it and/or modify it @@ -73,10 +73,7 @@ /** * Gets the singleton HotSpotJVMCIRuntime instance, initializing it if necessary */ - static Handle get_HotSpotJVMCIRuntime(TRAPS) { - initialize_JVMCI(CHECK_(Handle())); - return Handle(THREAD, JNIHandles::resolve_non_null(_HotSpotJVMCIRuntime_instance)); - } + static Handle get_HotSpotJVMCIRuntime(TRAPS); static jobject get_HotSpotJVMCIRuntime_jobject(TRAPS) { initialize_JVMCI(CHECK_NULL); @@ -154,7 +151,6 @@ static void write_barrier_pre(JavaThread* thread, oopDesc* obj); static void write_barrier_post(JavaThread* thread, void* card); static jboolean validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child); - static void new_store_pre_barrier(JavaThread* thread); // used to throw exceptions from compiled JVMCI code static void throw_and_post_jvmti_exception(JavaThread* thread, const char* exception, const char* message); diff --git a/src/hotspot/share/jvmci/jvmci_globals.cpp b/src/hotspot/share/jvmci/jvmci_globals.cpp --- a/src/hotspot/share/jvmci/jvmci_globals.cpp +++ b/src/hotspot/share/jvmci/jvmci_globals.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -80,6 +80,15 @@ FLAG_SET_DEFAULT(EnableJVMCI, true); } + if (!EnableJVMCI) { + // Switch off eager JVMCI initialization if JVMCI is disabled. + // Don't throw error if EagerJVMCI is set to allow testing. + if (EagerJVMCI) { + FLAG_SET_DEFAULT(EagerJVMCI, false); + } + } + JVMCI_FLAG_CHECKED(EagerJVMCI) + CHECK_NOT_SET(JVMCITraceLevel, EnableJVMCI) CHECK_NOT_SET(JVMCICounterSize, EnableJVMCI) CHECK_NOT_SET(JVMCICountersExcludeCompiler, EnableJVMCI) diff --git a/src/hotspot/share/jvmci/jvmci_globals.hpp b/src/hotspot/share/jvmci/jvmci_globals.hpp --- a/src/hotspot/share/jvmci/jvmci_globals.hpp +++ b/src/hotspot/share/jvmci/jvmci_globals.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,6 +55,9 @@ experimental(bool, BootstrapJVMCI, false, \ "Bootstrap JVMCI before running Java main method") \ \ + experimental(bool, EagerJVMCI, false, \ + "Force eager JVMCI initialization") \ + \ experimental(bool, PrintBootstrap, true, \ "Print JVMCI bootstrap progress and summary") \ \ diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
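The new EagerJVMCI flag above is quietly reset rather than rejected when JVMCI itself is off; with both experimental flags enabled (-XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI -XX:+EagerJVMCI) JVMCI initializes at startup instead of on first use. A minimal sketch of that dependent-flag pattern, with plain globals standing in for HotSpot's flag machinery:

  #include <cstdio>

  static bool EnableJVMCI = false;
  static bool EagerJVMCI  = true;   // user asked for eager init

  static void check_jvmci_flags() {
    if (!EnableJVMCI && EagerJVMCI) {
      // Eager initialization is meaningless without JVMCI itself; fall
      // back to the default silently so test runs don't fail at startup.
      EagerJVMCI = false;
    }
  }

  int main() {
    check_jvmci_flags();
    printf("EagerJVMCI=%d\n", EagerJVMCI);  // prints 0
  }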
* * This code is free software; you can redistribute it and/or modify it @@ -426,7 +426,7 @@ declare_constant(BitData::null_seen_flag) \ declare_constant(BranchData::not_taken_off_set) \ \ - declare_constant_with_value("CardTableModRefBS::dirty_card", CardTableModRefBS::dirty_card_val()) \ + declare_constant_with_value("CardTable::dirty_card", CardTable::dirty_card_val()) \ \ declare_constant(CodeInstaller::VERIFIED_ENTRY) \ declare_constant(CodeInstaller::UNVERIFIED_ENTRY) \ @@ -653,7 +653,7 @@ static_field(HeapRegion, LogOfHRGrainBytes, int) #define VM_INT_CONSTANTS_G1(declare_constant, declare_constant_with_value, declare_preprocessor_constant) \ - declare_constant_with_value("G1SATBCardTableModRefBS::g1_young_gen", G1SATBCardTableModRefBS::g1_young_card_val()) + declare_constant_with_value("G1CardTable::g1_young_gen", G1CardTable::g1_young_card_val()) #endif // INCLUDE_ALL_GCS diff --git a/src/hotspot/share/logging/logConfiguration.cpp b/src/hotspot/share/logging/logConfiguration.cpp --- a/src/hotspot/share/logging/logConfiguration.cpp +++ b/src/hotspot/share/logging/logConfiguration.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,8 +30,8 @@ #include "logging/logDiagnosticCommand.hpp" #include "logging/logFileOutput.hpp" #include "logging/logOutput.hpp" +#include "logging/logSelectionList.hpp" #include "logging/logStream.hpp" -#include "logging/logTagLevelExpression.hpp" #include "logging/logTagSet.hpp" #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" @@ -78,19 +78,25 @@ #endif void LogConfiguration::post_initialize() { + // Reset the reconfigured status of all outputs + for (size_t i = 0; i < _n_outputs; i++) { + _outputs[i]->_reconfigured = false; + } + LogDiagnosticCommand::registerCommand(); Log(logging) log; if (log.is_info()) { log.info("Log configuration fully initialized."); log_develop_info(logging)("Develop logging is available."); - if (log.is_debug()) { - LogStream debug_stream(log.debug()); - describe(&debug_stream); - if (log.is_trace()) { - LogStream trace_stream(log.trace()); - LogTagSet::list_all_tagsets(&trace_stream); - } - } + + LogStream info_stream(log.info()); + describe_available(&info_stream); + + LogStream debug_stream(log.debug()); + LogTagSet::list_all_tagsets(&debug_stream); + + ConfigurationLock cl; + describe_current_configuration(&info_stream); } } @@ -207,20 +213,22 @@ delete output; } -void LogConfiguration::configure_output(size_t idx, const LogTagLevelExpression& tag_level_expression, const LogDecorators& decorators) { +void LogConfiguration::configure_output(size_t idx, const LogSelectionList& selections, const LogDecorators& decorators) { assert(ConfigurationLock::current_thread_has_lock(), "Must hold configuration lock to call this function."); assert(idx < _n_outputs, "Invalid index, idx = " SIZE_FORMAT " and _n_outputs = " SIZE_FORMAT, idx, _n_outputs); LogOutput* output = _outputs[idx]; - // Clear the previous config description - output->clear_config_string(); + output->_reconfigured = true; + + size_t on_level[LogLevel::Count] = {0}; bool enabled = false; for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) { - LogLevelType level = tag_level_expression.level_for(*ts); + LogLevelType level = selections.level_for(*ts); // 
Ignore tagsets that do not, and will not log on the output
     if (!ts->has_output(output) && (level == LogLevel::NotMentioned || level == LogLevel::Off)) {
+      on_level[LogLevel::Off]++;
       continue;
     }
@@ -233,20 +241,18 @@
     // Set the new level, if it changed
     if (level != LogLevel::NotMentioned) {
       ts->set_output_level(output, level);
+    } else {
+      // Look up the previously set level for this output on this tagset
+      level = ts->level_for(output);
     }

     if (level != LogLevel::Off) {
       // Keep track of whether or not the output is ever used by some tagset
       enabled = true;
+    }
-      if (level == LogLevel::NotMentioned) {
-        // Look up the previously set level for this output on this tagset
-        level = ts->level_for(output);
-      }
-
-      // Update the config description with this tagset and level
-      output->add_to_config_string(ts, level);
-    }
+    // Keep track of the number of tag sets on each level
+    on_level[level]++;
   }

   // It is now safe to set the new decorators for the actual output
@@ -257,17 +263,14 @@
     ts->update_decorators();
   }

-  if (enabled) {
-    assert(strlen(output->config_string()) > 0,
-           "Should always have a config description if the output is enabled.");
-  } else if (idx > 1) {
-    // Output is unused and should be removed.
+  if (!enabled && idx > 1) {
+    // Output is unused and should be removed, unless it is stdout/stderr (idx < 2)
     delete_output(idx);
-  } else {
-    // Output is either stdout or stderr, which means we can't remove it.
-    // Update the config description to reflect that the output is disabled.
-    output->set_config_string("all=off");
+    return;
   }
+
+  output->update_config_string(on_level);
+  assert(strlen(output->config_string()) > 0, "should always have a config description");
 }

 void LogConfiguration::disable_output(size_t idx) {
@@ -299,11 +302,11 @@
 void LogConfiguration::configure_stdout(LogLevelType level, int exact_match, ...) {
   size_t i;
   va_list ap;
-  LogTagLevelExpression expr;
+  LogTagType tags[LogTag::MaxTags];
   va_start(ap, exact_match);
   for (i = 0; i < LogTag::MaxTags; i++) {
     LogTagType tag = static_cast<LogTagType>(va_arg(ap, int));
-    expr.add_tag(tag);
+    tags[i] = tag;
     if (tag == LogTag::__NO_TAG) {
       assert(i > 0, "Must specify at least one tag!");
       break;
     }
   }
@@ -313,17 +316,14 @@
          "Too many tags specified! Can only have up to " SIZE_FORMAT " tags in a tag set.", LogTag::MaxTags);
   va_end(ap);

-  if (!exact_match) {
-    expr.set_allow_other_tags();
-  }
-  expr.set_level(level);
-  expr.new_combination();
-  assert(expr.verify_tagsets(),
-         "configure_stdout() called with invalid/non-existing tag set");
+  LogSelection selection(tags, !exact_match, level);
+  assert(selection.tag_sets_selected() > 0,
+         "configure_stdout() called with invalid/non-existing log selection");
+  LogSelectionList list(selection);

   // Apply configuration to stdout (output #0), with the same decorators as before.
   ConfigurationLock cl;
-  configure_output(0, expr, _outputs[0]->decorators());
+  configure_output(0, list, _outputs[0]->decorators());
   notify_update_listeners();
 }
@@ -367,14 +367,24 @@
   bool success = parse_log_arguments(output, what, decorators, output_options, &ss);

   if (ss.size() > 0) {
-    errbuf[strlen(errbuf) - 1] = '\0'; // Strip trailing newline
     // If it failed, log the error. If it didn't fail, but something was written
     // to the stream, log it as a warning.
-    if (!success) {
-      log_error(logging)("%s", ss.base());
-    } else {
-      log_warning(logging)("%s", ss.base());
-    }
+    LogLevelType level = success ? LogLevel::Warning : LogLevel::Error;
+
+    Log(logging) log;
+    char* start = errbuf;
+    char* end = strchr(start, '\n');
+    assert(end != NULL, "line must end with newline '%s'", start);
+    do {
+      assert(start < errbuf + sizeof(errbuf) &&
+             end < errbuf + sizeof(errbuf),
+             "buffer overflow");
+      *end = '\0';
+      log.write(level, "%s", start);
+      start = end + 1;
+      end = strchr(start, '\n');
+      assert(end != NULL || *start == '\0', "line must end with newline '%s'", start);
+    } while (end != NULL);
   }

   os::free(copy);
@@ -382,7 +392,7 @@
 }

 bool LogConfiguration::parse_log_arguments(const char* outputstr,
-                                           const char* what,
+                                           const char* selectionstr,
                                            const char* decoratorstr,
                                            const char* output_options,
                                            outputStream* errstream) {
@@ -391,8 +401,8 @@
     outputstr = "stdout";
   }

-  LogTagLevelExpression expr;
-  if (!expr.parse(what, errstream)) {
+  LogSelectionList selections;
+  if (!selections.parse(selectionstr, errstream)) {
     return false;
   }
@@ -433,13 +443,13 @@
       return false;
     }
   }
-  configure_output(idx, expr, decorators);
+  configure_output(idx, selections, decorators);
   notify_update_listeners();
-  expr.verify_tagsets(errstream);
+  selections.verify_selections(errstream);
   return true;
 }

-void LogConfiguration::describe_available(outputStream* out){
+void LogConfiguration::describe_available(outputStream* out) {
   out->print("Available log levels:");
   for (size_t i = 0; i < LogLevel::Count; i++) {
     out->print("%s %s", (i == 0 ? "" : ","), LogLevel::name(static_cast<LogLevelType>(i)));
   }
@@ -454,19 +464,19 @@
   out->cr();

   out->print("Available log tags:");
-  for (size_t i = 1; i < LogTag::Count; i++) {
-    out->print("%s %s", (i == 1 ? "" : ","), LogTag::name(static_cast<LogTagType>(i)));
-  }
-  out->cr();
+  LogTag::list_tags(out);

   LogTagSet::describe_tagsets(out);
 }

-void LogConfiguration::describe_current_configuration(outputStream* out){
+void LogConfiguration::describe_current_configuration(outputStream* out) {
   out->print_cr("Log output configuration:");
   for (size_t i = 0; i < _n_outputs; i++) {
-    out->print("#" SIZE_FORMAT ": ", i);
+    out->print(" #" SIZE_FORMAT ": ", i);
     _outputs[i]->describe(out);
+    if (_outputs[i]->is_reconfigured()) {
+      out->print(" (reconfigured)");
+    }
     out->cr();
   }
 }
@@ -477,69 +487,89 @@
   describe_current_configuration(out);
 }

-void LogConfiguration::print_command_line_help(FILE* out) {
-  jio_fprintf(out, "-Xlog Usage: -Xlog[:[what][:[output][:[decorators][:output-options]]]]\n"
-              "\t where 'what' is a combination of tags and levels of the form tag1[+tag2...][*][=level][,...]\n"
-              "\t Unless wildcard (*) is specified, only log messages tagged with exactly the tags specified will be matched.\n\n");
+void LogConfiguration::print_command_line_help(outputStream* out) {
+  out->print_cr("-Xlog Usage: -Xlog[:[selections][:[output][:[decorators][:output-options]]]]");
+  out->print_cr("\t where 'selections' are combinations of tags and levels of the form tag1[+tag2...][*][=level][,...]");
+  out->print_cr("\t NOTE: Unless wildcard (*) is specified, only log messages tagged with exactly the tags specified will be matched.");
+  out->cr();

-  jio_fprintf(out, "Available log levels:\n");
+  out->print_cr("Available log levels:");
   for (size_t i = 0; i < LogLevel::Count; i++) {
-    jio_fprintf(out, "%s %s", (i == 0 ? "" : ","), LogLevel::name(static_cast<LogLevelType>(i)));
+    out->print("%s %s", (i == 0 ? "" : ","), LogLevel::name(static_cast<LogLevelType>(i)));
"" : ","), LogLevel::name(static_cast(i))); } + out->cr(); + out->cr(); - jio_fprintf(out, "\n\nAvailable log decorators: \n"); + out->print_cr("Available log decorators: "); for (size_t i = 0; i < LogDecorators::Count; i++) { LogDecorators::Decorator d = static_cast(i); - jio_fprintf(out, "%s %s (%s)", (i == 0 ? "" : ","), LogDecorators::name(d), LogDecorators::abbreviation(d)); + out->print("%s %s (%s)", (i == 0 ? "" : ","), LogDecorators::name(d), LogDecorators::abbreviation(d)); } - jio_fprintf(out, "\n Decorators can also be specified as 'none' for no decoration.\n\n"); + out->cr(); + out->print_cr(" Decorators can also be specified as 'none' for no decoration."); + out->cr(); - jio_fprintf(out, "Available log tags:\n"); - for (size_t i = 1; i < LogTag::Count; i++) { - jio_fprintf(out, "%s %s", (i == 1 ? "" : ","), LogTag::name(static_cast(i))); - } - jio_fprintf(out, "\n Specifying 'all' instead of a tag combination matches all tag combinations.\n\n"); + out->print_cr("Available log tags:"); + LogTag::list_tags(out); + out->print_cr(" Specifying 'all' instead of a tag combination matches all tag combinations."); + out->cr(); - fileStream stream(out, false); - LogTagSet::describe_tagsets(&stream); + LogTagSet::describe_tagsets(out); - jio_fprintf(out, "\nAvailable log outputs:\n" - " stdout, stderr, file=\n" - " Specifying %%p and/or %%t in the filename will expand to the JVM's PID and startup timestamp, respectively.\n\n" + out->print_cr("\nAvailable log outputs:"); + out->print_cr(" stdout/stderr"); + out->print_cr(" file="); + out->print_cr(" If the filename contains %%p and/or %%t, they will expand to the JVM's PID and startup timestamp, respectively."); + out->print_cr(" Additional output-options for file outputs:"); + out->print_cr(" filesize=.. - Target byte size for log rotation (supports K/M/G suffix)." + " If set to 0, log rotation will not trigger automatically," + " but can be performed manually (see the VM.log DCMD)."); + out->print_cr(" filecount=.. - Number of files to keep in rotation (not counting the active file)." + " If set to 0, log rotation is disabled." 
+ " This will cause existing log files to be overwritten."); + out->cr(); - "Some examples:\n" - " -Xlog\n" - "\t Log all messages using 'info' level to stdout with 'uptime', 'levels' and 'tags' decorations.\n" - "\t (Equivalent to -Xlog:all=info:stdout:uptime,levels,tags).\n\n" + out->print_cr("Some examples:"); + out->print_cr(" -Xlog"); + out->print_cr("\t Log all messages up to 'info' level to stdout with 'uptime', 'levels' and 'tags' decorations."); + out->print_cr("\t (Equivalent to -Xlog:all=info:stdout:uptime,levels,tags)."); + out->cr(); - " -Xlog:gc\n" - "\t Log messages tagged with 'gc' tag using 'info' level to stdout, with default decorations.\n\n" + out->print_cr(" -Xlog:gc"); + out->print_cr("\t Log messages tagged with 'gc' tag up to 'info' level to stdout, with default decorations."); + out->cr(); - " -Xlog:gc,safepoint\n" - "\t Log messages tagged either with 'gc' or 'safepoint' tags, both using 'info' level, to stdout, with default decorations.\n" - "\t (Messages tagged with both 'gc' and 'safepoint' will not be logged.)\n\n" + out->print_cr(" -Xlog:gc,safepoint"); + out->print_cr("\t Log messages tagged either with 'gc' or 'safepoint' tags, both up to 'info' level, to stdout, with default decorations."); + out->print_cr("\t (Messages tagged with both 'gc' and 'safepoint' will not be logged.)"); + out->cr(); - " -Xlog:gc+ref=debug\n" - "\t Log messages tagged with both 'gc' and 'ref' tags, using 'debug' level, to stdout, with default decorations.\n" - "\t (Messages tagged only with one of the two tags will not be logged.)\n\n" + out->print_cr(" -Xlog:gc+ref=debug"); + out->print_cr("\t Log messages tagged with both 'gc' and 'ref' tags, up to 'debug' level, to stdout, with default decorations."); + out->print_cr("\t (Messages tagged only with one of the two tags will not be logged.)"); + out->cr(); - " -Xlog:gc=debug:file=gc.txt:none\n" - "\t Log messages tagged with 'gc' tag using 'debug' level to file 'gc.txt' with no decorations.\n\n" + out->print_cr(" -Xlog:gc=debug:file=gc.txt:none"); + out->print_cr("\t Log messages tagged with 'gc' tag up to 'debug' level to file 'gc.txt' with no decorations."); + out->cr(); - " -Xlog:gc=trace:file=gctrace.txt:uptimemillis,pids:filecount=5,filesize=1m\n" - "\t Log messages tagged with 'gc' tag using 'trace' level to a rotating fileset of 5 files of size 1MB,\n" - "\t using the base name 'gctrace.txt', with 'uptimemillis' and 'pid' decorations.\n\n" + out->print_cr(" -Xlog:gc=trace:file=gctrace.txt:uptimemillis,pids:filecount=5,filesize=1m"); + out->print_cr("\t Log messages tagged with 'gc' tag up to 'trace' level to a rotating fileset of 5 files of size 1MB,"); + out->print_cr("\t using the base name 'gctrace.txt', with 'uptimemillis' and 'pid' decorations."); + out->cr(); - " -Xlog:gc::uptime,tid\n" - "\t Log messages tagged with 'gc' tag using 'info' level to output 'stdout', using 'uptime' and 'tid' decorations.\n\n" + out->print_cr(" -Xlog:gc::uptime,tid"); + out->print_cr("\t Log messages tagged with 'gc' tag up to 'info' level to output 'stdout', using 'uptime' and 'tid' decorations."); + out->cr(); - " -Xlog:gc*=info,safepoint*=off\n" - "\t Log messages tagged with at least 'gc' using 'info' level, but turn off logging of messages tagged with 'safepoint'.\n" - "\t (Messages tagged with both 'gc' and 'safepoint' will not be logged.)\n\n" + out->print_cr(" -Xlog:gc*=info,safepoint*=off"); + out->print_cr("\t Log messages tagged with at least 'gc' up to 'info' level, but turn off logging of messages tagged with 'safepoint'."); 
+ out->print_cr("\t (Messages tagged with both 'gc' and 'safepoint' will not be logged.)"); + out->cr(); - " -Xlog:disable -Xlog:safepoint=trace:safepointtrace.txt\n" - "\t Turn off all logging, including warnings and errors,\n" - "\t and then enable messages tagged with 'safepoint' using 'trace' level to file 'safepointtrace.txt'.\n"); + out->print_cr(" -Xlog:disable -Xlog:safepoint=trace:safepointtrace.txt"); + out->print_cr("\t Turn off all logging, including warnings and errors,"); + out->print_cr("\t and then enable messages tagged with 'safepoint' up to 'trace' level to file 'safepointtrace.txt'."); } void LogConfiguration::rotate_all_outputs() { diff --git a/src/hotspot/share/logging/logConfiguration.hpp b/src/hotspot/share/logging/logConfiguration.hpp --- a/src/hotspot/share/logging/logConfiguration.hpp +++ b/src/hotspot/share/logging/logConfiguration.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ class LogOutput; class LogDecorators; -class LogTagLevelExpression; +class LogSelectionList; // Global configuration of logging. Handles parsing and configuration of the logging framework, // and manages the list of configured log outputs. The actual tag and level configuration is @@ -38,6 +38,7 @@ // are iterated over and updated accordingly. class LogConfiguration : public AllStatic { friend class VMError; + friend class LogTestFixture; public: // Function for listeners typedef void (*UpdateListenerFunction)(void); @@ -75,7 +76,7 @@ static size_t find_output(const char* name); // Configure output (add or update existing configuration) to log on tag-level combination using specified decorators. - static void configure_output(size_t idx, const LogTagLevelExpression& tag_level_expression, const LogDecorators& decorators); + static void configure_output(size_t idx, const LogSelectionList& tag_level_expression, const LogDecorators& decorators); // This should be called after any configuration change while still holding ConfigurationLock static void notify_update_listeners(); @@ -118,7 +119,7 @@ static void describe(outputStream* out); // Prints usage help for command line log configuration. - static void print_command_line_help(FILE* out); + static void print_command_line_help(outputStream* out); // Rotates all LogOutput static void rotate_all_outputs(); diff --git a/src/hotspot/share/logging/logFileOutput.cpp b/src/hotspot/share/logging/logFileOutput.cpp --- a/src/hotspot/share/logging/logFileOutput.cpp +++ b/src/hotspot/share/logging/logFileOutput.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -169,6 +169,7 @@ char* equals_pos = strchr(pos, '='); if (equals_pos == NULL) { + errstream->print_cr("Invalid option '%s' for log file output.", pos); success = false; break; } diff --git a/src/hotspot/share/logging/logFileOutput.hpp b/src/hotspot/share/logging/logFileOutput.hpp --- a/src/hotspot/share/logging/logFileOutput.hpp +++ b/src/hotspot/share/logging/logFileOutput.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -85,7 +85,7 @@ virtual int write(const LogDecorations& decorations, const char* msg); virtual int write(LogMessageBuffer::Iterator msg_iterator); virtual void force_rotate(); - virtual void describe(outputStream *out); + virtual void describe(outputStream* out); virtual const char* name() const { return _name; diff --git a/src/hotspot/share/logging/logLevel.cpp b/src/hotspot/share/logging/logLevel.cpp --- a/src/hotspot/share/logging/logLevel.cpp +++ b/src/hotspot/share/logging/logLevel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "logging/logLevel.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/stringUtils.hpp" const char* LogLevel::_name[] = { "off", @@ -40,3 +41,19 @@ } return Invalid; } + +LogLevelType LogLevel::fuzzy_match(const char *level) { + size_t len = strlen(level); + LogLevelType match = LogLevel::Invalid; + double best = 0.4; // required similarity to be considered a match + for (uint i = 1; i < Count; i++) { + LogLevelType cur = static_cast<LogLevelType>(i); + const char* levelname = LogLevel::name(cur); + double score = StringUtils::similarity(level, len, levelname, strlen(levelname)); + if (score >= best) { + match = cur; + best = score; + } + } + return match; +} diff --git a/src/hotspot/share/logging/logLevel.hpp b/src/hotspot/share/logging/logLevel.hpp --- a/src/hotspot/share/logging/logLevel.hpp +++ b/src/hotspot/share/logging/logLevel.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -71,6 +71,7 @@ } static LogLevel::type from_string(const char* str); + static LogLevel::type fuzzy_match(const char *level); private: static const char* _name[]; diff --git a/src/hotspot/share/logging/logMessageBuffer.cpp b/src/hotspot/share/logging/logMessageBuffer.cpp --- a/src/hotspot/share/logging/logMessageBuffer.cpp +++ b/src/hotspot/share/logging/logMessageBuffer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -110,7 +110,7 @@ va_list copy; va_copy(copy, args); - written += (size_t)os::log_vsnprintf(current_buffer_position, remaining_buffer_length, fmt, copy) + 1; + written += (size_t)os::vsnprintf(current_buffer_position, remaining_buffer_length, fmt, copy) + 1; va_end(copy); if (written > _message_buffer_capacity - _message_buffer_size) { assert(attempts == 0, "Second attempt should always have a sufficiently large buffer (resized to fit)."); diff --git a/src/hotspot/share/logging/logOutput.cpp b/src/hotspot/share/logging/logOutput.cpp --- a/src/hotspot/share/logging/logOutput.cpp +++ b/src/hotspot/share/logging/logOutput.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,8 +23,10 @@ */ #include "precompiled.hpp" #include "jvm.h" +#include "logging/log.hpp" #include "logging/logFileStreamOutput.hpp" #include "logging/logOutput.hpp" +#include "logging/logSelection.hpp" #include "logging/logTagSet.hpp" #include "memory/allocation.inline.hpp" #include "runtime/mutexLocker.hpp" @@ -34,11 +36,23 @@ os::free(_config_string); } -void LogOutput::clear_config_string() { - os::free(_config_string); - _config_string_buffer_size = InitialConfigBufferSize; - _config_string = NEW_C_HEAP_ARRAY(char, _config_string_buffer_size, mtLogging); - _config_string[0] = '\0'; +void LogOutput::describe(outputStream *out) { + out->print("%s ", name()); + out->print_raw(config_string()); // raw printed because length might exceed O_BUFLEN + + bool has_decorator = false; + char delimiter = ' '; + for (size_t d = 0; d < LogDecorators::Count; d++) { + LogDecorators::Decorator decorator = static_cast<LogDecorators::Decorator>(d); + if (decorators().is_decorator(decorator)) { + has_decorator = true; + out->print("%c%s", delimiter, LogDecorators::name(decorator)); + delimiter = ','; + } + } + if (!has_decorator) { + out->print(" none"); + } } void LogOutput::set_config_string(const char* string) { @@ -47,7 +61,7 @@ _config_string_buffer_size = strlen(_config_string) + 1; } -void LogOutput::add_to_config_string(const LogTagSet* ts, LogLevelType level) { +void LogOutput::add_to_config_string(const LogSelection& selection) { if (_config_string_buffer_size < InitialConfigBufferSize) { _config_string_buffer_size = InitialConfigBufferSize; _config_string = REALLOC_C_HEAP_ARRAY(char, _config_string, _config_string_buffer_size, mtLogging); @@ -60,7 +74,8 @@ } for (;;) { - int ret = ts->label(_config_string + offset, _config_string_buffer_size - offset, "+"); + int ret = selection.describe(_config_string + offset, + _config_string_buffer_size - offset); if (ret == -1) { // Double the buffer size and retry _config_string_buffer_size *= 2; @@ -69,30 +84,257 @@ } break; }; +} - offset = strlen(_config_string); - for (;;) { - int ret = jio_snprintf(_config_string + offset, _config_string_buffer_size - offset, "=%s", LogLevel::name(level)); - if (ret == -1) { - _config_string_buffer_size *= 2; - _config_string = REALLOC_C_HEAP_ARRAY(char, _config_string, _config_string_buffer_size, mtLogging); + +static int tag_cmp(const void *a, const void *b) { + return *static_cast<const LogTagType*>(a) - *static_cast<const LogTagType*>(b); +} + +static void sort_tags(LogTagType tags[LogTag::MaxTags]) { + size_t ntags = 0; + while (tags[ntags] != LogTag::__NO_TAG) { +
ntags++; + } + qsort(tags, ntags, sizeof(*tags), tag_cmp); +} + +static const size_t MaxSubsets = 1 << LogTag::MaxTags; + +// Fill result with all possible subsets of the given tag set. Empty set not included. +// For example, if tags is {gc, heap} then the result is {{gc}, {heap}, {gc, heap}}. +// (Arguments with default values are intended exclusively for recursive calls.) +static void generate_all_subsets_of(LogTagType result[MaxSubsets][LogTag::MaxTags], + size_t* result_size, + const LogTagType tags[LogTag::MaxTags], + LogTagType subset[LogTag::MaxTags] = NULL, + const size_t subset_size = 0, + const size_t depth = 0) { + assert(subset_size <= LogTag::MaxTags, "subset must never have more than MaxTags tags"); + assert(depth <= LogTag::MaxTags, "recursion depth overflow"); + + if (subset == NULL) { + assert(*result_size == 0, "outer (non-recursive) call expects result_size to be 0"); + // Make subset the first element in the result array initially + subset = result[0]; + } + assert((void*) subset >= &result[0] && (void*) subset <= &result[MaxSubsets - 1], + "subset should always point to element in result"); + + if (depth == LogTag::MaxTags || tags[depth] == LogTag::__NO_TAG) { + if (subset_size == 0) { + // Ignore empty subset + return; + } + if (subset_size != LogTag::MaxTags) { + subset[subset_size] = LogTag::__NO_TAG; + } + assert(*result_size < MaxSubsets, "subsets overflow"); + *result_size += 1; + + // Bump subset and copy over current state + memcpy(result[*result_size], subset, sizeof(*subset) * LogTag::MaxTags); + subset = result[*result_size]; + return; + } + + // Recurse, excluding the tag of the current depth + generate_all_subsets_of(result, result_size, tags, subset, subset_size, depth + 1); + // ... and with it included + subset[subset_size] = tags[depth]; + generate_all_subsets_of(result, result_size, tags, subset, subset_size + 1, depth + 1); +} + +// Generate all possible selections (for the given level) based on the given tag set, +// and add them to the selections array (growing it as necessary). 
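+// A tag set with n tags yields 2^n - 1 non-empty subsets, so MaxSubsets
+// (1 << LogTag::MaxTags) bounds the result array: 2^MaxTags - 1 subsets plus
+// one slot of slack for the scratch entry the recursion copies through.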
+static void add_selections(LogSelection** selections, + size_t* n_selections, + size_t* selections_cap, + const LogTagSet& tagset, + LogLevelType level) { + LogTagType tags[LogTag::MaxTags] = { LogTag::__NO_TAG }; + for (size_t i = 0; i < tagset.ntags(); i++) { + tags[i] = tagset.tag(i); + } + + size_t n_subsets = 0; + LogTagType subsets[MaxSubsets][LogTag::MaxTags]; + generate_all_subsets_of(subsets, &n_subsets, tags); + + for (size_t i = 0; i < n_subsets; i++) { + // Always keep tags sorted + sort_tags(subsets[i]); + + // Ignore subsets already represented in selections + bool unique = true; + for (size_t sel = 0; sel < *n_selections; sel++) { + if (level == (*selections)[sel].level() && (*selections)[sel].consists_of(subsets[i])) { + unique = false; + break; + } + } + if (!unique) { continue; } - break; - } -} -void LogOutput::describe(outputStream *out) { - out->print("%s ", name()); - out->print_raw(config_string()); - out->print(" "); - char delimiter[2] = {0}; - for (size_t d = 0; d < LogDecorators::Count; d++) { - LogDecorators::Decorator decorator = static_cast<LogDecorators::Decorator>(d); - if (decorators().is_decorator(decorator)) { - out->print("%s%s", delimiter, LogDecorators::name(decorator)); - *delimiter = ','; + LogSelection exact_selection(subsets[i], false, level); + LogSelection wildcard_selection(subsets[i], true, level); + + // Check if the two selections match any tag sets + bool wildcard_match = false; + bool exact_match = false; + for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) { + if (!wildcard_selection.selects(*ts)) { + continue; + } + + wildcard_match = true; + if (exact_selection.selects(*ts)) { + exact_match = true; + } + if (exact_match) { + break; + } + } + + if (!wildcard_match && !exact_match) { + continue; + } + + // Ensure there's enough room for both wildcard_match and exact_match + if (*n_selections + 2 > *selections_cap) { + *selections_cap *= 2; + *selections = REALLOC_C_HEAP_ARRAY(LogSelection, *selections, *selections_cap, mtLogging); + } + + // Add found matching selections to the result array + if (exact_match) { + (*selections)[(*n_selections)++] = exact_selection; + } + if (wildcard_match) { + (*selections)[(*n_selections)++] = wildcard_selection; } } } +void LogOutput::update_config_string(const size_t on_level[LogLevel::Count]) { + // Find the most common level (MCL) + LogLevelType mcl = LogLevel::Off; + size_t max = on_level[LogLevel::Off]; + for (LogLevelType l = LogLevel::First; l <= LogLevel::Last; l = static_cast<LogLevelType>(l + 1)) { + if (on_level[l] > max) { + mcl = l; + max = on_level[l]; + } + } + + // Always let the first part of each output's config string be "all=" + { + char buf[64]; + jio_snprintf(buf, sizeof(buf), "all=%s", LogLevel::name(mcl)); + set_config_string(buf); + } + + // If there are no deviating tag sets, we're done + size_t deviating_tagsets = LogTagSet::ntagsets() - max; + if (deviating_tagsets == 0) { + return; + } + + size_t n_selections = 0; + size_t selections_cap = 4 * MaxSubsets; // Start with some reasonably large initial capacity + LogSelection* selections = NEW_C_HEAP_ARRAY(LogSelection, selections_cap, mtLogging); + + size_t n_deviates = 0; + const LogTagSet** deviates = NEW_C_HEAP_ARRAY(const LogTagSet*, deviating_tagsets, mtLogging); + + // Generate all possible selections involving the deviating tag sets + for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) { + LogLevelType level = ts->level_for(this); + if (level == mcl) { + continue; + } + deviates[n_deviates++] = ts; +
add_selections(&selections, &n_selections, &selections_cap, *ts, level); + } + + // Reduce deviates greedily, using the "best" selection at each step to reduce the number of deviating tag sets + while (n_deviates > 0) { + size_t prev_deviates = n_deviates; + int max_score = 0; + const LogSelection* best_selection = NULL; + for (size_t i = 0; i < n_selections; i++) { + + // Give the selection a score based on how many deviating tag sets it selects (with correct level) + int score = 0; + for (size_t d = 0; d < n_deviates; d++) { + if (selections[i].selects(*deviates[d]) && deviates[d]->level_for(this) == selections[i].level()) { + score++; + } + } + + // Ignore selections with lower score than the current best even before subtracting mismatched selections + if (score < max_score) { + continue; + } + + // Subtract from the score the number of tag sets it selects with an incorrect level + for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) { + if (selections[i].selects(*ts) && ts->level_for(this) != selections[i].level()) { + score--; + } + } + + // Pick the selection with the best score, or in the case of a tie, the one with fewest tags + if (score > max_score || + (score == max_score && best_selection != NULL && selections[i].ntags() < best_selection->ntags())) { + max_score = score; + best_selection = &selections[i]; + } + } + + assert(best_selection != NULL, "must always find a maximal selection"); + add_to_config_string(*best_selection); + + // Remove all deviates that this selection covered + for (size_t d = 0; d < n_deviates;) { + if (deviates[d]->level_for(this) == best_selection->level() && best_selection->selects(*deviates[d])) { + deviates[d] = deviates[--n_deviates]; + continue; + } + d++; + } + + // Add back any new deviates that this selection added (no array growth since removed > added) + for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) { + if (ts->level_for(this) == best_selection->level() || !best_selection->selects(*ts)) { + continue; + } + + bool already_added = false; + for (size_t dev = 0; dev < n_deviates; dev++) { + if (deviates[dev] == ts) { + already_added = true; + break; + } + } + if (already_added) { + continue; + } + + deviates[n_deviates++] = ts; + } + + // Reset the selections and generate new ones based on the updated deviating tag sets + n_selections = 0; + for (size_t d = 0; d < n_deviates; d++) { + add_selections(&selections, &n_selections, &selections_cap, *deviates[d], deviates[d]->level_for(this)); + } + + assert(n_deviates < deviating_tagsets, "deviating tag set array overflow"); + assert(prev_deviates > n_deviates, "number of deviating tag sets must never grow"); + } + FREE_C_HEAP_ARRAY(LogTagSet*, deviates); + FREE_C_HEAP_ARRAY(LogSelection, selections); +} + diff --git a/src/hotspot/share/logging/logOutput.hpp b/src/hotspot/share/logging/logOutput.hpp --- a/src/hotspot/share/logging/logOutput.hpp +++ b/src/hotspot/share/logging/logOutput.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,7 @@ class LogDecorations; class LogMessageBuffer; +class LogSelection; class LogTagSet; // The base class/interface for log outputs.
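(For orientation, the following is a minimal standalone sketch of the greedy scoring loop in LogOutput::update_config_string() above. It is illustrative C++ only, not HotSpot code, and every name in it is made up for the example: each round picks the candidate selection whose net score, tag sets covered correctly minus tag sets selected at the wrong level, is highest, then removes what it covered.)

#include <cstdio>
#include <vector>

struct Candidate {
  const char* label;
  std::vector<int> covers;     // deviating tag sets selected at the correct level
  std::vector<int> miscovers;  // tag sets selected at the wrong level
};

static int score(const Candidate& c, const std::vector<bool>& remaining) {
  int s = 0;
  for (int d : c.covers) {
    if (remaining[d]) s++;       // reward for each still-deviating set covered
  }
  s -= (int)c.miscovers.size();  // penalty for wrong-level selections
  return s;
}

int main() {
  // Three deviating tag sets (ids 0..2) and two candidate selections.
  std::vector<bool> remaining = { true, true, true };
  size_t left = remaining.size();
  std::vector<Candidate> candidates = {
    { "gc*=debug",     {0, 1}, {} },
    { "gc+heap=trace", {2},    {} },
  };

  while (left > 0) {
    const Candidate* best = nullptr;
    int best_score = 0;
    for (const Candidate& c : candidates) {
      int s = score(c, remaining);
      if (best == nullptr || s > best_score) {
        best = &c;
        best_score = s;
      }
    }
    printf("append to config string: %s\n", best->label);
    for (int d : best->covers) {
      if (remaining[d]) {
        remaining[d] = false;
        left--;
      }
    }
  }
  return 0;
}

(The real implementation additionally re-generates the candidate selections after every round and asserts that the number of deviating tag sets strictly shrinks, which guarantees termination.)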
@@ -43,19 +44,27 @@ private: static const size_t InitialConfigBufferSize = 256; + + // Track if the output has been reconfigured dynamically during runtime. + // The status is set each time the configuration of the output is modified, + // and is reset once after logging initialization is complete. + bool _reconfigured; + char* _config_string; size_t _config_string_buffer_size; + // Adds the log selection to the config description (e.g. "tag1+tag2*=level"). + void add_to_config_string(const LogSelection& selection); + protected: LogDecorators _decorators; - // Clears any previous config description in preparation of reconfiguration. - void clear_config_string(); - // Adds the tagset on the given level to the config description (e.g. "tag1+tag2=level"). - void add_to_config_string(const LogTagSet* ts, LogLevelType level); // Replaces the current config description with the given string. void set_config_string(const char* string); + // Update the config string for this output to reflect its current configuration + void update_config_string(const size_t on_level[LogLevel::Count]); + public: void set_decorators(const LogDecorators &decorators) { _decorators = decorators; @@ -65,11 +74,15 @@ return _decorators; } + bool is_reconfigured() const { + return _reconfigured; + } + const char* config_string() const { return _config_string; } - LogOutput() : _config_string(NULL), _config_string_buffer_size(0) { + LogOutput() : _reconfigured(false), _config_string(NULL), _config_string_buffer_size(0) { } virtual ~LogOutput(); diff --git a/src/hotspot/share/logging/logSelection.cpp b/src/hotspot/share/logging/logSelection.cpp new file mode 100644 --- /dev/null +++ b/src/hotspot/share/logging/logSelection.cpp @@ -0,0 +1,351 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +#include "precompiled.hpp" +#include "utilities/ostream.hpp" +#include "logging/log.hpp" +#include "logging/logSelection.hpp" +#include "logging/logTagSet.hpp" +#include "runtime/os.inline.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/ostream.hpp" +#include "utilities/quickSort.hpp" + +const LogSelection LogSelection::Invalid; + +LogSelection::LogSelection() : _ntags(0), _wildcard(false), _level(LogLevel::Invalid), _tag_sets_selected(0) { +} + +LogSelection::LogSelection(const LogTagType tags[LogTag::MaxTags], bool wildcard, LogLevelType level) + : _ntags(0), _wildcard(wildcard), _level(level), _tag_sets_selected(0) { + while (_ntags < LogTag::MaxTags && tags[_ntags] != LogTag::__NO_TAG) { + _tags[_ntags] = tags[_ntags]; + _ntags++; + } + + for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) { + if (selects(*ts)) { + _tag_sets_selected++; + } + } +} + +bool LogSelection::operator==(const LogSelection& ref) const { + if (_ntags != ref._ntags || + _wildcard != ref._wildcard || + _level != ref._level || + _tag_sets_selected != ref._tag_sets_selected) { + return false; + } + for (size_t i = 0; i < _ntags; i++) { + if (_tags[i] != ref._tags[i]) { + return false; + } + } + return true; +} + +bool LogSelection::operator!=(const LogSelection& ref) const { + return !operator==(ref); +} + +static LogSelection parse_internal(char *str, outputStream* errstream) { + // Parse the level, if specified + LogLevelType level = LogLevel::Unspecified; + char* equals = strchr(str, '='); + if (equals != NULL) { + const char* levelstr = equals + 1; + level = LogLevel::from_string(levelstr); + if (level == LogLevel::Invalid) { + if (errstream != NULL) { + errstream->print("Invalid level '%s' in log selection.", levelstr); + LogLevelType match = LogLevel::fuzzy_match(levelstr); + if (match != LogLevel::Invalid) { + errstream->print(" Did you mean '%s'?", LogLevel::name(match)); + } + errstream->cr(); + } + return LogSelection::Invalid; + } + *equals = '\0'; + } + + size_t ntags = 0; + LogTagType tags[LogTag::MaxTags] = { LogTag::__NO_TAG }; + + // Parse special tags such as 'all' + if (strcmp(str, "all") == 0) { + return LogSelection(tags, true, level); + } + + // Check for '*' suffix + bool wildcard = false; + char* asterisk_pos = strchr(str, '*'); + if (asterisk_pos != NULL && asterisk_pos[1] == '\0') { + wildcard = true; + *asterisk_pos = '\0'; + } + + // Parse the tag expression (t1+t2+...+tn) + char* plus_pos; + char* cur_tag = str; + do { + plus_pos = strchr(cur_tag, '+'); + if (plus_pos != NULL) { + *plus_pos = '\0'; + } + LogTagType tag = LogTag::from_string(cur_tag); + if (tag == LogTag::__NO_TAG) { + if (errstream != NULL) { + errstream->print("Invalid tag '%s' in log selection.", cur_tag); + LogTagType match = LogTag::fuzzy_match(cur_tag); + if (match != LogTag::__NO_TAG) { + errstream->print(" Did you mean '%s'?", LogTag::name(match)); + } + errstream->cr(); + } + return LogSelection::Invalid; + } + if (ntags == LogTag::MaxTags) { + if (errstream != NULL) { + errstream->print_cr("Too many tags in log selection '%s' (can only have up to " SIZE_FORMAT " tags).", + str, LogTag::MaxTags); + } + return LogSelection::Invalid; + } + tags[ntags++] = tag; + cur_tag = plus_pos + 1; + } while (plus_pos != NULL); + + for (size_t i = 0; i < ntags; i++) { + for (size_t j = 0; j < ntags; j++) { + if (i != j && tags[i] == tags[j]) { + if (errstream != NULL) { + errstream->print_cr("Log selection contains duplicates of tag %s.", LogTag::name(tags[i])); + } + return 
LogSelection::Invalid; + } + } + } + + return LogSelection(tags, wildcard, level); +} + +LogSelection LogSelection::parse(const char* str, outputStream* error_stream) { + char* copy = os::strdup_check_oom(str, mtLogging); + LogSelection s = parse_internal(copy, error_stream); + os::free(copy); + return s; +} + +bool LogSelection::selects(const LogTagSet& ts) const { + if (!_wildcard && _ntags != ts.ntags()) { + return false; + } + for (size_t i = 0; i < _ntags; i++) { + if (!ts.contains(_tags[i])) { + return false; + } + } + return true; +} + +static bool contains(LogTagType tag, const LogTagType tags[LogTag::MaxTags], size_t ntags) { + for (size_t i = 0; i < ntags; i++) { + if (tags[i] == tag) { + return true; + } + } + return false; +} + +bool LogSelection::consists_of(const LogTagType tags[LogTag::MaxTags]) const { + size_t i; + for (i = 0; tags[i] != LogTag::__NO_TAG; i++) { + if (!contains(tags[i], _tags, _ntags)) { + return false; + } + } + return i == _ntags; +} + +size_t LogSelection::ntags() const { + return _ntags; +} + +LogLevelType LogSelection::level() const { + return _level; +} + +size_t LogSelection::tag_sets_selected() const { + return _tag_sets_selected; +} + +int LogSelection::describe_tags(char* buf, size_t bufsize) const { + int tot_written = 0; + for (size_t i = 0; i < _ntags; i++) { + int written = jio_snprintf(buf + tot_written, bufsize - tot_written, + "%s%s", (i == 0 ? "" : "+"), LogTag::name(_tags[i])); + if (written == -1) { + return written; + } + tot_written += written; + } + + if (_wildcard) { + int written = jio_snprintf(buf + tot_written, bufsize - tot_written, "*"); + if (written == -1) { + return written; + } + tot_written += written; + } + return tot_written; +} + +int LogSelection::describe(char* buf, size_t bufsize) const { + int tot_written = describe_tags(buf, bufsize); + + int written = jio_snprintf(buf + tot_written, bufsize - tot_written, "=%s", LogLevel::name(_level)); + if (written == -1) { + return -1; + } + tot_written += written; + return tot_written; +} + +double LogSelection::similarity(const LogSelection& other) const { + // Compute Soerensen-Dice coefficient as the similarity measure + size_t intersecting = 0; + for (size_t i = 0; i < _ntags; i++) { + for (size_t j = 0; j < other._ntags; j++) { + if (_tags[i] == other._tags[j]) { + intersecting++; + break; + } + } + } + return 2.0 * intersecting / (_ntags + other._ntags); +} + +// Comparator used for sorting LogSelections based on their similarity to a specific LogSelection. +// A negative return value means that 'a' is more similar to 'ref' than 'b' is, while a positive +// return value means that 'b' is more similar. +// For the sake of giving short and effective suggestions, when two selections have an equal +// similarity score, the selection with the fewer tags (selecting the most tag sets) is considered +// more similar. +class SimilarityComparator { + const LogSelection& _ref; + public: + SimilarityComparator(const LogSelection& ref) : _ref(ref) { + } + int operator()(const LogSelection& a, const LogSelection& b) const { + const double epsilon = 1.0e-6; + + // Sort by similarity (descending) + double s = _ref.similarity(b) - _ref.similarity(a); + if (fabs(s) > epsilon) { + return s < 0 ? 
-1 : 1; + } + + // Then by number of tags (ascending) + int t = static_cast<int>(a.ntags() - (int)b.ntags()); + if (t != 0) { + return t; + } + + // Lastly by tag sets selected (descending) + return static_cast<int>(b.tag_sets_selected() - a.tag_sets_selected()); + } +}; + +static const size_t suggestion_cap = 5; +static const double similarity_requirement = 0.3; +void LogSelection::suggest_similar_matching(outputStream* out) const { + LogSelection suggestions[suggestion_cap]; + uint nsuggestions = 0; + + // See if simply adding a wildcard would make the selection match + if (!_wildcard) { + LogSelection sel(_tags, true, _level); + if (sel.tag_sets_selected() > 0) { + suggestions[nsuggestions++] = sel; + } + } + + // Check for matching tag sets with a single tag mismatching (a tag too many or short a tag) + for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) { + LogTagType tags[LogTag::MaxTags] = { LogTag::__NO_TAG }; + for (size_t i = 0; i < ts->ntags(); i++) { + tags[i] = ts->tag(i); + } + + // Suggest wildcard selection unless the wildcard doesn't match anything extra + LogSelection sel(tags, true, _level); + if (sel.tag_sets_selected() == 1) { + sel = LogSelection(tags, false, _level); + } + + double score = similarity(sel); + + // Ignore suggestions with too low similarity + if (score < similarity_requirement) { + continue; + } + + // Cap not reached, simply add the new suggestion and continue searching + if (nsuggestions < suggestion_cap) { + suggestions[nsuggestions++] = sel; + continue; + } + + // Find the least matching suggestion already found, and if the new suggestion is a better match, replace it + double min = 1.0; + size_t pos = -1; + for (size_t i = 0; i < nsuggestions; i++) { + double score = similarity(suggestions[i]); + if (score < min) { + min = score; + pos = i; + } + } + if (score > min) { + suggestions[pos] = sel; + } + } + + if (nsuggestions == 0) { + // Found no similar enough selections to suggest. + return; + } + + // Sort found suggestions to suggest the best one first + SimilarityComparator sc(*this); + QuickSort::sort(suggestions, nsuggestions, sc, false); + + out->print("Did you mean any of the following?"); + for (size_t i = 0; i < nsuggestions; i++) { + char buf[128]; + suggestions[i].describe_tags(buf, sizeof(buf)); + out->print(" %s", buf); + } +} diff --git a/src/hotspot/share/logging/logSelection.hpp b/src/hotspot/share/logging/logSelection.hpp new file mode 100644 --- /dev/null +++ b/src/hotspot/share/logging/logSelection.hpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#ifndef SHARE_VM_LOGGING_LOGSELECTION_HPP +#define SHARE_VM_LOGGING_LOGSELECTION_HPP + +#include "logging/logLevel.hpp" +#include "logging/logTag.hpp" +#include "memory/allocation.hpp" + +class LogTagSet; + +// Class representing a selection of tags, with an optional wildcard, for a given level. +// Consists of a set of tags, an optional wildcard flag, and a level, e.g. "tag1+tag2*=level". +class LogSelection : public StackObj { + friend class LogSelectionList; + + private: + size_t _ntags; + LogTagType _tags[LogTag::MaxTags]; + bool _wildcard; + LogLevelType _level; + size_t _tag_sets_selected; + + LogSelection(); + + public: + static const LogSelection Invalid; + + static LogSelection parse(const char* str, outputStream* error_stream = NULL); + + LogSelection(const LogTagType tags[LogTag::MaxTags], bool wildcard, LogLevelType level); + + bool operator==(const LogSelection& ref) const; + bool operator!=(const LogSelection& ref) const; + + size_t ntags() const; + LogLevelType level() const; + size_t tag_sets_selected() const; + + bool selects(const LogTagSet& ts) const; + bool consists_of(const LogTagType tags[LogTag::MaxTags]) const; + + int describe_tags(char* buf, size_t bufsize) const; + int describe(char* buf, size_t bufsize) const; + + // List similar selections that match existing tag sets on the given outputStream + void suggest_similar_matching(outputStream* out) const; + + // Compute a similarity measure in the range [0, 1], where higher means more similar + double similarity(const LogSelection& other) const; +}; + +#endif // SHARE_VM_LOGGING_LOGSELECTION_HPP diff --git a/src/hotspot/share/logging/logTagLevelExpression.cpp b/src/hotspot/share/logging/logSelectionList.cpp rename from src/hotspot/share/logging/logTagLevelExpression.cpp rename to src/hotspot/share/logging/logSelectionList.cpp --- a/src/hotspot/share/logging/logTagLevelExpression.cpp +++ b/src/hotspot/share/logging/logSelectionList.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -21,74 +21,41 @@ * questions.
* */ + #include "precompiled.hpp" -#include "logging/logTagLevelExpression.hpp" +#include "logging/logSelectionList.hpp" #include "logging/logTagSet.hpp" #include "runtime/arguments.hpp" #include "runtime/os.inline.hpp" -const char* LogTagLevelExpression::DefaultExpressionString = "all"; +static const char* DefaultExpressionString = "all"; -static bool matches_tagset(const LogTagType tags[], - bool allow_other_tags, - const LogTagSet& ts) { - bool contains_all = true; - size_t tag_idx; - for (tag_idx = 0; tag_idx < LogTag::MaxTags && tags[tag_idx] != LogTag::__NO_TAG; tag_idx++) { - if (!ts.contains(tags[tag_idx])) { - contains_all = false; - break; +bool LogSelectionList::verify_selections(outputStream* out) const { + bool valid = true; + + for (size_t i = 0; i < _nselections; i++) { + if (_selections[i].tag_sets_selected() == 0) { + // Return immediately unless all invalid selections should be listed + if (out == NULL) { + return false; + } + + out->print("No tag set matches selection:"); + valid = false; + + char buf[256]; + _selections[i].describe_tags(buf, sizeof(buf)); + out->print(" %s. ", buf); + + _selections[i].suggest_similar_matching(out); + out->cr(); } } - // All tags in the expression must be part of the tagset, - // and either the expression allows other tags (has a wildcard), - // or the number of tags in the expression and tagset must match. - return contains_all && (allow_other_tags || tag_idx == ts.ntags()); -} - -bool LogTagLevelExpression::verify_tagsets(outputStream* out) const { - bool valid = true; - - for (size_t i = 0; i < _ncombinations; i++) { - bool matched = false; - for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) { - if (matches_tagset(_tags[i], _allow_other_tags[i], *ts)) { - matched = true; - break; - } - } - - if (!matched) { - // If this was the first invalid combination, write the message header - if (valid && out != NULL) { - out->print("No tag set matches selection(s): "); - } - valid = false; - - // Break as soon as possible unless listing all invalid combinations - if (out == NULL) { - break; - } - - // List the combination on the outputStream - for (size_t t = 0; t < LogTag::MaxTags && _tags[i][t] != LogTag::__NO_TAG; t++) { - out->print("%s%s", (t == 0 ? 
"" : "+"), LogTag::name(_tags[i][t])); - } - if (_allow_other_tags[i]) { - out->print("*"); - } - out->print(" "); - } - } - - if (!valid && out != NULL) { - out->cr(); - } - return valid; } -bool LogTagLevelExpression::parse(const char* str, outputStream* errstream) { + +bool LogSelectionList::parse(const char* str, outputStream* errstream) { bool success = true; if (str == NULL || strcmp(str, "") == 0) { str = DefaultExpressionString; @@ -96,10 +63,10 @@ char* copy = os::strdup_check_oom(str, mtLogging); // Split string on commas for (char *comma_pos = copy, *cur = copy; success && comma_pos != NULL; cur = comma_pos + 1) { - if (_ncombinations == MaxCombinations) { + if (_nselections == MaxSelections) { if (errstream != NULL) { - errstream->print_cr("Can not have more than " SIZE_FORMAT " tag combinations in a what-expression.", - MaxCombinations); + errstream->print_cr("Can not have more than " SIZE_FORMAT " log selections in a single configuration.", + MaxSelections); } success = false; break; @@ -110,86 +77,25 @@ *comma_pos = '\0'; } - // Parse the level, if specified - LogLevelType level = LogLevel::Unspecified; - char* equals = strchr(cur, '='); - if (equals != NULL) { - level = LogLevel::from_string(equals + 1); - if (level == LogLevel::Invalid) { - if (errstream != NULL) { - errstream->print_cr("Invalid level '%s' in what-expression.", equals + 1); - } - success = false; - break; - } - *equals = '\0'; // now ignore "=level" part of substr + LogSelection selection = LogSelection::parse(cur, errstream); + if (selection == LogSelection::Invalid) { + success = false; + break; } - set_level(level); - - // Parse special tags such as 'all' - if (strcmp(cur, "all") == 0) { - set_allow_other_tags(); - new_combination(); - continue; - } - - // Check for '*' suffix - char* asterisk_pos = strchr(cur, '*'); - if (asterisk_pos != NULL && asterisk_pos[1] == '\0') { - set_allow_other_tags(); - *asterisk_pos = '\0'; - } - - // Parse the tag expression (t1+t2+...+tn) - char* plus_pos; - char* cur_tag = cur; - do { - plus_pos = strchr(cur_tag, '+'); - if (plus_pos != NULL) { - *plus_pos = '\0'; - } - LogTagType tag = LogTag::from_string(cur_tag); - if (tag == LogTag::__NO_TAG) { - if (errstream != NULL) { - errstream->print_cr("Invalid tag '%s' in what-expression.", cur_tag); - } - success = false; - break; - } - if (_ntags == LogTag::MaxTags) { - if (errstream != NULL) { - errstream->print_cr("Tag combination exceeds the maximum of " SIZE_FORMAT " tags.", - LogTag::MaxTags); - } - success = false; - break; - } - if (!add_tag(tag)) { - if (errstream != NULL) { - errstream->print_cr("Tag combination have duplicate tag '%s' in what-expression.", - cur_tag); - } - success = false; - break; - } - cur_tag = plus_pos + 1; - } while (plus_pos != NULL); - - new_combination(); + _selections[_nselections++] = selection; } os::free(copy); return success; } -LogLevelType LogTagLevelExpression::level_for(const LogTagSet& ts) const { +LogLevelType LogSelectionList::level_for(const LogTagSet& ts) const { // Return NotMentioned if the given tagset isn't covered by this expression. 
LogLevelType level = LogLevel::NotMentioned; - for (size_t combination = 0; combination < _ncombinations; combination++) { - if (matches_tagset(_tags[combination], _allow_other_tags[combination], ts)) { - level = _level[combination]; + for (size_t i = 0; i < _nselections; i++) { + if (_selections[i].selects(ts)) { + level = _selections[i].level(); } } return level; } - diff --git a/src/hotspot/share/logging/logTagLevelExpression.hpp b/src/hotspot/share/logging/logSelectionList.hpp rename from src/hotspot/share/logging/logTagLevelExpression.hpp rename to src/hotspot/share/logging/logSelectionList.hpp --- a/src/hotspot/share/logging/logTagLevelExpression.hpp +++ b/src/hotspot/share/logging/logSelectionList.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -21,11 +21,11 @@ * questions. * */ -#ifndef SHARE_VM_LOGGING_LOGTAGLEVELEXPRESSION_HPP -#define SHARE_VM_LOGGING_LOGTAGLEVELEXPRESSION_HPP +#ifndef SHARE_VM_LOGGING_LOGSELECTIONLIST_HPP +#define SHARE_VM_LOGGING_LOGSELECTIONLIST_HPP #include "logging/logConfiguration.hpp" -#include "logging/logLevel.hpp" +#include "logging/logSelection.hpp" #include "logging/logTag.hpp" #include "memory/allocation.hpp" #include "utilities/debug.hpp" @@ -33,67 +33,33 @@ class LogTagSet; -// Class used to temporary encode a 'what'-expression during log configuration. -// Consists of a combination of tags and levels, e.g. "tag1+tag2=level1,tag3*=level2". -class LogTagLevelExpression : public StackObj { +// Class used to temporarily encode a series of log selections during log configuration. +// Consists of ordered LogSelections, i.e. "tag1+tag2=level1,tag3*=level2".
+class LogSelectionList : public StackObj { public: - static const size_t MaxCombinations = 256; + static const size_t MaxSelections = 256; private: friend void LogConfiguration::configure_stdout(LogLevelType, int, ...); - static const char* DefaultExpressionString; + size_t _nselections; + LogSelection _selections[MaxSelections]; - size_t _ntags, _ncombinations; - LogTagType _tags[MaxCombinations][LogTag::MaxTags]; - LogLevelType _level[MaxCombinations]; - bool _allow_other_tags[MaxCombinations]; - - void new_combination() { - // Make sure either all tags are set or the last tag is __NO_TAG - if (_ntags < LogTag::MaxTags) { - _tags[_ncombinations][_ntags] = LogTag::__NO_TAG; - } - - _ncombinations++; - _ntags = 0; + public: + LogSelectionList() : _nselections(0) { } - bool add_tag(LogTagType tag) { - assert(_ntags < LogTag::MaxTags, "Can't have more tags than MaxTags!"); - for (size_t i = 0; i < _ntags; i++) { - if (_tags[_ncombinations][i] == tag) { - return false; - } - } - _tags[_ncombinations][_ntags++] = tag; - return true; - } - - void set_level(LogLevelType level) { - _level[_ncombinations] = level; - } - - void set_allow_other_tags() { - _allow_other_tags[_ncombinations] = true; - } - - public: - LogTagLevelExpression() : _ntags(0), _ncombinations(0) { - for (size_t combination = 0; combination < MaxCombinations; combination++) { - _level[combination] = LogLevel::Invalid; - _allow_other_tags[combination] = false; - _tags[combination][0] = LogTag::__NO_TAG; - } + LogSelectionList(const LogSelection& selection) : _nselections(1) { + _selections[0] = selection; } bool parse(const char* str, outputStream* errstream = NULL); LogLevelType level_for(const LogTagSet& ts) const; - // Verify the tagsets/selections mentioned in this expression. - // Returns false if some invalid tagset was found. If given an outputstream, + // Verify that each selection actually selects something. + // Returns false if some invalid selection was found. If given an outputstream, // this function will list all the invalid selections on the stream. - bool verify_tagsets(outputStream* out = NULL) const; + bool verify_selections(outputStream* out = NULL) const; }; -#endif // SHARE_VM_LOGGING_LOGTAGLEVELEXPRESSION_HPP +#endif // SHARE_VM_LOGGING_LOGSELECTIONLIST_HPP diff --git a/src/hotspot/share/logging/logTag.cpp b/src/hotspot/share/logging/logTag.cpp --- a/src/hotspot/share/logging/logTag.cpp +++ b/src/hotspot/share/logging/logTag.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,10 @@ */ #include "precompiled.hpp" #include "logging/logTag.hpp" +#include "utilities/stringUtils.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/ostream.hpp" +#include "utilities/quickSort.hpp" const char* LogTag::_name[] = { "", // __NO_TAG @@ -40,3 +43,45 @@ } return __NO_TAG; } + +LogTagType LogTag::fuzzy_match(const char *str) { + size_t len = strlen(str); + LogTagType match = LogTag::__NO_TAG; + double best = 0.5; // required similarity to be considered a match + for (size_t i = 1; i < LogTag::Count; i++) { + LogTagType tag = static_cast<LogTagType>(i); + const char* tagname = LogTag::name(tag); + double score = StringUtils::similarity(tagname, strlen(tagname), str, len); + if (score >= best) { + match = tag; + best = score; + } + } + return match; +} + +static int cmp_logtag(LogTagType a, LogTagType b) { + return strcmp(LogTag::name(a), LogTag::name(b)); +} + +static const size_t sorted_tagcount = LogTag::Count - 1; // Not counting __NO_TAG +static LogTagType sorted_tags[sorted_tagcount]; + +class TagSorter { + public: + TagSorter() { + for (size_t i = 1; i < LogTag::Count; i++) { + sorted_tags[i - 1] = static_cast<LogTagType>(i); + } + QuickSort::sort(sorted_tags, sorted_tagcount, cmp_logtag, true); + } +}; + +static TagSorter tagsorter; // Sorts tags during static initialization + +void LogTag::list_tags(outputStream* out) { + for (size_t i = 0; i < sorted_tagcount; i++) { + out->print("%s %s", (i == 0 ? "" : ","), _name[sorted_tags[i]]); + } + out->cr(); +} diff --git a/src/hotspot/share/logging/logTag.hpp b/src/hotspot/share/logging/logTag.hpp --- a/src/hotspot/share/logging/logTag.hpp +++ b/src/hotspot/share/logging/logTag.hpp @@ -60,6 +60,7 @@ LOG_TAG(cset) \ LOG_TAG(data) \ LOG_TAG(datacreation) \ + LOG_TAG(decoder) \ LOG_TAG(defaultmethods) \ LOG_TAG(dump) \ LOG_TAG(ergo) \ @@ -191,6 +192,8 @@ } static LogTag::type from_string(const char *str); + static LogTag::type fuzzy_match(const char *tag); + static void list_tags(outputStream* out); private: static const char* _name[]; diff --git a/src/hotspot/share/logging/logTagSet.cpp b/src/hotspot/share/logging/logTagSet.cpp --- a/src/hotspot/share/logging/logTagSet.cpp +++ b/src/hotspot/share/logging/logTagSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -118,17 +118,17 @@ // Check that string fits in buffer; resize buffer if necessary int ret; if (prefix_len < vwrite_buffer_size) { - ret = os::log_vsnprintf(buf + prefix_len, sizeof(buf) - prefix_len, fmt, args); + ret = os::vsnprintf(buf + prefix_len, sizeof(buf) - prefix_len, fmt, args); } else { // Buffer too small. Just call printf to find out the length for realloc below.
- ret = os::log_vsnprintf(buf, sizeof(buf), fmt, args); + ret = os::vsnprintf(buf, sizeof(buf), fmt, args); } assert(ret >= 0, "Log message buffer issue"); if ((size_t)ret >= sizeof(buf)) { size_t newbuf_len = prefix_len + ret + 1; char* newbuf = NEW_C_HEAP_ARRAY(char, newbuf_len, mtLogging); prefix_len = _write_prefix(newbuf, newbuf_len); - ret = os::log_vsnprintf(newbuf + prefix_len, newbuf_len - prefix_len, fmt, saved_args); + ret = os::vsnprintf(newbuf + prefix_len, newbuf_len - prefix_len, fmt, saved_args); assert(ret >= 0, "Log message buffer issue"); log(level, newbuf); FREE_C_HEAP_ARRAY(char, newbuf); @@ -141,7 +141,7 @@ static const size_t TagSetBufferSize = 128; void LogTagSet::describe_tagsets(outputStream* out) { - out->print_cr("Described tag combinations:"); + out->print_cr("Described tag sets:"); for (const LogTagSetDescription* d = tagset_descriptions; d->tagset != NULL; d++) { char buf[TagSetBufferSize]; d->tagset->label(buf, sizeof(buf), "+"); @@ -169,7 +169,7 @@ qsort(tagset_labels, _ntagsets, sizeof(*tagset_labels), qsort_strcmp); // Print and then free the labels - out->print("All available tag sets: "); + out->print("Available tag sets: "); for (idx = 0; idx < _ntagsets; idx++) { out->print("%s%s", (idx == 0 ? "" : ", "), tagset_labels[idx]); os::free(tagset_labels[idx]); diff --git a/src/hotspot/share/memory/allocation.inline.hpp b/src/hotspot/share/memory/allocation.inline.hpp --- a/src/hotspot/share/memory/allocation.inline.hpp +++ b/src/hotspot/share/memory/allocation.inline.hpp @@ -38,9 +38,9 @@ inline void inc_stat_counter(volatile julong* dest, julong add_value) { #if defined(SPARC) || defined(X86) // Sparc and X86 have atomic jlong (8 bytes) instructions - julong value = Atomic::load((volatile jlong*)dest); + julong value = Atomic::load(dest); value += add_value; - Atomic::store((jlong)value, (volatile jlong*)dest); + Atomic::store(value, dest); #else // possible word-tearing during load/store *dest += add_value; diff --git a/src/hotspot/share/memory/filemap.cpp b/src/hotspot/share/memory/filemap.cpp --- a/src/hotspot/share/memory/filemap.cpp +++ b/src/hotspot/share/memory/filemap.cpp @@ -410,14 +410,11 @@ // Write the FileMapInfo information to the file. void FileMapInfo::open_for_write() { - _full_path = Arguments::GetSharedArchivePath(); - if (log_is_enabled(Info, cds)) { - ResourceMark rm; - LogMessage(cds) msg; - stringStream info_stream; - info_stream.print_cr("Dumping shared data to file: "); - info_stream.print_cr(" %s", _full_path); - msg.info("%s", info_stream.as_string()); + _full_path = Arguments::GetSharedArchivePath(); + LogMessage(cds) msg; + if (msg.is_info()) { + msg.info("Dumping shared data to file: "); + msg.info(" %s", _full_path); } #ifdef _WINDOWS // On Windows, need WRITE permission to remove the file. diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp --- a/src/hotspot/share/memory/metaspace.cpp +++ b/src/hotspot/share/memory/metaspace.cpp @@ -3952,8 +3952,7 @@ // Only start a GC if the bootstrapping has completed. // Try to clean out some memory and retry. - result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( - loader_data, word_size, mdtype); + result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype); } } @@ -4372,7 +4371,7 @@ // ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and // returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager // content. 
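// (ChunkManagerReturnTestImpl becomes a CHeapObj below, presumably so this
// sizable test object can be allocated dynamically on the C heap rather than
// on the stack.)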
-class ChunkManagerReturnTestImpl { +class ChunkManagerReturnTestImpl : public CHeapObj<mtInternal> { VirtualSpaceNode _vsn; ChunkManager _cm; diff --git a/src/hotspot/share/memory/metaspace.hpp b/src/hotspot/share/memory/metaspace.hpp --- a/src/hotspot/share/memory/metaspace.hpp +++ b/src/hotspot/share/memory/metaspace.hpp @@ -87,7 +87,7 @@ friend class MetaspaceGC; friend class MetaspaceAux; friend class MetaspaceShared; - friend class CollectorPolicy; + friend class CollectedHeap; friend class PrintCLDMetaspaceInfoClosure; public: diff --git a/src/hotspot/share/memory/metaspaceShared.cpp b/src/hotspot/share/memory/metaspaceShared.cpp --- a/src/hotspot/share/memory/metaspaceShared.cpp +++ b/src/hotspot/share/memory/metaspaceShared.cpp @@ -883,13 +883,11 @@ const char *sep = "--------------------+---------------------------+---------------------------+--------------------------"; const char *hdr = " ro_cnt ro_bytes % | rw_cnt rw_bytes % | all_cnt all_bytes %"; - ResourceMark rm; LogMessage(cds) msg; - stringStream info_stream; - info_stream.print_cr("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):"); - info_stream.print_cr("%s", hdr); - info_stream.print_cr("%s", sep); + msg.info("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):"); + msg.info("%s", hdr); + msg.info("%s", sep); for (int type = 0; type < int(_number_of_types); type ++) { const char *name = type_name((Type)type); int ro_count = _counts[RO][type]; @@ -903,7 +901,7 @@ double rw_perc = percent_of(rw_bytes, rw_all); double perc = percent_of(bytes, ro_all + rw_all); - info_stream.print_cr(fmt_stats, name, + msg.info(fmt_stats, name, ro_count, ro_bytes, ro_perc, rw_count, rw_bytes, rw_perc, count, bytes, perc); @@ -921,8 +919,8 @@ double all_rw_perc = percent_of(all_rw_bytes, rw_all); double all_perc = percent_of(all_bytes, ro_all + rw_all); - info_stream.print_cr("%s", sep); - info_stream.print_cr(fmt_stats, "Total", + msg.info("%s", sep); + msg.info(fmt_stats, "Total", all_ro_count, all_ro_bytes, all_ro_perc, all_rw_count, all_rw_bytes, all_rw_perc, all_count, all_bytes, all_perc); @@ -930,7 +928,6 @@ assert(all_ro_bytes == ro_all, "everything should have been counted"); assert(all_rw_bytes == rw_all, "everything should have been counted"); - msg.info("%s", info_stream.as_string()); #undef fmt_stats } diff --git a/src/hotspot/share/memory/oopFactory.cpp b/src/hotspot/share/memory/oopFactory.cpp --- a/src/hotspot/share/memory/oopFactory.cpp +++ b/src/hotspot/share/memory/oopFactory.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,6 +35,7 @@ #include "oops/instanceOop.hpp" #include "oops/objArrayOop.hpp" #include "oops/oop.inline.hpp" +#include "oops/typeArrayOop.inline.hpp" typeArrayOop oopFactory::new_charArray(const char* utf8_str, TRAPS) { diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp --- a/src/hotspot/share/memory/universe.cpp +++ b/src/hotspot/share/memory/universe.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -134,7 +134,6 @@
 oop Universe::_arithmetic_exception_instance        = NULL;
 oop Universe::_virtual_machine_error_instance       = NULL;
 oop Universe::_vm_exception                         = NULL;
-oop Universe::_allocation_context_notification_obj  = NULL;
 oop Universe::_reference_pending_list               = NULL;

 Array<int>* Universe::_the_empty_int_array          = NULL;
@@ -213,7 +212,6 @@
   f->do_oop((oop*)&_main_thread_group);
   f->do_oop((oop*)&_system_thread_group);
   f->do_oop((oop*)&_vm_exception);
-  f->do_oop((oop*)&_allocation_context_notification_obj);
   f->do_oop((oop*)&_reference_pending_list);
   debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
 }
@@ -542,32 +540,6 @@
 #undef assert_pll_locked
 #undef assert_pll_ownership

-
-static bool has_run_finalizers_on_exit = false;
-
-void Universe::run_finalizers_on_exit() {
-  if (has_run_finalizers_on_exit) return;
-  has_run_finalizers_on_exit = true;
-
-  // Called on VM exit. This ought to be run in a separate thread.
-  log_trace(ref)("Callback to run finalizers on exit");
-  {
-    PRESERVE_EXCEPTION_MARK;
-    Klass* finalizer_klass = SystemDictionary::Finalizer_klass();
-    JavaValue result(T_VOID);
-    JavaCalls::call_static(
-      &result,
-      finalizer_klass,
-      vmSymbols::run_finalizers_on_exit_name(),
-      vmSymbols::void_method_signature(),
-      THREAD
-    );
-    // Ignore any pending exceptions
-    CLEAR_PENDING_EXCEPTION;
-  }
-}
-
-
 // initialize_vtable could cause gc if
 // 1) we specified true to initialize_vtable and
 // 2) this ran after gc was enabled
@@ -762,14 +734,8 @@
 //     HeapBased - Use compressed oops with heap base + encoding.

 jint Universe::initialize_heap() {
-  jint status = JNI_ERR;
-
-  _collectedHeap = create_heap_ext();
-  if (_collectedHeap == NULL) {
-    _collectedHeap = create_heap();
-  }
-
-  status = _collectedHeap->initialize();
+  _collectedHeap = create_heap();
+  jint status = _collectedHeap->initialize();
   if (status != JNI_OK) {
     return status;
   }
diff --git a/src/hotspot/share/memory/universe.hpp b/src/hotspot/share/memory/universe.hpp
--- a/src/hotspot/share/memory/universe.hpp
+++ b/src/hotspot/share/memory/universe.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -181,8 +181,6 @@
   // the vm thread.
   static oop _vm_exception;

-  static oop _allocation_context_notification_obj;
-
   // References waiting to be transferred to the ReferenceHandler
   static oop _reference_pending_list;
@@ -222,7 +220,6 @@
   static size_t _heap_used_at_last_gc;

   static CollectedHeap* create_heap();
-  static CollectedHeap* create_heap_ext();
   static jint initialize_heap();
   static void initialize_basic_type_mirrors(TRAPS);
   static void fixup_mirrors(TRAPS);
@@ -334,9 +331,6 @@
   static oop virtual_machine_error_instance() { return _virtual_machine_error_instance; }
   static oop vm_exception()                   { return _vm_exception; }

-  static inline oop allocation_context_notification_obj();
-  static inline void set_allocation_context_notification_obj(oop obj);
-
   // Reference pending list manipulation. Access is protected by
   // Heap_lock. The getter, setter and predicate require the caller
   // owns the lock. Swap is used by parallel non-concurrent reference
@@ -464,9 +458,6 @@
   static bool should_fill_in_stack_trace(Handle throwable);
   static void check_alignment(uintx size, uintx alignment, const char* name);

-  // Finalizer support.
-  static void run_finalizers_on_exit();
-
   // Iteration

   // Apply "f" to the addresses of all the direct heap pointers maintained
diff --git a/src/hotspot/share/memory/universe.inline.hpp b/src/hotspot/share/memory/universe.inline.hpp
--- a/src/hotspot/share/memory/universe.inline.hpp
+++ b/src/hotspot/share/memory/universe.inline.hpp
@@ -41,12 +41,4 @@
   return type == T_DOUBLE || type == T_LONG;
 }

-inline oop Universe::allocation_context_notification_obj() {
-  return _allocation_context_notification_obj;
-}
-
-inline void Universe::set_allocation_context_notification_obj(oop obj) {
-  _allocation_context_notification_obj = obj;
-}
-
 #endif // SHARE_VM_MEMORY_UNIVERSE_INLINE_HPP
diff --git a/src/hotspot/share/memory/universe_ext.cpp b/src/hotspot/share/memory/universe_ext.cpp
deleted file mode 100644
--- a/src/hotspot/share/memory/universe_ext.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "memory/universe.hpp"
-
-CollectedHeap* Universe::create_heap_ext() {
-  return NULL;
-}
diff --git a/src/hotspot/share/oops/access.hpp b/src/hotspot/share/oops/access.hpp
--- a/src/hotspot/share/oops/access.hpp
+++ b/src/hotspot/share/oops/access.hpp
@@ -55,6 +55,7 @@
 // * atomic_xchg_at: Atomically swap a new value at an internal pointer address if previous value matched the compared value.
 // * arraycopy: Copy data from one heap array to another heap array.
 // * clone: Clone the contents of an object to a newly allocated object.
+// * resolve: Resolve a stable to-space invariant oop that is guaranteed not to relocate its payload until a subsequent thread transition.

 typedef uint64_t DecoratorSet;

@@ -69,12 +70,15 @@
 // == Internal build-time Decorators ==
 // * INTERNAL_BT_BARRIER_ON_PRIMITIVES: This is set in the barrierSetConfig.hpp file.
+// * INTERNAL_BT_TO_SPACE_INVARIANT: This is set in the barrierSetConfig.hpp file iff
+//   no GC is bundled in the build that is not to-space invariant.
 const DecoratorSet INTERNAL_BT_BARRIER_ON_PRIMITIVES = UCONST64(1) << 3;
+const DecoratorSet INTERNAL_BT_TO_SPACE_INVARIANT    = UCONST64(1) << 4;
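To make the new resolve operation concrete, a hedged sketch of the kind of call site it enables (hypothetical function, not part of this patch). A bulk operation resolves the oop once and then works on raw interior pointers, relying on the guarantee that the payload will not relocate until the next thread transition:

    // Hypothetical bulk copy out of a Java byte[] (sketch only). With a GC
    // that is not to-space invariant (e.g. Shenandoah), resolve() hands back
    // an oop whose payload is pinned in place until the next thread
    // transition, so the raw base pointer stays valid across the memcpy.
    void copy_out(arrayOop src, void* dst, size_t nbytes) {
      arrayOop stable = arrayOop(Access<>::resolve(src));
      memcpy(dst, stable->base_raw(T_BYTE), nbytes);
    }

base_raw() is the barrier-free address getter introduced later in this patch; the resolve-then-raw split is the recurring pattern in the oops/ changes below.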

 // == Internal run-time Decorators ==
 // * INTERNAL_RT_USE_COMPRESSED_OOPS: This decorator will be set in runtime resolved
 //   access backends iff UseCompressedOops is true.
-const DecoratorSet INTERNAL_RT_USE_COMPRESSED_OOPS = UCONST64(1) << 4;
+const DecoratorSet INTERNAL_RT_USE_COMPRESSED_OOPS = UCONST64(1) << 5;

 const DecoratorSet INTERNAL_DECORATOR_MASK         = INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_VALUE_IS_OOP |
                                                      INTERNAL_BT_BARRIER_ON_PRIMITIVES | INTERNAL_RT_USE_COMPRESSED_OOPS;
@@ -138,12 +142,12 @@
 //    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold.
 //  * MO_SEQ_CST: Sequentially consistent xchg.
 //    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold.
-const DecoratorSet MO_UNORDERED      = UCONST64(1) << 5;
-const DecoratorSet MO_VOLATILE       = UCONST64(1) << 6;
-const DecoratorSet MO_RELAXED        = UCONST64(1) << 7;
-const DecoratorSet MO_ACQUIRE        = UCONST64(1) << 8;
-const DecoratorSet MO_RELEASE        = UCONST64(1) << 9;
-const DecoratorSet MO_SEQ_CST        = UCONST64(1) << 10;
+const DecoratorSet MO_UNORDERED      = UCONST64(1) << 6;
+const DecoratorSet MO_VOLATILE       = UCONST64(1) << 7;
+const DecoratorSet MO_RELAXED        = UCONST64(1) << 8;
+const DecoratorSet MO_ACQUIRE        = UCONST64(1) << 9;
+const DecoratorSet MO_RELEASE        = UCONST64(1) << 10;
+const DecoratorSet MO_SEQ_CST        = UCONST64(1) << 11;
 const DecoratorSet MO_DECORATOR_MASK = MO_UNORDERED | MO_VOLATILE | MO_RELAXED |
                                        MO_ACQUIRE | MO_RELEASE | MO_SEQ_CST;
@@ -155,6 +159,8 @@
 //    - Accesses on narrowOop* translate to encoded/decoded memory accesses without runtime checks
 //    - Accesses on HeapWord* translate to a runtime check choosing one of the above
 //    - Accesses on other types translate to raw memory accesses without runtime checks
+// * AS_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by
+//   marking that the previous value is uninitialized nonsense rather than a real value.
 // * AS_NO_KEEPALIVE: The barrier is used only on oop references and will not keep any involved objects
 //   alive, regardless of the type of reference being accessed. It will however perform the memory access
 //   in a consistent way w.r.t. e.g. concurrent compaction, so that the right field is being accessed,
@@ -164,10 +170,12 @@
 //   responsibility of performing the access and what barriers to be performed to the GC. This is the default.
 //   Note that primitive accesses will only be resolved on the barrier set if the appropriate build-time
 //   decorator for enabling primitive barriers is enabled for the build.
-const DecoratorSet AS_RAW            = UCONST64(1) << 11;
-const DecoratorSet AS_NO_KEEPALIVE   = UCONST64(1) << 12;
-const DecoratorSet AS_NORMAL         = UCONST64(1) << 13;
-const DecoratorSet AS_DECORATOR_MASK = AS_RAW | AS_NO_KEEPALIVE | AS_NORMAL;
+const DecoratorSet AS_RAW                  = UCONST64(1) << 12;
+const DecoratorSet AS_DEST_NOT_INITIALIZED = UCONST64(1) << 13;
+const DecoratorSet AS_NO_KEEPALIVE         = UCONST64(1) << 14;
+const DecoratorSet AS_NORMAL               = UCONST64(1) << 15;
+const DecoratorSet AS_DECORATOR_MASK       = AS_RAW | AS_DEST_NOT_INITIALIZED |
+                                             AS_NO_KEEPALIVE | AS_NORMAL;

 // === Reference Strength Decorators ===
 // These decorators only apply to accesses on oop-like types (oop/narrowOop).
@@ -178,10 +186,10 @@
 // * ON_UNKNOWN_OOP_REF: The memory access is performed on a reference of unknown strength.
 //   This could for example come from the unsafe API.
 // * Default (no explicit reference strength specified): ON_STRONG_OOP_REF
-const DecoratorSet ON_STRONG_OOP_REF  = UCONST64(1) << 14;
-const DecoratorSet ON_WEAK_OOP_REF    = UCONST64(1) << 15;
-const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 16;
-const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 17;
+const DecoratorSet ON_STRONG_OOP_REF  = UCONST64(1) << 16;
+const DecoratorSet ON_WEAK_OOP_REF    = UCONST64(1) << 17;
+const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 18;
+const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 19;
 const DecoratorSet ON_DECORATOR_MASK  = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF |
                                         ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF;
@@ -196,23 +204,21 @@
 // * IN_CONCURRENT_ROOT: The access is performed in an off-heap data structure pointing into the Java heap,
 //   but is notably not scanned during safepoints. This is sometimes a special case for some GCs and
 //   implies that it is also an IN_ROOT.
-const DecoratorSet IN_HEAP            = UCONST64(1) << 18;
-const DecoratorSet IN_HEAP_ARRAY      = UCONST64(1) << 19;
-const DecoratorSet IN_ROOT            = UCONST64(1) << 20;
-const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 21;
-const DecoratorSet IN_ARCHIVE_ROOT    = UCONST64(1) << 22;
+const DecoratorSet IN_HEAP            = UCONST64(1) << 20;
+const DecoratorSet IN_HEAP_ARRAY      = UCONST64(1) << 21;
+const DecoratorSet IN_ROOT            = UCONST64(1) << 22;
+const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 23;
+const DecoratorSet IN_ARCHIVE_ROOT    = UCONST64(1) << 24;
 const DecoratorSet IN_DECORATOR_MASK  = IN_HEAP | IN_HEAP_ARRAY |
                                         IN_ROOT | IN_CONCURRENT_ROOT |
                                         IN_ARCHIVE_ROOT;

 // == Value Decorators ==
 // * OOP_NOT_NULL: This property can make certain barriers faster such as compressing oops.
-const DecoratorSet OOP_NOT_NULL       = UCONST64(1) << 23;
+const DecoratorSet OOP_NOT_NULL       = UCONST64(1) << 25;
 const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;
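The renumbering above exists only to open a gap for the two new bits (INTERNAL_BT_TO_SPACE_INVARIANT and AS_DEST_NOT_INITIALIZED) while keeping every decorator in its own bit position. A quick way to convince yourself the groups stayed disjoint after the shuffle, as a hypothetical standalone check built from the constants above (not part of the patch):

    // Each group mask must occupy distinct bits; any overlap would make
    // decorator filtering ambiguous.
    STATIC_ASSERT((INTERNAL_DECORATOR_MASK & MO_DECORATOR_MASK) == 0);
    STATIC_ASSERT((MO_DECORATOR_MASK  & AS_DECORATOR_MASK)  == 0);
    STATIC_ASSERT((AS_DECORATOR_MASK  & ON_DECORATOR_MASK)  == 0);
    STATIC_ASSERT((ON_DECORATOR_MASK  & IN_DECORATOR_MASK)  == 0);
    STATIC_ASSERT((IN_DECORATOR_MASK  & OOP_DECORATOR_MASK) == 0);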

 // == Arraycopy Decorators ==
-// * ARRAYCOPY_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by
-//   marking that the previous value uninitialized nonsense rather than a real value.
 // * ARRAYCOPY_CHECKCAST: This property means that the class of the objects in source
 //   are not guaranteed to be subclasses of the class of the destination array. This requires
 //   a check-cast barrier during the copying operation. If this is not set, it is assumed
@@ -222,14 +228,12 @@
 // * ARRAYCOPY_ARRAYOF: The copy is in the arrayof form.
 // * ARRAYCOPY_ATOMIC: The accesses have to be atomic over the size of its elements.
 // * ARRAYCOPY_ALIGNED: The accesses have to be aligned on a HeapWord.
-const DecoratorSet ARRAYCOPY_DEST_NOT_INITIALIZED = UCONST64(1) << 24;
-const DecoratorSet ARRAYCOPY_CHECKCAST            = UCONST64(1) << 25;
-const DecoratorSet ARRAYCOPY_DISJOINT             = UCONST64(1) << 26;
-const DecoratorSet ARRAYCOPY_ARRAYOF              = UCONST64(1) << 27;
-const DecoratorSet ARRAYCOPY_ATOMIC               = UCONST64(1) << 28;
-const DecoratorSet ARRAYCOPY_ALIGNED              = UCONST64(1) << 29;
-const DecoratorSet ARRAYCOPY_DECORATOR_MASK       = ARRAYCOPY_DEST_NOT_INITIALIZED |
-                                                    ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT |
+const DecoratorSet ARRAYCOPY_CHECKCAST            = UCONST64(1) << 26;
+const DecoratorSet ARRAYCOPY_DISJOINT             = UCONST64(1) << 27;
+const DecoratorSet ARRAYCOPY_ARRAYOF              = UCONST64(1) << 28;
+const DecoratorSet ARRAYCOPY_ATOMIC               = UCONST64(1) << 29;
+const DecoratorSet ARRAYCOPY_ALIGNED              = UCONST64(1) << 30;
+const DecoratorSet ARRAYCOPY_DECORATOR_MASK       = ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT |
                                                     ARRAYCOPY_ARRAYOF | ARRAYCOPY_ATOMIC |
                                                     ARRAYCOPY_ALIGNED;
@@ -297,6 +301,9 @@
   template <DecoratorSet decorators>
   void clone(oop src, oop dst, size_t size);

+  template <DecoratorSet decorators>
+  oop resolve(oop src);
+
   // Infer the type that should be returned from a load.
   template <typename P>
   class LoadProxy: public StackObj {
@@ -343,8 +350,8 @@
   template <DecoratorSet decorators>
   static void verify_primitive_decorators() {
-    const DecoratorSet primitive_decorators = (AS_DECORATOR_MASK ^ AS_NO_KEEPALIVE) | IN_HEAP |
-                                              IN_HEAP_ARRAY;
+    const DecoratorSet primitive_decorators = (AS_DECORATOR_MASK ^ AS_NO_KEEPALIVE ^ AS_DEST_NOT_INITIALIZED) |
+                                              IN_HEAP | IN_HEAP_ARRAY;
     verify_decorators<decorators | primitive_decorators>();
   }
@@ -500,6 +507,11 @@
     OopType new_oop_value = new_value;
     return AccessInternal::atomic_xchg<decorators>(new_oop_value, addr);
   }
+
+  static oop resolve(oop obj) {
+    verify_decorators<INTERNAL_EMPTY>();
+    return AccessInternal::resolve<decorators>(obj);
+  }
 };

 // Helper for performing raw accesses (knows only of memory ordering
diff --git a/src/hotspot/share/oops/access.inline.hpp b/src/hotspot/share/oops/access.inline.hpp
--- a/src/hotspot/share/oops/access.inline.hpp
+++ b/src/hotspot/share/oops/access.inline.hpp
@@ -206,6 +206,13 @@
     }
   };

+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_RESOLVE, decorators>: public AllStatic {
+    static oop access_barrier(oop obj) {
+      return GCBarrierType::resolve(obj);
+    }
+  };
+
   // Resolving accessors with barriers from the barrier set happens in two steps.
   // 1. Expand paths with runtime-decorators, e.g. is UseCompressedOops on or off.
   // 2. Expand paths for each BarrierSet available in the system.
@@ -443,6 +450,22 @@
     }
   };

+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_RESOLVE>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type func_t;
+    static func_t _resolve_func;
+
+    static oop resolve_init(oop obj) {
+      func_t function = BarrierResolver<decorators, func_t, BARRIER_RESOLVE>::resolve_barrier();
+      _resolve_func = function;
+      return function(obj);
+    }
+
+    static inline oop resolve(oop obj) {
+      return _resolve_func(obj);
+    }
+  };
+
   // Initialize the function pointers to point to the resolving function.
   template <DecoratorSet decorators, typename T>
   typename AccessFunction<decorators, T, BARRIER_LOAD>::type
@@ -484,6 +507,10 @@
   typename AccessFunction<decorators, T, BARRIER_CLONE>::type
   RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;

+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type
+  RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::_resolve_func = &resolve_init;
+
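The RuntimeDispatch specialization above is a self-patching function pointer: _resolve_func starts out aimed at resolve_init, whose only job is to look up the concrete barrier once, install it, and still answer the first call. The same idiom in miniature, as hypothetical standalone C++ rather than the HotSpot template machinery:

    #include <cstdio>

    static int real_impl(int x) { return 2 * x; }

    static int init_impl(int x);                 // forward declaration
    static int (*dispatch_func)(int) = init_impl;

    static int init_impl(int x) {
      dispatch_func = real_impl;                 // patch: later calls go straight through
      return real_impl(x);                       // still answer the first call
    }

    int main() {
      printf("%d\n", dispatch_func(21));         // runs init_impl, prints 42
      printf("%d\n", dispatch_func(21));         // runs real_impl directly, prints 42
      return 0;
    }

The payoff is that barrier resolution (which depends on the GC chosen at runtime) is paid once per specialization instead of on every access.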
   // Step 3: Pre-runtime dispatching.
   // The PreRuntimeDispatch class is responsible for filtering the barrier strength
   // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
@@ -766,6 +793,21 @@
       clone(oop src, oop dst, size_t size) {
     RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
   }
+
+  template <DecoratorSet decorators>
+  inline static typename EnableIf<
+    HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
+  resolve(oop obj) {
+    typedef RawAccessBarrier<decorators> Raw;
+    return Raw::resolve(obj);
+  }
+
+  template <DecoratorSet decorators>
+  inline static typename EnableIf<
+    !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
+  resolve(oop obj) {
+    return RuntimeDispatch<decorators, oop, BARRIER_RESOLVE>::resolve(obj);
+  }
 };

 // This class adds implied decorators that follow according to decorator rules.
@@ -1051,6 +1093,12 @@
     const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
     PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
   }
+
+  template <DecoratorSet decorators>
+  inline oop resolve(oop obj) {
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
+    return PreRuntimeDispatch::resolve<expanded_decorators>(obj);
+  }
 }

 template <DecoratorSet decorators>
@@ -1060,6 +1108,7 @@
   const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK;
   STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set
     (barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 ||
+    (barrier_strength_decorators ^ AS_DEST_NOT_INITIALIZED) == 0 ||
     (barrier_strength_decorators ^ AS_RAW) == 0 ||
     (barrier_strength_decorators ^ AS_NORMAL) == 0
   ));
diff --git a/src/hotspot/share/oops/accessBackend.hpp b/src/hotspot/share/oops/accessBackend.hpp
--- a/src/hotspot/share/oops/accessBackend.hpp
+++ b/src/hotspot/share/oops/accessBackend.hpp
@@ -52,7 +52,8 @@
   BARRIER_ATOMIC_XCHG,
   BARRIER_ATOMIC_XCHG_AT,
   BARRIER_ARRAYCOPY,
-  BARRIER_CLONE
+  BARRIER_CLONE,
+  BARRIER_RESOLVE
 };

 template <DecoratorSet decorators, typename T>
@@ -100,6 +101,7 @@
   typedef bool (*arraycopy_func_t)(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);

   typedef void (*clone_func_t)(oop src, oop dst, size_t size);
+  typedef oop (*resolve_func_t)(oop obj);
 };

 template <DecoratorSet decorators, typename T, BarrierType barrier_type> struct AccessFunction {};
@@ -119,6 +121,7 @@
   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_RESOLVE, resolve_func_t);
 #undef ACCESS_GENERATE_ACCESS_FUNCTION

 template <DecoratorSet decorators, typename T>
@@ -379,6 +382,8 @@
   static bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length);

   static void clone(oop src, oop dst, size_t size);
+
+  static oop resolve(oop obj) { return obj; }
 };

 #endif // SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
diff --git a/src/hotspot/share/oops/annotations.cpp b/src/hotspot/share/oops/annotations.cpp
--- a/src/hotspot/share/oops/annotations.cpp
+++ b/src/hotspot/share/oops/annotations.cpp
@@ -31,6 +31,7 @@
 #include "memory/oopFactory.hpp"
 #include "oops/annotations.hpp"
 #include "oops/instanceKlass.hpp"
+#include "oops/typeArrayOop.inline.hpp"
 #include "utilities/ostream.hpp"

 // Allocate annotations in metadata area
diff --git a/src/hotspot/share/oops/arrayOop.hpp b/src/hotspot/share/oops/arrayOop.hpp
--- a/src/hotspot/share/oops/arrayOop.hpp
+++ b/src/hotspot/share/oops/arrayOop.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -76,10 +76,10 @@
     return header_size(type) * HeapWordSize;
   }

-  // Returns the address of the first element.
-  void* base(BasicType type) const {
-    return (void*) (((intptr_t) this) + base_offset_in_bytes(type));
-  }
+  // Returns the address of the first element. The elements in the array will not
+  // relocate from this address until a subsequent thread transition.
+  inline void* base(BasicType type) const;
+  inline void* base_raw(BasicType type) const; // GC barrier invariant

   // Tells whether index is within bounds.
   bool is_within_bounds(int index) const        { return 0 <= index && index < length(); }
diff --git a/src/hotspot/share/oops/objArrayOop.inline.hpp b/src/hotspot/share/oops/arrayOop.inline.hpp
copy from src/hotspot/share/oops/objArrayOop.inline.hpp
copy to src/hotspot/share/oops/arrayOop.inline.hpp
--- a/src/hotspot/share/oops/objArrayOop.inline.hpp
+++ b/src/hotspot/share/oops/arrayOop.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -22,22 +22,19 @@
 *
 */

-#ifndef SHARE_VM_OOPS_OBJARRAYOOP_INLINE_HPP
-#define SHARE_VM_OOPS_OBJARRAYOOP_INLINE_HPP
+#ifndef SHARE_OOPS_ARRAYOOP_INLINE_HPP
+#define SHARE_OOPS_ARRAYOOP_INLINE_HPP

 #include "oops/access.inline.hpp"
-#include "oops/objArrayOop.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/globals.hpp"
+#include "oops/arrayOop.hpp"

-inline oop objArrayOopDesc::obj_at(int index) const {
-  ptrdiff_t offset = UseCompressedOops ? obj_at_offset<narrowOop>(index) : obj_at_offset<oop>(index);
-  return HeapAccess<IN_HEAP_ARRAY>::oop_load_at(as_oop(), offset);
+void* arrayOopDesc::base(BasicType type) const {
+  oop resolved_obj = Access<>::resolve(as_oop());
+  return arrayOop(resolved_obj)->base_raw(type);
 }

-inline void objArrayOopDesc::obj_at_put(int index, oop value) {
-  ptrdiff_t offset = UseCompressedOops ? obj_at_offset<narrowOop>(index) : obj_at_offset<oop>(index);
-  HeapAccess<IN_HEAP_ARRAY>::oop_store_at(as_oop(), offset, value);
+void* arrayOopDesc::base_raw(BasicType type) const {
+  return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + base_offset_in_bytes(type));
 }

-#endif // SHARE_VM_OOPS_OBJARRAYOOP_INLINE_HPP
+#endif // SHARE_OOPS_ARRAYOOP_INLINE_HPP
diff --git a/src/hotspot/share/oops/constantPool.cpp b/src/hotspot/share/oops/constantPool.cpp
--- a/src/hotspot/share/oops/constantPool.cpp
+++ b/src/hotspot/share/oops/constantPool.cpp
@@ -43,6 +43,7 @@
 #include "oops/objArrayKlass.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "oops/typeArrayOop.inline.hpp"
 #include "runtime/fieldType.hpp"
 #include "runtime/init.hpp"
 #include "runtime/javaCalls.hpp"
@@ -546,12 +547,6 @@
   }
 }

-
-Klass* ConstantPool::klass_ref_at_if_loaded(const constantPoolHandle& this_cp, int which) {
-  return klass_at_if_loaded(this_cp, this_cp->klass_ref_index_at(which));
-}
-
-
 Method* ConstantPool::method_at_if_loaded(const constantPoolHandle& cpool,
                                           int which) {
   if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
diff --git a/src/hotspot/share/oops/constantPool.hpp b/src/hotspot/share/oops/constantPool.hpp
--- a/src/hotspot/share/oops/constantPool.hpp
+++ b/src/hotspot/share/oops/constantPool.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -147,7 +147,7 @@
     assert(is_within_bounds(which), "index out of bounds");
     assert(!tag_at(which).is_unresolved_klass() && !tag_at(which).is_unresolved_klass_in_error(), "Corrupted constant pool");
     // Uses volatile because the klass slot changes without a lock.
-    intptr_t adr = OrderAccess::load_acquire(obj_at_addr_raw(which));
+    intptr_t adr = OrderAccess::load_acquire(obj_at_addr(which));
     assert(adr != 0 || which == 0, "cp entry for klass should not be zero");
     return CPSlot(adr);
   }
@@ -157,7 +157,7 @@
     assert(s.value() != 0, "Caught something");
     *(intptr_t*)&base()[which] = s.value();
   }
-  intptr_t* obj_at_addr_raw(int which) const {
+  intptr_t* obj_at_addr(int which) const {
     assert(is_within_bounds(which), "index out of bounds");
     return (intptr_t*) &base()[which];
   }
@@ -824,7 +824,6 @@
   static bool       has_method_type_at_if_loaded      (const constantPoolHandle& this_cp, int which);
   static oop            method_type_at_if_loaded      (const constantPoolHandle& this_cp, int which);
   static Klass*            klass_at_if_loaded         (const constantPoolHandle& this_cp, int which);
-  static Klass*        klass_ref_at_if_loaded         (const constantPoolHandle& this_cp, int which);

   // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
   // future by other Java code. These take constant pool indices rather than
diff --git a/src/hotspot/share/oops/generateOopMap.cpp b/src/hotspot/share/oops/generateOopMap.cpp
--- a/src/hotspot/share/oops/generateOopMap.cpp
+++ b/src/hotspot/share/oops/generateOopMap.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,6 @@
 */

 #include "precompiled.hpp"
-#include "jvm.h"
 #include "interpreter/bytecodeStream.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
@@ -33,6 +32,7 @@
 #include "oops/symbol.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
+#include "runtime/os.hpp"
 #include "runtime/relocator.hpp"
 #include "runtime/timerTrace.hpp"
 #include "utilities/bitMap.inline.hpp"
@@ -2151,10 +2151,10 @@
 void GenerateOopMap::error_work(const char *format, va_list ap) {
   _got_error = true;
   char msg_buffer[512];
-  vsnprintf(msg_buffer, sizeof(msg_buffer), format, ap);
+  os::vsnprintf(msg_buffer, sizeof(msg_buffer), format, ap);
   // Append method name
   char msg_buffer2[512];
-  jio_snprintf(msg_buffer2, sizeof(msg_buffer2), "%s in method %s", msg_buffer, method()->name()->as_C_string());
+  os::snprintf(msg_buffer2, sizeof(msg_buffer2), "%s in method %s", msg_buffer, method()->name()->as_C_string());
   if (Thread::current()->can_call_java()) {
     _exception = Exceptions::new_exception(Thread::current(),
                    vmSymbols::java_lang_LinkageError(), msg_buffer2);
diff --git a/src/hotspot/share/oops/instanceKlass.inline.hpp b/src/hotspot/share/oops/instanceKlass.inline.hpp
--- a/src/hotspot/share/oops/instanceKlass.inline.hpp
+++ b/src/hotspot/share/oops/instanceKlass.inline.hpp
@@ -56,7 +56,7 @@
 template <bool nv, typename T, class OopClosureType>
 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
-  T* p         = (T*)obj->obj_field_addr<T>(map->offset());
+  T* p         = (T*)obj->obj_field_addr_raw<T>(map->offset());
   T* const end = p + map->count();

   for (; p < end; ++p) {
@@ -67,7 +67,7 @@
 #if INCLUDE_ALL_GCS
 template <bool nv, typename T, class OopClosureType>
 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
-  T* const start = (T*)obj->obj_field_addr<T>(map->offset());
+  T* const start = (T*)obj->obj_field_addr_raw<T>(map->offset());
   T*       p     = start + map->count();

   while (start < p) {
@@ -79,7 +79,7 @@
 template <bool nv, typename T, class OopClosureType>
 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
-  T* p   = (T*)obj->obj_field_addr<T>(map->offset());
+  T* p   = (T*)obj->obj_field_addr_raw<T>(map->offset());
   T* end = p + map->count();

   T* const l   = (T*)mr.start();
diff --git a/src/hotspot/share/oops/instanceRefKlass.inline.hpp b/src/hotspot/share/oops/instanceRefKlass.inline.hpp
--- a/src/hotspot/share/oops/instanceRefKlass.inline.hpp
+++ b/src/hotspot/share/oops/instanceRefKlass.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@
 template <bool nv, typename T, class OopClosureType, class Contains>
 void InstanceRefKlass::do_referent(oop obj, OopClosureType* closure, Contains& contains) {
-  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj);
   if (contains(referent_addr)) {
     Devirtualizer<nv>::do_oop(closure, referent_addr);
   }
@@ -45,7 +45,7 @@
 template <bool nv, typename T, class OopClosureType, class Contains>
 void InstanceRefKlass::do_next(oop obj, OopClosureType* closure, Contains& contains) {
-  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj);
   if (contains(next_addr)) {
     Devirtualizer<nv>::do_oop(closure, next_addr);
   }
@@ -53,7 +53,7 @@
 template <bool nv, typename T, class OopClosureType, class Contains>
 void InstanceRefKlass::do_discovered(oop obj, OopClosureType* closure, Contains& contains) {
-  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
   if (contains(discovered_addr)) {
     Devirtualizer<nv>::do_oop(closure, discovered_addr);
   }
@@ -63,7 +63,7 @@
 bool InstanceRefKlass::try_discover(oop obj, ReferenceType type, OopClosureType* closure) {
   ReferenceProcessor* rp = closure->ref_processor();
   if (rp != NULL) {
-    T referent_oop = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::referent_addr(obj));
+    T referent_oop = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::referent_addr_raw(obj));
     if (!oopDesc::is_null(referent_oop)) {
       oop referent = oopDesc::decode_heap_oop_not_null(referent_oop);
       if (!referent->is_gc_marked()) {
@@ -86,7 +86,7 @@
   do_referent<nv, T>(obj, closure, contains);

   // Treat discovered as normal oop, if ref is not "active" (next non-NULL).
-  T next_oop = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::next_addr(obj));
+  T next_oop = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::next_addr_raw(obj));
   if (!oopDesc::is_null(next_oop)) {
     do_discovered<nv, T>(obj, closure, contains);
   }
@@ -189,9 +189,9 @@
 #ifdef ASSERT
 template <typename T>
 void InstanceRefKlass::trace_reference_gc(const char *s, oop obj) {
-  T* referent_addr   = (T*) java_lang_ref_Reference::referent_addr(obj);
-  T* next_addr       = (T*) java_lang_ref_Reference::next_addr(obj);
-  T* discovered_addr = (T*) java_lang_ref_Reference::discovered_addr(obj);
+  T* referent_addr   = (T*) java_lang_ref_Reference::referent_addr_raw(obj);
+  T* next_addr       = (T*) java_lang_ref_Reference::next_addr_raw(obj);
+  T* discovered_addr = (T*) java_lang_ref_Reference::discovered_addr_raw(obj);

   log_develop_trace(gc, ref)("InstanceRefKlass %s for obj " PTR_FORMAT, s, p2i(obj));
   log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
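The pattern across instanceKlass.inline.hpp and instanceRefKlass.inline.hpp is the same: GC-internal iteration switches from the barriered address getters to new *_addr_raw variants, because the collector is already operating on oops it knows are stable; only mutator-facing code pays for the resolve barrier. A hedged sketch of that division of labor, with hypothetical functions (T standing for oop or narrowOop):

    // GC-side: the closure runs during a collection phase where 'obj' is
    // already stable, so paying for Access<>::resolve on every field visit
    // would be pure overhead. The raw form is just obj + field_offset.
    template <typename T>
    void visit_referent(oop obj, OopClosure* cl) {
      T* addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj);  // no barrier
      cl->do_oop(addr);
    }

Mutator-side code that wants a raw interior pointer has to go through Access<>::resolve first, as in the arrayOopDesc::base() implementation earlier in this patch.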
diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp
--- a/src/hotspot/share/oops/klass.cpp
+++ b/src/hotspot/share/oops/klass.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -577,21 +577,15 @@
   if (is_instance_klass()) {
     const InstanceKlass* ik = static_cast<const InstanceKlass*>(this);
     if (ik->is_anonymous()) {
-      intptr_t hash = 0;
-      if (ik->java_mirror() != NULL) {
-        // java_mirror might not be created yet, return 0 as hash.
-        hash = ik->java_mirror()->identity_hash();
-      }
-      char hash_buf[40];
-      sprintf(hash_buf, "/" UINTX_FORMAT, (uintx)hash);
-      size_t hash_len = strlen(hash_buf);
-
-      size_t result_len = name()->utf8_length();
-      char*  result     = NEW_RESOURCE_ARRAY(char, result_len + hash_len + 1);
-      name()->as_klass_external_name(result, (int) result_len + 1);
-      assert(strlen(result) == result_len, "");
-      strcpy(result + result_len, hash_buf);
-      assert(strlen(result) == result_len + hash_len, "");
+      char addr_buf[20];
+      jio_snprintf(addr_buf, 20, "/" INTPTR_FORMAT, p2i(ik));
+      size_t addr_len = strlen(addr_buf);
+      size_t name_len = name()->utf8_length();
+      char*  result   = NEW_RESOURCE_ARRAY(char, name_len + addr_len + 1);
+      name()->as_klass_external_name(result, (int) name_len + 1);
+      assert(strlen(result) == name_len, "");
+      strcpy(result + name_len, addr_buf);
+      assert(strlen(result) == name_len + addr_len, "");
       return result;
     }
   }
@@ -737,4 +731,82 @@
   return true;
 }

-#endif
+#endif // PRODUCT
+
+// The caller of class_loader_and_module_name() (or one of its callers)
+// must use a ResourceMark in order to correctly free the result.
+const char* Klass::class_loader_and_module_name() const {
+  const char* delim = "/";
+  size_t delim_len = strlen(delim);
+
+  const char* fqn = external_name();
+  // Length of message to return; always include FQN
+  size_t msglen = strlen(fqn) + 1;
+
+  bool has_cl_name = false;
+  bool has_mod_name = false;
+  bool has_version = false;
+
+  // Use class loader name, if exists and not builtin
+  const char* class_loader_name = "";
+  ClassLoaderData* cld = class_loader_data();
+  assert(cld != NULL, "class_loader_data should not be NULL");
+  if (!cld->is_builtin_class_loader_data()) {
+    // If not builtin, look for name
+    oop loader = class_loader();
+    if (loader != NULL) {
+      oop class_loader_name_oop = java_lang_ClassLoader::name(loader);
+      if (class_loader_name_oop != NULL) {
+        class_loader_name = java_lang_String::as_utf8_string(class_loader_name_oop);
+        if (class_loader_name != NULL && class_loader_name[0] != '\0') {
+          has_cl_name = true;
+          msglen += strlen(class_loader_name) + delim_len;
+        }
+      }
+    }
+  }
+
+  const char* module_name = "";
+  const char* version = "";
+  const Klass* bottom_klass = is_objArray_klass() ?
+                                ObjArrayKlass::cast(this)->bottom_klass() : this;
+  if (bottom_klass->is_instance_klass()) {
+    ModuleEntry* module = InstanceKlass::cast(bottom_klass)->module();
+    // Use module name, if exists
+    if (module->is_named()) {
+      has_mod_name = true;
+      module_name = module->name()->as_C_string();
+      msglen += strlen(module_name);
+      // Use version if exists and is not a jdk module
+      if (module->is_non_jdk_module() && module->version() != NULL) {
+        has_version = true;
+        version = module->version()->as_C_string();
+        msglen += strlen("@") + strlen(version);
+      }
+    }
+  } else {
+    // klass is an array of primitives, so its module is java.base
+    module_name = JAVA_BASE_NAME;
+  }
+
+  if (has_cl_name || has_mod_name) {
+    msglen += delim_len;
+  }
+
+  char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
+
+  // Just return the FQN if error in allocating string
+  if (message == NULL) {
+    return fqn;
+  }
+
+  jio_snprintf(message, msglen, "%s%s%s%s%s%s%s",
+               class_loader_name,
+               (has_cl_name) ? delim : "",
+               (has_mod_name) ? module_name : "",
+               (has_version) ? "@" : "",
+               (has_version) ? version : "",
+               (has_cl_name || has_mod_name) ? delim : "",
+               fqn);
+  return message;
+}
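Reading the jio_snprintf format string back to front gives the shape of the composed name. A hypothetical worked example, not taken from the patch:

    // Hypothetical inputs:
    //   class loader name : "app-loader"    (non-builtin loader with a name)
    //   module            : "com.greetings", non-JDK, version "1.2"
    //   class             : com.greetings.Main
    // Composed result:
    //   "app-loader/com.greetings@1.2/com.greetings.Main"
    // With a builtin loader and an unnamed module, only the FQN survives:
    //   "com.greetings.Main"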
delim : "", + fqn); + return message; +} diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp --- a/src/hotspot/share/oops/klass.hpp +++ b/src/hotspot/share/oops/klass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -506,6 +506,8 @@ // and the package separators as '/'. virtual const char* signature_name() const; + const char* class_loader_and_module_name() const; + // type testing operations #ifdef ASSERT protected: diff --git a/src/hotspot/share/oops/objArrayKlass.cpp b/src/hotspot/share/oops/objArrayKlass.cpp --- a/src/hotspot/share/oops/objArrayKlass.cpp +++ b/src/hotspot/share/oops/objArrayKlass.cpp @@ -268,10 +268,14 @@ } if (UseCompressedOops) { + s = arrayOop(Access<>::resolve(s)); + d = arrayOop(Access<>::resolve(d)); narrowOop* const src = objArrayOop(s)->obj_at_addr(src_pos); narrowOop* const dst = objArrayOop(d)->obj_at_addr(dst_pos); do_copy(s, src, d, dst, length, CHECK); } else { + s = arrayOop(Access<>::resolve(s)); + d = arrayOop(Access<>::resolve(d)); oop* const src = objArrayOop(s)->obj_at_addr(src_pos); oop* const dst = objArrayOop(d)->obj_at_addr(dst_pos); do_copy (s, src, d, dst, length, CHECK); diff --git a/src/hotspot/share/oops/objArrayKlass.inline.hpp b/src/hotspot/share/oops/objArrayKlass.inline.hpp --- a/src/hotspot/share/oops/objArrayKlass.inline.hpp +++ b/src/hotspot/share/oops/objArrayKlass.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "memory/memRegion.hpp" #include "memory/iterator.inline.hpp" +#include "oops/arrayOop.inline.hpp" #include "oops/arrayKlass.hpp" #include "oops/klass.hpp" #include "oops/objArrayKlass.hpp" @@ -36,7 +37,7 @@ template void ObjArrayKlass::oop_oop_iterate_elements_specialized(objArrayOop a, OopClosureType* closure) { - T* p = (T*)a->base(); + T* p = (T*)a->base_raw(); T* const end = p + a->length(); for (;p < end; p++) { @@ -51,7 +52,7 @@ T* const l = (T*)low; T* const h = (T*)high; - T* p = (T*)a->base(); + T* p = (T*)a->base_raw(); T* end = p + a->length(); if (p < l) { @@ -110,8 +111,8 @@ template void ObjArrayKlass::oop_oop_iterate_range_specialized(objArrayOop a, OopClosureType* closure, int start, int end) { - T* low = start == 0 ? cast_from_oop(a) : a->obj_at_addr(start); - T* high = (T*)a->base() + end; + T* low = start == 0 ? cast_from_oop(a) : a->obj_at_addr_raw(start); + T* high = (T*)a->base_raw() + end; oop_oop_iterate_elements_specialized_bounded(a, closure, low, high); } diff --git a/src/hotspot/share/oops/objArrayOop.cpp b/src/hotspot/share/oops/objArrayOop.cpp --- a/src/hotspot/share/oops/objArrayOop.cpp +++ b/src/hotspot/share/oops/objArrayOop.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #include "gc/shared/specialized_oop_closures.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/objArrayKlass.hpp"
-#include "oops/objArrayOop.hpp"
+#include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"

 oop objArrayOopDesc::atomic_compare_exchange_oop(int index, oop exchange_value,
diff --git a/src/hotspot/share/oops/objArrayOop.hpp b/src/hotspot/share/oops/objArrayOop.hpp
--- a/src/hotspot/share/oops/objArrayOop.hpp
+++ b/src/hotspot/share/oops/objArrayOop.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -41,10 +41,8 @@
   friend class CSetMarkOopClosure;
   friend class G1ParScanPartialArrayClosure;

-  template <class T> T* obj_at_addr(int index) const {
-    assert(is_within_bounds(index), "index out of bounds");
-    return &((T*)base())[index];
-  }
+  template <class T> T* obj_at_addr(int index) const;
+  template <class T> T* obj_at_addr_raw(int index) const;

   template <class T>
   static ptrdiff_t obj_at_offset(int index) {
@@ -84,7 +82,8 @@
   }

   // base is the address following the header.
-  HeapWord* base() const { return (HeapWord*) arrayOopDesc::base(T_OBJECT); }
+  HeapWord* base() const;
+  HeapWord* base_raw() const;

   // Accessing
   oop obj_at(int index) const;
diff --git a/src/hotspot/share/oops/objArrayOop.inline.hpp b/src/hotspot/share/oops/objArrayOop.inline.hpp
--- a/src/hotspot/share/oops/objArrayOop.inline.hpp
+++ b/src/hotspot/share/oops/objArrayOop.inline.hpp
@@ -26,10 +26,24 @@
 #define SHARE_VM_OOPS_OBJARRAYOOP_INLINE_HPP

 #include "oops/access.inline.hpp"
+#include "oops/arrayOop.inline.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/globals.hpp"

+inline HeapWord* objArrayOopDesc::base() const { return (HeapWord*) arrayOopDesc::base(T_OBJECT); }
+inline HeapWord* objArrayOopDesc::base_raw() const { return (HeapWord*) arrayOopDesc::base_raw(T_OBJECT); }
+
+template <class T> T* objArrayOopDesc::obj_at_addr(int index) const {
+  assert(is_within_bounds(index), "index out of bounds");
+  return &((T*)base())[index];
+}
+
+template <class T> T* objArrayOopDesc::obj_at_addr_raw(int index) const {
+  assert(is_within_bounds(index), "index out of bounds");
+  return &((T*)base_raw())[index];
+}
+
 inline oop objArrayOopDesc::obj_at(int index) const {
   ptrdiff_t offset = UseCompressedOops ? obj_at_offset<narrowOop>(index) : obj_at_offset<oop>(index);
   return HeapAccess<IN_HEAP_ARRAY>::oop_load_at(as_oop(), offset);
 }
diff --git a/src/hotspot/share/oops/oop.hpp b/src/hotspot/share/oops/oop.hpp
--- a/src/hotspot/share/oops/oop.hpp
+++ b/src/hotspot/share/oops/oop.hpp
@@ -125,26 +125,13 @@
  protected:
   inline oop as_oop() const { return const_cast<oopDesc*>(this); }

- private:
+ public:
   // field addresses in oop
-  inline void* field_base(int offset) const;
+  inline void* field_addr(int offset) const;
+  inline void* field_addr_raw(int offset) const;

-  inline jbyte*    byte_field_addr(int offset)   const;
-  inline jchar*    char_field_addr(int offset)   const;
-  inline jboolean* bool_field_addr(int offset)   const;
-  inline jint*     int_field_addr(int offset)    const;
-  inline jshort*   short_field_addr(int offset)  const;
-  inline jlong*    long_field_addr(int offset)   const;
-  inline jfloat*   float_field_addr(int offset)  const;
-  inline jdouble*  double_field_addr(int offset) const;
-  inline Metadata** metadata_field_addr(int offset) const;
-
- public: // Need this as public for garbage collection.
-  template <class T> inline T* obj_field_addr(int offset) const;
-
-  // Needed for javaClasses
-  inline address* address_field_addr(int offset) const;
+  template <class T> inline T* obj_field_addr_raw(int offset) const;

   inline static bool is_null(oop obj)       { return obj == NULL; }
   inline static bool is_null(narrowOop obj) { return obj == 0; }
diff --git a/src/hotspot/share/oops/oop.inline.hpp b/src/hotspot/share/oops/oop.inline.hpp
--- a/src/hotspot/share/oops/oop.inline.hpp
+++ b/src/hotspot/share/oops/oop.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -235,21 +235,11 @@
 bool oopDesc::is_objArray()  const { return klass()->is_objArray_klass();  }
 bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

-void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }
+void* oopDesc::field_addr_raw(int offset) const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
+void* oopDesc::field_addr(int offset)     const { return Access<>::resolve(as_oop())->field_addr_raw(offset); }

-jbyte*    oopDesc::byte_field_addr(int offset)   const { return (jbyte*)    field_base(offset); }
-jchar*    oopDesc::char_field_addr(int offset)   const { return (jchar*)    field_base(offset); }
-jboolean* oopDesc::bool_field_addr(int offset)   const { return (jboolean*) field_base(offset); }
-jint*     oopDesc::int_field_addr(int offset)    const { return (jint*)     field_base(offset); }
-jshort*   oopDesc::short_field_addr(int offset)  const { return (jshort*)   field_base(offset); }
-jlong*    oopDesc::long_field_addr(int offset)   const { return (jlong*)    field_base(offset); }
-jfloat*   oopDesc::float_field_addr(int offset)  const { return (jfloat*)   field_base(offset); }
-jdouble*  oopDesc::double_field_addr(int offset) const { return (jdouble*)  field_base(offset); }
-Metadata** oopDesc::metadata_field_addr(int offset) const { return (Metadata**)field_base(offset); }
-
-template <class T> T* oopDesc::obj_field_addr(int offset) const { return (T*) field_base(offset); }
-address*  oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }
-
+template <class T>
+T* oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }

 // Functions for getting and setting oops within instance objects.
 // If the oops are compressed, the type passed to these overloaded functions
diff --git a/src/hotspot/share/oops/oopsHierarchy.hpp b/src/hotspot/share/oops/oopsHierarchy.hpp
--- a/src/hotspot/share/oops/oopsHierarchy.hpp
+++ b/src/hotspot/share/oops/oopsHierarchy.hpp
@@ -146,13 +146,6 @@
   // from javaCalls.cpp
   operator jobject () const           { return (jobject)obj(); }

-  // from javaClasses.cpp
-  operator JavaThread* () const       { return (JavaThread*)obj(); }
-
-#ifndef _LP64
-  // from jvm.cpp
-  operator jlong* () const            { return (jlong*)obj(); }
-#endif

   // from parNewGeneration and other things that want to get to the end of
   // an oop for stuff (like ObjArrayKlass.cpp)
diff --git a/src/hotspot/share/oops/typeArrayOop.hpp b/src/hotspot/share/oops/typeArrayOop.hpp
--- a/src/hotspot/share/oops/typeArrayOop.hpp
+++ b/src/hotspot/share/oops/typeArrayOop.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -35,185 +35,67 @@

 #include <limits.h>

 class typeArrayOopDesc : public arrayOopDesc {
+private:
+  template <typename T>
+  static ptrdiff_t element_offset(BasicType bt, int index) {
+    return arrayOopDesc::base_offset_in_bytes(bt) + sizeof(T) * index;
+  }
+
 protected:
-  jchar*    char_base()   const { return (jchar*)   base(T_CHAR); }
-  jboolean* bool_base()   const { return (jboolean*)base(T_BOOLEAN); }
-  jbyte*    byte_base()   const { return (jbyte*)   base(T_BYTE); }
-  jint*     int_base()    const { return (jint*)    base(T_INT); }
-  jlong*    long_base()   const { return (jlong*)   base(T_LONG); }
-  jshort*   short_base()  const { return (jshort*)  base(T_SHORT); }
-  jfloat*   float_base()  const { return (jfloat*)  base(T_FLOAT); }
-  jdouble*  double_base() const { return (jdouble*) base(T_DOUBLE); }
+  jchar*    char_base()   const;
+  jboolean* bool_base()   const;
+  jbyte*    byte_base()   const;
+  jint*     int_base()    const;
+  jlong*    long_base()   const;
+  jshort*   short_base()  const;
+  jfloat*   float_base()  const;
+  jdouble*  double_base() const;

   friend class TypeArrayKlass;

 public:
-  jbyte* byte_at_addr(int which) const {
-    assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(oop(this)));
-    return &p->byte_base()[which];
-  }
+  jbyte* byte_at_addr(int which) const;
+  jboolean* bool_at_addr(int which) const;
+  jchar* char_at_addr(int which) const;
+  jint* int_at_addr(int which) const;
+  jshort* short_at_addr(int which) const;
+  jushort* ushort_at_addr(int which) const;
+  jlong* long_at_addr(int which) const;
+  jfloat* float_at_addr(int which) const;
+  jdouble* double_at_addr(int which) const;

-  jboolean* bool_at_addr(int which) const {
-    assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(oop(this)));
-    return &p->bool_base()[which];
-  }
+  jbyte byte_at(int which) const;
+  void byte_at_put(int which, jbyte contents);

-  jchar* char_at_addr(int which) const {
-    assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(oop(this)));
-    return &p->char_base()[which];
-  }
+  jboolean bool_at(int which) const;
+  void bool_at_put(int which, jboolean contents);

-  jint* int_at_addr(int which) const {
-    assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(oop(this)));
-    return &p->int_base()[which];
-  }
+  jchar char_at(int which) const;
+  void char_at_put(int which, jchar contents);

-  jshort* short_at_addr(int which) const {
-    assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(oop(this)));
-    return &p->short_base()[which];
-  }
+  jint int_at(int which) const;
+  void int_at_put(int which, jint contents);

-  jushort* ushort_at_addr(int which) const {  // for field descriptor arrays
-    assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(oop(this)));
-    return (jushort*) &p->short_base()[which];
-  }
+  jshort short_at(int which) const;
+  void short_at_put(int which, jshort contents);

-  jlong* long_at_addr(int which) const {
-    assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(oop(this)));
-    return &p->long_base()[which];
-  }
+  jushort ushort_at(int which) const;
+  void ushort_at_put(int which, jushort contents);

-  jfloat* float_at_addr(int which) const {
-    assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(oop(this)));
-    return &p->float_base()[which];
-  }
+  jlong long_at(int which) const;
+  void long_at_put(int which, jlong contents);

-  jdouble* double_at_addr(int which) const {
-    assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(oop(this)));
-    return &p->double_base()[which];
-  }
+  jfloat float_at(int which) const;
+  void float_at_put(int which, jfloat contents);

-  jbyte byte_at(int which) const {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->read_barrier((oop) this));
-    return *p->byte_at_addr(which);
-  }
-  void byte_at_put(int which, jbyte contents) {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(this));
-    *p->byte_at_addr(which) = contents;
-  }
+  jdouble double_at(int which) const;
+  void double_at_put(int which, jdouble contents);

-  jboolean bool_at(int which) const {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->read_barrier((oop) this));
-    return *p->bool_at_addr(which);
-  }
-  void bool_at_put(int which, jboolean contents) {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(this));
-    *p->bool_at_addr(which) = (((jint)contents) & 1);
-  }
+  jbyte byte_at_acquire(int which) const;
+  void release_byte_at_put(int which, jbyte contents);

-  jchar char_at(int which) const {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->read_barrier((oop) this));
-    return *p->char_at_addr(which);
-  }
-  void char_at_put(int which, jchar contents) {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(this));
-    *p->char_at_addr(which) = contents;
-  }
-
-  jint int_at(int which) const {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->read_barrier((oop) this));
-    return *p->int_at_addr(which);
-  }
-  void int_at_put(int which, jint contents) {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(this));
-    *p->int_at_addr(which) = contents;
-  }
-
-  jshort short_at(int which) const {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->read_barrier((oop) this));
-    return *p->short_at_addr(which);
-  }
-  void short_at_put(int which, jshort contents) {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(this));
-    *p->short_at_addr(which) = contents;
-  }
-
-  jushort ushort_at(int which) const {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->read_barrier((oop) this));
-    return *p->ushort_at_addr(which);
-  }
-  void ushort_at_put(int which, jushort contents) {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(this));
-    *p->ushort_at_addr(which) = contents;
-  }
-
-  jlong long_at(int which) const {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->read_barrier((oop) this));
-    return *p->long_at_addr(which);
-  }
-  void long_at_put(int which, jlong contents) {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(this));
-    *p->long_at_addr(which) = contents;
-  }
-
-  jfloat float_at(int which) const {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->read_barrier((oop) this));
-    return *p->float_at_addr(which);
-  }
-  void float_at_put(int which, jfloat contents) {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(this));
-    *p->float_at_addr(which) = contents;
-  }
-
-  jdouble double_at(int which) const {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->read_barrier((oop) this));
-    return *p->double_at_addr(which);
-  }
-  void double_at_put(int which, jdouble contents) {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(this));
-    *p->double_at_addr(which) = contents;
-  }
-
-  jbyte byte_at_acquire(int which) const {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->read_barrier((oop) this));
-    return OrderAccess::load_acquire(p->byte_at_addr(which));
-  }
-  void release_byte_at_put(int which, jbyte contents) {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(this));
-    OrderAccess::release_store(p->byte_at_addr(which), contents);
-  }
-
-  // Java thinks metadata arrays are just arrays of either long or int, since
-  // there doesn't seem to be T_ADDRESS, so this is a bit of unfortunate
-  // casting
-#ifdef _LP64
-  Symbol* symbol_at(int which) const {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->read_barrier((oop) this));
-    return (Symbol*)*p->long_at_addr(which);
-  }
-  void symbol_at_put(int which, Symbol* contents) {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(this));
-    *p->long_at_addr(which) = (jlong)contents;
-  }
-#else
-  Symbol* symbol_at(int which) const {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->read_barrier((oop) this));
-    return (Symbol*)*p->int_at_addr(which);
-  }
-  void symbol_at_put(int which, Symbol* contents) {
-    typeArrayOop p = typeArrayOop(BarrierSet::barrier_set()->write_barrier(this));
-    *p->int_at_addr(which) = (int)contents;
-  }
-#endif // _LP64
+  Symbol* symbol_at(int which) const;
+  void symbol_at_put(int which, Symbol* contents);

   // Sizing
diff --git a/src/hotspot/share/oops/typeArrayOop.inline.hpp b/src/hotspot/share/oops/typeArrayOop.inline.hpp
--- a/src/hotspot/share/oops/typeArrayOop.inline.hpp
+++ b/src/hotspot/share/oops/typeArrayOop.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,9 @@
 #ifndef SHARE_VM_OOPS_TYPEARRAYOOP_INLINE_HPP
 #define SHARE_VM_OOPS_TYPEARRAYOOP_INLINE_HPP

+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "oops/arrayOop.inline.hpp"
 #include "oops/typeArrayOop.hpp"

 int typeArrayOopDesc::object_size() {
@@ -33,4 +35,172 @@
   return object_size(tk->layout_helper(), length());
 }

+inline jchar*    typeArrayOopDesc::char_base()   const { return (jchar*)   base(T_CHAR); }
+inline jboolean* typeArrayOopDesc::bool_base()   const { return (jboolean*)base(T_BOOLEAN); }
+inline jbyte*    typeArrayOopDesc::byte_base()   const { return (jbyte*)   base(T_BYTE); }
+inline jint*     typeArrayOopDesc::int_base()    const { return (jint*)    base(T_INT); }
+inline jlong*    typeArrayOopDesc::long_base()   const { return (jlong*)   base(T_LONG); }
+inline jshort*   typeArrayOopDesc::short_base()  const { return (jshort*)  base(T_SHORT); }
+inline jfloat*   typeArrayOopDesc::float_base()  const { return (jfloat*)  base(T_FLOAT); }
+inline jdouble*  typeArrayOopDesc::double_base() const { return (jdouble*) base(T_DOUBLE); }
+
+inline jbyte* typeArrayOopDesc::byte_at_addr(int which) const {
+  assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
+  return &byte_base()[which];
+}
+
+inline jboolean* typeArrayOopDesc::bool_at_addr(int which) const {
+  assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
+  return &bool_base()[which];
+}
+
+inline jchar* typeArrayOopDesc::char_at_addr(int which) const {
+  assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
+  return &char_base()[which];
+}
+
+inline jint* typeArrayOopDesc::int_at_addr(int which) const {
+  assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
+  return &int_base()[which];
+}
+
+inline jshort* typeArrayOopDesc::short_at_addr(int which) const {
+  assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
+  return &short_base()[which];
+}
+
+inline jushort* typeArrayOopDesc::ushort_at_addr(int which) const {  // for field descriptor arrays
+  assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
+  return (jushort*) &short_base()[which];
+}
+
+inline jlong* typeArrayOopDesc::long_at_addr(int which) const {
+  assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
+  return &long_base()[which];
+}
+
+inline jfloat* typeArrayOopDesc::float_at_addr(int which) const {
+  assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
+  return &float_base()[which];
+}
+
+inline jdouble* typeArrayOopDesc::double_at_addr(int which) const {
+  assert(is_within_bounds(which), "index %d out of bounds %d", which, length());
+  return &double_base()[which];
+}
+
+inline jbyte typeArrayOopDesc::byte_at(int which) const {
+  ptrdiff_t offset = element_offset<jbyte>(T_BYTE, which);
+  return HeapAccess<IN_HEAP_ARRAY>::load_at(as_oop(), offset);
+}
+inline void typeArrayOopDesc::byte_at_put(int which, jbyte contents) {
+  ptrdiff_t offset = element_offset<jbyte>(T_BYTE, which);
+  HeapAccess<IN_HEAP_ARRAY>::store_at(as_oop(), offset, contents);
+}
+
+inline jboolean typeArrayOopDesc::bool_at(int which) const {
+  ptrdiff_t offset = element_offset<jboolean>(T_BOOLEAN, which);
+  return HeapAccess<IN_HEAP_ARRAY>::load_at(as_oop(), offset);
+}
+inline void typeArrayOopDesc::bool_at_put(int which, jboolean contents) {
+  ptrdiff_t offset = element_offset<jboolean>(T_BOOLEAN, which);
+  HeapAccess<IN_HEAP_ARRAY>::store_at(as_oop(), offset, jboolean(contents & 1));
+}
typeArrayOopDesc::char_at(int which) const { + ptrdiff_t offset = element_offset(T_CHAR, which); + return HeapAccess::load_at(as_oop(), offset); +} +inline void typeArrayOopDesc::char_at_put(int which, jchar contents) { + ptrdiff_t offset = element_offset(T_CHAR, which); + HeapAccess::store_at(as_oop(), offset, contents); +} + +inline jint typeArrayOopDesc::int_at(int which) const { + ptrdiff_t offset = element_offset(T_INT, which); + return HeapAccess::load_at(as_oop(), offset); +} +inline void typeArrayOopDesc::int_at_put(int which, jint contents) { + ptrdiff_t offset = element_offset(T_INT, which); + HeapAccess::store_at(as_oop(), offset, contents); +} + +inline jshort typeArrayOopDesc::short_at(int which) const { + ptrdiff_t offset = element_offset(T_SHORT, which); + return HeapAccess::load_at(as_oop(), offset); +} +inline void typeArrayOopDesc::short_at_put(int which, jshort contents) { + ptrdiff_t offset = element_offset(T_SHORT, which); + HeapAccess::store_at(as_oop(), offset, contents); +} + +inline jushort typeArrayOopDesc::ushort_at(int which) const { + ptrdiff_t offset = element_offset(T_SHORT, which); + return HeapAccess::load_at(as_oop(), offset); +} +inline void typeArrayOopDesc::ushort_at_put(int which, jushort contents) { + ptrdiff_t offset = element_offset(T_SHORT, which); + HeapAccess::store_at(as_oop(), offset, contents); +} + +inline jlong typeArrayOopDesc::long_at(int which) const { + ptrdiff_t offset = element_offset(T_LONG, which); + return HeapAccess::load_at(as_oop(), offset); +} +inline void typeArrayOopDesc::long_at_put(int which, jlong contents) { + ptrdiff_t offset = element_offset(T_LONG, which); + HeapAccess::store_at(as_oop(), offset, contents); +} + +inline jfloat typeArrayOopDesc::float_at(int which) const { + ptrdiff_t offset = element_offset(T_FLOAT, which); + return HeapAccess::load_at(as_oop(), offset); +} +inline void typeArrayOopDesc::float_at_put(int which, jfloat contents) { + ptrdiff_t offset = element_offset(T_FLOAT, which); + HeapAccess::store_at(as_oop(), offset, contents); +} + +inline jdouble typeArrayOopDesc::double_at(int which) const { + ptrdiff_t offset = element_offset(T_DOUBLE, which); + return HeapAccess::load_at(as_oop(), offset); +} +inline void typeArrayOopDesc::double_at_put(int which, jdouble contents) { + ptrdiff_t offset = element_offset(T_DOUBLE, which); + HeapAccess::store_at(as_oop(), offset, contents); +} + +inline jbyte typeArrayOopDesc::byte_at_acquire(int which) const { + ptrdiff_t offset = element_offset(T_BYTE, which); + return HeapAccess::load_at(as_oop(), offset); +} +inline void typeArrayOopDesc::release_byte_at_put(int which, jbyte contents) { + ptrdiff_t offset = element_offset(T_BYTE, which); + HeapAccess::store_at(as_oop(), offset, contents); +} + +// Java thinks Symbol arrays are just arrays of either long or int, since +// there doesn't seem to be T_ADDRESS, so this is a bit of unfortunate +// casting +#ifdef _LP64 +inline Symbol* typeArrayOopDesc::symbol_at(int which) const { + ptrdiff_t offset = element_offset(T_LONG, which); + return (Symbol*)(jlong) HeapAccess::load_at(as_oop(), offset); +} +inline void typeArrayOopDesc::symbol_at_put(int which, Symbol* contents) { + ptrdiff_t offset = element_offset(T_LONG, which); + HeapAccess::store_at(as_oop(), offset, (jlong)contents); +} +#else +inline Symbol* typeArrayOopDesc::symbol_at(int which) const { + ptrdiff_t offset = element_offset(T_INT, which); + return (Symbol*)(jint) HeapAccess::load_at(as_oop(), offset); +} +inline void 
typeArrayOopDesc::symbol_at_put(int which, Symbol* contents) { + ptrdiff_t offset = element_offset(T_INT, which); + HeapAccess::store_at(as_oop(), offset, (jint)contents); +} +#endif // _LP64 + + #endif // SHARE_VM_OOPS_TYPEARRAYOOP_INLINE_HPP diff --git a/src/hotspot/share/opto/buildOopMap.cpp b/src/hotspot/share/opto/buildOopMap.cpp --- a/src/hotspot/share/opto/buildOopMap.cpp +++ b/src/hotspot/share/opto/buildOopMap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -618,7 +618,7 @@ // last block as his only undone child, we can move the OopFlow from the // pred to this block. Otherwise we have to grab a new OopFlow. OopFlow *flow = NULL; // Flag for finding optimized flow - Block *pred = (Block*)0xdeadbeef; + Block *pred = (Block*)((intptr_t)0xdeadbeef); // Scan this block's preds to find a done predecessor for (uint j = 1; j < b->num_preds(); j++) { Block* p = _cfg->get_block_for_node(b->pred(j)); diff --git a/src/hotspot/share/opto/callnode.cpp b/src/hotspot/share/opto/callnode.cpp --- a/src/hotspot/share/opto/callnode.cpp +++ b/src/hotspot/share/opto/callnode.cpp @@ -1470,8 +1470,10 @@ if (!allow_new_nodes) return NULL; // Create a cast which is control dependent on the initialization to // propagate the fact that the array length must be positive. + InitializeNode* init = initialization(); + assert(init != NULL, "initialization not found"); length = new CastIINode(length, narrow_length_type); - length->set_req(0, initialization()->proj_out_or_null(0)); + length->set_req(0, init->proj_out_or_null(0)); } } diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2487,8 +2487,8 @@ print_method(PHASE_FINAL_CODE); // He's dead, Jim. - _cfg = (PhaseCFG*)0xdeadbeef; - _regalloc = (PhaseChaitin*)0xdeadbeef; + _cfg = (PhaseCFG*)((intptr_t)0xdeadbeef); + _regalloc = (PhaseChaitin*)((intptr_t)0xdeadbeef); } diff --git a/src/hotspot/share/opto/gcm.cpp b/src/hotspot/share/opto/gcm.cpp --- a/src/hotspot/share/opto/gcm.cpp +++ b/src/hotspot/share/opto/gcm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1486,7 +1486,7 @@ } #endif // Dead. 
-  _node_latency = (GrowableArray<uint> *)0xdeadbeef;
+  _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
 }

 bool PhaseCFG::do_global_code_motion() {
diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp
--- a/src/hotspot/share/opto/graphKit.cpp
+++ b/src/hotspot/share/opto/graphKit.cpp
@@ -23,10 +23,13 @@
  */

 #include "precompiled.hpp"
+#include "ci/ciUtilities.hpp"
 #include "compiler/compileLog.hpp"
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/barrierSet.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shenandoah/brooksPointer.hpp"
@@ -1571,9 +1574,8 @@
     case BarrierSet::Shenandoah:
       shenandoah_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
       break;
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
-    case BarrierSet::ModRef:
+
+    case BarrierSet::CardTableModRef:
       break;

     default      :
@@ -1589,9 +1591,7 @@
     case BarrierSet::Shenandoah:
       return true; // Can move it if no safepoint

-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
-    case BarrierSet::ModRef:
+    case BarrierSet::CardTableModRef:
       return true; // There is no pre-barrier

     default      :
@@ -1615,12 +1615,10 @@
       g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
       break;

-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
+    case BarrierSet::CardTableModRef:
       write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
       break;

-    case BarrierSet::ModRef:
     case BarrierSet::Shenandoah:
       break;

@@ -1657,9 +1655,7 @@
                     T_OBJECT);
       }
       break;
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
-    case BarrierSet::ModRef:
+    case BarrierSet::CardTableModRef:
       break;

     default      :
       ShouldNotReachHere();
@@ -3875,6 +3871,13 @@
 //----------------------------- store barriers ----------------------------
 #define __ ideal.

+bool GraphKit::use_ReduceInitialCardMarks() {
+  BarrierSet *bs = Universe::heap()->barrier_set();
+  return bs->is_a(BarrierSet::CardTableModRef)
+         && barrier_set_cast<CardTableModRefBS>(bs)->can_elide_tlab_store_barriers()
+         && ReduceInitialCardMarks;
+}
+
 void GraphKit::sync_kit(IdealKit& ideal) {
   set_all_memory(__ merged_memory());
   set_i_o(__ i_o());
@@ -3888,11 +3891,9 @@

 Node* GraphKit::byte_map_base_node() {
   // Get base of card map
-  CardTableModRefBS* ct =
-    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
-  if (ct->byte_map_base != NULL) {
-    return makecon(TypeRawPtr::make((address)ct->byte_map_base));
+  jbyte* card_table_base = ci_card_table_address();
+  if (card_table_base != NULL) {
+    return makecon(TypeRawPtr::make((address)card_table_base));
   } else {
     return null();
   }
@@ -3922,7 +3923,7 @@
   if (use_ReduceInitialCardMarks()
       && obj == just_allocated_object(control())) {
     // We can skip marks on a freshly-allocated object in Eden.
-    // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
+    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
     // That routine informs GC to take appropriate compensating steps,
     // upon a slow-path allocation, so as to make this card-mark
     // elision safe.
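The post-barrier that byte_map_base_node() and the CardTable::card_shift changes above feed is, stripped of the IR plumbing, just a shift and a byte store. A minimal standalone C++ sketch of that logic (kCardShift and kDirtyCard are illustrative stand-ins for CardTable::card_shift and CardTable::dirty_card_val(); this is not the VM's own code):

    #include <cstdint>

    static const int     kCardShift = 9;  // 512-byte cards, as in HotSpot's card table
    static const uint8_t kDirtyCard = 0;

    // byte_map_base is pre-biased by the heap start so that shifting the raw
    // field address indexes it directly; the IR above mirrors this with a
    // URShiftX followed by an AddP onto byte_map_base_node().
    inline void post_barrier(volatile uint8_t* byte_map_base, const void* field_addr) {
      uintptr_t p = reinterpret_cast<uintptr_t>(field_addr);
      byte_map_base[p >> kCardShift] = kDirtyCard;  // dirty the covering card
    }

That pre-biasing is why the generated code needs no subtraction of the heap base before the card store.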
@@ -3944,7 +3945,7 @@ // Divide by card size assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef), "Only one we handle so far."); - Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) ); + Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) ); // Combine card table base and card offset Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset ); @@ -4480,7 +4481,7 @@ * as part of the allocation in the case the allocated object is not located * in the nursery, this would happen for humongous objects. This is similar to * how CMS is required to handle this case, see the comments for the method - * CollectedHeap::new_store_pre_barrier and OptoRuntime::new_store_pre_barrier. + * CardTableModRefBS::on_allocation_slowpath_exit and OptoRuntime::new_deferred_store_barrier. * A deferred card mark is required for these objects and handled in the above * mentioned methods. * @@ -4570,7 +4571,7 @@ if (use_ReduceInitialCardMarks() && obj == just_allocated_object(control())) { // We can skip marks on a freshly-allocated object in Eden. - // Keep this code in sync with new_store_pre_barrier() in runtime.cpp. + // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp. // That routine informs GC to take appropriate compensating steps, // upon a slow-path allocation, so as to make this card-mark // elision safe. @@ -4596,8 +4597,8 @@ Node* no_base = __ top(); float likely = PROB_LIKELY(0.999); float unlikely = PROB_UNLIKELY(0.999); - Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val()); - Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val()); + Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val()); + Node* dirty_card = __ ConI((jint)CardTable::dirty_card_val()); Node* zeroX = __ ConX(0); // Get the alias_index for raw card-mark memory @@ -4627,7 +4628,7 @@ Node* cast = __ CastPX(__ ctrl(), adr); // Divide pointer by card size - Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) ); + Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) ); // Combine card table base and card offset Node* card_adr = __ AddP(no_base, byte_map_base_node(), card_offset ); diff --git a/src/hotspot/share/opto/graphKit.hpp b/src/hotspot/share/opto/graphKit.hpp --- a/src/hotspot/share/opto/graphKit.hpp +++ b/src/hotspot/share/opto/graphKit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -756,10 +756,7 @@ // Returns the object (if any) which was created the moment before. Node* just_allocated_object(Node* current_control); - static bool use_ReduceInitialCardMarks() { - return (ReduceInitialCardMarks - && Universe::heap()->can_elide_tlab_store_barriers()); - } + static bool use_ReduceInitialCardMarks(); // Sync Ideal and Graph kits. void sync_kit(IdealKit& ideal); diff --git a/src/hotspot/share/opto/idealGraphPrinter.cpp b/src/hotspot/share/opto/idealGraphPrinter.cpp --- a/src/hotspot/share/opto/idealGraphPrinter.cpp +++ b/src/hotspot/share/opto/idealGraphPrinter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved. 
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -602,7 +602,7 @@
     }
 #endif

-    if (_chaitin && _chaitin != (PhaseChaitin *)0xdeadbeef) {
+    if (_chaitin && _chaitin != (PhaseChaitin *)((intptr_t)0xdeadbeef)) {
       buffer[0] = 0;
       _chaitin->dump_register(node, buffer);
       print_prop("reg", buffer);
diff --git a/src/hotspot/share/opto/ifnode.cpp b/src/hotspot/share/opto/ifnode.cpp
--- a/src/hotspot/share/opto/ifnode.cpp
+++ b/src/hotspot/share/opto/ifnode.cpp
@@ -899,7 +899,8 @@
       // Figure out which of the two tests sets the upper bound and which
       // sets the lower bound if any.
       Node* adjusted_lim = NULL;
-      if (hi_type->_lo > lo_type->_hi && hi_type->_hi == max_jint && lo_type->_lo == min_jint) {
+      if (lo_type != NULL && hi_type != NULL && hi_type->_lo > lo_type->_hi &&
+          hi_type->_hi == max_jint && lo_type->_lo == min_jint) {
         assert((dom_bool->_test.is_less() && !proj->_con) ||
                (dom_bool->_test.is_greater() && proj->_con), "incorrect test");
         // this test was canonicalized
@@ -939,7 +940,8 @@
             cond = BoolTest::lt;
           }
         }
-      } else if (lo_type->_lo > hi_type->_hi && lo_type->_hi == max_jint && hi_type->_lo == min_jint) {
+      } else if (lo_type != NULL && hi_type != NULL && lo_type->_lo > hi_type->_hi &&
+                 lo_type->_hi == max_jint && hi_type->_lo == min_jint) {
         // this_bool = <
         //   dom_bool = < (proj = True) or dom_bool = >= (proj = False)
diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp
--- a/src/hotspot/share/opto/library_call.cpp
+++ b/src/hotspot/share/opto/library_call.cpp
@@ -2492,7 +2492,7 @@
     offset = argument(2); // type: long
     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
     // to be plain byte offsets, which are also the same as those accepted
-    // by oopDesc::field_base.
+    // by oopDesc::field_addr.
     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
            "fieldOffset must be byte-scaled");
     // 32-bit machines ignore the high half!
@@ -2933,7 +2933,7 @@
   // Build field offset expression.
   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
   // to be plain byte offsets, which are also the same as those accepted
-  // by oopDesc::field_base.
+  // by oopDesc::field_addr.
   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
   // 32-bit machines ignore the high half of long offsets
   offset = ConvL2X(offset);
diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp
--- a/src/hotspot/share/opto/loopnode.cpp
+++ b/src/hotspot/share/opto/loopnode.cpp
@@ -2348,7 +2348,7 @@
   tty->print(" ");
   tty->print("Loop: N%d/N%d ",_head->_idx,_tail->_idx);
   if (_irreducible) tty->print(" IRREDUCIBLE");
-  Node* entry = _head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl);
+  Node* entry = _head->is_Loop() ? _head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl) : _head->in(LoopNode::EntryControl);
   Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
   if (predicate != NULL ) {
     tty->print(" limit_check");
@@ -2405,7 +2405,7 @@
   if (Verbose) {
     tty->print(" body={"); _body.dump_simple(); tty->print(" }");
   }
-  if (_head->as_Loop()->is_strip_mined()) {
+  if (_head->is_Loop() && _head->as_Loop()->is_strip_mined()) {
     tty->print(" strip_mined");
   }
   tty->cr();
diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp
--- a/src/hotspot/share/opto/loopopts.cpp
+++ b/src/hotspot/share/opto/loopopts.cpp
@@ -737,7 +737,9 @@
 Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
   // Store has to be first in the loop body
   IdealLoopTree *n_loop = get_loop(n_ctrl);
-  if (n->is_Store() && n_loop != _ltree_root && n_loop->is_loop() && n->in(0) != NULL) {
+  if (n->is_Store() && n_loop != _ltree_root &&
+      n_loop->is_loop() && n_loop->_head->is_Loop() &&
+      n->in(0) != NULL) {
     Node* address = n->in(MemNode::Address);
     Node* value = n->in(MemNode::ValueIn);
     Node* mem = n->in(MemNode::Memory);
diff --git a/src/hotspot/share/opto/output.cpp b/src/hotspot/share/opto/output.cpp
--- a/src/hotspot/share/opto/output.cpp
+++ b/src/hotspot/share/opto/output.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2575,7 +2575,7 @@
   }

   Node *kill = def; // Rename 'def' to more descriptive 'kill'
-  debug_only( def = (Node*)0xdeadbeef; )
+  debug_only( def = (Node*)((intptr_t)0xdeadbeef); )

   // After some number of kills there _may_ be a later def
   Node *later_def = NULL;
diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp
--- a/src/hotspot/share/opto/runtime.cpp
+++ b/src/hotspot/share/opto/runtime.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -195,23 +195,6 @@
 // We failed the fast-path allocation. Now we need to do a scavenge or GC
 // and try allocation again.

-void OptoRuntime::new_store_pre_barrier(JavaThread* thread) {
-  // After any safepoint, just before going back to compiled code,
-  // we inform the GC that we will be doing initializing writes to
-  // this object in the future without emitting card-marks, so
-  // GC may take any compensating steps.
-  // NOTE: Keep this code consistent with GraphKit::store_barrier.
-
-  oop new_obj = thread->vm_result();
-  if (new_obj == NULL) return;
-
-  assert(Universe::heap()->can_elide_tlab_store_barriers(),
-         "compiler must check this first");
-  // GC may decide to give back a safer copy of new_obj.
- new_obj = Universe::heap()->new_store_pre_barrier(thread, new_obj); - thread->set_vm_result(new_obj); -} - // object allocation JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* thread)) JRT_BLOCK; @@ -245,10 +228,8 @@ deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION); JRT_BLOCK_END; - if (GraphKit::use_ReduceInitialCardMarks()) { - // inform GC that we won't do card marks for initializing writes. - new_store_pre_barrier(thread); - } + // inform GC that we won't do card marks for initializing writes. + SharedRuntime::on_slowpath_allocation_exit(thread); JRT_END @@ -285,10 +266,8 @@ thread->set_vm_result(result); JRT_BLOCK_END; - if (GraphKit::use_ReduceInitialCardMarks()) { - // inform GC that we won't do card marks for initializing writes. - new_store_pre_barrier(thread); - } + // inform GC that we won't do card marks for initializing writes. + SharedRuntime::on_slowpath_allocation_exit(thread); JRT_END // array allocation without zeroing @@ -315,10 +294,9 @@ thread->set_vm_result(result); JRT_BLOCK_END; - if (GraphKit::use_ReduceInitialCardMarks()) { - // inform GC that we won't do card marks for initializing writes. - new_store_pre_barrier(thread); - } + + // inform GC that we won't do card marks for initializing writes. + SharedRuntime::on_slowpath_allocation_exit(thread); oop result = thread->vm_result(); if ((len > 0) && (result != NULL) && diff --git a/src/hotspot/share/opto/runtime.hpp b/src/hotspot/share/opto/runtime.hpp --- a/src/hotspot/share/opto/runtime.hpp +++ b/src/hotspot/share/opto/runtime.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -163,10 +163,6 @@ static void new_array_C(Klass* array_klass, int len, JavaThread *thread); static void new_array_nozero_C(Klass* array_klass, int len, JavaThread *thread); - // Post-slow-path-allocation, pre-initializing-stores step for - // implementing ReduceInitialCardMarks - static void new_store_pre_barrier(JavaThread* thread); - // Allocate storage for a multi-dimensional arrays // Note: needs to be fixed for arbitrary number of dimensions static void multianewarray2_C(Klass* klass, int len1, int len2, JavaThread *thread); diff --git a/src/hotspot/share/opto/split_if.cpp b/src/hotspot/share/opto/split_if.cpp --- a/src/hotspot/share/opto/split_if.cpp +++ b/src/hotspot/share/opto/split_if.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -294,7 +294,7 @@ Node *PhaseIdealLoop::spinup( Node *iff_dom, Node *new_false, Node *new_true, Node *use_blk, Node *def, small_cache *cache ) { if (use_blk->is_top()) // Handle dead uses return use_blk; - Node *prior_n = (Node*)0xdeadbeef; + Node *prior_n = (Node*)((intptr_t)0xdeadbeef); Node *n = use_blk; // Get path input assert( use_blk != iff_dom, "" ); // Here's the "spinup" the dominator tree loop. 
Do a cache-check @@ -341,7 +341,7 @@ } // Update cache everywhere - prior_n = (Node*)0xdeadbeef; // Reset IDOM walk + prior_n = (Node*)((intptr_t)0xdeadbeef); // Reset IDOM walk n = use_blk; // Get path input // Spin-up the idom tree again, basically doing path-compression. // Insert cache entries along the way, so that if we ever hit this diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp --- a/src/hotspot/share/prims/jni.cpp +++ b/src/hotspot/share/prims/jni.cpp @@ -44,6 +44,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/access.inline.hpp" +#include "oops/arrayOop.inline.hpp" #include "oops/instanceKlass.hpp" #include "oops/instanceOop.hpp" #include "oops/markOop.hpp" @@ -53,7 +54,7 @@ #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" #include "oops/typeArrayKlass.hpp" -#include "oops/typeArrayOop.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "prims/jniCheck.hpp" #include "prims/jniExport.hpp" #include "prims/jniFastGetField.hpp" @@ -68,6 +69,7 @@ #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jfieldIDWorkaround.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/reflection.hpp" #include "runtime/sharedRuntime.hpp" diff --git a/src/hotspot/share/prims/jniCheck.cpp b/src/hotspot/share/prims/jniCheck.cpp --- a/src/hotspot/share/prims/jniCheck.cpp +++ b/src/hotspot/share/prims/jniCheck.cpp @@ -38,6 +38,7 @@ #include "runtime/handles.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/jfieldIDWorkaround.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/thread.inline.hpp" // Complain every extra number of unplanned local refs diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp --- a/src/hotspot/share/prims/jvm.cpp +++ b/src/hotspot/share/prims/jvm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -61,6 +61,7 @@ #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jfieldIDWorkaround.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/os.inline.hpp" #include "runtime/perfData.hpp" @@ -166,9 +167,8 @@ } } else if (last_caller != NULL && last_caller->method_holder()->name() == - vmSymbols::java_lang_ClassLoader() && - (last_caller->name() == vmSymbols::loadClassInternal_name() || - last_caller->name() == vmSymbols::loadClass_name())) { + vmSymbols::java_lang_ClassLoader() && + last_caller->name() == vmSymbols::loadClass_name()) { found_it = true; } else if (!vfst.at_end()) { if (vfst.method()->is_native()) { @@ -435,6 +435,16 @@ extern volatile jint vm_created; +JVM_ENTRY_NO_ENV(void, JVM_BeforeHalt()) + JVMWrapper("JVM_BeforeHalt"); + EventShutdown event; + if (event.should_commit()) { + event.set_reason("Shutdown requested from Java"); + event.commit(); + } +JVM_END + + JVM_ENTRY_NO_ENV(void, JVM_Halt(jint code)) before_exit(thread); vm_exit(code); @@ -2661,23 +2671,19 @@ ATTRIBUTE_PRINTF(3, 0) int jio_vsnprintf(char *str, size_t count, const char *fmt, va_list args) { - // see bug 4399518, 4417214 + // Reject count values that are negative signed values converted to + // unsigned; see bug 4399518, 4417214 if ((intptr_t)count <= 0) return -1; - int result = vsnprintf(str, count, fmt, args); - // Note: on truncation vsnprintf(3) on Unix returns numbers of - // characters which would have been written had the buffer been large - // enough; on Windows, it returns -1. We handle both cases here and - // always return -1, and perform null termination. - if ((result > 0 && (size_t)result >= count) || result == -1) { - str[count - 1] = '\0'; + int result = os::vsnprintf(str, count, fmt, args); + if (result > 0 && (size_t)result >= count) { result = -1; } return result; } -ATTRIBUTE_PRINTF(3, 0) +ATTRIBUTE_PRINTF(3, 4) int jio_snprintf(char *str, size_t count, const char *fmt, ...) { va_list args; int len; @@ -2687,7 +2693,7 @@ return len; } -ATTRIBUTE_PRINTF(2,3) +ATTRIBUTE_PRINTF(2, 3) int jio_fprintf(FILE* f, const char *fmt, ...) { int len; va_list args; diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -58,6 +58,7 @@ #include "runtime/interfaceSupport.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jfieldIDWorkaround.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/osThread.hpp" #include "runtime/reflectionUtils.hpp" #include "runtime/signature.hpp" diff --git a/src/hotspot/share/prims/jvmtiEnvBase.cpp b/src/hotspot/share/prims/jvmtiEnvBase.cpp --- a/src/hotspot/share/prims/jvmtiEnvBase.cpp +++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp @@ -40,6 +40,7 @@ #include "runtime/deoptimization.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/jfieldIDWorkaround.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/objectMonitor.inline.hpp" #include "runtime/signature.hpp" @@ -501,6 +502,24 @@ } +// Handle management + +jobject JvmtiEnvBase::jni_reference(Handle hndl) { + return JNIHandles::make_local(hndl()); +} + +jobject JvmtiEnvBase::jni_reference(JavaThread *thread, Handle hndl) { + return JNIHandles::make_local(thread, hndl()); +} + +void JvmtiEnvBase::destroy_jni_reference(jobject jobj) { + JNIHandles::destroy_local(jobj); +} + +void JvmtiEnvBase::destroy_jni_reference(JavaThread *thread, jobject jobj) { + JNIHandles::destroy_local(jobj); // thread is unused. +} + // // Threads // diff --git a/src/hotspot/share/prims/jvmtiEnvBase.hpp b/src/hotspot/share/prims/jvmtiEnvBase.hpp --- a/src/hotspot/share/prims/jvmtiEnvBase.hpp +++ b/src/hotspot/share/prims/jvmtiEnvBase.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -214,29 +214,20 @@ unsigned char* jvmtiMalloc(jlong size); // don't use this - call allocate // method to create a local handle - jobject jni_reference(Handle hndl) { - return JNIHandles::make_local(hndl()); - } + jobject jni_reference(Handle hndl); // method to create a local handle. // This function allows caller to specify which // threads local handle table to use. - jobject jni_reference(JavaThread *thread, Handle hndl) { - return JNIHandles::make_local(thread, hndl()); - } + jobject jni_reference(JavaThread *thread, Handle hndl); // method to destroy a local handle - void destroy_jni_reference(jobject jobj) { - JNIHandles::destroy_local(jobj); - } + void destroy_jni_reference(jobject jobj); // method to destroy a local handle. // This function allows caller to specify which - // threads local handle table to use although currently it is - // not used. - void destroy_jni_reference(JavaThread *thread, jobject jobj) { - destroy_jni_reference(jobj); - } + // threads local handle table to use. + void destroy_jni_reference(JavaThread *thread, jobject jobj); jvmtiEnv* jvmti_external() { return &_jvmti_external; }; diff --git a/src/hotspot/share/prims/jvmtiExport.cpp b/src/hotspot/share/prims/jvmtiExport.cpp --- a/src/hotspot/share/prims/jvmtiExport.cpp +++ b/src/hotspot/share/prims/jvmtiExport.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -49,6 +49,7 @@ #include "runtime/handles.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/objectMonitor.inline.hpp" #include "runtime/os.inline.hpp" diff --git a/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp b/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp --- a/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp +++ b/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp @@ -27,6 +27,7 @@ #include "gc/shared/collectedHeap.hpp" #include "memory/universe.inline.hpp" #include "prims/jvmtiGetLoadedClasses.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/thread.hpp" #include "utilities/stack.inline.hpp" #if INCLUDE_ALL_GCS diff --git a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp --- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp +++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,6 +47,7 @@ #include "prims/resolvedMethodTable.hpp" #include "prims/methodComparator.hpp" #include "runtime/deoptimization.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/relocator.hpp" #include "utilities/bitMap.inline.hpp" #include "utilities/events.hpp" diff --git a/src/hotspot/share/prims/jvmtiTagMap.cpp b/src/hotspot/share/prims/jvmtiTagMap.cpp --- a/src/hotspot/share/prims/jvmtiTagMap.cpp +++ b/src/hotspot/share/prims/jvmtiTagMap.cpp @@ -31,10 +31,12 @@ #include "jvmtifiles/jvmtiEnv.hpp" #include "memory/resourceArea.hpp" #include "oops/access.inline.hpp" +#include "oops/arrayOop.inline.hpp" #include "oops/instanceMirrorKlass.hpp" #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "prims/jvmtiEventController.hpp" #include "prims/jvmtiEventController.inline.hpp" #include "prims/jvmtiExport.hpp" @@ -42,7 +44,7 @@ #include "prims/jvmtiTagMap.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/javaCalls.hpp" -#include "runtime/jniHandles.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/mutex.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/reflectionUtils.hpp" diff --git a/src/hotspot/share/prims/methodHandles.cpp b/src/hotspot/share/prims/methodHandles.cpp --- a/src/hotspot/share/prims/methodHandles.cpp +++ b/src/hotspot/share/prims/methodHandles.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -36,9 +36,11 @@ #include "memory/resourceArea.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "prims/methodHandles.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/timerTrace.hpp" #include "runtime/reflection.hpp" #include "runtime/signature.hpp" diff --git a/src/hotspot/share/prims/unsafe.cpp b/src/hotspot/share/prims/unsafe.cpp --- a/src/hotspot/share/prims/unsafe.cpp +++ b/src/hotspot/share/prims/unsafe.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,10 +33,12 @@ #include "oops/fieldStreams.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "prims/unsafe.hpp" #include "runtime/atomic.hpp" #include "runtime/globals.hpp" #include "runtime/interfaceSupport.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/reflection.hpp" #include "runtime/thread.hpp" @@ -108,8 +110,8 @@ assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset"); if (byte_offset == (jint)byte_offset) { void* ptr_plus_disp = (address)p + byte_offset; - assert((void*)p->obj_field_addr((jint)byte_offset) == ptr_plus_disp, - "raw [ptr+disp] must be consistent with oop::field_base"); + assert(p->field_addr_raw((jint)byte_offset) == ptr_plus_disp, + "raw [ptr+disp] must be consistent with oop::field_addr_raw"); } jlong p_size = HeapWordSize * (jlong)(p->size()); assert(byte_offset < p_size, "Unsafe access: offset " INT64_FORMAT " > object's size " INT64_FORMAT, (int64_t)byte_offset, (int64_t)p_size); diff --git a/src/hotspot/share/prims/wbtestmethods/parserTests.cpp b/src/hotspot/share/prims/wbtestmethods/parserTests.cpp --- a/src/hotspot/share/prims/wbtestmethods/parserTests.cpp +++ b/src/hotspot/share/prims/wbtestmethods/parserTests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
 #include "prims/whitebox.hpp"
 #include "prims/wbtestmethods/parserTests.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/jniHandles.inline.hpp"
 #include "services/diagnosticArgument.hpp"
 #include "services/diagnosticFramework.hpp"
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -44,6 +44,7 @@
 #include "oops/objArrayKlass.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "oops/typeArrayOop.inline.hpp"
 #include "prims/wbtestmethods/parserTests.hpp"
 #include "prims/whitebox.hpp"
 #include "runtime/arguments.hpp"
@@ -52,6 +53,7 @@
 #include "runtime/handshake.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/sweeper.hpp"
 #include "runtime/thread.hpp"
@@ -59,6 +61,7 @@
 #include "runtime/vm_version.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/elfFile.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_CDS
@@ -79,7 +82,6 @@
 #endif // INCLUDE_NMT

 #ifdef LINUX
-#include "utilities/elfFile.hpp"
 #include "osContainer_linux.hpp"
 #endif

@@ -533,7 +535,7 @@
   size_t total_memory() { return _total_memory; }
   size_t total_memory_to_free() { return _total_memory_to_free; }

-  bool doHeapRegion(HeapRegion* r) {
+  bool do_heap_region(HeapRegion* r) {
     if (r->is_old()) {
       size_t prev_live = r->marked_bytes();
       size_t live = r->live_bytes();
@@ -606,6 +608,13 @@
   return addr;
 WB_END

+WB_ENTRY(jlong, WB_NMTAttemptReserveMemoryAt(JNIEnv* env, jobject o, jlong addr, jlong size))
+  addr = (jlong)(uintptr_t)os::attempt_reserve_memory_at((size_t)size, (char*)(uintptr_t)addr);
+  MemTracker::record_virtual_memory_type((address)addr, mtTest);
+
+  return addr;
+WB_END
+
 WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
   os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem);
   MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest);
@@ -1216,12 +1225,12 @@
 WB_END

 WB_ENTRY(void, WB_FullGC(JNIEnv* env, jobject o))
-  Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(true);
+  Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(true);
   Universe::heap()->collect(GCCause::_wb_full_gc);
 #if INCLUDE_ALL_GCS
   if (UseG1GC) {
     // Needs to be cleared explicitly for G1
-    Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(false);
+    Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(false);
   }
 #endif // INCLUDE_ALL_GCS
 WB_END
@@ -1744,6 +1753,10 @@
 #endif // INCLUDE_CDS
 WB_END

+WB_ENTRY(jboolean, WB_IsJavaHeapArchiveSupported(JNIEnv* env))
+  return MetaspaceShared::is_heap_object_archiving_allowed();
+WB_END
+
 #if INCLUDE_CDS

@@ -1916,6 +1929,13 @@
   os::print_os_info(tty);
 WB_END

+// Elf decoder
+WB_ENTRY(void, WB_DisableElfSectionCache(JNIEnv* env))
+#if !defined(_WINDOWS) && !defined(__APPLE__) && !defined(_AIX)
+  ElfFile::_do_not_cache_elf_section = true;
+#endif
+WB_END
+
 #define CC (char*)

@@ -1967,6 +1987,7 @@
   {CC"NMTMallocWithPseudoStack", CC"(JI)J", (void*)&WB_NMTMallocWithPseudoStack},
   {CC"NMTFree", CC"(J)V", (void*)&WB_NMTFree },
   {CC"NMTReserveMemory", CC"(J)J", (void*)&WB_NMTReserveMemory },
+  {CC"NMTAttemptReserveMemoryAt", CC"(JJ)J", (void*)&WB_NMTAttemptReserveMemoryAt },
   {CC"NMTCommitMemory", CC"(JJ)V", (void*)&WB_NMTCommitMemory },
   {CC"NMTUncommitMemory", CC"(JJ)V", (void*)&WB_NMTUncommitMemory },
   {CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
@@ -2113,6 +2134,8 @@
   {CC"getResolvedReferences", CC"(Ljava/lang/Class;)Ljava/lang/Object;", (void*)&WB_GetResolvedReferences},
   {CC"areOpenArchiveHeapObjectsMapped", CC"()Z", (void*)&WB_AreOpenArchiveHeapObjectsMapped},
   {CC"isCDSIncludedInVmBuild", CC"()Z", (void*)&WB_IsCDSIncludedInVmBuild },
+  {CC"isJavaHeapArchiveSupported", CC"()Z", (void*)&WB_IsJavaHeapArchiveSupported },
+
   {CC"clearInlineCaches0", CC"(Z)V", (void*)&WB_ClearInlineCaches },
   {CC"handshakeWalkStack", CC"(Ljava/lang/Thread;Z)I", (void*)&WB_HandshakeWalkStack },
   {CC"addCompilerDirective", CC"(Ljava/lang/String;)I",
@@ -2130,6 +2153,7 @@
   (void*)&WB_CheckLibSpecifiesNoexecstack},
   {CC"isContainerized", CC"()Z", (void*)&WB_IsContainerized },
   {CC"printOsInfo", CC"()V", (void*)&WB_PrintOsInfo },
+  {CC"disableElfSectionCache", CC"()V", (void*)&WB_DisableElfSectionCache },
 };
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -50,6 +50,7 @@
 #include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/vm_version.hpp"
 #include "services/management.hpp"
@@ -509,20 +510,19 @@
   { "MinRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
   { "InitialRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
   { "UseMembar", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
-  { "FastTLABRefill", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
-  { "SafepointSpinBeforeYield", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
-  { "DeferThrSuspendLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
-  { "DeferPollingPageLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "IgnoreUnverifiableClassesDuringDump", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
   { "CheckEndorsedAndExtDirs", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
   { "CompilerThreadHintNoPreempt", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
   { "VMThreadHintNoPreempt", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
+  { "PrintSafepointStatistics", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
+  { "PrintSafepointStatisticsTimeout", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
+  { "PrintSafepointStatisticsCount",JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },

   // --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
   { "DefaultMaxRAMFraction", JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
   { "CreateMinidumpOnCrash", JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::undefined() },
-  { "MustCallLoadClassInternal", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
-  { "UnsyncloadClass", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
+  { "MustCallLoadClassInternal", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "UnsyncloadClass", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },

   // -------------- Obsolete Flags - sorted by expired_in --------------
   { "ConvertSleepToYield", JDK_Version::jdk(9), JDK_Version::jdk(10), JDK_Version::jdk(11) },
@@ -531,12 +531,18 @@
   { "CheckAssertionStatusDirectives",JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "PrintMallocFree", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "PrintMalloc", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "ShowSafepointMsgs", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "FastTLABRefill", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "SafepointSpinBeforeYield", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "DeferThrSuspendLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "DeferPollingPageLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "PermSize", JDK_Version::undefined(), JDK_Version::jdk(8), JDK_Version::undefined() },
   { "MaxPermSize", JDK_Version::undefined(), JDK_Version::jdk(8), JDK_Version::undefined() },
   { "SharedReadWriteSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
   { "SharedReadOnlySize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
   { "SharedMiscDataSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
   { "SharedMiscCodeSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
+  { "UseUTCFileTimestamp", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },

 #ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS
   { "dep > obs", JDK_Version::jdk(9), JDK_Version::jdk(8), JDK_Version::undefined() },
@@ -1825,6 +1831,13 @@
   }
 #endif

+#if defined(IA32)
+  // Only server compiler can optimize safepoints well enough.
+  if (!is_server_compilation_mode_vm()) {
+    FLAG_SET_ERGO_IF_DEFAULT(bool, ThreadLocalHandshakes, false);
+  }
+#endif
+
   set_conservative_max_heap_alignment();

 #ifndef ZERO
@@ -2943,9 +2956,7 @@
   if (FLAG_SET_CMDLINE(bool, BackgroundCompilation, false) != Flag::SUCCESS) {
     return JNI_EINVAL;
   }
-  if (FLAG_SET_CMDLINE(intx, DeferThrSuspendLoopCount, 1) != Flag::SUCCESS) {
-    return JNI_EINVAL;
-  }
+  SafepointSynchronize::set_defer_thr_suspend_loop_count();
   if (FLAG_SET_CMDLINE(bool, UseTLAB, false) != Flag::SUCCESS) {
     return JNI_EINVAL;
   }
@@ -3089,7 +3100,8 @@
   } else if (match_option(option, "-Xlog", &tail)) {
     bool ret = false;
     if (strcmp(tail, ":help") == 0) {
-      LogConfiguration::print_command_line_help(defaultStream::output_stream());
+      fileStream stream(defaultStream::output_stream());
+      LogConfiguration::print_command_line_help(&stream);
       vm_exit(0);
     } else if (strcmp(tail, ":disable") == 0) {
       LogConfiguration::disable_logging();
@@ -3102,7 +3114,7 @@
     }
     if (ret == false) {
       jio_fprintf(defaultStream::error_stream(),
-                  "Invalid -Xlog option '-Xlog%s'\n",
+                  "Invalid -Xlog option '-Xlog%s', see error log for details.\n",
                   tail);
       return JNI_EINVAL;
     }
diff --git a/src/hotspot/share/runtime/atomic.hpp b/src/hotspot/share/runtime/atomic.hpp
--- a/src/hotspot/share/runtime/atomic.hpp
+++ b/src/hotspot/share/runtime/atomic.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -131,6 +131,7 @@ cmpxchg_memory_order order = memory_order_conservative); private: +WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private // Test whether From is implicitly convertible to To. // From and To must be pointer types. // Note: Provides the limited subset of C++11 std::is_convertible diff --git a/src/hotspot/share/runtime/commandLineFlagConstraintsGC.cpp b/src/hotspot/share/runtime/commandLineFlagConstraintsGC.cpp --- a/src/hotspot/share/runtime/commandLineFlagConstraintsGC.cpp +++ b/src/hotspot/share/runtime/commandLineFlagConstraintsGC.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -376,8 +376,8 @@ if (UseConcMarkSweepGC) { // ParGCCardsPerStrideChunk should be compared with card table size. size_t heap_size = Universe::heap()->reserved_region().word_size(); - CardTableModRefBS* bs = (CardTableModRefBS*)GenCollectedHeap::heap()->rem_set()->bs(); - size_t card_table_size = bs->cards_required(heap_size) - 1; // Valid card table size + CardTableRS* ct = GenCollectedHeap::heap()->rem_set(); + size_t card_table_size = ct->cards_required(heap_size) - 1; // Valid card table size if ((size_t)value > card_table_size) { CommandLineError::print(verbose, @@ -388,7 +388,7 @@ } // ParGCCardsPerStrideChunk is used with n_strides(ParallelGCThreads*ParGCStridesPerThread) - // from CardTableModRefBSForCTRS::process_stride(). Note that ParGCStridesPerThread is already checked + // from CardTableRS::process_stride(). Note that ParGCStridesPerThread is already checked // not to make an overflow with ParallelGCThreads from its constraint function. uintx n_strides = ParallelGCThreads * ParGCStridesPerThread; uintx ergo_max = max_uintx / n_strides; @@ -470,9 +470,9 @@ #if INCLUDE_ALL_GCS if (status == Flag::SUCCESS && UseConcMarkSweepGC) { // CMSParRemarkTask::do_dirty_card_rescan_tasks requires CompactibleFreeListSpace::rescan_task_size() - // to be aligned to CardTableModRefBS::card_size * BitsPerWord. + // to be aligned to CardTable::card_size * BitsPerWord. // Note that rescan_task_size() will be aligned if CMSRescanMultiple is a multiple of 'HeapWordSize' - // because rescan_task_size() is CardTableModRefBS::card_size / HeapWordSize * BitsPerWord. + // because rescan_task_size() is CardTable::card_size / HeapWordSize * BitsPerWord. 
if (value % HeapWordSize != 0) { CommandLineError::print(verbose, "CMSRescanMultiple (" SIZE_FORMAT ") must be " diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp --- a/src/hotspot/share/runtime/deoptimization.cpp +++ b/src/hotspot/share/runtime/deoptimization.cpp @@ -40,6 +40,7 @@ #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/fieldStreams.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "oops/verifyOopClosure.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/biasedLocking.hpp" @@ -603,7 +604,7 @@ // Return BasicType of value being returned JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)) - // We are already active int he special DeoptResourceMark any ResourceObj's we + // We are already active in the special DeoptResourceMark any ResourceObj's we // allocate will be freed at the end of the routine. // It is actually ok to allocate handles in a leaf method. It causes no safepoints, @@ -680,55 +681,41 @@ // at an uncommon trap for an invoke (where the compiler // generates debug info before the invoke has executed) Bytecodes::Code cur_code = str.next(); - if (cur_code == Bytecodes::_invokevirtual || - cur_code == Bytecodes::_invokespecial || - cur_code == Bytecodes::_invokestatic || - cur_code == Bytecodes::_invokeinterface || - cur_code == Bytecodes::_invokedynamic) { + if (Bytecodes::is_invoke(cur_code)) { Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci()); - Symbol* signature = invoke.signature(); - ArgumentSizeComputer asc(signature); - cur_invoke_parameter_size = asc.size(); - if (invoke.has_receiver()) { - // Add in receiver - ++cur_invoke_parameter_size; - } + cur_invoke_parameter_size = invoke.size_of_parameters(); if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) { callee_size_of_parameters++; } } if (str.bci() < max_bci) { - Bytecodes::Code bc = str.next(); - if (bc >= 0) { + Bytecodes::Code next_code = str.next(); + if (next_code >= 0) { // The interpreter oop map generator reports results before // the current bytecode has executed except in the case of // calls. 
It seems to be hard to tell whether the compiler // has emitted debug information matching the "state before" // a given bytecode or the state after, so we try both - switch (cur_code) { - case Bytecodes::_invokevirtual: - case Bytecodes::_invokespecial: - case Bytecodes::_invokestatic: - case Bytecodes::_invokeinterface: - case Bytecodes::_invokedynamic: - case Bytecodes::_athrow: - break; - default: { + if (!Bytecodes::is_invoke(cur_code) && cur_code != Bytecodes::_athrow) { + // Get expression stack size for the next bytecode + if (Bytecodes::is_invoke(next_code)) { + Bytecode_invoke invoke(mh, str.bci()); + next_mask_expression_stack_size = invoke.size_of_parameters(); + } else { InterpreterOopMap next_mask; OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask); next_mask_expression_stack_size = next_mask.expression_stack_size(); - // Need to subtract off the size of the result type of - // the bytecode because this is not described in the - // debug info but returned to the interpreter in the TOS - // caching register - BasicType bytecode_result_type = Bytecodes::result_type(cur_code); - if (bytecode_result_type != T_ILLEGAL) { - top_frame_expression_stack_adjustment = type2size[bytecode_result_type]; - } - assert(top_frame_expression_stack_adjustment >= 0, ""); - try_next_mask = true; - break; } + // Need to subtract off the size of the result type of + // the bytecode because this is not described in the + // debug info but returned to the interpreter in the TOS + // caching register + BasicType bytecode_result_type = Bytecodes::result_type(cur_code); + if (bytecode_result_type != T_ILLEGAL) { + top_frame_expression_stack_adjustment = type2size[bytecode_result_type]; + } + assert(top_frame_expression_stack_adjustment >= 0, "stack adjustment must be positive"); + try_next_mask = true; } } } diff --git a/src/hotspot/share/runtime/fieldDescriptor.cpp b/src/hotspot/share/runtime/fieldDescriptor.cpp --- a/src/hotspot/share/runtime/fieldDescriptor.cpp +++ b/src/hotspot/share/runtime/fieldDescriptor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -201,6 +201,12 @@ } // Print a hint as to the underlying integer representation. This can be wrong for // pointers on an LP64 machine +#ifdef _LP64 + if ((ft == T_OBJECT || ft == T_ARRAY) && UseCompressedOops) { + st->print(" (%x)", obj->int_field(offset())); + } + else // <- intended +#endif if (ft == T_LONG || ft == T_DOUBLE LP64_ONLY(|| !is_java_primitive(ft)) ) { st->print(" (%x %x)", obj->int_field(offset()), obj->int_field(offset()+sizeof(jint))); } else if (as_int < 0 || as_int > 9) { diff --git a/src/hotspot/share/runtime/globals.cpp b/src/hotspot/share/runtime/globals.cpp --- a/src/hotspot/share/runtime/globals.cpp +++ b/src/hotspot/share/runtime/globals.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -38,6 +38,7 @@ #include "utilities/defaultStream.hpp" #include "utilities/macros.hpp" #include "utilities/ostream.hpp" +#include "utilities/stringUtils.hpp" #if INCLUDE_ALL_GCS #include "gc/g1/g1_globals.hpp" #include "gc/shenandoah/shenandoah_globals.hpp" @@ -893,25 +894,6 @@ return _name_len; } -// Compute string similarity based on Dice's coefficient -static float str_similar(const char* str1, const char* str2, size_t len2) { - int len1 = (int) strlen(str1); - int total = len1 + (int) len2; - - int hit = 0; - - for (int i = 0; i < len1 -1; ++i) { - for (int j = 0; j < (int) len2 -1; ++j) { - if ((str1[i] == str2[j]) && (str1[i+1] == str2[j+1])) { - ++hit; - break; - } - } - } - - return 2.0f * (float) hit / (float) total; -} - Flag* Flag::fuzzy_match(const char* name, size_t length, bool allow_locked) { float VMOptionsFuzzyMatchSimilarity = 0.7f; Flag* match = NULL; @@ -919,7 +901,7 @@ float max_score = -1; for (Flag* current = &flagTable[0]; current->_name != NULL; current++) { - score = str_similar(current->_name, name, length); + score = StringUtils::similarity(current->_name, strlen(current->_name), name, length); if (score > max_score) { max_score = score; match = current; diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -842,9 +842,6 @@ product(bool, FailOverToOldVerifier, true, \ "Fail over to old verifier when split verifier fails") \ \ - develop(bool, ShowSafepointMsgs, false, \ - "Show message about safepoint synchronization") \ - \ product(bool, SafepointTimeout, false, \ "Time out and warn or fail after SafepointTimeoutDelay " \ "milliseconds if failed to reach safepoint") \ @@ -1142,11 +1139,6 @@ diagnostic(bool, DynamicallyResizeSystemDictionaries, true, \ "Dynamically resize system dictionaries as needed") \ \ - diagnostic(bool, UnsyncloadClass, false, \ - "Unstable: VM calls loadClass unsynchronized. 
Custom " \ - "class loader must call VM synchronized for findClass " \ - "and defineClass.") \ - \ product(bool, AlwaysLockClassLoader, false, \ "Require the VM to acquire the class loader lock before calling " \ "loadClass() even for class loaders registering " \ @@ -1156,9 +1148,6 @@ "Allow parallel defineClass requests for class loaders " \ "registering as parallel capable") \ \ - product(bool, MustCallLoadClassInternal, false, \ - "Call loadClassInternal() rather than loadClass()") \ - \ product_pd(bool, DontYieldALot, \ "Throw away obvious excess yield calls") \ \ @@ -1737,13 +1726,13 @@ "enough work per iteration") \ range(0, max_intx) \ \ - /* 4096 = CardTableModRefBS::card_size_in_words * BitsPerWord */ \ + /* 4096 = CardTable::card_size_in_words * BitsPerWord */ \ product(size_t, CMSRescanMultiple, 32, \ "Size (in cards) of CMS parallel rescan task") \ range(1, SIZE_MAX / 4096) \ constraint(CMSRescanMultipleConstraintFunc,AfterMemoryInit) \ \ - /* 4096 = CardTableModRefBS::card_size_in_words * BitsPerWord */ \ + /* 4096 = CardTable::card_size_in_words * BitsPerWord */ \ product(size_t, CMSConcMarkMultiple, 32, \ "Size (in cards) of CMS concurrent MT marking task") \ range(1, SIZE_MAX / 4096) \ @@ -2015,9 +2004,6 @@ product(bool, ZeroTLAB, false, \ "Zero out the newly created TLAB") \ \ - product(bool, FastTLABRefill, false, \ - "(Deprecated) Use fast TLAB refill code") \ - \ product(bool, TLABStats, true, \ "Provide more detailed and expensive TLAB statistics.") \ \ @@ -2461,15 +2447,15 @@ "ImplicitNullChecks don't work (PPC64).") \ \ product(bool, PrintSafepointStatistics, false, \ - "Print statistics about safepoint synchronization") \ + "(Deprecated) Print statistics about safepoint synchronization") \ \ product(intx, PrintSafepointStatisticsCount, 300, \ - "Total number of safepoint statistics collected " \ + "(Deprecated) Total number of safepoint statistics collected " \ "before printing them out") \ range(1, max_intx) \ \ product(intx, PrintSafepointStatisticsTimeout, -1, \ - "Print safepoint statistics only when safepoint takes " \ + "(Deprecated) Print safepoint statistics only when safepoint takes " \ "more than PrintSafepointSatisticsTimeout in millis") \ LP64_ONLY(range(-1, max_intx/MICROUNITS)) \ NOT_LP64(range(-1, max_intx)) \ @@ -3273,21 +3259,6 @@ develop(uintx, GCWorkerDelayMillis, 0, \ "Delay in scheduling GC workers (in milliseconds)") \ \ - product(intx, DeferThrSuspendLoopCount, 4000, \ - "(Unstable, Deprecated) " \ - "Number of times to iterate in safepoint loop " \ - "before blocking VM threads ") \ - range(-1, max_jint-1) \ - \ - product(intx, DeferPollingPageLoopCount, -1, \ - "(Unsafe,Unstable,Deprecated) " \ - "Number of iterations in safepoint loop " \ - "before changing safepoint polling page to RO ") \ - range(-1, max_jint-1) \ - \ - product(intx, SafepointSpinBeforeYield, 2000, "(Unstable, Deprecated)") \ - range(0, max_intx) \ - \ product(bool, PSChunkLargeArrays, true, \ "Process large arrays in chunks") \ \ diff --git a/src/hotspot/share/runtime/handles.cpp b/src/hotspot/share/runtime/handles.cpp --- a/src/hotspot/share/runtime/handles.cpp +++ b/src/hotspot/share/runtime/handles.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -97,10 +97,6 @@ // during GC phase 3, a handle may be a forward pointer that // is not yet valid, so loosen the assertion while (bottom < top) { - // This test can be moved up but for now check every oop. - - assert(oopDesc::is_oop(*bottom, true), "handle should point to oop"); - f->do_oop(bottom++); } return handles_visited; } diff --git a/src/hotspot/share/runtime/javaCalls.cpp b/src/hotspot/share/runtime/javaCalls.cpp --- a/src/hotspot/share/runtime/javaCalls.cpp +++ b/src/hotspot/share/runtime/javaCalls.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.inline.hpp" #include "runtime/signature.hpp" diff --git a/src/hotspot/share/runtime/jniHandles.cpp b/src/hotspot/share/runtime/jniHandles.cpp --- a/src/hotspot/share/runtime/jniHandles.cpp +++ b/src/hotspot/share/runtime/jniHandles.cpp @@ -27,7 +27,7 @@ #include "logging/log.hpp" #include "memory/iterator.hpp" #include "oops/oop.inline.hpp" -#include "runtime/jniHandles.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/thread.inline.hpp" #include "trace/traceMacros.hpp" @@ -135,6 +135,18 @@ return res; } +// Resolve some erroneous cases to NULL, rather than treating them as +// possibly unchecked errors. In particular, deleted handles are +// treated as NULL (though a deleted and later reallocated handle +// isn't detected). +oop JNIHandles::resolve_external_guard(jobject handle) { + oop result = NULL; + if (handle != NULL) { + result = resolve_impl<true /* external_guard */ >(handle); + } + return result; +} + oop JNIHandles::resolve_jweak(jweak handle) { assert(handle != NULL, "precondition"); assert(is_jweak(handle), "precondition"); diff --git a/src/hotspot/share/runtime/jniHandles.hpp b/src/hotspot/share/runtime/jniHandles.hpp --- a/src/hotspot/share/runtime/jniHandles.hpp +++ b/src/hotspot/share/runtime/jniHandles.hpp @@ -67,10 +67,10 @@ // Resolve handle into oop inline static oop resolve(jobject handle); - // Resolve externally provided handle into oop with some guards - inline static oop resolve_external_guard(jobject handle); // Resolve handle into oop, result guaranteed not to be null inline static oop resolve_non_null(jobject handle); + // Resolve externally provided handle into oop with some guards + static oop resolve_external_guard(jobject handle); // Local handles static jobject make_local(oop obj); @@ -198,72 +198,4 @@ #endif }; -inline bool JNIHandles::is_jweak(jobject handle) { - STATIC_ASSERT(weak_tag_size == 1); - STATIC_ASSERT(weak_tag_value == 1); - return (reinterpret_cast<uintptr_t>(handle) & weak_tag_mask) != 0; -} - -inline oop& JNIHandles::jobject_ref(jobject handle) { - assert(!is_jweak(handle), "precondition"); - return *reinterpret_cast<oop*>(handle); -} - -inline oop& JNIHandles::jweak_ref(jobject handle) { - assert(is_jweak(handle), "precondition"); - char* ptr = reinterpret_cast<char*>(handle) - weak_tag_value; - return *reinterpret_cast<oop*>(ptr); -} - -// external_guard is true if called from resolve_external_guard.
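// Aside (illustration only, not part of the patch): the handle accessors
// being moved to jniHandles.inline.hpp rely on low-bit pointer tagging. A
// jweak is an ordinary handle with weak_tag_value (1) OR'ed into the
// pointer, so classifying a handle and recovering the real slot address is
// plain integer arithmetic. The helper names below are hypothetical:
//
//   bool is_weak_tagged(jobject h) {
//     return (reinterpret_cast<uintptr_t>(h) & 1) != 0;  // low bit = weak tag
//   }
//   oop* weak_slot(jobject h) {
//     // subtract the tag to get the address of the underlying oop slot
//     return reinterpret_cast<oop*>(reinterpret_cast<char*>(h) - 1);
//   }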
-template<bool external_guard> -inline oop JNIHandles::resolve_impl(jobject handle) { - assert(handle != NULL, "precondition"); - assert(!current_thread_in_native(), "must not be in native"); - oop result; - if (is_jweak(handle)) { // Unlikely - result = resolve_jweak(handle); - } else { - result = jobject_ref(handle); - // Construction of jobjects canonicalize a null value into a null - // jobject, so for non-jweak the pointee should never be null. - assert(external_guard || result != NULL, "Invalid JNI handle"); - } - return result; -} - -inline oop JNIHandles::resolve(jobject handle) { - oop result = NULL; - if (handle != NULL) { - result = resolve_impl<false /* external_guard */ >(handle); - } - return result; -} - -// Resolve some erroneous cases to NULL, rather than treating them as -// possibly unchecked errors. In particular, deleted handles are -// treated as NULL (though a deleted and later reallocated handle -// isn't detected). -inline oop JNIHandles::resolve_external_guard(jobject handle) { - oop result = NULL; - if (handle != NULL) { - result = resolve_impl<true /* external_guard */ >(handle); - } - return result; -} - -inline oop JNIHandles::resolve_non_null(jobject handle) { - assert(handle != NULL, "JNI handle should not be null"); - oop result = resolve_impl<false /* external_guard */ >(handle); - assert(result != NULL, "NULL read from jni handle"); - return result; -} - -inline void JNIHandles::destroy_local(jobject handle) { - if (handle != NULL) { - assert(!is_jweak(handle), "Invalid JNI local handle"); - jobject_ref(handle) = NULL; - } -} - #endif // SHARE_VM_RUNTIME_JNIHANDLES_HPP diff --git a/src/hotspot/share/runtime/jniHandles.inline.hpp b/src/hotspot/share/runtime/jniHandles.inline.hpp new file mode 100644 --- /dev/null +++ b/src/hotspot/share/runtime/jniHandles.inline.hpp @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ * + */ + +#ifndef SHARE_RUNTIME_JNIHANDLES_INLINE_HPP +#define SHARE_RUNTIME_JNIHANDLES_INLINE_HPP + +#include "oops/oop.hpp" +#include "runtime/jniHandles.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + +inline bool JNIHandles::is_jweak(jobject handle) { + STATIC_ASSERT(weak_tag_size == 1); + STATIC_ASSERT(weak_tag_value == 1); + return (reinterpret_cast<uintptr_t>(handle) & weak_tag_mask) != 0; +} + +inline oop& JNIHandles::jobject_ref(jobject handle) { + assert(!is_jweak(handle), "precondition"); + return *reinterpret_cast<oop*>(handle); +} + +inline oop& JNIHandles::jweak_ref(jobject handle) { + assert(is_jweak(handle), "precondition"); + char* ptr = reinterpret_cast<char*>(handle) - weak_tag_value; + return *reinterpret_cast<oop*>(ptr); +} + +// external_guard is true if called from resolve_external_guard. +template<bool external_guard> +inline oop JNIHandles::resolve_impl(jobject handle) { + assert(handle != NULL, "precondition"); + assert(!current_thread_in_native(), "must not be in native"); + oop result; + if (is_jweak(handle)) { // Unlikely + result = resolve_jweak(handle); + } else { + result = jobject_ref(handle); + // Construction of jobjects canonicalize a null value into a null + // jobject, so for non-jweak the pointee should never be null. + assert(external_guard || result != NULL, "Invalid JNI handle"); + } + return result; +} + +inline oop JNIHandles::resolve(jobject handle) { + oop result = NULL; + if (handle != NULL) { + result = resolve_impl<false /* external_guard */ >(handle); + } + return result; +} + +inline oop JNIHandles::resolve_non_null(jobject handle) { + assert(handle != NULL, "JNI handle should not be null"); + oop result = resolve_impl<false /* external_guard */ >(handle); + assert(result != NULL, "NULL read from jni handle"); + return result; +} + +inline void JNIHandles::destroy_local(jobject handle) { + if (handle != NULL) { + assert(!is_jweak(handle), "Invalid JNI local handle"); + jobject_ref(handle) = NULL; + } +} + +#endif // SHARE_RUNTIME_JNIHANDLES_INLINE_HPP + diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp --- a/src/hotspot/share/runtime/objectMonitor.cpp +++ b/src/hotspot/share/runtime/objectMonitor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -102,8 +102,6 @@ int ObjectMonitor::Knob_VerifyInUse = 0; int ObjectMonitor::Knob_VerifyMatch = 0; int ObjectMonitor::Knob_SpinLimit = 5000; // derived by an external tool - -static int Knob_LogSpins = 0; // enable jvmstat tally for spins -static int Knob_HandOff = 0; static int Knob_ReportSettings = 0; static int Knob_SpinBase = 0; // Floor AKA SpinMin @@ -2229,18 +2227,7 @@ PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts = NULL; PerfCounter * ObjectMonitor::_sync_FutileWakeups = NULL; PerfCounter * ObjectMonitor::_sync_Parks = NULL; -PerfCounter * ObjectMonitor::_sync_EmptyNotifications = NULL; PerfCounter * ObjectMonitor::_sync_Notifications = NULL; -PerfCounter * ObjectMonitor::_sync_PrivateA = NULL; -PerfCounter * ObjectMonitor::_sync_PrivateB = NULL; -PerfCounter * ObjectMonitor::_sync_SlowExit = NULL; -PerfCounter * ObjectMonitor::_sync_SlowEnter = NULL; -PerfCounter * ObjectMonitor::_sync_SlowNotify = NULL; -PerfCounter * ObjectMonitor::_sync_SlowNotifyAll = NULL; -PerfCounter * ObjectMonitor::_sync_FailedSpins = NULL; -PerfCounter * ObjectMonitor::_sync_SuccessfulSpins = NULL; -PerfCounter * ObjectMonitor::_sync_MonInCirculation = NULL; -PerfCounter * ObjectMonitor::_sync_MonScavenged = NULL; PerfCounter * ObjectMonitor::_sync_Inflations = NULL; PerfCounter * ObjectMonitor::_sync_Deflations = NULL; PerfLongVariable * ObjectMonitor::_sync_MonExtant = NULL; @@ -2271,18 +2258,7 @@ NEWPERFCOUNTER(_sync_ContendedLockAttempts); NEWPERFCOUNTER(_sync_FutileWakeups); NEWPERFCOUNTER(_sync_Parks); - NEWPERFCOUNTER(_sync_EmptyNotifications); NEWPERFCOUNTER(_sync_Notifications); - NEWPERFCOUNTER(_sync_SlowEnter); - NEWPERFCOUNTER(_sync_SlowExit); - NEWPERFCOUNTER(_sync_SlowNotify); - NEWPERFCOUNTER(_sync_SlowNotifyAll); - NEWPERFCOUNTER(_sync_FailedSpins); - NEWPERFCOUNTER(_sync_SuccessfulSpins); - NEWPERFCOUNTER(_sync_PrivateA); - NEWPERFCOUNTER(_sync_PrivateB); - NEWPERFCOUNTER(_sync_MonInCirculation); - NEWPERFCOUNTER(_sync_MonScavenged); NEWPERFVARIABLE(_sync_MonExtant); #undef NEWPERFCOUNTER #undef NEWPERFVARIABLE @@ -2328,7 +2304,7 @@ if (SyncKnobs == NULL) SyncKnobs = ""; size_t sz = strlen(SyncKnobs); - char * knobs = (char *) malloc(sz + 2); + char * knobs = (char *) os::malloc(sz + 2, mtInternal); if (knobs == NULL) { vm_exit_out_of_memory(sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs"); guarantee(0, "invariant"); @@ -2351,7 +2327,6 @@ SETKNOB(SpinBackOff); SETKNOB(CASPenalty); SETKNOB(OXPenalty); - SETKNOB(LogSpins); SETKNOB(SpinSetSucc); SETKNOB(SuccEnabled); SETKNOB(SuccRestrict); @@ -2389,11 +2364,7 @@ Knob_FixedSpin = -1; } - if (Knob_LogSpins == 0) { - ObjectMonitor::_sync_FailedSpins = NULL; - } - - free(knobs); + os::free(knobs); OrderAccess::fence(); InitDone = 1; } diff --git a/src/hotspot/share/runtime/objectMonitor.hpp b/src/hotspot/share/runtime/objectMonitor.hpp --- a/src/hotspot/share/runtime/objectMonitor.hpp +++ b/src/hotspot/share/runtime/objectMonitor.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -191,18 +191,7 @@ static PerfCounter * _sync_ContendedLockAttempts; static PerfCounter * _sync_FutileWakeups; static PerfCounter * _sync_Parks; - static PerfCounter * _sync_EmptyNotifications; static PerfCounter * _sync_Notifications; - static PerfCounter * _sync_SlowEnter; - static PerfCounter * _sync_SlowExit; - static PerfCounter * _sync_SlowNotify; - static PerfCounter * _sync_SlowNotifyAll; - static PerfCounter * _sync_FailedSpins; - static PerfCounter * _sync_SuccessfulSpins; - static PerfCounter * _sync_PrivateA; - static PerfCounter * _sync_PrivateB; - static PerfCounter * _sync_MonInCirculation; - static PerfCounter * _sync_MonScavenged; static PerfCounter * _sync_Inflations; static PerfCounter * _sync_Deflations; static PerfLongVariable * _sync_MonExtant; diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp --- a/src/hotspot/share/runtime/os.cpp +++ b/src/hotspot/share/runtime/os.cpp @@ -85,7 +85,7 @@ julong os::free_bytes = 0; // # of bytes freed #endif -static juint cur_malloc_words = 0; // current size for MallocMaxTestWords +static size_t cur_malloc_words = 0; // current size for MallocMaxTestWords void os_init_globals() { // Called from init_globals(). @@ -93,6 +93,26 @@ os::init_globals(); } +static time_t get_timezone(const struct tm* time_struct) { +#if defined(_ALLBSD_SOURCE) + return time_struct->tm_gmtoff; +#elif defined(_WINDOWS) + long zone; + _get_timezone(&zone); + return static_cast(zone); +#else + return timezone; +#endif +} + +int os::snprintf(char* buf, size_t len, const char* fmt, ...) { + va_list args; + va_start(args, fmt); + int result = os::vsnprintf(buf, len, fmt, args); + va_end(args); + return result; +} + // Fill in buffer with current local time as an ISO-8601 string. // E.g., yyyy-mm-ddThh:mm:ss-zzzz. // Returns buffer, or NULL if it failed. 
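// Aside (illustrative sketch, not part of the patch): given the offset in
// seconds west of UTC returned by get_timezone() above, the trailing
// "-zzzz"/"+zzzz" piece of the ISO-8601 string can be produced roughly like
// this (the buffer variables p/remaining and the dst flag are hypothetical):
//
//   time_t seconds_east = -zone;               // invert: east of UTC
//   if (dst_in_effect) seconds_east += 3600;   // DST moves local time 1h east
//   char sign = (seconds_east < 0) ? '-' : '+';
//   long abs_secs = (seconds_east < 0) ? (long)-seconds_east : (long)seconds_east;
//   os::snprintf(p, remaining, "%c%02ld%02ld",
//                sign, abs_secs / 3600, (abs_secs % 3600) / 60);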
@@ -137,11 +157,7 @@ return NULL; } } -#if defined(_ALLBSD_SOURCE) - const time_t zone = (time_t) time_struct.tm_gmtoff; -#else - const time_t zone = timezone; -#endif + const time_t zone = get_timezone(&time_struct); // If daylight savings time is in effect, // we are 1 hour East of our time zone @@ -229,6 +245,13 @@ return OS_OK; } + +#if !defined(LINUX) && !defined(_WINDOWS) +size_t os::committed_stack_size(address bottom, size_t size) { + return size; +} +#endif + bool os::dll_build_name(char* buffer, size_t size, const char* fname) { int n = jio_snprintf(buffer, size, "%s%s%s", JNI_LIB_PREFIX, fname, JNI_LIB_SUFFIX); return (n != -1); @@ -629,12 +652,12 @@ // static bool has_reached_max_malloc_test_peak(size_t alloc_size) { if (MallocMaxTestWords > 0) { - jint words = (jint)(alloc_size / BytesPerWord); + size_t words = (alloc_size / BytesPerWord); if ((cur_malloc_words + words) > MallocMaxTestWords) { return true; } - Atomic::add(words, (volatile jint *)&cur_malloc_words); + Atomic::add(words, &cur_malloc_words); } return false; } @@ -1706,7 +1729,7 @@ } else { result = pd_attempt_reserve_memory_at(bytes, addr); if (result != NULL) { - MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC); + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); } } return result; @@ -1852,8 +1875,7 @@ os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from, os::SuspendResume::State to) { - os::SuspendResume::State result = - (os::SuspendResume::State) Atomic::cmpxchg((jint) to, (jint *) &_state, (jint) from); + os::SuspendResume::State result = Atomic::cmpxchg(to, &_state, from); if (result == from) { // success return to; diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp --- a/src/hotspot/share/runtime/os.hpp +++ b/src/hotspot/share/runtime/os.hpp @@ -27,6 +27,8 @@ #include "jvm.h" #include "jvmtifiles/jvmti.h" +#include "metaprogramming/isRegisteredEnum.hpp" +#include "metaprogramming/integralConstant.hpp" #include "runtime/extendedPC.hpp" #include "runtime/handles.hpp" #include "utilities/macros.hpp" @@ -271,6 +273,10 @@ static void map_stack_shadow_pages(address sp); static bool stack_shadow_pages_available(Thread *thread, const methodHandle& method, address sp); + // Return size of stack that is actually committed. For Java thread, the bottom should be above + // guard pages (stack grows downward) + static size_t committed_stack_size(address bottom, size_t size); + // OS interface to Virtual Memory // Return the default page size. @@ -643,8 +649,10 @@ static void *find_agent_function(AgentLibrary *agent_lib, bool check_lib, const char *syms[], size_t syms_len); - // Write to stream - static int log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) ATTRIBUTE_PRINTF(3, 0); + // Provide C99 compliant versions of these functions, since some versions + // of some platforms don't. + static int vsnprintf(char* buf, size_t len, const char* fmt, va_list args) ATTRIBUTE_PRINTF(3, 0); + static int snprintf(char* buf, size_t len, const char* fmt, ...) 
ATTRIBUTE_PRINTF(3, 4); // Get host name in buffer provided static bool get_host_name(char* buf, size_t buflen); @@ -913,11 +921,11 @@ class SuspendedThreadTask { public: SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {} - virtual ~SuspendedThreadTask() {} void run(); bool is_done() { return _done; } virtual void do_task(const SuspendedThreadTaskContext& context) = 0; protected: + ~SuspendedThreadTask() {} private: void internal_do_task(); Thread* _thread; @@ -1012,6 +1020,10 @@ }; +#ifndef _WINDOWS +template<> struct IsRegisteredEnum<os::SuspendResume::State> : public TrueType {}; +#endif // !_WINDOWS + // Note that "PAUSE" is almost always used with synchronization // so arguably we should provide Atomic::SpinPause() instead // of the global SpinPause() with C linkage. diff --git a/src/hotspot/share/runtime/reflection.cpp b/src/hotspot/share/runtime/reflection.cpp --- a/src/hotspot/share/runtime/reflection.cpp +++ b/src/hotspot/share/runtime/reflection.cpp @@ -39,6 +39,7 @@ #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "prims/jvmtiExport.hpp" #include "runtime/arguments.hpp" #include "runtime/handles.inline.hpp" diff --git a/src/hotspot/share/runtime/safepoint.cpp b/src/hotspot/share/runtime/safepoint.cpp --- a/src/hotspot/share/runtime/safepoint.cpp +++ b/src/hotspot/share/runtime/safepoint.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -78,6 +78,8 @@ volatile int SafepointSynchronize::_safepoint_counter = 0; int SafepointSynchronize::_current_jni_active_count = 0; long SafepointSynchronize::_end_of_last_safepoint = 0; +int SafepointSynchronize::_defer_thr_suspend_loop_count = 4000; +static const int safepoint_spin_before_yield = 2000; static volatile int PageArmed = 0 ; // safepoint polling page is RO|RW vs PROT_NONE static volatile int TryingToBlock = 0 ; // proximate value -- for advisory use only static bool timeout_error_printed = false; @@ -191,12 +193,10 @@ // Make interpreter safepoint aware Interpreter::notice_safepoints(); - if (DeferPollingPageLoopCount < 0) { - // Make polling safepoint aware - guarantee (PageArmed == 0, "invariant") ; - PageArmed = 1 ; - os::make_polling_page_unreadable(); - } + // Make polling safepoint aware + guarantee (PageArmed == 0, "invariant") ; + PageArmed = 1 ; + os::make_polling_page_unreadable(); } // Consider using active_processor_count() ... but that call is expensive. @@ -309,19 +309,21 @@ // 9. On windows consider using the return value from SwitchThreadTo() // to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions. - if (SafepointMechanism::uses_global_page_poll() && int(iterations) == DeferPollingPageLoopCount) { - guarantee (PageArmed == 0, "invariant") ; - PageArmed = 1 ; - os::make_polling_page_unreadable(); + if (int(iterations) == -1) { // overflow - something is wrong. + // We can only overflow here when we are using global + // polling pages. We keep this guarantee in its original + // form so that searches of the bug database for this + // failure mode find the right bugs.
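// Aside (sketch, not part of the patch): with the deprecated flags gone,
// the one remaining way to change _defer_thr_suspend_loop_count is the
// -Xconcurrentio path, via the setter added to safepoint.hpp below, which
// makes the VM thread stop spinning/yielding almost immediately. A
// hypothetical call site in argument processing:
//
//   if (match_option(option, "-Xconcurrentio")) {
//     SafepointSynchronize::set_defer_thr_suspend_loop_count();  // sets it to 1
//   }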
+ guarantee (PageArmed == 0, "invariant"); } // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or // ((still_running + _waiting_to_block - TryingToBlock)) < ncpus) ++steps ; - if (ncpus > 1 && steps < SafepointSpinBeforeYield) { SpinPause() ; // MP-Polite spin } else - if (steps < DeferThrSuspendLoopCount) { + if (ncpus > 1 && steps < safepoint_spin_before_yield) { SpinPause() ; // MP-Polite spin } else + if (steps < _defer_thr_suspend_loop_count) { os::naked_yield() ; } else { os::naked_short_sleep(1); @@ -903,10 +905,6 @@ assert(SafepointSynchronize::is_synchronizing(), "polling encountered outside safepoint synchronization"); } - if (ShowSafepointMsgs) { - tty->print("handle_polling_page_exception: "); - } - if (PrintSafepointStatistics) { inc_page_trap_count(); } @@ -1108,9 +1106,6 @@ "polling page exception on thread not running state: %u", uint(t)); // Step 1: Find the nmethod from the return address - if (ShowSafepointMsgs && Verbose) { - tty->print_cr("Polling page exception at " INTPTR_FORMAT, p2i(thread()->saved_exception_pc())); - } address real_return_addr = thread()->saved_exception_pc(); CodeBlob *cb = CodeCache::find_blob(real_return_addr); @@ -1207,7 +1202,6 @@ float SafepointSynchronize::_ts_of_current_safepoint = 0.0f; static jlong cleanup_end_time = 0; -static bool need_to_track_page_armed_status = false; static bool init_done = false; // Helper method to print the header. @@ -1219,11 +1213,6 @@ "[ threads: total initially_running wait_to_block ]" "[ time: spin block sync cleanup vmop ] "); - // no page armed status printed out if it is always armed. - if (need_to_track_page_armed_status) { - tty->print("page_armed "); - } - tty->print_cr("page_trap_count"); } @@ -1246,9 +1235,6 @@ guarantee(_safepoint_stats != NULL, "not enough memory for safepoint instrumentation data"); - if (DeferPollingPageLoopCount >= 0) { - need_to_track_page_armed_status = true; - } init_done = true; } @@ -1288,10 +1274,6 @@ spstat->_time_to_spin = cur_time - spstat->_time_to_spin; } - if (need_to_track_page_armed_status) { - spstat->_page_armed = (PageArmed == 1); - } - // Records the start time of waiting to block. Updated when block is done. if (_waiting_to_block != 0) { spstat->_time_to_wait_to_block = cur_time; @@ -1380,9 +1362,6 @@ (int64_t)(sstats->_time_to_do_cleanups / MICROUNITS), (int64_t)(sstats->_time_to_exec_vmop / MICROUNITS)); - if (need_to_track_page_armed_status) { - tty->print(INT32_FORMAT_W(10) " ", sstats->_page_armed); - } tty->print_cr(INT32_FORMAT_W(15) " ", sstats->_nof_threads_hit_page_trap); } } @@ -1409,12 +1388,7 @@ tty->cr(); // Print out polling page sampling status. - if (!need_to_track_page_armed_status) { - tty->print_cr("Polling page always armed"); - } else { - tty->print_cr("Defer polling page loop count = " INTX_FORMAT "\n", - DeferPollingPageLoopCount); - } + tty->print_cr("Polling page always armed"); for (int index = 0; index < VM_Operation::VMOp_Terminating; index++) { if (_safepoint_reasons[index] != 0) { @@ -1431,32 +1405,3 @@ INT64_FORMAT_W(5) " ms", (int64_t)(_max_vmop_time / MICROUNITS)); } - -// ------------------------------------------------------------------------------------------------ -// Non-product code - -#ifndef PRODUCT - -void SafepointSynchronize::print_state() { - if (_state == _not_synchronized) { - tty->print_cr("not synchronized"); - } else if (_state == _synchronizing || _state == _synchronized) { - tty->print_cr("State: %s", (_state == _synchronizing) ?
"synchronizing" : - "synchronized"); - - for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); ) { - cur->safepoint_state()->print(); - } - } -} - -void SafepointSynchronize::safepoint_msg(const char* format, ...) { - if (ShowSafepointMsgs) { - va_list ap; - va_start(ap, format); - tty->vprint_cr(format, ap); - va_end(ap); - } -} - -#endif // !PRODUCT diff --git a/src/hotspot/share/runtime/safepoint.hpp b/src/hotspot/share/runtime/safepoint.hpp --- a/src/hotspot/share/runtime/safepoint.hpp +++ b/src/hotspot/share/runtime/safepoint.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -94,7 +94,6 @@ int _nof_total_threads; // total number of Java threads int _nof_initial_running_threads; // total number of initially seen running threads int _nof_threads_wait_to_block; // total number of threads waiting for to block - bool _page_armed; // true if polling page is armed, false otherwise int _nof_threads_hit_page_trap; // total number of threads hitting the page trap jlong _time_to_spin; // total time in millis spent in spinning jlong _time_to_wait_to_block; // total time in millis spent in waiting for to block @@ -107,6 +106,7 @@ static volatile SynchronizeState _state; // Threads might read this flag directly, without acquiring the Threads_lock static volatile int _waiting_to_block; // number of threads we are waiting for to block static int _current_jni_active_count; // Counts the number of active critical natives during the safepoint + static int _defer_thr_suspend_loop_count; // Iterations before blocking VM threads // This counter is used for fast versions of jni_GetField. // An even value means there is no ongoing safepoint operations. @@ -191,10 +191,6 @@ static bool is_cleanup_needed(); static void do_cleanup_tasks(); - // Debugging - static void print_state() PRODUCT_RETURN; - static void safepoint_msg(const char* format, ...) ATTRIBUTE_PRINTF(1, 2) PRODUCT_RETURN; - static void deferred_initialize_stat(); static void print_stat_on_exit(); inline static void inc_vmop_coalesced_count() { _coalesced_vmop_count++; } @@ -206,6 +202,11 @@ static address address_of_state() { return (address)&_state; } static address safepoint_counter_addr() { return (address)&_safepoint_counter; } + + // This method is only used for -Xconcurrentio support. + static void set_defer_thr_suspend_loop_count() { + _defer_thr_suspend_loop_count = 1; + } }; // State class for a thread suspended at a safepoint @@ -258,15 +259,6 @@ // Initialize static void create(JavaThread *thread); static void destroy(JavaThread *thread); - - void safepoint_msg(const char* format, ...) 
ATTRIBUTE_PRINTF(2, 3) { - if (ShowSafepointMsgs) { - va_list ap; - va_start(ap, format); - tty->vprint_cr(format, ap); - va_end(ap); - } - } }; diff --git a/src/hotspot/share/runtime/serviceThread.cpp b/src/hotspot/share/runtime/serviceThread.cpp --- a/src/hotspot/share/runtime/serviceThread.cpp +++ b/src/hotspot/share/runtime/serviceThread.cpp @@ -29,7 +29,6 @@ #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" #include "prims/jvmtiImpl.hpp" -#include "services/allocationContextService.hpp" #include "services/diagnosticArgument.hpp" #include "services/diagnosticFramework.hpp" #include "services/gcNotifier.hpp" @@ -105,8 +104,7 @@ while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) && !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) && !(has_gc_notification_event = GCNotifier::has_event()) && - !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) && - !(acs_notify = AllocationContextService::should_notify())) { + !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification())) { // wait until one of the sensors has pending requests, or there is a // pending JVMTI event or JMX GC notification to post Service_lock->wait(Mutex::_no_safepoint_check_flag); @@ -132,10 +130,6 @@ if(has_dcmd_notification_event) { DCmdFactory::send_notification(CHECK); } - - if (acs_notify) { - AllocationContextService::notify(CHECK); - } } } diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -223,7 +223,7 @@ // Shenandoah clone barrier: makes sure that references point to to-space // in cloned objects. JRT_LEAF(void, SharedRuntime::shenandoah_clone_barrier(oopDesc* obj)) - BarrierSet::barrier_set()->write_region(MemRegion((HeapWord*) obj, obj->size())); + barrier_set_cast<ModRefBarrierSet>(BarrierSet::barrier_set())->write_region(MemRegion((HeapWord*) obj, obj->size())); JRT_END #endif // INCLUDE_ALL_GCS @@ -1929,95 +1929,27 @@ vframeStream vfst(thread, true); assert(!vfst.at_end(), "Java frame must exist"); Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci())); - Klass* target_klass = vfst.method()->constants()->klass_at( - cc.index(), thread); - return generate_class_cast_message(caster_klass, target_klass); + constantPoolHandle cpool(thread, vfst.method()->constants()); + Klass* target_klass = ConstantPool::klass_at_if_loaded(cpool, cc.index()); + Symbol* target_klass_name = NULL; + if (target_klass == NULL) { + // This klass should be resolved, but just in case, get the name in the klass slot. + target_klass_name = cpool->klass_name_at(cc.index()); + } + return generate_class_cast_message(caster_klass, target_klass, target_klass_name); } -// The caller of class_loader_and_module_name() (or one of its callers) + +// The caller of generate_class_cast_message() (or one of its callers) // must use a ResourceMark in order to correctly free the result.
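// Aside (hypothetical call site, not part of the patch): the ResourceMark
// contract stated above means the returned message lives in the current
// thread's resource area and is reclaimed when the mark unwinds:
//
//   ResourceMark rm(THREAD);
//   char* msg = SharedRuntime::generate_class_cast_message(caster_klass, target_klass);
//   THROW_MSG(vmSymbols::java_lang_ClassCastException(), msg);
//   // msg is never free()'d explicitly; its storage dies with rm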
-const char* class_loader_and_module_name(Klass* klass) { - const char* delim = "/"; - size_t delim_len = strlen(delim); - - const char* fqn = klass->external_name(); - // Length of message to return; always include FQN - size_t msglen = strlen(fqn) + 1; - - bool has_cl_name = false; - bool has_mod_name = false; - bool has_version = false; - - // Use class loader name, if exists and not builtin - const char* class_loader_name = ""; - ClassLoaderData* cld = klass->class_loader_data(); - assert(cld != NULL, "class_loader_data should not be NULL"); - if (!cld->is_builtin_class_loader_data()) { - // If not builtin, look for name - oop loader = klass->class_loader(); - if (loader != NULL) { - oop class_loader_name_oop = java_lang_ClassLoader::name(loader); - if (class_loader_name_oop != NULL) { - class_loader_name = java_lang_String::as_utf8_string(class_loader_name_oop); - if (class_loader_name != NULL && class_loader_name[0] != '\0') { - has_cl_name = true; - msglen += strlen(class_loader_name) + delim_len; - } - } - } - } - - const char* module_name = ""; - const char* version = ""; - Klass* bottom_klass = klass->is_objArray_klass() ? - ObjArrayKlass::cast(klass)->bottom_klass() : klass; - if (bottom_klass->is_instance_klass()) { - ModuleEntry* module = InstanceKlass::cast(bottom_klass)->module(); - // Use module name, if exists - if (module->is_named()) { - has_mod_name = true; - module_name = module->name()->as_C_string(); - msglen += strlen(module_name); - // Use version if exists and is not a jdk module - if (module->is_non_jdk_module() && module->version() != NULL) { - has_version = true; - version = module->version()->as_C_string(); - msglen += strlen("@") + strlen(version); - } - } - } else { - // klass is an array of primitives, so its module is java.base - module_name = JAVA_BASE_NAME; - } - - if (has_cl_name || has_mod_name) { - msglen += delim_len; - } - - char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen); - - // Just return the FQN if error in allocating string - if (message == NULL) { - return fqn; - } - - jio_snprintf(message, msglen, "%s%s%s%s%s%s%s", - class_loader_name, - (has_cl_name) ? delim : "", - (has_mod_name) ? module_name : "", - (has_version) ? "@" : "", - (has_version) ? version : "", - (has_cl_name || has_mod_name) ? delim : "", - fqn); - return message; -} - char* SharedRuntime::generate_class_cast_message( - Klass* caster_klass, Klass* target_klass) { - - const char* caster_name = class_loader_and_module_name(caster_klass); - - const char* target_name = class_loader_and_module_name(target_klass); + Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name) { + + const char* caster_name = caster_klass->class_loader_and_module_name(); + + assert(target_klass != NULL || target_klass_name != NULL, "one must be provided"); + const char* target_name = target_klass == NULL ? target_klass_name->as_C_string() : + target_klass->class_loader_and_module_name(); size_t msglen = strlen(caster_name) + strlen(" cannot be cast to ") + strlen(target_name) + 1; @@ -3176,3 +3108,16 @@ } return activation; } + +void SharedRuntime::on_slowpath_allocation_exit(JavaThread* thread) { + // After any safepoint, just before going back to compiled code, + // we inform the GC that we will be doing initializing writes to + // this object in the future without emitting card-marks, so + // GC may take any compensating steps. 
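+ // For example, with -XX:+ReduceInitialCardMarks compiled code elides the
+ // initial card marks for a freshly allocated object, and the barrier set
+ // compensates here, either by dirtying the covering cards right away or
+ // by recording a deferred card mark to be flushed later.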
+ + oop new_obj = thread->vm_result(); + if (new_obj == NULL) return; + + BarrierSet *bs = Universe::heap()->barrier_set(); + bs->on_slowpath_allocation_exit(thread, new_obj); +} diff --git a/src/hotspot/share/runtime/sharedRuntime.hpp b/src/hotspot/share/runtime/sharedRuntime.hpp --- a/src/hotspot/share/runtime/sharedRuntime.hpp +++ b/src/hotspot/share/runtime/sharedRuntime.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -212,6 +212,10 @@ static address deoptimize_for_implicit_exception(JavaThread* thread, address pc, CompiledMethod* nm, int deopt_reason); #endif + // Post-slow-path-allocation, pre-initializing-stores step for + // implementing e.g. ReduceInitialCardMarks + static void on_slowpath_allocation_exit(JavaThread* thread); + static void enable_stack_reserved_zone(JavaThread* thread); static frame look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr); @@ -315,7 +319,7 @@ // The caller (or one of its callers) must use a ResourceMark // in order to correctly free the result. // - static char* generate_class_cast_message(Klass* caster_klass, Klass* target_klass); + static char* generate_class_cast_message(Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name = NULL); // Resolves a call site - may patch in the destination of the call into the // compiled code. diff --git a/src/hotspot/share/runtime/stubRoutines.cpp b/src/hotspot/share/runtime/stubRoutines.cpp --- a/src/hotspot/share/runtime/stubRoutines.cpp +++ b/src/hotspot/share/runtime/stubRoutines.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -420,7 +420,7 @@ SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy #endif // !PRODUCT assert(count != 0, "count should be non-zero"); - HeapAccess::oop_arraycopy(NULL, NULL, (HeapWord*)src, (HeapWord*)dest, count); + HeapAccess::oop_arraycopy(NULL, NULL, (HeapWord*)src, (HeapWord*)dest, count); JRT_END JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count)) @@ -464,7 +464,7 @@ SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy #endif // !PRODUCT assert(count != 0, "count should be non-zero"); - HeapAccess::oop_arraycopy(NULL, NULL, src, dest, count); + HeapAccess::oop_arraycopy(NULL, NULL, src, dest, count); JRT_END address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) { diff --git a/src/hotspot/share/runtime/synchronizer.hpp b/src/hotspot/share/runtime/synchronizer.hpp --- a/src/hotspot/share/runtime/synchronizer.hpp +++ b/src/hotspot/share/runtime/synchronizer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -204,8 +204,6 @@ // have to pass through, and we must also be able to deal with // asynchronous exceptions.
The caller is responsible for checking // the threads pending exception if needed. -// doLock was added to support classloading with UnsyncloadClass which -// requires flag based choice of locking the classloader lock. class ObjectLocker : public StackObj { private: Thread* _thread; diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp --- a/src/hotspot/share/runtime/thread.cpp +++ b/src/hotspot/share/runtime/thread.cpp @@ -51,6 +51,7 @@ #include "oops/objArrayOop.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "oops/verifyOopClosure.hpp" #include "prims/jvm_misc.hpp" #include "prims/jvmtiExport.hpp" @@ -70,6 +71,7 @@ #include "runtime/interfaceSupport.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/jniPeriodicChecker.hpp" #include "runtime/memprofiler.hpp" #include "runtime/mutexLocker.hpp" @@ -2032,23 +2034,10 @@ JvmtiExport::cleanup_thread(this); } - // We must flush any deferred card marks before removing a thread from - // the list of active threads. - Universe::heap()->flush_deferred_store_barrier(this); - assert(deferred_card_mark().is_empty(), "Should have been flushed"); - -#if INCLUDE_ALL_GCS - // We must flush the G1-related buffers before removing a thread - // from the list of active threads. We must do this after any deferred - // card marks have been flushed (above) so that any entries that are - // added to the thread's dirty card queue as a result are not lost. - if (UseG1GC || (UseShenandoahGC && (ShenandoahSATBBarrier || ShenandoahConditionalSATBBarrier || ShenandoahKeepAliveBarrier || ShenandoahStoreValEnqueueBarrier))) { - flush_barrier_queues(); - } - if (UseShenandoahGC && UseTLAB && gclab().is_initialized()) { - gclab().make_parsable(true); - } -#endif // INCLUDE_ALL_GCS + // We must flush any deferred card marks and other various GC barrier + // related buffers (e.g. G1 SATB buffer and G1 dirty card queue buffer) + // before removing a thread from the list of active threads. + BarrierSet::barrier_set()->on_thread_detach(this); log_info(os, thread)("JavaThread %s (tid: " UINTX_FORMAT ").", exit_type == JavaThread::normal_exit ? "exiting" : "detaching", @@ -2077,38 +2066,6 @@ } } -#if INCLUDE_ALL_GCS -// Flush G1-related queues. -void JavaThread::flush_barrier_queues() { - satb_mark_queue().flush(); - dirty_card_queue().flush(); -} - -void JavaThread::initialize_queues() { - assert(!SafepointSynchronize::is_at_safepoint(), - "we should not be at a safepoint"); - - SATBMarkQueue& satb_queue = satb_mark_queue(); - SATBMarkQueueSet& satb_queue_set = satb_mark_queue_set(); - // The SATB queue should have been constructed with its active - // field set to false. - assert(!satb_queue.is_active(), "SATB queue should not be active"); - assert(satb_queue.is_empty(), "SATB queue should be empty"); - // If we are creating the thread during a marking cycle, we should - // set the active field of the SATB queue to true. - if (satb_queue_set.is_active()) { - satb_queue.set_active(true); - } - - DirtyCardQueue& dirty_queue = dirty_card_queue(); - // The dirty card queue should have been constructed with its - // active field set to true. 
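// Aside (sketch under assumptions, not part of the patch): the deleted
// per-GC queue management above is subsumed by two virtual hooks on the
// barrier set, matching the on_thread_attach()/on_thread_detach() calls
// introduced elsewhere in this patch, so the thread-list code no longer
// hard-codes G1/Shenandoah knowledge. The approximate shape of the hooks:
//
//   class BarrierSet {
//   public:
//     // called under the Threads_lock, before the thread becomes visible
//     virtual void on_thread_attach(JavaThread* thread) {}
//     // called while the thread is still on the thread list
//     virtual void on_thread_detach(JavaThread* thread) {}
//   };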
- assert(dirty_queue.is_active(), "dirty card queue should be active"); - - _gc_state = _gc_state_global; -} -#endif // INCLUDE_ALL_GCS - void JavaThread::cleanup_failed_attach_current_thread() { if (active_handles() != NULL) { JNIHandleBlock* block = active_handles(); @@ -2129,22 +2086,12 @@ tlab().make_parsable(true); // retire TLAB, if any } -#if INCLUDE_ALL_GCS - if (UseG1GC || (UseShenandoahGC && (ShenandoahSATBBarrier || ShenandoahConditionalSATBBarrier || ShenandoahKeepAliveBarrier || ShenandoahStoreValEnqueueBarrier))) { - flush_barrier_queues(); - } - if (UseShenandoahGC && UseTLAB && gclab().is_initialized()) { - gclab().make_parsable(true); - } -#endif // INCLUDE_ALL_GCS + BarrierSet::barrier_set()->on_thread_detach(this); Threads::remove(this); this->smr_delete(); } - - - JavaThread* JavaThread::active() { Thread* thread = Thread::current(); if (thread->is_Java_thread()) { @@ -3962,10 +3909,11 @@ #if INCLUDE_JVMCI if (EnableJVMCI) { - // Initialize JVMCI eagerly if JVMCIPrintProperties is enabled. + // Initialize JVMCI eagerly when it is explicitly requested. + // Or when JVMCIPrintProperties is enabled. // The JVMCI Java initialization code will read this flag and // do the printing if it's set. - bool init = JVMCIPrintProperties; + bool init = EagerJVMCI || JVMCIPrintProperties; if (!init) { // 8145270: Force initialization of JVMCI runtime otherwise requests for blocking @@ -4255,10 +4203,9 @@ // SystemDictionary::resolve_or_null will return null if there was // an exception. If we cannot load the Shutdown class, just don't // call Shutdown.shutdown() at all. This will mean the shutdown hooks - // and finalizers (if runFinalizersOnExit is set) won't be run. - // Note that if a shutdown hook was registered or runFinalizersOnExit - // was called, the Shutdown class would have already been loaded - // (Runtime.addShutdownHook and runFinalizersOnExit will load it). + // won't be run. Note that if a shutdown hook was registered, + // the Shutdown class would have already been loaded + // (Runtime.addShutdownHook will load it). JavaValue result(T_VOID); JavaCalls::call_static(&result, shutdown_klass, @@ -4281,7 +4228,7 @@ // + Wait until we are the last non-daemon thread to execute // <-- every thing is still working at this moment --> // + Call java.lang.Shutdown.shutdown(), which will invoke Java level -// shutdown hooks, run finalizers if finalization-on-exit +// shutdown hooks // + Call before_exit(), prepare for VM exit // > run VM level shutdown hooks (they are registered through JVM_OnExit(), // currently the only user of this mechanism is File.deleteOnExit()) @@ -4318,6 +4265,12 @@ Mutex::_as_suspend_equivalent_flag); } + EventShutdown e; + if (e.should_commit()) { + e.set_reason("No remaining non-daemon Java threads"); + e.commit(); + } + // Hang forever on exit if we are reporting an error. if (ShowMessageBoxOnError && VMError::is_error_reported()) { os::infinite_sleep(); @@ -4405,9 +4358,8 @@ // The threads lock must be owned at this point assert_locked_or_safepoint(Threads_lock); - // See the comment for this method in thread.hpp for its purpose and - // why it is called here. - p->initialize_queues(); + BarrierSet::barrier_set()->on_thread_attach(p); + p->set_next(_thread_list); _thread_list = p; diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp --- a/src/hotspot/share/runtime/thread.hpp +++ b/src/hotspot/share/runtime/thread.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1101,8 +1101,6 @@ // Set of all such queues. static DirtyCardQueueSet _dirty_card_queue_set; - void flush_barrier_queues(); - // Support for Shenandoah barriers static char _gc_state_global; char _gc_state; @@ -2002,37 +2000,14 @@ inline char gc_state() const; -private: void set_gc_state(char in_prog); public: static void set_gc_state_all_threads(char in_prog); + static char gc_state_global() { return _gc_state_global; } #endif // INCLUDE_ALL_GCS - // This method initializes the SATB and dirty card queues before a - // JavaThread is added to the Java thread list. Right now, we don't - // have to do anything to the dirty card queue (it should have been - // activated when the thread was created), but we have to activate - // the SATB queue if the thread is created while a marking cycle is - // in progress. The activation / de-activation of the SATB queues at - // the beginning / end of a marking cycle is done during safepoints - // so we have to make sure this method is called outside one to be - // able to safely read the active field of the SATB queue set. Right - // now, it is called just before the thread is added to the Java - // thread list in the Threads::add() method. That method is holding - // the Threads_lock which ensures we are outside a safepoint. We - // cannot do the obvious and set the active field of the SATB queue - // when the thread is created given that, in some cases, safepoints - // might happen between the JavaThread constructor being called and the - // thread being added to the Java thread list (an example of this is - // when the structure for the DestroyJavaVM thread is created). -#if INCLUDE_ALL_GCS - void initialize_queues(); -#else // INCLUDE_ALL_GCS - void initialize_queues() { } -#endif // INCLUDE_ALL_GCS - // Machine dependent stuff #include OS_CPU_HEADER(thread) diff --git a/src/hotspot/share/runtime/thread.inline.hpp b/src/hotspot/share/runtime/thread.inline.hpp --- a/src/hotspot/share/runtime/thread.inline.hpp +++ b/src/hotspot/share/runtime/thread.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,24 +30,18 @@ #include "runtime/thread.hpp" inline void Thread::set_suspend_flag(SuspendFlags f) { - assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch"); uint32_t flags; do { flags = _suspend_flags; } - while (Atomic::cmpxchg((jint)(flags | f), - (volatile jint*)&_suspend_flags, - (jint)flags) != (jint)flags); + while (Atomic::cmpxchg((flags | f), &_suspend_flags, flags) != flags); } inline void Thread::clear_suspend_flag(SuspendFlags f) { - assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch"); uint32_t flags; do { flags = _suspend_flags; } - while (Atomic::cmpxchg((jint)(flags & ~f), - (volatile jint*)&_suspend_flags, - (jint)flags) != (jint)flags); + while (Atomic::cmpxchg((flags & ~f), &_suspend_flags, flags) != flags); } inline void Thread::set_has_async_exception() { diff --git a/src/hotspot/share/runtime/threadSMR.cpp b/src/hotspot/share/runtime/threadSMR.cpp --- a/src/hotspot/share/runtime/threadSMR.cpp +++ b/src/hotspot/share/runtime/threadSMR.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "logging/logStream.hpp" #include "memory/allocation.inline.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadSMR.inline.hpp" #include "services/threadService.hpp" diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -469,23 +469,22 @@ nonstatic_field(CardGeneration, _capacity_at_prologue, size_t) \ nonstatic_field(CardGeneration, _used_at_prologue, size_t) \ \ - nonstatic_field(CardTableModRefBS, _whole_heap, const MemRegion) \ - nonstatic_field(CardTableModRefBS, _guard_index, const size_t) \ - nonstatic_field(CardTableModRefBS, _last_valid_index, const size_t) \ - nonstatic_field(CardTableModRefBS, _page_size, const size_t) \ - nonstatic_field(CardTableModRefBS, _byte_map_size, const size_t) \ - nonstatic_field(CardTableModRefBS, _byte_map, jbyte*) \ - nonstatic_field(CardTableModRefBS, _cur_covered_regions, int) \ - nonstatic_field(CardTableModRefBS, _covered, MemRegion*) \ - nonstatic_field(CardTableModRefBS, _committed, MemRegion*) \ - nonstatic_field(CardTableModRefBS, _guard_region, MemRegion) \ - nonstatic_field(CardTableModRefBS, byte_map_base, jbyte*) \ - \ - nonstatic_field(CardTableRS, _ct_bs, CardTableModRefBSForCTRS*) \ + nonstatic_field(CardTable, _whole_heap, const MemRegion) \ + nonstatic_field(CardTable, _guard_index, const size_t) \ + nonstatic_field(CardTable, _last_valid_index, const size_t) \ + nonstatic_field(CardTable, _page_size, const size_t) \ + nonstatic_field(CardTable, _byte_map_size, const size_t) \ + nonstatic_field(CardTable, _byte_map, jbyte*) \ + nonstatic_field(CardTable, _cur_covered_regions, int) \ + nonstatic_field(CardTable, _covered, MemRegion*) \ + nonstatic_field(CardTable, _committed, MemRegion*) \ + nonstatic_field(CardTable, _guard_region, MemRegion) \ + nonstatic_field(CardTable, _byte_map_base, jbyte*) \ + nonstatic_field(CardTableModRefBS, _defer_initial_card_mark, bool) \ + nonstatic_field(CardTableModRefBS, _card_table, CardTable*) \ \ 
nonstatic_field(CollectedHeap, _reserved, MemRegion) \ nonstatic_field(CollectedHeap, _barrier_set, BarrierSet*) \ - nonstatic_field(CollectedHeap, _defer_initial_card_mark, bool) \ nonstatic_field(CollectedHeap, _is_gc_active, bool) \ nonstatic_field(CollectedHeap, _total_collections, unsigned int) \ \ @@ -517,9 +516,8 @@ \ nonstatic_field(GenCollectedHeap, _young_gen, Generation*) \ nonstatic_field(GenCollectedHeap, _old_gen, Generation*) \ - \ - nonstatic_field(GenCollectorPolicy, _young_gen_spec, GenerationSpec*) \ - nonstatic_field(GenCollectorPolicy, _old_gen_spec, GenerationSpec*) \ + nonstatic_field(GenCollectedHeap, _young_gen_spec, GenerationSpec*) \ + nonstatic_field(GenCollectedHeap, _old_gen_spec, GenerationSpec*) \ \ nonstatic_field(HeapWord, i, char*) \ \ @@ -1473,7 +1471,6 @@ declare_type(DefNewGeneration, Generation) \ declare_type(CardGeneration, Generation) \ declare_type(TenuredGeneration, CardGeneration) \ - declare_toplevel_type(GenCollectorPolicy) \ declare_toplevel_type(Space) \ declare_type(CompactibleSpace, Space) \ declare_type(ContiguousSpace, CompactibleSpace) \ @@ -1482,9 +1479,9 @@ declare_toplevel_type(BarrierSet) \ declare_type(ModRefBarrierSet, BarrierSet) \ declare_type(CardTableModRefBS, ModRefBarrierSet) \ - declare_type(CardTableModRefBSForCTRS, CardTableModRefBS) \ + declare_toplevel_type(CardTable) \ + declare_type(CardTableRS, CardTable) \ declare_toplevel_type(BarrierSet::Name) \ - declare_toplevel_type(CardTableRS) \ declare_toplevel_type(BlockOffsetSharedArray) \ declare_toplevel_type(BlockOffsetTable) \ declare_type(BlockOffsetArray, BlockOffsetTable) \ @@ -1507,11 +1504,11 @@ \ declare_toplevel_type(BarrierSet*) \ declare_toplevel_type(BlockOffsetSharedArray*) \ + declare_toplevel_type(CardTable*) \ + declare_toplevel_type(CardTable*const) \ declare_toplevel_type(CardTableRS*) \ declare_toplevel_type(CardTableModRefBS*) \ declare_toplevel_type(CardTableModRefBS**) \ - declare_toplevel_type(CardTableModRefBSForCTRS*) \ - declare_toplevel_type(CardTableModRefBSForCTRS**) \ declare_toplevel_type(CollectedHeap*) \ declare_toplevel_type(ContiguousSpace*) \ declare_toplevel_type(DefNewGeneration*) \ @@ -2249,8 +2246,6 @@ \ declare_constant(BarrierSet::ModRef) \ declare_constant(BarrierSet::CardTableModRef) \ - declare_constant(BarrierSet::CardTableForRS) \ - declare_constant(BarrierSet::CardTableExtension) \ declare_constant(BarrierSet::G1SATBCT) \ declare_constant(BarrierSet::G1SATBCTLogging) \ declare_constant(BarrierSet::Shenandoah) \ @@ -2263,18 +2258,18 @@ declare_constant(BOTConstants::Base) \ declare_constant(BOTConstants::N_powers) \ \ - declare_constant(CardTableModRefBS::clean_card) \ - declare_constant(CardTableModRefBS::last_card) \ - declare_constant(CardTableModRefBS::dirty_card) \ - declare_constant(CardTableModRefBS::Precise) \ - declare_constant(CardTableModRefBS::ObjHeadPreciseArray) \ - declare_constant(CardTableModRefBS::card_shift) \ - declare_constant(CardTableModRefBS::card_size) \ - declare_constant(CardTableModRefBS::card_size_in_words) \ + declare_constant(CardTable::clean_card) \ + declare_constant(CardTable::last_card) \ + declare_constant(CardTable::dirty_card) \ + declare_constant(CardTable::Precise) \ + declare_constant(CardTable::ObjHeadPreciseArray) \ + declare_constant(CardTable::card_shift) \ + declare_constant(CardTable::card_size) \ + declare_constant(CardTable::card_size_in_words) \ \ declare_constant(CardTableRS::youngergen_card) \ \ - declare_constant(G1SATBCardTableModRefBS::g1_young_gen) \ + 
declare_constant(G1CardTable::g1_young_gen) \ \ declare_constant(CollectedHeap::SerialHeap) \ declare_constant(CollectedHeap::CMSHeap) \ diff --git a/src/hotspot/share/runtime/vm_version.cpp b/src/hotspot/share/runtime/vm_version.cpp --- a/src/hotspot/share/runtime/vm_version.cpp +++ b/src/hotspot/share/runtime/vm_version.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -216,6 +216,10 @@ #define HOTSPOT_BUILD_COMPILER "MS VC++ 11.0 (VS2012)" #elif _MSC_VER == 1800 #define HOTSPOT_BUILD_COMPILER "MS VC++ 12.0 (VS2013)" + #elif _MSC_VER == 1900 + #define HOTSPOT_BUILD_COMPILER "MS VC++ 14.0 (VS2015)" + #elif _MSC_VER == 1912 + #define HOTSPOT_BUILD_COMPILER "MS VC++ 15.5 (VS2017)" #else #define HOTSPOT_BUILD_COMPILER "unknown MS VC++:" XSTR(_MSC_VER) #endif diff --git a/src/hotspot/share/services/allocationContextService.hpp b/src/hotspot/share/services/allocationContextService.hpp deleted file mode 100644 --- a/src/hotspot/share/services/allocationContextService.hpp +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP -#define SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP - -#include "utilities/exceptions.hpp" - -class AllocationContextService: public AllStatic { -public: - static inline bool should_notify(); - static inline void notify(TRAPS); -}; - -bool AllocationContextService::should_notify() { return false; } -void AllocationContextService::notify(TRAPS) { } - -#endif // SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP diff --git a/src/hotspot/share/services/attachListener.cpp b/src/hotspot/share/services/attachListener.cpp --- a/src/hotspot/share/services/attachListener.cpp +++ b/src/hotspot/share/services/attachListener.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "gc/shared/vmGCOperations.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "prims/jvmtiExport.hpp" #include "runtime/arguments.hpp" #include "runtime/globals.hpp" diff --git a/src/hotspot/share/services/diagnosticCommand.cpp b/src/hotspot/share/services/diagnosticCommand.cpp --- a/src/hotspot/share/services/diagnosticCommand.cpp +++ b/src/hotspot/share/services/diagnosticCommand.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,7 @@ #include "gc/shared/vmGCOperations.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "runtime/globals.hpp" #include "runtime/javaCalls.hpp" #include "runtime/os.hpp" @@ -88,6 +89,7 @@ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<MetaspaceDCmd>(full_export, true, false)); #if INCLUDE_JVMTI // Both JVMTI and SERVICES have to be enabled to have this dcmd DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); #endif // INCLUDE_JVMTI diff --git a/src/hotspot/share/services/diagnosticCommand.hpp b/src/hotspot/share/services/diagnosticCommand.hpp --- a/src/hotspot/share/services/diagnosticCommand.hpp +++ b/src/hotspot/share/services/diagnosticCommand.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -839,4 +839,25 @@ virtual void execute(DCmdSource source, TRAPS); }; +class MetaspaceDCmd : public DCmd { +public: + MetaspaceDCmd(outputStream* output, bool heap); + static const char* name() { + return "VM.metaspace"; + } + static const char* description() { + return "Prints the statistics for the metaspace"; + } + static const char* impact() { + return "Medium: Depends on number of classes loaded."; + } + static const JavaPermission permission() { + JavaPermission p = {"java.lang.management.ManagementPermission", + "monitor", NULL}; + return p; + } + static int num_arguments() { return 0; } + virtual void execute(DCmdSource source, TRAPS); +}; + #endif // SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_HPP diff --git a/src/hotspot/share/services/heapDumper.cpp b/src/hotspot/share/services/heapDumper.cpp --- a/src/hotspot/share/services/heapDumper.cpp +++ b/src/hotspot/share/services/heapDumper.cpp @@ -35,6 +35,7 @@ #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jniHandles.hpp" #include "runtime/os.hpp" diff --git a/src/hotspot/share/services/mallocSiteTable.hpp b/src/hotspot/share/services/mallocSiteTable.hpp --- a/src/hotspot/share/services/mallocSiteTable.hpp +++ b/src/hotspot/share/services/mallocSiteTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -151,7 +151,7 @@ ~AccessLock() { if (_lock_state == SharedLock) { - Atomic::dec((volatile jint*)_lock); + Atomic::dec(_lock); } } // Acquire shared lock. @@ -159,7 +159,7 @@ inline bool sharedLock() { jint res = Atomic::add(1, _lock); if (res < 0) { - Atomic::add(-1, _lock); + Atomic::dec(_lock); return false; } _lock_state = SharedLock; diff --git a/src/hotspot/share/services/mallocTracker.hpp b/src/hotspot/share/services/mallocTracker.hpp --- a/src/hotspot/share/services/mallocTracker.hpp +++ b/src/hotspot/share/services/mallocTracker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,8 +66,6 @@ assert(_size >= sz, "deallocation > allocated"); Atomic::dec(&_count); if (sz > 0) { - // unary minus operator applied to unsigned type, result still unsigned - #pragma warning(suppress: 4146) Atomic::sub(sz, &_size); } } diff --git a/src/hotspot/share/services/management.cpp b/src/hotspot/share/services/management.cpp --- a/src/hotspot/share/services/management.cpp +++ b/src/hotspot/share/services/management.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,12 +33,13 @@ #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/typeArrayOop.inline.hpp" #include "runtime/arguments.hpp" #include "runtime/globals.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/javaCalls.hpp" -#include "runtime/jniHandles.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/os.hpp" #include "runtime/serviceThread.hpp" #include "runtime/thread.inline.hpp" diff --git a/src/hotspot/share/services/memReporter.cpp b/src/hotspot/share/services/memReporter.cpp --- a/src/hotspot/share/services/memReporter.cpp +++ b/src/hotspot/share/services/memReporter.cpp @@ -291,7 +291,7 @@ outputStream* out = output(); const char* scale = current_scale(); const NativeCallStack* stack = reserved_rgn->call_stack(); - bool all_committed = reserved_rgn->all_committed(); + bool all_committed = reserved_rgn->size() == reserved_rgn->committed_size(); const char* region_type = (all_committed ? "reserved and committed" : "reserved"); out->print_cr(" "); print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size()); @@ -303,7 +303,17 @@ stack->print_on(out, 4); } - if (all_committed) return; + if (all_committed) { + CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions(); + const CommittedMemoryRegion* committed_rgn = itr.next(); + if (committed_rgn->size() == reserved_rgn->size() && committed_rgn->call_stack()->equals(*stack)) { + // One region spanning the entire reserved region, with the same stack trace. + // Don't print this region because the "reserved and committed" line above + // already indicates that the region is committed. + assert(itr.next() == NULL, "Unexpectedly more than one region"); + return; + } + } CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions(); const CommittedMemoryRegion* committed_rgn; @@ -745,4 +755,3 @@ out->print_cr(")\n"); } - diff --git a/src/hotspot/share/services/memTracker.hpp b/src/hotspot/share/services/memTracker.hpp --- a/src/hotspot/share/services/memTracker.hpp +++ b/src/hotspot/share/services/memTracker.hpp @@ -113,6 +113,8 @@ }; class MemTracker : AllStatic { + friend class VirtualMemoryTrackerTest; + public: static inline NMT_TrackingLevel tracking_level() { if (_tracking_level == NMT_unknown) { @@ -215,8 +217,7 @@ if (addr != NULL) { ThreadCritical tc; if (tracking_level() < NMT_summary) return; - VirtualMemoryTracker::add_reserved_region((address)addr, size, - stack, flag, true); + VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag); VirtualMemoryTracker::add_committed_region((address)addr, size, stack); } } @@ -245,7 +246,7 @@ if (addr != NULL) { // uses thread stack malloc slot for book keeping number of threads MallocMemorySummary::record_malloc(0, mtThreadStack); - record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack); + record_virtual_memory_reserve(addr, size, CALLER_PC, mtThreadStack); } } diff --git a/src/hotspot/share/services/metaspaceDCmd.cpp b/src/hotspot/share/services/metaspaceDCmd.cpp new file mode 100644 --- /dev/null +++ b/src/hotspot/share/services/metaspaceDCmd.cpp @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#include "precompiled.hpp" +#include "memory/metaspace.hpp" +#include "services/diagnosticCommand.hpp" + +MetaspaceDCmd::MetaspaceDCmd(outputStream* output, bool heap): DCmd(output, heap) { +} + +void MetaspaceDCmd::execute(DCmdSource source, TRAPS) { + const size_t scale = 1 * K; + VM_PrintMetadata op(output(), scale); + VMThread::execute(&op); +} + diff --git a/src/hotspot/share/services/nmtDCmd.cpp b/src/hotspot/share/services/nmtDCmd.cpp --- a/src/hotspot/share/services/nmtDCmd.cpp +++ b/src/hotspot/share/services/nmtDCmd.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,8 +40,6 @@ _detail("detail", "request runtime to report memory allocation >= " "1K by each callsite.", "BOOLEAN", false, "false"), - _metadata("metadata", "request runtime to report metadata information", - "BOOLEAN", false, "false"), _baseline("baseline", "request runtime to baseline current memory usage, " \ "so it can be compared against in later time.", "BOOLEAN", false, "false"), @@ -61,7 +59,6 @@ "STRING", false, "KB") { _dcmdparser.add_dcmd_option(&_summary); _dcmdparser.add_dcmd_option(&_detail); - _dcmdparser.add_dcmd_option(&_metadata); _dcmdparser.add_dcmd_option(&_baseline); _dcmdparser.add_dcmd_option(&_summary_diff); _dcmdparser.add_dcmd_option(&_detail_diff); @@ -97,7 +94,6 @@ int nopt = 0; if (_summary.is_set() && _summary.value()) { ++nopt; } if (_detail.is_set() && _detail.value()) { ++nopt; } - if (_metadata.is_set() && _metadata.value()) { ++nopt; } if (_baseline.is_set() && _baseline.value()) { ++nopt; } if (_summary_diff.is_set() && _summary_diff.value()) { ++nopt; } if (_detail_diff.is_set() && _detail_diff.value()) { ++nopt; } @@ -127,10 +123,6 @@ return; } report(false, scale_unit); - } else if (_metadata.value()) { - size_t scale = get_scale(_scale.value()); - VM_PrintMetadata op(output(), scale); - VMThread::execute(&op); } else if (_baseline.value()) { MemBaseline& baseline = MemTracker::get_baseline(); if (!baseline.baseline(MemTracker::tracking_level() != NMT_detail)) { diff --git a/src/hotspot/share/services/nmtDCmd.hpp b/src/hotspot/share/services/nmtDCmd.hpp --- a/src/hotspot/share/services/nmtDCmd.hpp +++ b/src/hotspot/share/services/nmtDCmd.hpp @@ -39,7 +39,6 @@ protected: DCmdArgument<bool> _summary; DCmdArgument<bool> _detail; - DCmdArgument<bool> _metadata; DCmdArgument<bool> _baseline; DCmdArgument<bool> _summary_diff; 
DCmdArgument<bool> _detail_diff; diff --git a/src/hotspot/share/services/serviceUtil.hpp b/src/hotspot/share/services/serviceUtil.hpp --- a/src/hotspot/share/services/serviceUtil.hpp +++ b/src/hotspot/share/services/serviceUtil.hpp @@ -63,6 +63,7 @@ return true; } } + fatal("visible_oop: should never reach here #1"); return false; } // object arrays are visible if they aren't system object arrays @@ -74,6 +75,7 @@ return true; } // everything else (Method*s, ...) aren't visible + fatal("visible_oop: should never reach here #2"); return false; }; // end of visible_oop() diff --git a/src/hotspot/share/services/threadService.cpp b/src/hotspot/share/services/threadService.cpp --- a/src/hotspot/share/services/threadService.cpp +++ b/src/hotspot/share/services/threadService.cpp @@ -120,7 +120,7 @@ } void ThreadService::remove_thread(JavaThread* thread, bool daemon) { - Atomic::dec((jint*) &_exiting_threads_count); + Atomic::dec(&_exiting_threads_count); if (thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread()) { @@ -131,17 +131,17 @@ if (daemon) { _daemon_threads_count->set_value(_daemon_threads_count->get_value() - 1); - Atomic::dec((jint*) &_exiting_daemon_threads_count); + Atomic::dec(&_exiting_daemon_threads_count); } } void ThreadService::current_thread_exiting(JavaThread* jt) { assert(jt == JavaThread::current(), "Called by current thread"); - Atomic::inc((jint*) &_exiting_threads_count); + Atomic::inc(&_exiting_threads_count); oop threadObj = jt->threadObj(); if (threadObj != NULL && java_lang_Thread::is_daemon(threadObj)) { - Atomic::inc((jint*) &_exiting_daemon_threads_count); + Atomic::inc(&_exiting_daemon_threads_count); } } diff --git a/src/hotspot/share/services/virtualMemoryTracker.cpp b/src/hotspot/share/services/virtualMemoryTracker.cpp --- a/src/hotspot/share/services/virtualMemoryTracker.cpp +++ b/src/hotspot/share/services/virtualMemoryTracker.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,6 +38,12 @@ ::new ((void*)_snapshot) VirtualMemorySnapshot(); } +void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) { + // Snapshot current thread stacks + VirtualMemoryTracker::snapshot_thread_stacks(); + as_snapshot()->copy_to(s); +} + SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions; int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) { @@ -48,57 +54,105 @@ return r1.compare(r2); } +static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) { + return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack); +} + +static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) { + // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions. + return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack); +} + +static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) { + LinkedListNode<CommittedMemoryRegion>* preceding = NULL; + + for (LinkedListNode<CommittedMemoryRegion>* node = from; node != NULL; node = node->next()) { + CommittedMemoryRegion* rgn = node->data(); + + // We searched past the region start. 
+ if (rgn->end() > addr) { + break; + } + + preceding = node; + } + + return preceding; +} + +static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) { + if (node != NULL) { + CommittedMemoryRegion* rgn = node->data(); + + if (is_mergeable_with(rgn, addr, size, stack)) { + rgn->expand_region(addr, size); + return true; + } + } + + return false; +} + +static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) { + if (other == NULL) { + return false; + } + + CommittedMemoryRegion* rgn = other->data(); + return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack()); +} + bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) { assert(addr != NULL, "Invalid address"); assert(size > 0, "Invalid size"); assert(contain_region(addr, size), "Not contain this region"); - if (all_committed()) return true; + // Find the region that fully precedes the [addr, addr + size) region. + LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr); + LinkedListNode<CommittedMemoryRegion>* next = (prev != NULL ? prev->next() : _committed_regions.head()); - CommittedMemoryRegion committed_rgn(addr, size, stack); - LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.head(); - - while (node != NULL) { - CommittedMemoryRegion* rgn = node->data(); - if (rgn->same_region(addr, size)) { + if (next != NULL) { + // Ignore request if region already exists. + if (is_same_as(next->data(), addr, size, stack)) { return true; } - if (rgn->adjacent_to(addr, size)) { - // special case to expand prior region if there is no next region - LinkedListNode<CommittedMemoryRegion>* next = node->next(); - if (next == NULL && rgn->call_stack()->equals(stack)) { - VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag()); - // the two adjacent regions have the same call stack, merge them - rgn->expand_region(addr, size); - VirtualMemorySummary::record_committed_memory(rgn->size(), flag()); - return true; - } - } + // The new region is after prev, and either overlaps with the + // next region (and maybe more regions), or overlaps with no region. + if (next->data()->overlap_region(addr, size)) { + // Remove _all_ overlapping regions, and parts of regions, + // in preparation for the addition of this new region. + remove_uncommitted_region(addr, size); - if (rgn->overlap_region(addr, size)) { - // Clear a space for this region in the case it overlaps with any regions. - remove_uncommitted_region(addr, size); - break; // commit below + // The remove could have split a region into two and created a + // new prev region. Need to reset the prev and next pointers. + prev = find_preceding_node_from((prev != NULL ? prev : _committed_regions.head()), addr); + next = (prev != NULL ? prev->next() : _committed_regions.head()); } - if (rgn->end() >= addr + size){ - break; - } - node = node->next(); } - // New committed region - VirtualMemorySummary::record_committed_memory(size, flag()); - return add_committed_region(committed_rgn); + // At this point the previous overlapping regions have been + // cleared, and the full region is guaranteed to be inserted. + VirtualMemorySummary::record_committed_memory(size, flag()); + + // Try to merge with prev and possibly next. 
+ if (try_merge_with(prev, addr, size, stack)) { + if (try_merge_with(prev, next)) { + // prev was expanded to contain the new region + // and next, need to remove next from the list + _committed_regions.remove_after(prev); + } + + return true; } -void ReservedMemoryRegion::set_all_committed(bool b) { - if (all_committed() != b) { - _all_committed = b; - if (b) { - VirtualMemorySummary::record_committed_memory(size(), flag()); - } + // Didn't merge with prev, try with next. + if (try_merge_with(next, addr, size, stack)) { + return true; } + + // Couldn't merge with any regions - create a new region. + return add_committed_region(CommittedMemoryRegion(addr, size, stack)); } bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node, @@ -135,94 +189,57 @@ } bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) { - // uncommit stack guard pages - if (flag() == mtThreadStack && !same_region(addr, sz)) { - return true; - } - assert(addr != NULL, "Invalid address"); assert(sz > 0, "Invalid size"); - if (all_committed()) { - assert(_committed_regions.is_empty(), "Sanity check"); - assert(contain_region(addr, sz), "Reserved region does not contain this region"); - set_all_committed(false); - VirtualMemorySummary::record_uncommitted_memory(sz, flag()); - if (same_region(addr, sz)) { + CommittedMemoryRegion del_rgn(addr, sz, *call_stack()); + address end = addr + sz; + + LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head(); + LinkedListNode<CommittedMemoryRegion>* prev = NULL; + CommittedMemoryRegion* crgn; + + while (head != NULL) { + crgn = head->data(); + + if (crgn->same_region(addr, sz)) { + VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag()); + _committed_regions.remove_after(prev); return true; - } else { - CommittedMemoryRegion rgn(base(), size(), *call_stack()); - if (rgn.base() == addr || rgn.end() == (addr + sz)) { - rgn.exclude_region(addr, sz); - return add_committed_region(rgn); + } + + // del_rgn contains crgn + if (del_rgn.contain_region(crgn->base(), crgn->size())) { + VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag()); + head = head->next(); + _committed_regions.remove_after(prev); + continue; // don't update head or prev + } + + // Found addr in the current crgn. There are 2 subcases: + if (crgn->contain_address(addr)) { + + // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn) + if (crgn->contain_address(end - 1)) { + VirtualMemorySummary::record_uncommitted_memory(sz, flag()); + return remove_uncommitted_region(head, addr, sz); // done! } else { - // split this region - // top of the whole region - address top =rgn.end(); - // use this region for lower part - size_t exclude_size = rgn.end() - addr; - rgn.exclude_region(addr, exclude_size); - if (add_committed_region(rgn)) { - // higher part - address high_base = addr + sz; - size_t high_size = top - high_base; - CommittedMemoryRegion high_rgn(high_base, high_size, NativeCallStack::EMPTY_STACK); - return add_committed_region(high_rgn); - } else { - return false; - } - } - } - } else { - CommittedMemoryRegion del_rgn(addr, sz, *call_stack()); - address end = addr + sz; - - LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head(); - LinkedListNode<CommittedMemoryRegion>* prev = NULL; - CommittedMemoryRegion* crgn; - - while (head != NULL) { - crgn = head->data(); - - if (crgn->same_region(addr, sz)) { - VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag()); - _committed_regions.remove_after(prev); - return true; + // (2) Did not find del_rgn's end in crgn. 
+ size_t size = crgn->end() - del_rgn.base(); + crgn->exclude_region(addr, size); + VirtualMemorySummary::record_uncommitted_memory(size, flag()); } - // del_rgn contains crgn - if (del_rgn.contain_region(crgn->base(), crgn->size())) { - VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag()); - head = head->next(); - _committed_regions.remove_after(prev); - continue; // don't update head or prev - } + } else if (crgn->contain_address(end - 1)) { + // Found del_rgn's end, but not its base addr. + size_t size = del_rgn.end() - crgn->base(); + crgn->exclude_region(crgn->base(), size); + VirtualMemorySummary::record_uncommitted_memory(size, flag()); + return true; // should be done if the list is sorted properly! + } - // Found addr in the current crgn. There are 2 subcases: - if (crgn->contain_address(addr)) { - - // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn) - if (crgn->contain_address(end - 1)) { - VirtualMemorySummary::record_uncommitted_memory(sz, flag()); - return remove_uncommitted_region(head, addr, sz); // done! - } else { - // (2) Did not find del_rgn's end in crgn. - size_t size = crgn->end() - del_rgn.base(); - crgn->exclude_region(addr, size); - VirtualMemorySummary::record_uncommitted_memory(size, flag()); - } - - } else if (crgn->contain_address(end - 1)) { - // Found del_rgn's end, but not its base addr. - size_t size = del_rgn.end() - crgn->base(); - crgn->exclude_region(crgn->base(), size); - VirtualMemorySummary::record_uncommitted_memory(size, flag()); - return true; // should be done if the list is sorted properly! - } - - prev = head; - head = head->next(); - } + prev = head; + head = head->next(); } return true; @@ -256,18 +273,14 @@ } size_t ReservedMemoryRegion::committed_size() const { - if (all_committed()) { - return size(); - } else { - size_t committed = 0; - LinkedListNode<CommittedMemoryRegion>* head = - _committed_regions.head(); - while (head != NULL) { - committed += head->data()->size(); - head = head->next(); - } - return committed; + size_t committed = 0; + LinkedListNode<CommittedMemoryRegion>* head = + _committed_regions.head(); + while (head != NULL) { + committed += head->data()->size(); + head = head->next(); } + return committed; } void ReservedMemoryRegion::set_flag(MEMFLAGS f) { @@ -279,6 +292,26 @@ } } +address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const { + assert(flag() == mtThreadStack, "Only for thread stack"); + LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head(); + address bottom = base(); + address top = base() + size(); + while (head != NULL) { + address committed_top = head->data()->base() + head->data()->size(); + if (committed_top < top) { + // committed stack guard pages, skip them + bottom = head->data()->base() + head->data()->size(); + head = head->next(); + } else { + assert(top == committed_top, "Sanity"); + break; + } + } + + return bottom; +} + bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) { if (level >= NMT_summary) { VirtualMemorySummary::initialize(); @@ -296,22 +329,16 @@ } bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size, - const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) { + const NativeCallStack& stack, MEMFLAGS flag) { assert(base_addr != NULL, "Invalid address"); assert(size > 0, "Invalid size"); assert(_reserved_regions != NULL, "Sanity check"); ReservedMemoryRegion rgn(base_addr, size, stack, flag); ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); - LinkedListNode<ReservedMemoryRegion>* node; + if (reserved_rgn == NULL) { 
VirtualMemorySummary::record_reserved_memory(size, flag); - node = _reserved_regions->add(rgn); - if (node != NULL) { - node->data()->set_all_committed(all_committed); - return true; - } else { - return false; - } + return _reserved_regions->add(rgn) != NULL; } else { if (reserved_rgn->same_region(base_addr, size)) { reserved_rgn->set_call_stack(stack); @@ -459,6 +486,32 @@ } } +// Walk all known thread stacks, snapshot their committed ranges. +class SnapshotThreadStackWalker : public VirtualMemoryWalker { +public: + SnapshotThreadStackWalker() {} + + bool do_allocation_site(const ReservedMemoryRegion* rgn) { + if (rgn->flag() == mtThreadStack) { + address stack_bottom = rgn->thread_stack_uncommitted_bottom(); + size_t stack_size = rgn->base() + rgn->size() - stack_bottom; + size_t committed_size = os::committed_stack_size(stack_bottom, stack_size); + if (committed_size > 0) { + ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn); + NativeCallStack ncs; // empty stack + + // Stack grows downward + region->add_committed_region(rgn->base() + rgn->size() - committed_size, committed_size, ncs); + } + } + return true; + } +}; + +void VirtualMemoryTracker::snapshot_thread_stacks() { + SnapshotThreadStackWalker walker; + walk_virtual_memory(&walker); +} bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) { assert(_reserved_regions != NULL, "Sanity check"); diff --git a/src/hotspot/share/services/virtualMemoryTracker.hpp b/src/hotspot/share/services/virtualMemoryTracker.hpp --- a/src/hotspot/share/services/virtualMemoryTracker.hpp +++ b/src/hotspot/share/services/virtualMemoryTracker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -160,9 +160,7 @@ as_snapshot()->by_type(to)->commit_memory(size); } - static inline void snapshot(VirtualMemorySnapshot* s) { - as_snapshot()->copy_to(s); - } + static void snapshot(VirtualMemorySnapshot* s); static VirtualMemorySnapshot* as_snapshot() { return (VirtualMemorySnapshot*)_snapshot; @@ -210,6 +208,8 @@ inline bool overlap_region(address addr, size_t sz) const { + assert(sz > 0, "Invalid size"); + assert(size() > 0, "Invalid size"); VirtualMemoryRegion rgn(addr, sz); return contain_address(addr) || contain_address(addr + sz - 1) || @@ -295,18 +295,14 @@ NativeCallStack _stack; MEMFLAGS _flag; - bool _all_committed; - public: ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack, MEMFLAGS flag = mtNone) : - VirtualMemoryRegion(base, size), _stack(stack), _flag(flag), - _all_committed(false) { } + VirtualMemoryRegion(base, size), _stack(stack), _flag(flag) { } ReservedMemoryRegion(address base, size_t size) : - VirtualMemoryRegion(base, size), _stack(NativeCallStack::EMPTY_STACK), _flag(mtNone), - _all_committed(false) { } + VirtualMemoryRegion(base, size), _stack(NativeCallStack::EMPTY_STACK), _flag(mtNone) { } // Copy constructor ReservedMemoryRegion(const ReservedMemoryRegion& rr) : @@ -338,6 +334,9 @@ return compare(rgn) == 0; } + // uncommitted thread stack bottom, above guard pages if there are any. 
+ address thread_stack_uncommitted_bottom() const; + bool add_committed_region(address addr, size_t size, const NativeCallStack& stack); bool remove_uncommitted_region(address addr, size_t size); @@ -347,9 +346,6 @@ // the new region void move_committed_regions(address addr, ReservedMemoryRegion& rgn); - inline bool all_committed() const { return _all_committed; } - void set_all_committed(bool b); - CommittedRegionIterator iterate_committed_regions() const { return CommittedRegionIterator(_committed_regions.head()); } @@ -360,17 +356,14 @@ _stack = *other.call_stack(); _flag = other.flag(); - _all_committed = other.all_committed(); - if (other.all_committed()) { - set_all_committed(true); - } else { - CommittedRegionIterator itr = other.iterate_committed_regions(); - const CommittedMemoryRegion* rgn = itr.next(); - while (rgn != NULL) { - _committed_regions.add(*rgn); - rgn = itr.next(); - } + + CommittedRegionIterator itr = other.iterate_committed_regions(); + const CommittedMemoryRegion* rgn = itr.next(); + while (rgn != NULL) { + _committed_regions.add(*rgn); + rgn = itr.next(); } + return *this; } @@ -396,14 +389,16 @@ // Main class called from MemTracker to track virtual memory allocations, commits and releases. class VirtualMemoryTracker : AllStatic { + friend class VirtualMemoryTrackerTest; + friend class ThreadStackTrackingTest; + public: static bool initialize(NMT_TrackingLevel level); // Late phase initialization static bool late_initialize(NMT_TrackingLevel level); - static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, - MEMFLAGS flag = mtNone, bool all_committed = false); + static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MEMFLAGS flag = mtNone); static bool add_committed_region (address base_addr, size_t size, const NativeCallStack& stack); static bool remove_uncommitted_region (address base_addr, size_t size); @@ -415,6 +410,9 @@ static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to); + // Snapshot current thread stacks + static void snapshot_thread_stacks(); + private: static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* _reserved_regions; }; diff --git a/src/hotspot/share/trace/traceevents.xml b/src/hotspot/share/trace/traceevents.xml --- a/src/hotspot/share/trace/traceevents.xml +++ b/src/hotspot/share/trace/traceevents.xml @@ -1,6 +1,6 @@ diff --git a/src/hotspot/share/utilities/copy.cpp b/src/hotspot/share/utilities/copy.cpp --- a/src/hotspot/share/utilities/copy.cpp +++ b/src/hotspot/share/utilities/copy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,10 +29,8 @@ // Copy bytes; larger units are filled atomically if everything is aligned. -void Copy::conjoint_memory_atomic(void* from, void* to, size_t size) { - address src = (address) from; - address dst = (address) to; - uintptr_t bits = (uintptr_t) src | (uintptr_t) dst | (uintptr_t) size; +void Copy::conjoint_memory_atomic(const void* from, void* to, size_t size) { + uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size; // (Note: We could improve performance by ignoring the low bits of size, // and putting a short cleanup loop after each bulk copy loop. @@ -43,14 +41,14 @@ // which may or may not want to include such optimizations.) 
if (bits % sizeof(jlong) == 0) { - Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst, size / sizeof(jlong)); + Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong)); } else if (bits % sizeof(jint) == 0) { - Copy::conjoint_jints_atomic((jint*) src, (jint*) dst, size / sizeof(jint)); + Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint)); } else if (bits % sizeof(jshort) == 0) { - Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort)); + Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort)); } else { // Not aligned, so no need to be atomic. - Copy::conjoint_jbytes((void*) src, (void*) dst, size); + Copy::conjoint_jbytes((const void*) from, (void*) to, size); } } diff --git a/src/hotspot/share/utilities/copy.hpp b/src/hotspot/share/utilities/copy.hpp --- a/src/hotspot/share/utilities/copy.hpp +++ b/src/hotspot/share/utilities/copy.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,32 +27,33 @@ #include "runtime/stubRoutines.hpp" #include "utilities/align.hpp" +#include "utilities/debug.hpp" #include "utilities/macros.hpp" // Assembly code for platforms that need it. extern "C" { - void _Copy_conjoint_words(HeapWord* from, HeapWord* to, size_t count); - void _Copy_disjoint_words(HeapWord* from, HeapWord* to, size_t count); + void _Copy_conjoint_words(const HeapWord* from, HeapWord* to, size_t count); + void _Copy_disjoint_words(const HeapWord* from, HeapWord* to, size_t count); - void _Copy_conjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count); - void _Copy_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count); + void _Copy_conjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count); + void _Copy_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count); - void _Copy_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count); - void _Copy_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count); + void _Copy_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count); + void _Copy_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count); - void _Copy_conjoint_bytes(void* from, void* to, size_t count); + void _Copy_conjoint_bytes(const void* from, void* to, size_t count); - void _Copy_conjoint_bytes_atomic (void* from, void* to, size_t count); - void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count); - void _Copy_conjoint_jints_atomic (jint* from, jint* to, size_t count); - void _Copy_conjoint_jlongs_atomic (jlong* from, jlong* to, size_t count); - void _Copy_conjoint_oops_atomic (oop* from, oop* to, size_t count); + void _Copy_conjoint_bytes_atomic (const void* from, void* to, size_t count); + void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count); + void _Copy_conjoint_jints_atomic (const jint* from, jint* to, size_t count); + void _Copy_conjoint_jlongs_atomic (const jlong* from, jlong* to, size_t count); + void _Copy_conjoint_oops_atomic (const oop* from, oop* to, size_t count); - void _Copy_arrayof_conjoint_bytes (HeapWord* from, HeapWord* to, size_t count); - void _Copy_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count); - void 
_Copy_arrayof_conjoint_jints (HeapWord* from, HeapWord* to, size_t count); - void _Copy_arrayof_conjoint_jlongs (HeapWord* from, HeapWord* to, size_t count); - void _Copy_arrayof_conjoint_oops (HeapWord* from, HeapWord* to, size_t count); + void _Copy_arrayof_conjoint_bytes (const HeapWord* from, HeapWord* to, size_t count); + void _Copy_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count); + void _Copy_arrayof_conjoint_jints (const HeapWord* from, HeapWord* to, size_t count); + void _Copy_arrayof_conjoint_jlongs (const HeapWord* from, HeapWord* to, size_t count); + void _Copy_arrayof_conjoint_oops (const HeapWord* from, HeapWord* to, size_t count); } class Copy : AllStatic { @@ -87,33 +88,33 @@ // HeapWords // Word-aligned words, conjoint, not atomic on each word - static void conjoint_words(HeapWord* from, HeapWord* to, size_t count) { - assert_params_ok(from, to, LogHeapWordSize); + static void conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { + assert_params_ok(from, to, HeapWordSize); pd_conjoint_words(from, to, count); } // Word-aligned words, disjoint, not atomic on each word - static void disjoint_words(HeapWord* from, HeapWord* to, size_t count) { - assert_params_ok(from, to, LogHeapWordSize); + static void disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { + assert_params_ok(from, to, HeapWordSize); assert_disjoint(from, to, count); pd_disjoint_words(from, to, count); } // Word-aligned words, disjoint, atomic on each word - static void disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { - assert_params_ok(from, to, LogHeapWordSize); + static void disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { + assert_params_ok(from, to, HeapWordSize); assert_disjoint(from, to, count); pd_disjoint_words_atomic(from, to, count); } // Object-aligned words, conjoint, not atomic on each word - static void aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { + static void aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { assert_params_aligned(from, to); pd_aligned_conjoint_words(from, to, count); } // Object-aligned words, disjoint, not atomic on each word - static void aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { + static void aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { assert_params_aligned(from, to); assert_disjoint(from, to, count); pd_aligned_disjoint_words(from, to, count); @@ -122,87 +123,87 @@ // bytes, jshorts, jints, jlongs, oops // bytes, conjoint, not atomic on each byte (not that it matters) - static void conjoint_jbytes(void* from, void* to, size_t count) { + static void conjoint_jbytes(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } // bytes, conjoint, atomic on each byte (not that it matters) - static void conjoint_jbytes_atomic(void* from, void* to, size_t count) { + static void conjoint_jbytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } // jshorts, conjoint, atomic on each jshort - static void conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { - assert_params_ok(from, to, LogBytesPerShort); + static void conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { + assert_params_ok(from, to, BytesPerShort); pd_conjoint_jshorts_atomic(from, to, count); } // jints, conjoint, atomic on each jint - static void conjoint_jints_atomic(jint* from, jint* to, size_t count) { - assert_params_ok(from, 
to, LogBytesPerInt); + static void conjoint_jints_atomic(const jint* from, jint* to, size_t count) { + assert_params_ok(from, to, BytesPerInt); pd_conjoint_jints_atomic(from, to, count); } // jlongs, conjoint, atomic on each jlong - static void conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { - assert_params_ok(from, to, LogBytesPerLong); + static void conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { + assert_params_ok(from, to, BytesPerLong); pd_conjoint_jlongs_atomic(from, to, count); } // oops, conjoint, atomic on each oop - static void conjoint_oops_atomic(oop* from, oop* to, size_t count) { - assert_params_ok(from, to, LogBytesPerHeapOop); + static void conjoint_oops_atomic(const oop* from, oop* to, size_t count) { + assert_params_ok(from, to, BytesPerHeapOop); pd_conjoint_oops_atomic(from, to, count); } // overloaded for UseCompressedOops - static void conjoint_oops_atomic(narrowOop* from, narrowOop* to, size_t count) { + static void conjoint_oops_atomic(const narrowOop* from, narrowOop* to, size_t count) { assert(sizeof(narrowOop) == sizeof(jint), "this cast is wrong"); - assert_params_ok(from, to, LogBytesPerInt); - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); + assert_params_ok(from, to, BytesPerInt); + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } // Copy a span of memory. If the span is an integral number of aligned // longs, words, or ints, copy those units atomically. // The largest atomic transfer unit is 8 bytes, or the largest power // of two which divides all of from, to, and size, whichever is smaller. - static void conjoint_memory_atomic(void* from, void* to, size_t size); + static void conjoint_memory_atomic(const void* from, void* to, size_t size); // bytes, conjoint array, atomic on each byte (not that it matters) - static void arrayof_conjoint_jbytes(HeapWord* from, HeapWord* to, size_t count) { + static void arrayof_conjoint_jbytes(const HeapWord* from, HeapWord* to, size_t count) { pd_arrayof_conjoint_bytes(from, to, count); } // jshorts, conjoint array, atomic on each jshort - static void arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { - assert_params_ok(from, to, LogBytesPerShort); + static void arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { + assert_params_ok(from, to, BytesPerShort); pd_arrayof_conjoint_jshorts(from, to, count); } // jints, conjoint array, atomic on each jint - static void arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { - assert_params_ok(from, to, LogBytesPerInt); + static void arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { + assert_params_ok(from, to, BytesPerInt); pd_arrayof_conjoint_jints(from, to, count); } // jlongs, conjoint array, atomic on each jlong - static void arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { - assert_params_ok(from, to, LogBytesPerLong); + static void arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { + assert_params_ok(from, to, BytesPerLong); pd_arrayof_conjoint_jlongs(from, to, count); } // oops, conjoint array, atomic on each oop - static void arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { - assert_params_ok(from, to, LogBytesPerHeapOop); + static void arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { + assert_params_ok(from, to, BytesPerHeapOop); pd_arrayof_conjoint_oops(from, to, count); } // Known overlap methods // Copy word-aligned words from higher to 
lower addresses, not atomic on each word - inline static void conjoint_words_to_lower(HeapWord* from, HeapWord* to, size_t byte_count) { + inline static void conjoint_words_to_lower(const HeapWord* from, HeapWord* to, size_t byte_count) { // byte_count is in bytes to check its alignment - assert_params_ok(from, to, LogHeapWordSize); + assert_params_ok(from, to, HeapWordSize); assert_byte_count_ok(byte_count, HeapWordSize); size_t count = align_up(byte_count, HeapWordSize) >> LogHeapWordSize; @@ -214,9 +215,9 @@ } // Copy word-aligned words from lower to higher addresses, not atomic on each word - inline static void conjoint_words_to_higher(HeapWord* from, HeapWord* to, size_t byte_count) { + inline static void conjoint_words_to_higher(const HeapWord* from, HeapWord* to, size_t byte_count) { // byte_count is in bytes to check its alignment - assert_params_ok(from, to, LogHeapWordSize); + assert_params_ok(from, to, HeapWordSize); assert_byte_count_ok(byte_count, HeapWordSize); size_t count = align_up(byte_count, HeapWordSize) >> LogHeapWordSize; @@ -271,7 +272,7 @@ // Fill word-aligned words, not atomic on each word // set_words static void fill_to_words(HeapWord* to, size_t count, juint value = 0) { - assert_params_ok(to, LogHeapWordSize); + assert_params_ok(to, HeapWordSize); pd_fill_to_words(to, count, value); } @@ -295,7 +296,7 @@ // Zero word-aligned words, not atomic on each word static void zero_to_words(HeapWord* to, size_t count) { - assert_params_ok(to, LogHeapWordSize); + assert_params_ok(to, HeapWordSize); pd_zero_to_words(to, count); } @@ -305,7 +306,7 @@ } private: - static bool params_disjoint(HeapWord* from, HeapWord* to, size_t count) { + static bool params_disjoint(const HeapWord* from, HeapWord* to, size_t count) { if (from < to) { return pointer_delta(to, from) >= count; } @@ -314,50 +315,30 @@ // These methods raise a fatal if they detect a problem. 
- static void assert_disjoint(HeapWord* from, HeapWord* to, size_t count) { -#ifdef ASSERT - if (!params_disjoint(from, to, count)) - basic_fatal("source and dest overlap"); -#endif + static void assert_disjoint(const HeapWord* from, HeapWord* to, size_t count) { + assert(params_disjoint(from, to, count), "source and dest overlap"); } - static void assert_params_ok(void* from, void* to, intptr_t log_align) { -#ifdef ASSERT - if (mask_bits((uintptr_t)from, right_n_bits(log_align)) != 0) - basic_fatal("not aligned"); - if (mask_bits((uintptr_t)to, right_n_bits(log_align)) != 0) - basic_fatal("not aligned"); -#endif + static void assert_params_ok(const void* from, void* to, intptr_t alignment) { + assert(is_aligned(from, alignment), "must be aligned: " INTPTR_FORMAT, p2i(from)); + assert(is_aligned(to, alignment), "must be aligned: " INTPTR_FORMAT, p2i(to)); } - static void assert_params_ok(HeapWord* to, intptr_t log_align) { -#ifdef ASSERT - if (mask_bits((uintptr_t)to, right_n_bits(log_align)) != 0) - basic_fatal("not word aligned"); -#endif + static void assert_params_ok(HeapWord* to, intptr_t alignment) { + assert(is_aligned(to, alignment), "must be aligned: " INTPTR_FORMAT, p2i(to)); } - static void assert_params_aligned(HeapWord* from, HeapWord* to) { -#ifdef ASSERT - if (mask_bits((uintptr_t)from, BytesPerLong-1) != 0) - basic_fatal("not long aligned"); - if (mask_bits((uintptr_t)to, BytesPerLong-1) != 0) - basic_fatal("not long aligned"); -#endif + + static void assert_params_aligned(const HeapWord* from, HeapWord* to) { + assert(is_aligned(from, BytesPerLong), "must be aligned: " INTPTR_FORMAT, p2i(from)); + assert(is_aligned(to, BytesPerLong), "must be aligned: " INTPTR_FORMAT, p2i(to)); } static void assert_params_aligned(HeapWord* to) { -#ifdef ASSERT - if (mask_bits((uintptr_t)to, BytesPerLong-1) != 0) - basic_fatal("not long aligned"); -#endif + assert(is_aligned(to, BytesPerLong), "must be aligned: " INTPTR_FORMAT, p2i(to)); } static void assert_byte_count_ok(size_t byte_count, size_t unit_size) { -#ifdef ASSERT - if (!is_aligned(byte_count, unit_size)) { - basic_fatal("byte count must be aligned"); - } -#endif + assert(is_aligned(byte_count, unit_size), "byte count must be aligned"); } // Platform dependent implementations of the above methods. diff --git a/src/hotspot/share/utilities/debug.cpp b/src/hotspot/share/utilities/debug.cpp --- a/src/hotspot/share/utilities/debug.cpp +++ b/src/hotspot/share/utilities/debug.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -503,12 +503,6 @@ SystemDictionary::print(); } - -extern "C" void safepoints() { - Command c("safepoints"); - SafepointSynchronize::print_state(); -} - #endif // !PRODUCT extern "C" void pss() { // print all stacks diff --git a/src/hotspot/share/utilities/decoder.hpp b/src/hotspot/share/utilities/decoder.hpp --- a/src/hotspot/share/utilities/decoder.hpp +++ b/src/hotspot/share/utilities/decoder.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,12 +33,10 @@ class AbstractDecoder : public CHeapObj<mtInternal> { public: - virtual ~AbstractDecoder() {} - // status code for decoding native C frame enum decoder_status { not_available = -10, // real decoder is not available - no_error = 0, // successfully decoded frames + no_error = 0, // no error encountered out_of_memory, // out of memory file_invalid, // invalid elf file file_not_found, // could not found symbol file (on windows), such as jvm.pdb or jvm.map @@ -46,6 +44,12 @@ helper_init_error // SymInitialize failed (Windows only) }; +protected: + decoder_status _decoder_status; + +public: + virtual ~AbstractDecoder() {} + // decode an pc address to corresponding function name and an offset from the beginning of // the function // @@ -68,11 +72,8 @@ } static bool is_error(decoder_status status) { - return (status > 0); + return (status > no_error); } - -protected: - decoder_status _decoder_status; }; // Do nothing decoder @@ -96,10 +97,8 @@ virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } - }; - class Decoder : AllStatic { public: static bool decode(address pc, char* buf, int buflen, int* offset, const char* modulepath = NULL, bool demangle = true); diff --git a/src/hotspot/share/utilities/elfFile.cpp b/src/hotspot/share/utilities/elfFile.cpp --- a/src/hotspot/share/utilities/elfFile.cpp +++ b/src/hotspot/share/utilities/elfFile.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,60 +31,150 @@ #include #include +#include "logging/log.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "utilities/decoder.hpp" #include "utilities/elfFile.hpp" #include "utilities/elfFuncDescTable.hpp" #include "utilities/elfStringTable.hpp" #include "utilities/elfSymbolTable.hpp" +#include "utilities/ostream.hpp" +// For test only, disable elf section cache and force to read from file directly. bool ElfFile::_do_not_cache_elf_section = false; -ElfFile::ElfFile(const char* filepath) { - assert(filepath, "null file path"); - memset(&m_elfHdr, 0, sizeof(m_elfHdr)); - m_string_tables = NULL; - m_symbol_tables = NULL; - m_funcDesc_table = NULL; - m_next = NULL; - m_status = NullDecoder::no_error; +ElfSection::ElfSection(FILE* fd, const Elf_Shdr& hdr) : _section_data(NULL) { + _stat = load_section(fd, hdr); +} + +ElfSection::~ElfSection() { + if (_section_data != NULL) { + os::free(_section_data); + } +} + +NullDecoder::decoder_status ElfSection::load_section(FILE* const fd, const Elf_Shdr& shdr) { + memcpy((void*)&_section_hdr, (const void*)&shdr, sizeof(shdr)); + + if (ElfFile::_do_not_cache_elf_section) { + log_debug(decoder)("Elf section cache is disabled"); + return NullDecoder::no_error; + } + + _section_data = os::malloc(shdr.sh_size, mtInternal); + // Not enough memory for caching. It is okay, we can try to read from + // file instead. 
+ if (_section_data == NULL) return NullDecoder::no_error; + + MarkedFileReader mfd(fd); + if (mfd.has_mark() && + mfd.set_position(shdr.sh_offset) && + mfd.read(_section_data, shdr.sh_size)) { + return NullDecoder::no_error; + } else { + os::free(_section_data); + _section_data = NULL; + return NullDecoder::file_invalid; + } +} + +bool FileReader::read(void* buf, size_t size) { + assert(buf != NULL, "no buffer"); + assert(size > 0, "no space"); + return fread(buf, size, 1, _fd) == 1; +} + +int FileReader::read_buffer(void* buf, size_t size) { + assert(buf != NULL, "no buffer"); + assert(size > 0, "no space"); + return fread(buf, 1, size, _fd); +} + +bool FileReader::set_position(long offset) { + return fseek(_fd, offset, SEEK_SET) == 0; +} + +MarkedFileReader::MarkedFileReader(FILE* fd) : FileReader(fd) { + _marked_pos = ftell(fd); +} + +MarkedFileReader::~MarkedFileReader() { + if (_marked_pos != -1) { + set_position(_marked_pos); + } +} + +ElfFile::ElfFile(const char* filepath) : + _string_tables(NULL), _symbol_tables(NULL), _funcDesc_table(NULL), + _next(NULL), _status(NullDecoder::no_error), + _shdr_string_table(NULL), _file(NULL), _filepath(NULL) { + memset(&_elfHdr, 0, sizeof(_elfHdr)); int len = strlen(filepath) + 1; - m_filepath = (const char*)os::malloc(len * sizeof(char), mtInternal); - if (m_filepath != NULL) { - strcpy((char*)m_filepath, filepath); - m_file = fopen(filepath, "r"); - if (m_file != NULL) { - load_tables(); - } else { - m_status = NullDecoder::file_not_found; - } - } else { - m_status = NullDecoder::out_of_memory; + _filepath = (char*)os::malloc(len * sizeof(char), mtInternal); + if (_filepath == NULL) { + _status = NullDecoder::out_of_memory; + return; + } + strcpy(_filepath, filepath); + + _status = parse_elf(filepath); + + // we no longer need section header string table + if (_shdr_string_table != NULL) { + delete _shdr_string_table; + _shdr_string_table = NULL; } } ElfFile::~ElfFile() { - if (m_string_tables != NULL) { - delete m_string_tables; + if (_shdr_string_table != NULL) { + delete _shdr_string_table; } - if (m_symbol_tables != NULL) { - delete m_symbol_tables; + cleanup_tables(); + + if (_file != NULL) { + fclose(_file); } - if (m_file != NULL) { - fclose(m_file); + if (_filepath != NULL) { + os::free((void*)_filepath); } - if (m_filepath != NULL) { - os::free((void*)m_filepath); + if (_next != NULL) { + delete _next; + } +} + +void ElfFile::cleanup_tables() { + if (_string_tables != NULL) { + delete _string_tables; + _string_tables = NULL; } - if (m_next != NULL) { - delete m_next; + if (_symbol_tables != NULL) { + delete _symbol_tables; + _symbol_tables = NULL; } -}; + if (_funcDesc_table != NULL) { + delete _funcDesc_table; + _funcDesc_table = NULL; + } +} + +NullDecoder::decoder_status ElfFile::parse_elf(const char* filepath) { + assert(filepath, "null file path"); + + _file = fopen(filepath, "r"); + if (_file != NULL) { + return load_tables(); + } else { + return NullDecoder::file_not_found; + } +} //Check elf header to ensure the file is valid. 
bool ElfFile::is_elf_file(Elf_Ehdr& hdr) { @@ -96,116 +186,134 @@ ELFDATANONE != hdr.e_ident[EI_DATA]); } -bool ElfFile::load_tables() { - assert(m_file, "file not open"); - assert(!NullDecoder::is_error(m_status), "already in error"); +NullDecoder::decoder_status ElfFile::load_tables() { + assert(_file, "file not open"); + assert(!NullDecoder::is_error(_status), "already in error"); + FileReader freader(fd()); // read elf file header - if (fread(&m_elfHdr, sizeof(m_elfHdr), 1, m_file) != 1) { - m_status = NullDecoder::file_invalid; - return false; + if (!freader.read(&_elfHdr, sizeof(_elfHdr))) { + return NullDecoder::file_invalid; } - if (!is_elf_file(m_elfHdr)) { - m_status = NullDecoder::file_invalid; - return false; + // Check signature + if (!is_elf_file(_elfHdr)) { + return NullDecoder::file_invalid; } // walk elf file's section headers, and load string tables Elf_Shdr shdr; - if (!fseek(m_file, m_elfHdr.e_shoff, SEEK_SET)) { - if (NullDecoder::is_error(m_status)) return false; + if (!freader.set_position(_elfHdr.e_shoff)) { + return NullDecoder::file_invalid; + } - for (int index = 0; index < m_elfHdr.e_shnum; index ++) { - if (fread((void*)&shdr, sizeof(Elf_Shdr), 1, m_file) != 1) { - m_status = NullDecoder::file_invalid; - return false; + for (int index = 0; index < _elfHdr.e_shnum; index ++) { + if (!freader.read(&shdr, sizeof(shdr))) { + return NullDecoder::file_invalid; + } + + if (shdr.sh_type == SHT_STRTAB) { + // string tables + ElfStringTable* table = new (std::nothrow) ElfStringTable(fd(), shdr, index); + if (table == NULL) { + return NullDecoder::out_of_memory; } - if (shdr.sh_type == SHT_STRTAB) { - // string tables - ElfStringTable* table = new (std::nothrow) ElfStringTable(m_file, shdr, index); - if (table == NULL) { - m_status = NullDecoder::out_of_memory; - return false; - } + if (index == _elfHdr.e_shstrndx) { + assert(_shdr_string_table == NULL, "Only set once"); + _shdr_string_table = table; + } else { add_string_table(table); - } else if (shdr.sh_type == SHT_SYMTAB || shdr.sh_type == SHT_DYNSYM) { - // symbol tables - ElfSymbolTable* table = new (std::nothrow) ElfSymbolTable(m_file, shdr); - if (table == NULL) { - m_status = NullDecoder::out_of_memory; - return false; - } - add_symbol_table(table); + } + } else if (shdr.sh_type == SHT_SYMTAB || shdr.sh_type == SHT_DYNSYM) { + // symbol tables + ElfSymbolTable* table = new (std::nothrow) ElfSymbolTable(fd(), shdr); + if (table == NULL) { + return NullDecoder::out_of_memory; + } + add_symbol_table(table); + } + } +#if defined(PPC64) && !defined(ABI_ELFv2) + // Now read the .opd section which contains the PPC64 function descriptor table. + // The .opd section is only available on PPC64 (see for example: + // http://refspecs.linuxfoundation.org/LSB_3.1.1/LSB-Core-PPC64/LSB-Core-PPC64/specialsections.html) + // so this code should do no harm on other platforms but because of performance reasons we only + // execute it on PPC64 platforms. + // Notice that we can only find the .opd section after we have successfully read in the string + // tables in the previous loop, because we need to query the name of each section which is + // contained in one of the string tables (i.e. the one with the index _elfHdr.e_shstrndx). 
+
+  // Find the .opd section by name (section_by_name() repositions the file internally)
+  int sect_index = section_by_name(".opd", shdr);
+
+  if (sect_index == -1) {
+    return NullDecoder::file_invalid;
+  }
+
+  _funcDesc_table = new (std::nothrow) ElfFuncDescTable(_file, shdr, sect_index);
+  if (_funcDesc_table == NULL) {
+    return NullDecoder::out_of_memory;
+  }
+#endif
+  return NullDecoder::no_error;
+}
+
+int ElfFile::section_by_name(const char* name, Elf_Shdr& hdr) {
+  assert(name != NULL, "No section name");
+  size_t len = strlen(name) + 1;
+  ResourceMark rm;
+  char* buf = NEW_RESOURCE_ARRAY(char, len);
+  if (buf == NULL) {
+    return -1;
+  }
+
+  assert(_shdr_string_table != NULL, "Section header string table should be loaded");
+  ElfStringTable* const table = _shdr_string_table;
+  MarkedFileReader mfd(fd());
+  if (!mfd.has_mark() || !mfd.set_position(_elfHdr.e_shoff)) return -1;
+
+  int sect_index = -1;
+  for (int index = 0; index < _elfHdr.e_shnum; index ++) {
+    if (!mfd.read((void*)&hdr, sizeof(hdr))) {
+      break;
+    }
+    if (table->string_at(hdr.sh_name, buf, len)) {
+      if (strncmp(buf, name, len) == 0) {
+        sect_index = index;
+        break;
+      }
+    }
+  }
-
-#if defined(PPC64) && !defined(ABI_ELFv2)
-  // Now read the .opd section wich contains the PPC64 function descriptor table.
-  // The .opd section is only available on PPC64 (see for example:
-  // http://refspecs.linuxfoundation.org/LSB_3.1.1/LSB-Core-PPC64/LSB-Core-PPC64/specialsections.html)
-  // so this code should do no harm on other platforms but because of performance reasons we only
-  // execute it on PPC64 platforms.
-  // Notice that we can only find the .opd section after we have successfully read in the string
-  // tables in the previous loop, because we need to query the name of each section which is
-  // contained in one of the string tables (i.e. the one with the index m_elfHdr.e_shstrndx).
- - // Reset the file pointer - if (fseek(m_file, m_elfHdr.e_shoff, SEEK_SET)) { - m_status = NullDecoder::file_invalid; - return false; - } - for (int index = 0; index < m_elfHdr.e_shnum; index ++) { - if (fread((void*)&shdr, sizeof(Elf_Shdr), 1, m_file) != 1) { - m_status = NullDecoder::file_invalid; - return false; - } - if (m_elfHdr.e_shstrndx != SHN_UNDEF && shdr.sh_type == SHT_PROGBITS) { - ElfStringTable* string_table = get_string_table(m_elfHdr.e_shstrndx); - if (string_table == NULL) { - m_status = NullDecoder::file_invalid; - return false; - } - char buf[8]; // '8' is enough because we only want to read ".opd" - if (string_table->string_at(shdr.sh_name, buf, sizeof(buf)) && !strncmp(".opd", buf, 4)) { - m_funcDesc_table = new (std::nothrow) ElfFuncDescTable(m_file, shdr, index); - if (m_funcDesc_table == NULL) { - m_status = NullDecoder::out_of_memory; - return false; - } - break; - } - } - } -#endif - } - return true; + return sect_index; } bool ElfFile::decode(address addr, char* buf, int buflen, int* offset) { // something already went wrong, just give up - if (NullDecoder::is_error(m_status)) { + if (NullDecoder::is_error(_status)) { return false; } - ElfSymbolTable* symbol_table = m_symbol_tables; + int string_table_index; int pos_in_string_table; int off = INT_MAX; bool found_symbol = false; + ElfSymbolTable* symbol_table = _symbol_tables; + while (symbol_table != NULL) { - if (symbol_table->lookup(addr, &string_table_index, &pos_in_string_table, &off, m_funcDesc_table)) { + if (symbol_table->lookup(addr, &string_table_index, &pos_in_string_table, &off, _funcDesc_table)) { found_symbol = true; break; } - symbol_table = symbol_table->m_next; + symbol_table = symbol_table->next(); } - if (!found_symbol) return false; + if (!found_symbol) { + return false; + } ElfStringTable* string_table = get_string_table(string_table_index); if (string_table == NULL) { - m_status = NullDecoder::file_invalid; + _status = NullDecoder::file_invalid; return false; } if (offset) *offset = off; @@ -213,74 +321,31 @@ return string_table->string_at(pos_in_string_table, buf, buflen); } - void ElfFile::add_symbol_table(ElfSymbolTable* table) { - if (m_symbol_tables == NULL) { - m_symbol_tables = table; + if (_symbol_tables == NULL) { + _symbol_tables = table; } else { - table->m_next = m_symbol_tables; - m_symbol_tables = table; + table->set_next(_symbol_tables); + _symbol_tables = table; } } void ElfFile::add_string_table(ElfStringTable* table) { - if (m_string_tables == NULL) { - m_string_tables = table; + if (_string_tables == NULL) { + _string_tables = table; } else { - table->m_next = m_string_tables; - m_string_tables = table; + table->set_next(_string_tables); + _string_tables = table; } } ElfStringTable* ElfFile::get_string_table(int index) { - ElfStringTable* p = m_string_tables; + ElfStringTable* p = _string_tables; while (p != NULL) { if (p->index() == index) return p; - p = p->m_next; + p = p->next(); } return NULL; } -#ifdef LINUX -bool ElfFile::specifies_noexecstack(const char* filepath) { - // Returns true if the elf file is marked NOT to require an executable stack, - // or if the file could not be opened. - // Returns false if the elf file requires an executable stack, the stack flag - // is not set at all, or if the file can not be read. - if (filepath == NULL) return true; - - FILE* file = fopen(filepath, "r"); - if (file == NULL) return true; - - // AARCH64 defaults to noexecstack. All others default to execstack. 
-#ifdef AARCH64
-  bool result = true;
-#else
-  bool result = false;
-#endif
-
-  // Read file header
-  Elf_Ehdr head;
-  if (fread(&head, sizeof(Elf_Ehdr), 1, file) == 1 &&
-      is_elf_file(head) &&
-      fseek(file, head.e_phoff, SEEK_SET) == 0) {
-
-    // Read program header table
-    Elf_Phdr phdr;
-    for (int index = 0; index < head.e_phnum; index ++) {
-      if (fread((void*)&phdr, sizeof(Elf_Phdr), 1, file) != 1) {
-        result = false;
-        break;
-      }
-      if (phdr.p_type == PT_GNU_STACK) {
-        result = (phdr.p_flags == (PF_R | PF_W));
-        break;
-      }
-    }
-  }
-  fclose(file);
-  return result;
-}
-#endif // LINUX
-
 #endif // !_WINDOWS && !__APPLE__
diff --git a/src/hotspot/share/utilities/elfFile.hpp b/src/hotspot/share/utilities/elfFile.hpp
--- a/src/hotspot/share/utilities/elfFile.hpp
+++ b/src/hotspot/share/utilities/elfFile.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_UTILITIES_ELF_FILE_HPP
 #define SHARE_VM_UTILITIES_ELF_FILE_HPP
 
-#if !defined(_WINDOWS) && !defined(__APPLE__)
+#if !defined(_WINDOWS) && !defined(__APPLE__) && !defined(_AIX)
 
 #if defined(__OpenBSD__)
 #include
@@ -57,7 +57,6 @@
 typedef Elf32_Off Elf_Off;
 typedef Elf32_Addr Elf_Addr;
-
 typedef Elf32_Ehdr Elf_Ehdr;
 typedef Elf32_Shdr Elf_Shdr;
 typedef Elf32_Phdr Elf_Phdr;
@@ -72,46 +71,126 @@
 #include "memory/allocation.hpp"
 #include "utilities/decoder.hpp"
-
 class ElfStringTable;
 class ElfSymbolTable;
 class ElfFuncDescTable;
 
+// ELF section, may or may not have cached data
+class ElfSection VALUE_OBJ_CLASS_SPEC {
+private:
+  Elf_Shdr      _section_hdr;
+  void*         _section_data;
+  NullDecoder::decoder_status _stat;
+public:
+  ElfSection(FILE* fd, const Elf_Shdr& hdr);
+  ~ElfSection();
 
-// On Solaris/Linux platforms, libjvm.so does contain all private symbols.
+  NullDecoder::decoder_status status() const { return _stat; }
+
+  const Elf_Shdr* section_header() const { return &_section_hdr; }
+  const void* section_data() const { return (const void*)_section_data; }
+private:
+  // Load this section.
+  // Note that it returns no_error even when it fails to cache the section data
+  // due to lack of memory; callers then fall back to reading from the file.
+  NullDecoder::decoder_status load_section(FILE* const file, const Elf_Shdr& hdr);
+};
+
+class FileReader : public StackObj {
+protected:
+  FILE* const _fd;
+public:
+  FileReader(FILE* const fd) : _fd(fd) {};
+  bool read(void* buf, size_t size);
+  int read_buffer(void* buf, size_t size);
+  bool set_position(long offset);
+};
+
+// Marks the current position, so we can get back to it after
+// reads.
+class MarkedFileReader : public FileReader {
+private:
+  long  _marked_pos;
+public:
+  MarkedFileReader(FILE* const fd);
+  ~MarkedFileReader();
+
+  bool has_mark() const { return _marked_pos >= 0; }
+};
+
 // ElfFile is basically an elf file parser, which can lookup the symbol
 // that is the nearest to the given address.
 // Beware, this code is called from vm error reporting code, when vm is already
 // in "error" state, so there are scenarios where lookup will fail. We want this
 // part of code to be very defensive, and bail out if anything went wrong.
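Before the ElfFile declaration itself, a hedged usage sketch of the reader helpers declared above (the function name read_section_body and its error handling are illustrative assumptions, not part of the patch): the mark is taken at construction, has_mark() guards against a failed ftell(), and the destructor restores the caller's file position.

static bool read_section_body(FILE* fd, long sh_offset, void* buf, size_t sh_size) {
  MarkedFileReader mfd(fd);           // remembers the current position
  if (!mfd.has_mark()) {              // ftell() failed, so the position cannot be restored
    return false;
  }
  return mfd.set_position(sh_offset)  // seek to the section body
      && mfd.read(buf, sh_size);      // all-or-nothing read
}                                     // ~MarkedFileReader() seeks back to the mark

The ElfFile declaration follows.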
- class ElfFile: public CHeapObj { friend class ElfDecoder; - public: + +private: + // link ElfFiles + ElfFile* _next; + + // Elf file + char* _filepath; + FILE* _file; + + // Elf header + Elf_Ehdr _elfHdr; + + // symbol tables + ElfSymbolTable* _symbol_tables; + + // regular string tables + ElfStringTable* _string_tables; + + // section header string table, used for finding section name + ElfStringTable* _shdr_string_table; + + // function descriptors table + ElfFuncDescTable* _funcDesc_table; + + NullDecoder::decoder_status _status; + +public: ElfFile(const char* filepath); ~ElfFile(); bool decode(address addr, char* buf, int buflen, int* offset); - const char* filepath() { - return m_filepath; + + const char* filepath() const { + return _filepath; } - bool same_elf_file(const char* filepath) { - assert(filepath, "null file path"); - assert(m_filepath, "already out of memory"); - return (m_filepath && !strcmp(filepath, m_filepath)); + bool same_elf_file(const char* filepath) const { + assert(filepath != NULL, "null file path"); + return (_filepath != NULL && !strcmp(filepath, _filepath)); } - NullDecoder::decoder_status get_status() { - return m_status; + NullDecoder::decoder_status get_status() const { + return _status; } - private: + // Returns true if the elf file is marked NOT to require an executable stack, + // or if the file could not be opened. + // Returns false if the elf file requires an executable stack, the stack flag + // is not set at all, or if the file can not be read. + // On systems other than linux it always returns false. + static bool specifies_noexecstack(const char* filepath) NOT_LINUX({ return false; }); +private: // sanity check, if the file is a real elf file static bool is_elf_file(Elf_Ehdr&); - // load string tables from the elf file - bool load_tables(); + // parse this elf file + NullDecoder::decoder_status parse_elf(const char* filename); + + // load string, symbol and function descriptor tables from the elf file + NullDecoder::decoder_status load_tables(); + + ElfFile* next() const { return _next; } + void set_next(ElfFile* file) { _next = file; } + + // find a section by name, return section index + // if there is no such section, return -1 + int section_by_name(const char* name, Elf_Shdr& hdr); // string tables are stored in a linked list void add_string_table(ElfStringTable* table); @@ -122,39 +201,15 @@ // return a string table at specified section index ElfStringTable* get_string_table(int index); -protected: - ElfFile* next() const { return m_next; } - void set_next(ElfFile* file) { m_next = file; } - public: - // Returns true if the elf file is marked NOT to require an executable stack, - // or if the file could not be opened. - // Returns false if the elf file requires an executable stack, the stack flag - // is not set at all, or if the file can not be read. - // On systems other than linux it always returns false. 
- static bool specifies_noexecstack(const char* filepath) NOT_LINUX({ return false; }); + FILE* const fd() const { return _file; } - protected: - ElfFile* m_next; + // Cleanup string, symbol and function descriptor tables + void cleanup_tables(); - private: - // file - const char* m_filepath; - FILE* m_file; - - // Elf header - Elf_Ehdr m_elfHdr; - - // symbol tables - ElfSymbolTable* m_symbol_tables; - - // string tables - ElfStringTable* m_string_tables; - - // function descriptors table - ElfFuncDescTable* m_funcDesc_table; - - NullDecoder::decoder_status m_status; +public: + // For whitebox test + static bool _do_not_cache_elf_section; }; #endif // !_WINDOWS && !__APPLE__ diff --git a/src/hotspot/share/utilities/elfFuncDescTable.cpp b/src/hotspot/share/utilities/elfFuncDescTable.cpp --- a/src/hotspot/share/utilities/elfFuncDescTable.cpp +++ b/src/hotspot/share/utilities/elfFuncDescTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2013 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -30,7 +30,8 @@ #include "memory/allocation.inline.hpp" #include "utilities/elfFuncDescTable.hpp" -ElfFuncDescTable::ElfFuncDescTable(FILE* file, Elf_Shdr shdr, int index) { +ElfFuncDescTable::ElfFuncDescTable(FILE* file, Elf_Shdr shdr, int index) : + _file(file), _index(index), _section(file, shdr) { assert(file, "null file handle"); // The actual function address (i.e. function entry point) is always the // first value in the function descriptor (on IA64 and PPC64 they look as follows): @@ -39,62 +40,33 @@ // Unfortunately 'shdr.sh_entsize' doesn't always seem to contain this size (it's zero on PPC64) so we can't assert // assert(IA64_ONLY(2) PPC64_ONLY(3) * sizeof(address) == shdr.sh_entsize, "Size mismatch for '.opd' section entries"); - m_funcDescs = NULL; - m_file = file; - m_index = index; - m_status = NullDecoder::no_error; - - // try to load the function descriptor table - long cur_offset = ftell(file); - if (cur_offset != -1) { - // call malloc so we can back up if memory allocation fails. - m_funcDescs = (address*)os::malloc(shdr.sh_size, mtInternal); - if (m_funcDescs) { - if (fseek(file, shdr.sh_offset, SEEK_SET) || - fread((void*)m_funcDescs, shdr.sh_size, 1, file) != 1 || - fseek(file, cur_offset, SEEK_SET)) { - m_status = NullDecoder::file_invalid; - os::free(m_funcDescs); - m_funcDescs = NULL; - } - } - if (!NullDecoder::is_error(m_status)) { - memcpy(&m_shdr, &shdr, sizeof(Elf_Shdr)); - } - } else { - m_status = NullDecoder::file_invalid; - } + _status = _section.status(); } ElfFuncDescTable::~ElfFuncDescTable() { - if (m_funcDescs != NULL) { - os::free(m_funcDescs); - } } address ElfFuncDescTable::lookup(Elf_Word index) { - if (NullDecoder::is_error(m_status)) { + if (NullDecoder::is_error(_status)) { return NULL; } - if (m_funcDescs != NULL) { - if (m_shdr.sh_size > 0 && m_shdr.sh_addr <= index && index <= m_shdr.sh_addr + m_shdr.sh_size) { - // Notice that 'index' is a byte-offset into the function descriptor table. 
-      return m_funcDescs[(index - m_shdr.sh_addr) / sizeof(address)];
-    }
+  address* func_descs = cached_func_descs();
+  const Elf_Shdr* shdr = _section.section_header();
+  if (!(shdr->sh_size > 0 && shdr->sh_addr <= index && index <= shdr->sh_addr + shdr->sh_size)) {
+    // don't put the whole decoder in error mode if we just tried a wrong index
     return NULL;
+  }
+
+  if (func_descs != NULL) {
+    return func_descs[(index - shdr->sh_addr) / sizeof(address)];
   } else {
-    long cur_pos;
+    MarkedFileReader mfd(_file);
     address addr;
-    if (!(m_shdr.sh_size > 0 && m_shdr.sh_addr <= index && index <= m_shdr.sh_addr + m_shdr.sh_size)) {
-      // don't put the whole decoder in error mode if we just tried a wrong index
-      return NULL;
-    }
-    if ((cur_pos = ftell(m_file)) == -1 ||
-        fseek(m_file, m_shdr.sh_offset + index - m_shdr.sh_addr, SEEK_SET) ||
-        fread(&addr, sizeof(addr), 1, m_file) != 1 ||
-        fseek(m_file, cur_pos, SEEK_SET)) {
-      m_status = NullDecoder::file_invalid;
+    if (!mfd.has_mark() ||
+        !mfd.set_position(shdr->sh_offset + index - shdr->sh_addr) ||
+        !mfd.read((void*)&addr, sizeof(addr))) {
+      _status = NullDecoder::file_invalid;
       return NULL;
     }
     return addr;
diff --git a/src/hotspot/share/utilities/elfFuncDescTable.hpp b/src/hotspot/share/utilities/elfFuncDescTable.hpp
--- a/src/hotspot/share/utilities/elfFuncDescTable.hpp
+++ b/src/hotspot/share/utilities/elfFuncDescTable.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -116,32 +116,31 @@
 class ElfFuncDescTable: public CHeapObj {
   friend class ElfFile;
- public:
+private:
+  // holds the complete function descriptor section if
+  // we can allocate enough memory
+  ElfSection _section;
+
+  // file that contains the function descriptor table
+  FILE* const _file;
+
+  // The section index of this function descriptor (i.e. '.opd') section in the ELF file
+  const int _index;
+
+  NullDecoder::decoder_status _status;
+public:
   ElfFuncDescTable(FILE* file, Elf_Shdr shdr, int index);
   ~ElfFuncDescTable();
 
   // return the function address for the function descriptor at 'index' or NULL on error
   address lookup(Elf_Word index);
 
-  int get_index() { return m_index; };
+  int get_index() const { return _index; };
 
-  NullDecoder::decoder_status get_status() { return m_status; };
+  NullDecoder::decoder_status get_status() const { return _status; };
 
- protected:
-  // holds the complete function descriptor section if
-  // we can allocate enough memory
-  address* m_funcDescs;
-
-  // file contains string table
-  FILE* m_file;
-
-  // section header
-  Elf_Shdr m_shdr;
-
-  // The section index of this function descriptor (i.e. '.opd') section in the ELF file
-  int m_index;
-
-  NullDecoder::decoder_status m_status;
+private:
+  address* cached_func_descs() const { return (address*)_section.section_data(); }
 };
 
 #endif // !_WINDOWS && !__APPLE__
diff --git a/src/hotspot/share/utilities/elfStringTable.cpp b/src/hotspot/share/utilities/elfStringTable.cpp
--- a/src/hotspot/share/utilities/elfStringTable.cpp
+++ b/src/hotspot/share/utilities/elfStringTable.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -33,58 +33,44 @@ // We will try to load whole string table into memory if we can. // Otherwise, fallback to more expensive file operation. -ElfStringTable::ElfStringTable(FILE* file, Elf_Shdr shdr, int index) { - assert(file, "null file handle"); - m_table = NULL; - m_index = index; - m_next = NULL; - m_file = file; - m_status = NullDecoder::no_error; +ElfStringTable::ElfStringTable(FILE* const file, Elf_Shdr& shdr, int index) : + _section(file, shdr), _index(index), _fd(file), _next(NULL) { + _status = _section.status(); +} - // try to load the string table - long cur_offset = ftell(file); - m_table = (char*)os::malloc(sizeof(char) * shdr.sh_size, mtInternal); - if (m_table != NULL) { - // if there is an error, mark the error - if (fseek(file, shdr.sh_offset, SEEK_SET) || - fread((void*)m_table, shdr.sh_size, 1, file) != 1 || - fseek(file, cur_offset, SEEK_SET)) { - m_status = NullDecoder::file_invalid; - os::free((void*)m_table); - m_table = NULL; - } - } else { - memcpy(&m_shdr, &shdr, sizeof(Elf_Shdr)); +ElfStringTable::~ElfStringTable() { + if (_next != NULL) { + delete _next; } } -ElfStringTable::~ElfStringTable() { - if (m_table != NULL) { - os::free((void*)m_table); +bool ElfStringTable::string_at(size_t pos, char* buf, int buflen) { + if (NullDecoder::is_error(get_status())) { + return false; } - if (m_next != NULL) { - delete m_next; - } -} - -bool ElfStringTable::string_at(int pos, char* buf, int buflen) { - if (NullDecoder::is_error(m_status)) { + assert(buflen > 0, "no buffer"); + if (pos >= _section.section_header()->sh_size) { return false; } - if (m_table != NULL) { - jio_snprintf(buf, buflen, "%s", (const char*)(m_table + pos)); + + const char* data = (const char*)_section.section_data(); + if (data != NULL) { + jio_snprintf(buf, buflen, "%s", data + pos); return true; - } else { - long cur_pos = ftell(m_file); - if (cur_pos == -1 || - fseek(m_file, m_shdr.sh_offset + pos, SEEK_SET) || - fread(buf, 1, buflen, m_file) <= 0 || - fseek(m_file, cur_pos, SEEK_SET)) { - m_status = NullDecoder::file_invalid; + } else { // no cache data, read from file instead + const Elf_Shdr* const shdr = _section.section_header(); + MarkedFileReader mfd(_fd); + if (mfd.has_mark() && + mfd.set_position(shdr->sh_offset + pos) && + mfd.read((void*)buf, size_t(buflen))) { + buf[buflen - 1] = '\0'; + return true; + } else { + // put it in error state to avoid retry + _status = NullDecoder::file_invalid; return false; } - return true; } } diff --git a/src/hotspot/share/utilities/elfStringTable.hpp b/src/hotspot/share/utilities/elfStringTable.hpp --- a/src/hotspot/share/utilities/elfStringTable.hpp +++ b/src/hotspot/share/utilities/elfStringTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,37 +37,36 @@ // one blob. Otherwise, it will load string from file when requested. 
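Before the class declaration, a hedged sketch of those two lookup paths (cached blob versus re-read from the file). The helper names and the use of plain snprintf/fseek/fread are assumptions for illustration; the patch itself uses jio_snprintf and the all-or-nothing MarkedFileReader::read(), whereas this sketch tolerates a short read:

#include <stdio.h>

// Cached path: the whole section blob is in memory; the string starts at 'pos'.
static bool string_from_cache(const char* blob, size_t blob_size, size_t pos,
                              char* buf, size_t buflen) {
  if (pos >= blob_size) return false;
  snprintf(buf, buflen, "%s", blob + pos);  // copies up to the embedded NUL
  return true;
}

// Fallback path: seek to section offset + pos and read at most buflen - 1 bytes.
// Assumes buflen > 0.
static bool string_from_file(FILE* fd, long sh_offset, size_t pos,
                             char* buf, size_t buflen) {
  if (fseek(fd, sh_offset + (long)pos, SEEK_SET) != 0) return false;
  size_t n = fread(buf, 1, buflen - 1, fd);
  if (n == 0) return false;
  buf[n] = '\0';  // always NUL-terminate what was read
  return true;
}

The patched ElfStringTable declaration follows.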
class ElfStringTable: CHeapObj { friend class ElfFile; - public: - ElfStringTable(FILE* file, Elf_Shdr shdr, int index); +private: + ElfStringTable* _next; + int _index; // section index + ElfSection _section; + FILE* const _fd; + NullDecoder::decoder_status _status; + +public: + ElfStringTable(FILE* const file, Elf_Shdr& shdr, int index); ~ElfStringTable(); // section index - int index() { return m_index; }; + int index() const { return _index; }; // get string at specified offset - bool string_at(int offset, char* buf, int buflen); + bool string_at(size_t offset, char* buf, int buflen); // get status code - NullDecoder::decoder_status get_status() { return m_status; }; + NullDecoder::decoder_status get_status() const { + return _status; + } - protected: - ElfStringTable* m_next; +private: + void set_next(ElfStringTable* next) { + _next = next; + } - // section index - int m_index; - - // holds complete string table if can - // allocate enough memory - const char* m_table; - - // file contains string table - FILE* m_file; - - // section header - Elf_Shdr m_shdr; - - // error code - NullDecoder::decoder_status m_status; + ElfStringTable* next() const { + return _next; + } }; #endif // !_WINDOWS && !__APPLE__ diff --git a/src/hotspot/share/utilities/elfSymbolTable.cpp b/src/hotspot/share/utilities/elfSymbolTable.cpp --- a/src/hotspot/share/utilities/elfSymbolTable.cpp +++ b/src/hotspot/share/utilities/elfSymbolTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,48 +30,26 @@ #include "utilities/elfFuncDescTable.hpp" #include "utilities/elfSymbolTable.hpp" -ElfSymbolTable::ElfSymbolTable(FILE* file, Elf_Shdr shdr) { - assert(file, "null file handle"); - m_symbols = NULL; - m_next = NULL; - m_file = file; - m_status = NullDecoder::no_error; +ElfSymbolTable::ElfSymbolTable(FILE* const file, Elf_Shdr& shdr) : + _section(file, shdr), _fd(file), _next(NULL) { + assert(file != NULL, "null file handle"); + _status = _section.status(); - // try to load the string table - long cur_offset = ftell(file); - if (cur_offset != -1) { - // call malloc so we can back up if memory allocation fails. 
- m_symbols = (Elf_Sym*)os::malloc(shdr.sh_size, mtInternal); - if (m_symbols) { - if (fseek(file, shdr.sh_offset, SEEK_SET) || - fread((void*)m_symbols, shdr.sh_size, 1, file) != 1 || - fseek(file, cur_offset, SEEK_SET)) { - m_status = NullDecoder::file_invalid; - os::free(m_symbols); - m_symbols = NULL; - } - } - if (!NullDecoder::is_error(m_status)) { - memcpy(&m_shdr, &shdr, sizeof(Elf_Shdr)); - } - } else { - m_status = NullDecoder::file_invalid; + if (_section.section_header()->sh_size % sizeof(Elf_Sym) != 0) { + _status = NullDecoder::file_invalid; } } ElfSymbolTable::~ElfSymbolTable() { - if (m_symbols != NULL) { - os::free(m_symbols); - } - - if (m_next != NULL) { - delete m_next; + if (_next != NULL) { + delete _next; } } bool ElfSymbolTable::compare(const Elf_Sym* sym, address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable) { if (STT_FUNC == ELF_ST_TYPE(sym->st_info)) { Elf_Word st_size = sym->st_size; + const Elf_Shdr* shdr = _section.section_header(); address sym_addr; if (funcDescTable != NULL && funcDescTable->get_index() == sym->st_shndx) { // We need to go another step trough the function descriptor table (currently PPC64 only) @@ -82,7 +60,7 @@ if (sym_addr <= addr && (Elf_Word)(addr - sym_addr) < st_size) { *offset = (int)(addr - sym_addr); *posIndex = sym->st_name; - *stringtableIndex = m_shdr.sh_link; + *stringtableIndex = shdr->sh_link; return true; } } @@ -94,39 +72,39 @@ assert(posIndex, "null string table offset pointer"); assert(offset, "null offset pointer"); - if (NullDecoder::is_error(m_status)) { + if (NullDecoder::is_error(get_status())) { return false; } size_t sym_size = sizeof(Elf_Sym); - assert((m_shdr.sh_size % sym_size) == 0, "check size"); - int count = m_shdr.sh_size / sym_size; - if (m_symbols != NULL) { + int count = _section.section_header()->sh_size / sym_size; + Elf_Sym* symbols = (Elf_Sym*)_section.section_data(); + + if (symbols != NULL) { for (int index = 0; index < count; index ++) { - if (compare(&m_symbols[index], addr, stringtableIndex, posIndex, offset, funcDescTable)) { + if (compare(&symbols[index], addr, stringtableIndex, posIndex, offset, funcDescTable)) { return true; } } } else { - long cur_pos; - if ((cur_pos = ftell(m_file)) == -1 || - fseek(m_file, m_shdr.sh_offset, SEEK_SET)) { - m_status = NullDecoder::file_invalid; + MarkedFileReader mfd(_fd); + + if (!mfd.has_mark() || !mfd.set_position(_section.section_header()->sh_offset)) { + _status = NullDecoder::file_invalid; return false; } Elf_Sym sym; for (int index = 0; index < count; index ++) { - if (fread(&sym, sym_size, 1, m_file) == 1) { - if (compare(&sym, addr, stringtableIndex, posIndex, offset, funcDescTable)) { - return true; - } - } else { - m_status = NullDecoder::file_invalid; + if (!mfd.read((void*)&sym, sizeof(sym))) { + _status = NullDecoder::file_invalid; return false; } + + if (compare(&sym, addr, stringtableIndex, posIndex, offset, funcDescTable)) { + return true; + } } - fseek(m_file, cur_pos, SEEK_SET); } return false; } diff --git a/src/hotspot/share/utilities/elfSymbolTable.hpp b/src/hotspot/share/utilities/elfSymbolTable.hpp --- a/src/hotspot/share/utilities/elfSymbolTable.hpp +++ b/src/hotspot/share/utilities/elfSymbolTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -40,29 +40,27 @@
  */
 class ElfSymbolTable: public CHeapObj {
   friend class ElfFile;
- public:
-  ElfSymbolTable(FILE* file, Elf_Shdr shdr);
+private:
+  ElfSymbolTable* _next;
+
+  // file that contains the symbol table
+  FILE* const _fd;
+
+  // corresponding section
+  ElfSection _section;
+
+  NullDecoder::decoder_status _status;
+public:
+  ElfSymbolTable(FILE* const file, Elf_Shdr& shdr);
   ~ElfSymbolTable();
 
   // search the symbol that is nearest to the specified address.
   bool lookup(address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable);
 
-  NullDecoder::decoder_status get_status() { return m_status; };
-
- protected:
-  ElfSymbolTable* m_next;
-
-  // holds a complete symbol table section if
-  // can allocate enough memory
-  Elf_Sym* m_symbols;
-
-  // file contains string table
-  FILE* m_file;
-
-  // section header
-  Elf_Shdr m_shdr;
-
-  NullDecoder::decoder_status m_status;
+  NullDecoder::decoder_status get_status() const { return _status; };
+private:
+  ElfSymbolTable* next() const { return _next; }
+  void set_next(ElfSymbolTable* next) { _next = next; }
 
   bool compare(const Elf_Sym* sym, address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable);
 };
diff --git a/src/hotspot/share/utilities/exceptions.cpp b/src/hotspot/share/utilities/exceptions.cpp
--- a/src/hotspot/share/utilities/exceptions.cpp
+++ b/src/hotspot/share/utilities/exceptions.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
 #include "runtime/init.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/os.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadCritical.hpp"
 #include "utilities/events.hpp"
@@ -239,8 +240,7 @@
   va_list ap;
   va_start(ap, format);
   char msg[max_msg_size];
-  vsnprintf(msg, max_msg_size, format, ap);
-  msg[max_msg_size-1] = '\0';
+  os::vsnprintf(msg, max_msg_size, format, ap);
   va_end(ap);
   _throw_msg(thread, file, line, h_name, msg);
 }
diff --git a/src/hotspot/share/utilities/globalDefinitions.cpp b/src/hotspot/share/utilities/globalDefinitions.cpp
--- a/src/hotspot/share/utilities/globalDefinitions.cpp
+++ b/src/hotspot/share/utilities/globalDefinitions.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,10 +48,6 @@
 // Oop encoding heap max
 uint64_t OopEncodingHeapMax = 0;
 
-void basic_fatal(const char* msg) {
-  fatal("%s", msg);
-}
-
 // Something to help porters sleep at night
 
 void basic_types_init() {
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -940,15 +940,12 @@ class methodHandle; class JavaCallArguments; -// Basic support for errors. -extern void basic_fatal(const char* msg); - //---------------------------------------------------------------------------------------------------- // Special constants for debugging const jint badInt = -3; // generic "bad int" value -const long badAddressVal = -2; // generic "bad address" value -const long badOopVal = -1; // generic "bad oop" value +const intptr_t badAddressVal = -2; // generic "bad address" value +const intptr_t badOopVal = -1; // generic "bad oop" value const intptr_t badHeapOopVal = (intptr_t) CONST64(0x2BAD4B0BBAADBABE); // value used to zap heap after GC const int badStackSegVal = 0xCA; // value used to zap stack segments const int badHandleValue = 0xBC; // value used to zap vm handle area diff --git a/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp b/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp --- a/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp +++ b/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,6 +35,7 @@ # include # include # include +# include # include // for offsetof # include // for stream.cpp # include // for _isnan @@ -42,6 +43,7 @@ # include # include # include +# include // Need this on windows to get the math constants (e.g., M_PI). #define _USE_MATH_DEFINES # include @@ -77,43 +79,18 @@ // pointer is stored as integer value. #define NULL_WORD NULL -// Compiler-specific primitive types -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; - #ifdef _WIN64 -typedef unsigned __int64 uintptr_t; +typedef int64_t ssize_t; #else -typedef unsigned int uintptr_t; -#endif -typedef signed __int8 int8_t; -typedef signed __int16 int16_t; -typedef signed __int32 int32_t; -typedef signed __int64 int64_t; -#ifdef _WIN64 -typedef signed __int64 intptr_t; -typedef signed __int64 ssize_t; -#else -typedef signed int intptr_t; -typedef signed int ssize_t; -#endif - -#ifndef UINTPTR_MAX -#ifdef _WIN64 -#define UINTPTR_MAX _UI64_MAX -#else -#define UINTPTR_MAX _UI32_MAX -#endif +typedef int32_t ssize_t; #endif // Additional Java basic types -typedef unsigned char jubyte; -typedef unsigned short jushort; -typedef unsigned int juint; -typedef unsigned __int64 julong; +typedef uint8_t jubyte; +typedef uint16_t jushort; +typedef uint32_t juint; +typedef uint64_t julong; // Non-standard stdlib-like stuff: inline int strcasecmp(const char *s1, const char *s2) { return _stricmp(s1,s2); } @@ -170,14 +147,6 @@ #pragma warning( disable : 4996 ) // unsafe string functions. Same as define _CRT_SECURE_NO_WARNINGS/_CRT_SECURE_NO_DEPRICATE #endif -inline int vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) { - // If number of characters written == count, Windows doesn't write a - // terminating NULL, so we do it ourselves. - int ret = _vsnprintf(buf, count, fmt, argptr); - if (count > 0) buf[count-1] = '\0'; - return ret; -} - // Portability macros #define PRAGMA_INTERFACE #define PRAGMA_IMPLEMENTATION @@ -187,26 +156,6 @@ // Formatting. 
#define FORMAT64_MODIFIER "I64" -// Visual Studio doesn't provide inttypes.h so provide appropriate definitions here. -// The 32 bits ones might need I32 but seem to work ok without it. -#define PRId32 "d" -#define PRIu32 "u" -#define PRIx32 "x" - -#define PRId64 "I64d" -#define PRIu64 "I64u" -#define PRIx64 "I64x" - -#ifdef _LP64 -#define PRIdPTR "I64d" -#define PRIuPTR "I64u" -#define PRIxPTR "I64x" -#else -#define PRIdPTR "d" -#define PRIuPTR "u" -#define PRIxPTR "x" -#endif - #define offset_of(klass,field) offsetof(klass,field) #ifndef USE_LIBRARY_BASED_TLS_ONLY diff --git a/src/hotspot/share/utilities/ostream.cpp b/src/hotspot/share/utilities/ostream.cpp --- a/src/hotspot/share/utilities/ostream.cpp +++ b/src/hotspot/share/utilities/ostream.cpp @@ -96,19 +96,14 @@ result_len = strlen(result); if (add_cr && result_len >= buflen) result_len = buflen-1; // truncate } else { - // Handle truncation: - // posix: upon truncation, vsnprintf returns number of bytes which - // would have been written (excluding terminating zero) had the buffer - // been large enough - // windows: upon truncation, vsnprintf returns -1 - const int written = vsnprintf(buffer, buflen, format, ap); + int written = os::vsnprintf(buffer, buflen, format, ap); + assert(written >= 0, "vsnprintf encoding error"); result = buffer; - if (written < (int) buflen && written >= 0) { + if ((size_t)written < buflen) { result_len = written; } else { DEBUG_ONLY(warning("increase O_BUFLEN in ostream.hpp -- output truncated");) result_len = buflen - 1; - buffer[result_len] = 0; } } if (add_cr) { diff --git a/src/hotspot/share/utilities/stringUtils.cpp b/src/hotspot/share/utilities/stringUtils.cpp --- a/src/hotspot/share/utilities/stringUtils.cpp +++ b/src/hotspot/share/utilities/stringUtils.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,3 +41,19 @@ return replace_count; } + +double StringUtils::similarity(const char* str1, size_t len1, const char* str2, size_t len2) { + size_t total = len1 + len2; + + size_t hit = 0; + for (size_t i = 0; i < len1 - 1; i++) { + for (size_t j = 0; j < len2 - 1; j++) { + if ((str1[i] == str2[j]) && (str1[i+1] == str2[j+1])) { + ++hit; + break; + } + } + } + + return 2.0 * (double) hit / (double) total; +} diff --git a/src/hotspot/share/utilities/stringUtils.hpp b/src/hotspot/share/utilities/stringUtils.hpp --- a/src/hotspot/share/utilities/stringUtils.hpp +++ b/src/hotspot/share/utilities/stringUtils.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,9 @@ // // Returns the count of substrings that have been replaced. 
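A hedged usage note for the similarity() helper added above (the demo function is not part of the patch): it counts, for each bigram of str1, whether any bigram of str2 matches, and divides by the summed string lengths rather than by the bigram counts of textbook Dice; the size_t loops also assume non-empty inputs, since len - 1 underflows for len == 0. A worked example:

#include <assert.h>
#include <math.h>
#include "utilities/stringUtils.hpp"

static void similarity_demo() {
  // Bigrams of "night": ni ig gh ht; of "nacht": na ac ch ht; one shared ("ht").
  // score = 2.0 * hit / (len1 + len2) = 2.0 * 1 / (5 + 5) = 0.2
  double score = StringUtils::similarity("night", 5, "nacht", 5);
  assert(fabs(score - 0.2) < 1e-9);
}

The replace_no_expand declaration that the comment above documents continues below.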
static int replace_no_expand(char* string, const char* from, const char* to); + + // Compute string similarity based on Dice's coefficient + static double similarity(const char* str1, size_t len1, const char* str2, size_t len2); }; #endif // SHARE_VM_UTILITIES_STRINGUTILS_HPP diff --git a/src/hotspot/share/utilities/vmError.cpp b/src/hotspot/share/utilities/vmError.cpp --- a/src/hotspot/share/utilities/vmError.cpp +++ b/src/hotspot/share/utilities/vmError.cpp @@ -1306,6 +1306,12 @@ // are handled properly. reset_signal_handlers(); + EventShutdown e; + if (e.should_commit()) { + e.set_reason("VM Error"); + e.commit(); + } + TRACE_VM_ERROR(); } else {