--- old/src/share/vm/c1/c1_Runtime1.cpp 2017-04-25 16:44:04.907176241 +0200
+++ new/src/share/vm/c1/c1_Runtime1.cpp 2017-04-25 16:44:04.759176246 +0200
@@ -39,6 +39,7 @@
 #include "code/vtableStubs.hpp"
 #include "compiler/disassembler.hpp"
 #include "gc/shared/barrierSet.hpp"
+#include "gc/shared/c1BarrierSetCodeGen.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "interpreter/bytecode.hpp"
 #include "interpreter/interpreter.hpp"
@@ -48,6 +49,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/access.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/compilationPolicy.hpp"
@@ -105,7 +107,6 @@
 // statistics
 int Runtime1::_generic_arraycopy_cnt = 0;
 int Runtime1::_primitive_arraycopy_cnt = 0;
-int Runtime1::_oop_arraycopy_cnt = 0;
 int Runtime1::_generic_arraycopystub_cnt = 0;
 int Runtime1::_arraycopy_slowcase_cnt = 0;
 int Runtime1::_arraycopy_checkcast_cnt = 0;
@@ -177,9 +178,17 @@
   }
 }
 
+class StubIDStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
+ private:
+  Runtime1::StubID _id;
+ public:
+  StubIDStubAssemblerCodeGenClosure(Runtime1::StubID id) : _id(id) {}
+  virtual OopMapSet* generate_code(StubAssembler* sasm) {
+    return Runtime1::generate_code_for(_id, sasm);
+  }
+};
 
-void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
-  assert(0 <= id && id < number_of_ids, "illegal stub id");
+CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
   ResourceMark rm;
   // create code buffer for code storage
   CodeBuffer code(buffer_blob);
@@ -191,19 +200,39 @@
   Compilation::setup_code_buffer(&code, 0);
 
   // create assembler for code generation
-  StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
+  StubAssembler* sasm = new StubAssembler(&code, name, stub_id);
   // generate code for runtime stub
-  oop_maps = generate_code_for(id, sasm);
+  oop_maps = cl->generate_code(sasm);
   assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
          "if stub has an oop map it must have a valid frame size");
+  assert(!expect_oop_map || oop_maps != NULL, "must have an oopmap");
 
+  // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
+  sasm->align(BytesPerWord);
+  // make sure all code is in code buffer
+  sasm->flush();
+
+  frame_size = sasm->frame_size();
+  must_gc_arguments = sasm->must_gc_arguments();
+  // create blob - distinguish a few special cases
+  CodeBlob* blob = RuntimeStub::new_runtime_stub(name,
+                                                 &code,
+                                                 CodeOffsets::frame_never_safe,
+                                                 frame_size,
+                                                 oop_maps,
+                                                 must_gc_arguments);
+  assert(blob != NULL, "blob must exist");
+  return blob;
+}
+
+void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
+  assert(0 <= id && id < number_of_ids, "illegal stub id");
+  bool expect_oop_map = true;
 #ifdef ASSERT
   // Make sure that stubs that need oopmaps have them
   switch (id) {
     // These stubs don't need to have an oopmap
   case dtrace_object_alloc_id:
-  case g1_pre_barrier_slow_id:
-  case g1_post_barrier_slow_id:
   case slow_subtype_check_id:
   case fpu2long_stub_id:
   case unwind_exception_id:
@@ -211,34 +240,16 @@
 #if defined(SPARC) || defined(PPC32)
   case handle_exception_nofpu_id:  // Unused on sparc
 #endif
+    expect_oop_map = false;
     break;
-
-    // All other stubs should have oopmaps
-  default:
-    assert(oop_maps != NULL, "must have an oopmap");
   }
 #endif
-
-  // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
-  sasm->align(BytesPerWord);
-  // make sure all code is in code buffer
-  sasm->flush();
-
-  frame_size = sasm->frame_size();
-  must_gc_arguments = sasm->must_gc_arguments();
-  // create blob - distinguish a few special cases
-  CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
-                                                 &code,
-                                                 CodeOffsets::frame_never_safe,
-                                                 frame_size,
-                                                 oop_maps,
-                                                 must_gc_arguments);
+  StubIDStubAssemblerCodeGenClosure cl(id);
+  CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
   // install blob
-  assert(blob != NULL, "blob must exist");
   _blobs[id] = blob;
 }
-
 
 void Runtime1::initialize(BufferBlob* blob) {
   // platform-dependent initialization
   initialize_pd();
@@ -256,9 +267,10 @@
     }
   }
 #endif
+  C1BarrierSetCodeGen* code_gen = Universe::heap()->barrier_set()->c1_code_gen();
+  code_gen->generate_c1_runtime_stubs(blob);
 }
 
-
CodeBlob* Runtime1::blob_for(StubID id) {
   assert(0 <= id && id < number_of_ids, "illegal stub id");
   return _blobs[id];
@@ -1225,11 +1237,6 @@
       MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
       guarantee(nm != NULL, "only nmethods can contain non-perm oops");
-      if (!nm->on_scavenge_root_list() &&
-          ((mirror.not_null() && mirror()->is_scavengable()) ||
-           (appendix.not_null() && appendix->is_scavengable()))) {
-        CodeCache::add_scavenge_root_nmethod(nm);
-      }
 
       // Since we've patched some oops in the nmethod,
       // (re)register it with the heap.
@@ -1376,27 +1383,15 @@
 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                           oopDesc* dst, T* dst_addr,
                                           int length) {
-
-  // For performance reasons, we assume we are using a card marking write
-  // barrier. The assert will fail if this is not the case.
-  // Note that we use the non-virtual inlineable variant of write_ref_array.
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
   if (src == dst) {
-    // same object, no check
-    bs->write_ref_array_pre(dst_addr, length);
-    Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-    bs->write_ref_array((HeapWord*)dst_addr, length);
+    HeapAccess<>::oop_copy(arrayOop(src), arrayOop(dst), src_addr, dst_addr, length);
     return ac_ok;
   } else {
     Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();
     Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();
     if (stype == bound || stype->is_subtype_of(bound)) {
       // Elements are guaranteed to be subtypes, so no check necessary
-      bs->write_ref_array_pre(dst_addr, length);
-      Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-      bs->write_ref_array((HeapWord*)dst_addr, length);
+      HeapAccess<>::oop_copy(arrayOop(src), arrayOop(dst), src_addr, dst_addr, length);
       return ac_ok;
     }
   }
@@ -1454,25 +1449,6 @@
   Copy::conjoint_jbytes(src, dst, length);
 JRT_END
 
-JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
-#ifndef PRODUCT
-  _oop_arraycopy_cnt++;
-#endif
-
-  if (num == 0) return;
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
-  if (UseCompressedOops) {
-    bs->write_ref_array_pre((narrowOop*)dst, num);
-    Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num);
-  } else {
-    bs->write_ref_array_pre((oop*)dst, num);
-    Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
-  }
-  bs->write_ref_array(dst, num);
-JRT_END
-
 
 JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
   // had to return int instead of bool, otherwise there may be a mismatch
@@ -1546,7 +1522,6 @@
   tty->print_cr(" _int_arraycopy_cnt:      %d", _int_arraycopy_stub_cnt);
   tty->print_cr(" _long_arraycopy_cnt:     %d", _long_arraycopy_stub_cnt);
   tty->print_cr(" _primitive_arraycopy_cnt: %d", _primitive_arraycopy_cnt);
-  tty->print_cr(" _oop_arraycopy_cnt (C):   %d", Runtime1::_oop_arraycopy_cnt);
   tty->print_cr(" _oop_arraycopy_cnt (stub): %d", _oop_arraycopy_stub_cnt);
  tty->print_cr(" _arraycopy_slowcase_cnt:  %d", _arraycopy_slowcase_cnt);
   tty->print_cr(" _arraycopy_checkcast_cnt: %d", _arraycopy_checkcast_cnt);
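
For context, a sketch of how a collector-specific code generator could plug into the hooks this patch adds. Only StubAssemblerCodeGenClosure, Runtime1::generate_blob and C1BarrierSetCodeGen::generate_c1_runtime_stubs come from the patch; the MyGC names, the helper, and the blob field are hypothetical:

class MyGCPreBarrierStubClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    // Emit the collector's slow-path barrier code. Returning NULL mirrors
    // the old g1_pre_barrier_slow_id case, which carried no oop map.
    return generate_mygc_pre_barrier(sasm);  // hypothetical helper
  }
};

void MyGCC1CodeGen::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
  MyGCPreBarrierStubClosure cl;
  _pre_barrier_blob = Runtime1::generate_blob(buffer_blob,
                                              -1,    // stub_id outside Runtime1's StubID table
                                              "mygc_pre_barrier_slow",
                                              false, // expect_oop_map: barrier stubs have none
                                              &cl);
}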
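
Likewise, each HeapAccess<>::oop_copy call site in obj_arraycopy_work stands in for the pre-barrier/copy/post-barrier sequence deleted above. Written out the old way, based purely on the removed lines (the real Access API dispatches through the active barrier set instead of hard-coding this sequence):

template <class T>
static void oop_copy_old_style(T* src_addr, T* dst_addr, int length) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  bs->write_ref_array_pre(dst_addr, length);               // pre-barrier (e.g. G1 SATB)
  Copy::conjoint_oops_atomic(src_addr, dst_addr, length);  // element-atomic oop copy
  bs->write_ref_array((HeapWord*)dst_addr, length);        // post-barrier (card marks)
}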