
src/hotspot/share/c1/c1_Runtime1.cpp

BarrierSetC1

*** 37,46 **** --- 37,47 ----
  #include "code/pcDesc.hpp"
  #include "code/scopeDesc.hpp"
  #include "code/vtableStubs.hpp"
  #include "compiler/disassembler.hpp"
  #include "gc/shared/barrierSet.hpp"
+ #include "gc/shared/c1/barrierSetC1.hpp"
  #include "gc/shared/collectedHeap.hpp"
  #include "interpreter/bytecode.hpp"
  #include "interpreter/interpreter.hpp"
  #include "logging/log.hpp"
  #include "memory/allocation.inline.hpp"
***************
*** 176,188 ****
      Deoptimization::deoptimize_frame(thread, caller_frame.id());
      assert(caller_is_deopted(), "Must be deoptimized");
    }
  }
  
! void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
!   assert(0 <= id && id < number_of_ids, "illegal stub id");
    ResourceMark rm;
    // create code buffer for code storage
    CodeBuffer code(buffer_blob);
  
    OopMapSet* oop_maps;
--- 177,197 ----
      Deoptimization::deoptimize_frame(thread, caller_frame.id());
      assert(caller_is_deopted(), "Must be deoptimized");
    }
  }
  
+ class StubIDStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
+  private:
+   Runtime1::StubID _id;
+  public:
+   StubIDStubAssemblerCodeGenClosure(Runtime1::StubID id) : _id(id) {}
+   virtual OopMapSet* generate_code(StubAssembler* sasm) {
+     return Runtime1::generate_code_for(_id, sasm);
+   }
+ };
+ 
! CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
    ResourceMark rm;
    // create code buffer for code storage
    CodeBuffer code(buffer_blob);
  
    OopMapSet* oop_maps;
***************
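Note on the hunk above: generate_blob is a new, id-independent entry point. Instead of dispatching on a StubID internally, it takes a StubAssemblerCodeGenClosure whose virtual generate_code(StubAssembler*) emits the stub body, so code outside Runtime1 (notably GC barrier sets) can produce C1 runtime stubs through the same machinery. A minimal sketch of an external caller, assuming only the signatures visible in this diff (the closure name, the empty stub body, and the use of -1 as a "no StubID" marker are illustrative assumptions):

    // Hypothetical closure: emits some stub body via the StubAssembler.
    class MyStubCodeGenClosure : public StubAssemblerCodeGenClosure {
      virtual OopMapSet* generate_code(StubAssembler* sasm) {
        // ... emit platform-specific code through sasm here ...
        return NULL;  // no oop map, so pass expect_oop_map = false below
      }
    };

    CodeBlob* generate_my_stub(BufferBlob* buffer_blob) {
      MyStubCodeGenClosure cl;
      // -1: this stub has no Runtime1::StubID of its own (assumption)
      return Runtime1::generate_blob(buffer_blob, -1, "my_stub_name",
                                     false /* expect_oop_map */, &cl);
    }

Returning NULL from generate_code is consistent with generate_blob's checks only when expect_oop_map is false, per the asserts in the hunk above.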
*** 190,247 ****
    bool must_gc_arguments;
  
    Compilation::setup_code_buffer(&code, 0);
  
    // create assembler for code generation
!   StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
    // generate code for runtime stub
!   oop_maps = generate_code_for(id, sasm);
    assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
           "if stub has an oop map it must have a valid frame size");
  
  #ifdef ASSERT
    // Make sure that stubs that need oopmaps have them
    switch (id) {
      // These stubs don't need to have an oopmap
    case dtrace_object_alloc_id:
-   case g1_pre_barrier_slow_id:
-   case g1_post_barrier_slow_id:
    case slow_subtype_check_id:
    case fpu2long_stub_id:
    case unwind_exception_id:
    case counter_overflow_id:
  #if defined(SPARC) || defined(PPC32)
    case handle_exception_nofpu_id:  // Unused on sparc
  #endif
      break;
- 
-     // All other stubs should have oopmaps
    default:
!     assert(oop_maps != NULL, "must have an oopmap");
    }
  #endif
! 
!   // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
!   sasm->align(BytesPerWord);
!   // make sure all code is in code buffer
!   sasm->flush();
! 
!   frame_size = sasm->frame_size();
!   must_gc_arguments = sasm->must_gc_arguments();
!   // create blob - distinguish a few special cases
!   CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
!                                                  &code,
!                                                  CodeOffsets::frame_never_safe,
!                                                  frame_size,
!                                                  oop_maps,
!                                                  must_gc_arguments);
    // install blob
-   assert(blob != NULL, "blob must exist");
    _blobs[id] = blob;
  }
- 
  void Runtime1::initialize(BufferBlob* blob) {
    // platform-dependent initialization
    initialize_pd();
    // generate stubs
    for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
--- 199,260 ----
    bool must_gc_arguments;
  
    Compilation::setup_code_buffer(&code, 0);
  
    // create assembler for code generation
!   StubAssembler* sasm = new StubAssembler(&code, name, stub_id);
    // generate code for runtime stub
!   oop_maps = cl->generate_code(sasm);
    assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
           "if stub has an oop map it must have a valid frame size");
+   assert(!expect_oop_map || oop_maps != NULL, "must have an oopmap");
+   // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
+   sasm->align(BytesPerWord);
+   // make sure all code is in code buffer
+   sasm->flush();
+ 
+   frame_size = sasm->frame_size();
+   must_gc_arguments = sasm->must_gc_arguments();
+   // create blob - distinguish a few special cases
+   CodeBlob* blob = RuntimeStub::new_runtime_stub(name,
+                                                  &code,
+                                                  CodeOffsets::frame_never_safe,
+                                                  frame_size,
+                                                  oop_maps,
+                                                  must_gc_arguments);
+   assert(blob != NULL, "blob must exist");
+   return blob;
+ }
+ 
+ void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
+   assert(0 <= id && id < number_of_ids, "illegal stub id");
+   bool expect_oop_map = true;
  #ifdef ASSERT
    // Make sure that stubs that need oopmaps have them
    switch (id) {
      // These stubs don't need to have an oopmap
    case dtrace_object_alloc_id:
    case slow_subtype_check_id:
    case fpu2long_stub_id:
    case unwind_exception_id:
    case counter_overflow_id:
  #if defined(SPARC) || defined(PPC32)
    case handle_exception_nofpu_id:  // Unused on sparc
  #endif
+     expect_oop_map = false;
      break;
    default:
!     break;
    }
  #endif
!   StubIDStubAssemblerCodeGenClosure cl(id);
!   CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
    // install blob
    _blobs[id] = blob;
  }
  
  void Runtime1::initialize(BufferBlob* blob) {
    // platform-dependent initialization
    initialize_pd();
    // generate stubs
    for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
***************
*** 255,267 ****
        _blobs[id]->oop_maps()->print();
      }
    }
  }
  #endif
  }
  
- 
  CodeBlob* Runtime1::blob_for(StubID id) {
    assert(0 <= id && id < number_of_ids, "illegal stub id");
    return _blobs[id];
  }
--- 268,281 ----
        _blobs[id]->oop_maps()->print();
      }
    }
  }
  #endif
+   BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
+   bs->generate_c1_runtime_stubs(blob);
  }
  
  CodeBlob* Runtime1::blob_for(StubID id) {
    assert(0 <= id && id < number_of_ids, "illegal stub id");
    return _blobs[id];
  }
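Taken together with the earlier hunks, Runtime1::initialize now delegates GC-specific stub generation to the active barrier set: the g1_pre_barrier_slow_id/g1_post_barrier_slow_id cases disappear from the StubID switch, and BarrierSetC1::generate_c1_runtime_stubs(blob) is called at the end of initialization instead. A hedged sketch of what a barrier set's override might look like, reusing the generate_blob entry point from this change (class, closure, and field names here are illustrative, not the actual G1 code):

    // Sketch: a BarrierSetC1 subclass generating its own C1 runtime stub.
    class MyBarrierSetC1 : public BarrierSetC1 {
      CodeBlob* _pre_barrier_runtime_stub;  // assumed field
     public:
      virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
        MyPreBarrierCodeGenClosure pre_cl;  // hypothetical closure, as sketched earlier
        _pre_barrier_runtime_stub =
            Runtime1::generate_blob(buffer_blob, -1, "my_pre_barrier_slow",
                                    false /* expect_oop_map */, &pre_cl);
      }
    };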