26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "asm/codeBuffer.hpp"
29 #include "runtime/atomic.inline.hpp"
30 #include "runtime/icache.hpp"
31 #include "runtime/os.hpp"
32
33
34 // Implementation of AbstractAssembler
35 //
36 // The AbstractAssembler generates code into a CodeBuffer. To make code generation faster,
37 // the assembler keeps a copy of the code buffer's boundaries & modifies them when
38 // emitting bytes rather than using the code buffer's accessor functions all the time.
39 // The code buffer is updated via set_code_end(...) after emitting a whole instruction.
40
// Construct an assembler that emits into the instruction section of 'code'.
// A NULL CodeBuffer leaves all fields untouched; callers must not emit
// through such an assembler.
41 AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
42 if (code == NULL) return;  // no buffer to attach to: bail out early
43 CodeSection* cs = code->insts();
44 cs->clear_mark(); // new assembler kills old mark
45 if (cs->start() == NULL) {
// The instruction section has no backing storage: the code cache is full,
// so abort the VM with an out-of-memory error rather than emit into NULL.
46 vm_exit_out_of_memory(0, OOM_MMAP_ERROR, err_msg("CodeCache: no room for %s",
47 code->name()));
48 }
49 _code_section = cs;  // cache the section so emission avoids repeated accessor calls
50 _oop_recorder= code->oop_recorder();  // oop recorder is owned by the CodeBuffer
51 DEBUG_ONLY( _short_branch_delta = 0; )  // field exists (and is reset) in debug builds only
52 }
53
// Redirect subsequent emission into a different, pre-allocated section 'cs'
// of the same CodeBuffer. Any mark previously set on 'cs' is discarded.
54 void AbstractAssembler::set_code_section(CodeSection* cs) {
55 assert(cs->outer() == code_section()->outer(), "sanity");  // must belong to the same CodeBuffer
56 assert(cs->is_allocated(), "need to pre-allocate this section");
57 cs->clear_mark(); // new assembly into this section kills old mark
58 _code_section = cs;
59 }
60
61 // Inform CodeBuffer that incoming code and relocation will be for stubs
62 address AbstractAssembler::start_a_stub(int required_space) {
63 CodeBuffer* cb = code();
64 CodeSection* cs = cb->stubs();
65 assert(_code_section == cb->insts(), "not in insts?");
66 if (cs->maybe_expand_to_ensure_remaining(required_space)
67 && cb->blob() == NULL) {
|
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "asm/codeBuffer.hpp"
29 #include "runtime/atomic.inline.hpp"
30 #include "runtime/icache.hpp"
31 #include "runtime/os.hpp"
32
33
34 // Implementation of AbstractAssembler
35 //
36 // The AbstractAssembler generates code into a CodeBuffer. To make code generation faster,
37 // the assembler keeps a copy of the code buffer's boundaries & modifies them when
38 // emitting bytes rather than using the code buffer's accessor functions all the time.
39 // The code buffer is updated via set_code_end(...) after emitting a whole instruction.
40
// Construct an assembler that emits into the instruction section of 'code'.
// A NULL CodeBuffer leaves all fields untouched; callers must not emit
// through such an assembler.
41 AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
42 if (code == NULL) return;  // no buffer to attach to: bail out early
43 CodeSection* cs = code->insts();
44 cs->clear_mark(); // new assembler kills old mark
45 if (cs->start() == NULL) {
// The instruction section has no backing storage: the code cache is full,
// so abort the VM with an out-of-memory error rather than emit into NULL.
// NOTE(review): this copy passes format+args directly (varargs form of
// vm_exit_out_of_memory); the earlier copy in this file wraps them in
// err_msg(...) — confirm which signature this tree declares.
46 vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "CodeCache: no room for %s", code->name());
47 }
48 _code_section = cs;  // cache the section so emission avoids repeated accessor calls
49 _oop_recorder= code->oop_recorder();  // oop recorder is owned by the CodeBuffer
50 DEBUG_ONLY( _short_branch_delta = 0; )  // field exists (and is reset) in debug builds only
51 }
52
// Redirect subsequent emission into a different, pre-allocated section 'cs'
// of the same CodeBuffer. Any mark previously set on 'cs' is discarded.
53 void AbstractAssembler::set_code_section(CodeSection* cs) {
54 assert(cs->outer() == code_section()->outer(), "sanity");  // must belong to the same CodeBuffer
55 assert(cs->is_allocated(), "need to pre-allocate this section");
56 cs->clear_mark(); // new assembly into this section kills old mark
57 _code_section = cs;
58 }
59
60 // Inform CodeBuffer that incoming code and relocation will be for stubs
61 address AbstractAssembler::start_a_stub(int required_space) {
62 CodeBuffer* cb = code();
63 CodeSection* cs = cb->stubs();
64 assert(_code_section == cb->insts(), "not in insts?");
65 if (cs->maybe_expand_to_ensure_remaining(required_space)
66 && cb->blob() == NULL) {
|