1 /* 2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #ifndef SHARE_ASM_CODEBUFFER_HPP 26 #define SHARE_ASM_CODEBUFFER_HPP 27 28 #include "code/oopRecorder.hpp" 29 #include "code/relocInfo.hpp" 30 #include "utilities/align.hpp" 31 #include "utilities/debug.hpp" 32 #include "utilities/macros.hpp" 33 34 class CodeStrings; 35 class PhaseCFG; 36 class Compile; 37 class BufferBlob; 38 class CodeBuffer; 39 class Label; 40 41 class CodeOffsets: public StackObj { 42 public: 43 enum Entries { Entry, 44 Verified_Entry, 45 Verified_Value_Entry, 46 Verified_Value_Entry_RO, 47 Frame_Complete, // Offset in the code where the frame setup is (for forte stackwalks) is complete 48 OSR_Entry, 49 Exceptions, // Offset where exception handler lives 50 Deopt, // Offset where deopt handler lives 51 DeoptMH, // Offset where MethodHandle deopt handler lives 52 UnwindHandler, // Offset to default unwind handler 53 max_Entries }; 54 55 // special value to note codeBlobs where profile (forte) stack walking is 56 // always dangerous and suspect. 57 58 enum { frame_never_safe = -1 }; 59 60 private: 61 int _values[max_Entries]; 62 63 public: 64 CodeOffsets() { 65 _values[Entry ] = 0; 66 _values[Verified_Entry] = 0; 67 _values[Verified_Value_Entry] = -1; 68 _values[Verified_Value_Entry_RO] = -1; 69 _values[Frame_Complete] = frame_never_safe; 70 _values[OSR_Entry ] = 0; 71 _values[Exceptions ] = -1; 72 _values[Deopt ] = -1; 73 _values[DeoptMH ] = -1; 74 _values[UnwindHandler ] = -1; 75 } 76 77 int value(Entries e) { return _values[e]; } 78 void set_value(Entries e, int val) { _values[e] = val; } 79 }; 80 81 // This class represents a stream of code and associated relocations. 82 // There are a few in each CodeBuffer. 83 // They are filled concurrently, and concatenated at the end. 
class CodeSection {
  friend class CodeBuffer;
 public:
  typedef int csize_t;  // code size type; would be size_t except for history

 private:
  address     _start;           // first byte of contents (instructions)
  address     _mark;            // user mark, usually an instruction beginning
  address     _end;             // current end address
  address     _limit;           // last possible (allocated) end address
  relocInfo*  _locs_start;      // first byte of relocation information
  relocInfo*  _locs_end;        // first byte after relocation information
  relocInfo*  _locs_limit;      // first byte after relocation information buf
  address     _locs_point;      // last relocated position (grows upward)
  bool        _locs_own;        // did I allocate the locs myself?
  bool        _frozen;          // no more expansion of this section
  bool        _scratch_emit;    // Buffer is used for scratch emit, don't relocate.
  char        _index;           // my section number (SECT_INST, etc.)
  CodeBuffer* _outer;           // enclosing CodeBuffer

  // (Note: _locs_point used to be called _last_reloc_offset.)

  // Default-construct an unallocated section; in debug builds, poison the
  // index and outer pointer so misuse before initialize_outer() is caught.
  CodeSection() {
    _start         = NULL;
    _mark          = NULL;
    _end           = NULL;
    _limit         = NULL;
    _locs_start    = NULL;
    _locs_end      = NULL;
    _locs_limit    = NULL;
    _locs_point    = NULL;
    _locs_own      = false;
    _frozen        = false;
    _scratch_emit  = false;
    debug_only(_index = (char)-1);
    debug_only(_outer = (CodeBuffer*)badAddress);
  }

  // Bind this section to its enclosing CodeBuffer and section index.
  void initialize_outer(CodeBuffer* outer, int index) {
    _outer = outer;
    _index = index;
  }

  // One-time assignment of the section's memory span [start, start+size).
  void initialize(address start, csize_t size = 0) {
    assert(_start == NULL, "only one init step, please");
    _start         = start;
    _mark          = NULL;
    _end           = start;

    _limit         = start + size;
    _locs_point    = start;
  }

  void initialize_locs(int locs_capacity);
  void expand_locs(int new_capacity);
  void initialize_locs_from(const CodeSection* source_cs);

  // helper for CodeBuffer::expand(); copies only the code span and the
  // relocation point, not the relocation buffer itself
  void take_over_code_from(CodeSection* cs) {
    _start      = cs->_start;
    _mark       = cs->_mark;
    _end        = cs->_end;
    _limit      = cs->_limit;
    _locs_point = cs->_locs_point;
  }

 public:
  address     start() const         { return _start; }
  address     mark() const          { return _mark; }
  address     end() const           { return _end; }
  address     limit() const         { return _limit; }
  csize_t     size() const          { return (csize_t)(_end - _start); }
  csize_t     mark_off() const      { assert(_mark != NULL, "not an offset");
                                      return (csize_t)(_mark - _start); }
  csize_t     capacity() const      { return (csize_t)(_limit - _start); }
  csize_t     remaining() const     { return (csize_t)(_limit - _end); }

  relocInfo*  locs_start() const    { return _locs_start; }
  relocInfo*  locs_end() const      { return _locs_end; }
  int         locs_count() const    { return (int)(_locs_end - _locs_start); }
  relocInfo*  locs_limit() const    { return _locs_limit; }
  address     locs_point() const    { return _locs_point; }
  csize_t     locs_point_off() const{ return (csize_t)(_locs_point - _start); }
  csize_t     locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }
  csize_t     locs_remaining()const { return (csize_t)(_locs_limit - _locs_end); }

  int         index() const         { return _index; }
  bool        is_allocated() const  { return _start != NULL; }
  bool        is_empty() const      { return _start == _end; }
  bool        is_frozen() const     { return _frozen; }
  bool        has_locs() const      { return _locs_end != NULL; }

  // Mark scratch buffer.
  void        set_scratch_emit()    { _scratch_emit = true; }
  bool        scratch_emit()        { return _scratch_emit; }

  CodeBuffer* outer() const         { return _outer; }

  // is a given address in this section?  (2nd version is end-inclusive)
  bool contains(address pc) const   { return pc >= _start && pc <  _end; }
  bool contains2(address pc) const  { return pc >= _start && pc <= _end; }
  bool allocates(address pc) const  { return pc >= _start && pc <  _limit; }
  bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }

  void    set_end(address pc)       { assert(allocates2(pc), "not in CodeBuffer memory: " INTPTR_FORMAT " <= " INTPTR_FORMAT " <= " INTPTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit)); _end = pc; }
  void    set_mark(address pc)      { assert(contains2(pc), "not in codeBuffer");
                                      _mark = pc; }
  void    set_mark_off(int offset)  { assert(contains2(offset+_start),"not in codeBuffer");
                                      _mark = offset + _start; }
  void    set_mark()                { _mark = _end; }
  void    clear_mark()              { _mark = NULL; }

  void    set_locs_end(relocInfo* p) {
    assert(p <= locs_limit(), "locs data fits in allocated buffer");
    _locs_end = p;
  }
  void    set_locs_point(address pc) {
    // _locs_point only grows upward; see field comment.
    assert(pc >= locs_point(), "relocation addr may not decrease");
    assert(allocates2(pc),     "relocation addr must be in this section");
    _locs_point = pc;
  }

  // Code emission.  Each store writes at end() and bumps _end; set_end()
  // asserts the result still fits in the allocated span.
  // NOTE(review): these are direct, potentially unaligned stores — assumes
  // the target CPU (or the section layout) tolerates that; confirm per port.
  void emit_int8 ( int8_t  x)  { *((int8_t*)  end()) = x; set_end(end() + sizeof(int8_t)); }
  void emit_int16( int16_t x)  { *((int16_t*) end()) = x; set_end(end() + sizeof(int16_t)); }
  void emit_int32( int32_t x)  { *((int32_t*) end()) = x; set_end(end() + sizeof(int32_t)); }
  void emit_int64( int64_t x)  { *((int64_t*) end()) = x; set_end(end() + sizeof(int64_t)); }

  void emit_float( jfloat  x)  { *((jfloat*)  end()) = x; set_end(end() + sizeof(jfloat)); }
  void emit_double(jdouble x)  { *((jdouble*) end()) = x; set_end(end() + sizeof(jdouble)); }
  void emit_address(address x) { *((address*) end()) = x; set_end(end() + sizeof(address)); }

  // Share a scratch buffer for relocinfo.  (Hacky; saves a resource allocation.)
  void initialize_shared_locs(relocInfo* buf, int length);

  // Manage labels and their addresses.
  address target(Label& L, address branch_pc);

  // Emit a relocation.
  void relocate(address at, RelocationHolder const& rspec, int format = 0);
  void relocate(address at,    relocInfo::relocType rtype, int format = 0, jint method_index = 0);

  // alignment requirement for starting offset
  // Requirements are that the instruction area and the
  // stubs area must start on CodeEntryAlignment, and
  // the ctable on sizeof(jdouble)
  int alignment() const             { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop()         { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  csize_t align_at_start(csize_t off) const { return (csize_t) align_up(off, alignment()); }

  // Mark a section frozen.  Assign its remaining space to
  // the following section.  It will never expand after this point.
  inline void freeze();         //  { _outer->freeze_section(this); }

  // Ensure there's enough space left in the current section.
  // Return true if there was an expansion.
  bool maybe_expand_to_ensure_remaining(csize_t amount);

#ifndef PRODUCT
  void decode();
  void dump();
  void print(const char* name);
#endif //PRODUCT
};

class CodeString;
// CodeStrings holds assembly comments/strings attached to code offsets.
// All storage and state is compiled out in PRODUCT builds (see the
// PRODUCT_RETURN declarations below).
class CodeStrings {
private:
#ifndef PRODUCT
  CodeString* _strings;
#ifdef ASSERT
  // Becomes true after copy-out, forbids further use.
  bool _defunct; // Zero bit pattern is "valid", see memset call in decode_env::decode_env
#endif
  static const char* _prefix; // defaults to " ;; "
#endif

  CodeString* find(intptr_t offset) const;
  CodeString* find_last(intptr_t offset) const;

  // Drop the string list and (in ASSERT builds) mark this holder defunct.
  void set_null_and_invalidate() {
#ifndef PRODUCT
    _strings = NULL;
#ifdef ASSERT
    _defunct = true;
#endif
#endif
  }

public:
  CodeStrings() {
#ifndef PRODUCT
    _strings = NULL;
#ifdef ASSERT
    _defunct = false;
#endif
#endif
  }

  // NOTE(review): in non-ASSERT builds this unconditionally reports "null";
  // callers such as free_strings() then skip the free — confirm intended.
  bool is_null() {
#ifdef ASSERT
    return _strings == NULL;
#else
    return true;
#endif
  }

  const char* add_string(const char * string) PRODUCT_RETURN_(return NULL;);

  void add_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
  void print_block_comment(outputStream* stream, intptr_t offset) const PRODUCT_RETURN;
  // MOVE strings from other to this; invalidate other.
  void assign(CodeStrings& other)  PRODUCT_RETURN;
  // COPY strings from other to this; leave other valid.
  void copy(CodeStrings& other)  PRODUCT_RETURN;
  // FREE strings; invalidate this.
  void free() PRODUCT_RETURN;
  // Guarantee that _strings are used at most once; assign and free invalidate a buffer.
  // Assert (debug only) that this holder has not been invalidated by a
  // prior assign()/free().
  inline void check_valid() const {
#ifdef ASSERT
    assert(!_defunct, "Use of invalid CodeStrings");
#endif
  }

  // Set the comment prefix used when printing (defaults to " ;; ").
  static void set_prefix(const char *prefix) {
#ifndef PRODUCT
    _prefix = prefix;
#endif
  }
};

// A CodeBuffer describes a memory space into which assembly
// code is generated.  This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
//     This is used to direct 'static' code generation (e.g. for interpreter
//     or stubroutine generation, etc.).  This code comes with NO relocation
//     information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
//     CodeBuffer is allocated.  This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections).  When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.

class CodeBuffer: public StackObj {
  friend class CodeSection;
  friend class StubCodeGenerator;

 private:
  // CodeBuffers must be allocated on the stack except for a single
  // special case during expansion which is handled internally.  This
  // is done to guarantee proper cleanup of resources.
  // Heap allocation is reserved for the internal expansion path only;
  // delete must never be called (expansion buffers are resource-allocated).
  void* operator new(size_t size) throw() { return ResourceObj::operator new(size); }
  void  operator delete(void* p)          { ShouldNotCallThis(); }

 public:
  typedef int csize_t;  // code size type; would be size_t except for history
  enum {
    // Here is the list of all possible sections.  The order reflects
    // the final layout.
    SECT_FIRST = 0,
    SECT_CONSTS = SECT_FIRST, // Non-instruction data:  Floats, jump tables, etc.
    SECT_INSTS,               // Executable instructions.
    SECT_STUBS,               // Outbound trampolines for supporting call sites.
    SECT_LIMIT, SECT_NONE = -1
  };

 private:
  enum {
    sect_bits = 2,      // assert (SECT_LIMIT <= (1<<sect_bits))
    sect_mask = (1<<sect_bits)-1
  };

  const char*  _name;

  CodeSection  _consts;             // constants, jump tables
  CodeSection  _insts;              // instructions (the main section)
  CodeSection  _stubs;              // stubs (call site support), deopt, exception handling

  CodeBuffer*  _before_expand;  // dead buffer, from before the last expansion

  BufferBlob*  _blob;           // optional buffer in CodeCache for generated code
  address      _total_start;    // first address of combined memory buffer
  csize_t      _total_size;     // size in bytes of combined memory buffer

  OopRecorder* _oop_recorder;
  CodeStrings  _code_strings;
  OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
  Arena*       _overflow_arena;

  address      _last_insn;      // used to merge consecutive memory barriers, loads or stores.

#if INCLUDE_AOT
  bool         _immutable_PIC;
#endif

  address      _decode_begin;   // start address for decode
  address decode_begin();

  // Reset all non-section state; called by every constructor variant.
  void initialize_misc(const char * name) {
    // all pointers other than code_start/end and those inside the sections
    assert(name != NULL, "must have a name");
    _name            = name;
    _before_expand   = NULL;
    _blob            = NULL;
    _oop_recorder    = NULL;
    _decode_begin    = NULL;
    _overflow_arena  = NULL;
    _code_strings    = CodeStrings();
    _last_insn       = NULL;
#if INCLUDE_AOT
    _immutable_PIC   = false;
#endif
  }

  // Bind the three sections to this buffer and give the whole span
  // [code_start, code_start+code_size) to the insts section.
  void initialize(address code_start, csize_t code_size) {
    _consts.initialize_outer(this, SECT_CONSTS);
    _insts.initialize_outer(this,  SECT_INSTS);
    _stubs.initialize_outer(this,  SECT_STUBS);
    _total_start = code_start;
    _total_size  = code_size;
    // Initialize the main section:
    _insts.initialize(code_start, code_size);
    assert(!_stubs.is_allocated(),  "no garbage here");
    assert(!_consts.is_allocated(), "no garbage here");
    _oop_recorder = &_default_oop_recorder;
  }

  void initialize_section_size(CodeSection* cs, csize_t size);

  void freeze_section(CodeSection* cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeBuffer* cs);

  // ensure sections are disjoint, ordered, and contained in the blob
  void verify_section_allocation();

  // copies combined relocations to the blob, returns bytes copied
  // (if target is null, it is a dry run only, just for sizing)
  csize_t copy_relocations_to(CodeBlob* blob) const;

  // copies combined code to the blob (assumes relocs are already in there)
  void copy_code_to(CodeBlob* blob);

  // moves code sections to new buffer (assumes relocs are already in there)
  void relocate_code_to(CodeBuffer* cb) const;

  // set up a model of the final layout of my contents
  void compute_final_layout(CodeBuffer* dest) const;

  // Expand the given section so at least 'amount' is remaining.
  // Creates a new, larger BufferBlob, and rewrites the code & relocs.
  void expand(CodeSection* which_cs, csize_t amount);

  // Helper for expand.
  csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);

 public:
  // (1) code buffer referring to pre-allocated instruction memory
  CodeBuffer(address code_start, csize_t code_size) {
    assert(code_start != NULL, "sanity");
    initialize_misc("static buffer");
    initialize(code_start, code_size);
    verify_section_allocation();
  }

  // (2) CodeBuffer referring to pre-allocated CodeBlob.
  CodeBuffer(CodeBlob* blob);

  // (3) code buffer allocating codeBlob memory for code & relocation
  // info but with lazy initialization.  The name must be something
  // informative.
  CodeBuffer(const char* name) {
    initialize_misc(name);
  }

  // (4) code buffer allocating codeBlob memory for code & relocation
  // info.  The name must be something informative and code_size must
  // include both code and stubs sizes.
  CodeBuffer(const char* name, csize_t code_size, csize_t locs_size) {
    initialize_misc(name);
    initialize(code_size, locs_size);
  }

  ~CodeBuffer();

  // Initialize a CodeBuffer constructed using constructor 3.  Using
  // constructor 4 is equivalent to calling constructor 3 and then
  // calling this method.  It's been factored out for convenience of
  // construction.
  void initialize(csize_t code_size, csize_t locs_size);

  CodeSection* consts() { return &_consts; }
  CodeSection* insts()  { return &_insts; }
  CodeSection* stubs()  { return &_stubs; }

  const CodeSection* insts() const { return &_insts; }

  // present sections in order; return NULL at end; consts is #0, etc.
  CodeSection* code_section(int n) {
    // This makes the slightly questionable but portable assumption
    // that the various members (_consts, _insts, _stubs, etc.) are
    // adjacent in the layout of CodeBuffer.
    CodeSection* cs = &_consts + n;
    assert(cs->index() == n || !cs->is_allocated(), "sanity");
    return cs;
  }
  const CodeSection* code_section(int n) const {  // yucky const stuff
    return ((CodeBuffer*)this)->code_section(n);
  }
  static const char* code_section_name(int n);
  int section_index_of(address addr) const;
  bool contains(address addr) const {
    // handy for debugging
    return section_index_of(addr) > SECT_NONE;
  }

  // A stable mapping between 'locators' (small ints) and addresses.
  // A locator packs a section offset (upper bits) with a section index
  // (low sect_bits bits).
  static int locator_pos(int locator)   { return locator >> sect_bits; }
  static int locator_sect(int locator)  { return locator &  sect_mask; }
  static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
  int        locator(address addr) const;
  address    locator_address(int locator) const;

  // Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
  bool is_backward_branch(Label& L);

  // Properties
  const char* name() const          { return _name; }
  void set_name(const char* name)   { _name = name; }
  CodeBuffer* before_expand() const { return _before_expand; }
  BufferBlob* blob() const          { return _blob; }
  void    set_blob(BufferBlob* blob);
  void   free_blob();               // Free the blob, if we own one.

  // Properties relative to the insts section:
  address       insts_begin() const    { return _insts.start();      }
  address       insts_end() const      { return _insts.end();        }
  void      set_insts_end(address end) {        _insts.set_end(end); }
  address       insts_limit() const    { return _insts.limit();      }
  address       insts_mark() const     { return _insts.mark();       }
  void      set_insts_mark()           {        _insts.set_mark();   }
  void    clear_insts_mark()           {        _insts.clear_mark(); }

  // is there anything in the buffer other than the current section?
  bool    is_pure() const              { return insts_size() == total_content_size(); }

  // size in bytes of output so far in the insts sections
  csize_t insts_size() const           { return _insts.size(); }

  // same as insts_size(), except that it asserts there is no non-code here
  csize_t pure_insts_size() const      { assert(is_pure(), "no non-code");
                                         return insts_size(); }
  // capacity in bytes of the insts sections
  csize_t insts_capacity() const       { return _insts.capacity(); }

  // number of bytes remaining in the insts section
  csize_t insts_remaining() const      { return _insts.remaining(); }

  // is a given address in the insts section?  (2nd version is end-inclusive)
  bool insts_contains(address pc) const  { return _insts.contains(pc); }
  bool insts_contains2(address pc) const { return _insts.contains2(pc); }

  // Record any extra oops required to keep embedded metadata alive
  void finalize_oop_references(const methodHandle& method);

  // Allocated size in all sections, when aligned and concatenated
  // (this is the eventual state of the content in its final
  // CodeBlob).
  csize_t total_content_size() const;

  // Combined offset (relative to start of first section) of given
  // section, as eventually found in the final CodeBlob.
  csize_t total_offset_of(const CodeSection* cs) const;

  // allocated size of all relocation data, including index, rounded up
  csize_t total_relocation_size() const;

  csize_t copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const;

  // allocated size of any and all recorded oops
  csize_t total_oop_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == NULL)? 0: recorder->oop_size();
  }

  // allocated size of any and all recorded metadata
  csize_t total_metadata_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == NULL)? 0: recorder->metadata_size();
  }

  // Configuration functions, called immediately after the CB is constructed.
  // The section sizes are subtracted from the original insts section.
  // Note:  Call them in reverse section order, because each steals from insts.
  void initialize_consts_size(csize_t size) { initialize_section_size(&_consts, size); }
  void initialize_stubs_size(csize_t size)  { initialize_section_size(&_stubs,  size); }
  // Override default oop recorder.
  void initialize_oop_recorder(OopRecorder* r);

  OopRecorder* oop_recorder() const { return _oop_recorder; }
  CodeStrings& strings()            { return _code_strings; }

  address last_insn() const         { return _last_insn; }
  void set_last_insn(address a)     { _last_insn = a; }
  void clear_last_insn()            { set_last_insn(NULL); }

  void free_strings() {
    if (!_code_strings.is_null()) {
      _code_strings.free(); // sets _strings Null as a side-effect.
    }
  }

  // Code generation: relocations always target the insts section.
  void relocate(address at, RelocationHolder const& rspec, int format = 0) {
    _insts.relocate(at, rspec, format);
  }
  void relocate(address at,    relocInfo::relocType rtype, int format = 0) {
    _insts.relocate(at, rtype, format);
  }

  // Management of overflow storage for binding of Labels.
  GrowableArray<int>* create_patch_overflow();

  // NMethod generation
  void copy_code_and_locs_to(CodeBlob* blob) {
    assert(blob != NULL, "sane");
    // Relocations must be installed before the code (copy_code_to assumes
    // the relocs are already in the blob).
    copy_relocations_to(blob);
    copy_code_to(blob);
  }
  void copy_values_to(nmethod* nm) {
    if (!oop_recorder()->is_unused()) {
      oop_recorder()->copy_values_to(nm);
    }
  }

  // Transform an address from the code in this code buffer to a specified code buffer
  address transform_address(const CodeBuffer &cb, address addr) const;

  void block_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
  const char* code_string(const char* str) PRODUCT_RETURN_(return NULL;);

  // Log a little info about section usage in the CodeBuffer
  void log_section_sizes(const char* name);

#if INCLUDE_AOT
  // True if this is a code buffer used for immutable PIC, i.e. AOT
  // compilation.
  bool immutable_PIC() { return _immutable_PIC; }
  void set_immutable_PIC(bool pic) { _immutable_PIC = pic; }
#endif

#ifndef PRODUCT
 public:
  // Printing / Decoding
  // decodes from decode_begin() to code_end() and sets decode_begin to end
  void    decode();
  void    decode_all();         // decodes all the code
  void    skip_decode();        // sets decode_begin to code_end();
  void    print();
#endif


  // The following header contains architecture-specific implementations
#include CPU_HEADER(codeBuffer)

};


// Defined out-of-line because it needs the full CodeBuffer declaration.
inline void CodeSection::freeze() {
  _outer->freeze_section(this);
}

// Grow via the enclosing buffer when fewer than 'amount' bytes remain;
// returns true iff an expansion (and possible move of the code) occurred.
inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
  if (remaining() < amount) { _outer->expand(this, amount); return true; }
  return false;
}

#endif // SHARE_ASM_CODEBUFFER_HPP