/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_ASM_CODEBUFFER_HPP
#define SHARE_ASM_CODEBUFFER_HPP

#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

class CodeStrings;
class PhaseCFG;
class Compile;
class BufferBlob;
class CodeBuffer;
class Label;

class CodeOffsets: public StackObj {
public:
  enum Entries { Entry,
                 Verified_Entry,
                 Frame_Complete, // Offset in the code where the frame setup (for forte stackwalks) is complete
                 OSR_Entry,
                 Exceptions,     // Offset where exception handler lives
                 Deopt,          // Offset where deopt handler lives
                 DeoptMH,        // Offset where MethodHandle deopt handler lives
                 UnwindHandler,  // Offset to default unwind handler
                 max_Entries };

  // Special value marking codeBlobs where profile (forte) stack walking is
  // always dangerous and suspect.

  enum { frame_never_safe = -1 };

private:
  int _values[max_Entries];

public:
  CodeOffsets() {
    _values[Entry         ] = 0;
    _values[Verified_Entry] = 0;
    _values[Frame_Complete] = frame_never_safe;
    _values[OSR_Entry     ] = 0;
    _values[Exceptions    ] = -1;
    _values[Deopt         ] = -1;
    _values[DeoptMH       ] = -1;
    _values[UnwindHandler ] = -1;
  }

  int value(Entries e) { return _values[e]; }
  void set_value(Entries e, int val) { _values[e] = val; }
};
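
// Illustrative sketch (not part of this header): a code generator typically
// records entry point offsets as it emits them and reads them back when the
// final CodeBlob is created.  The "masm" name below is a hypothetical
// assembler whose offset() returns the current position in the code section.
//
//   CodeOffsets offsets;
//   offsets.set_value(CodeOffsets::Entry,          0);
//   offsets.set_value(CodeOffsets::Verified_Entry, masm->offset());
//   ...
//   int verified_entry_off = offsets.value(CodeOffsets::Verified_Entry);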

// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
class CodeSection {
  friend class CodeBuffer;
 public:
  typedef int csize_t;  // code size type; would be size_t except for history

 private:
  address     _start;           // first byte of contents (instructions)
  address     _mark;            // user mark, usually an instruction beginning
  address     _end;             // current end address
  address     _limit;           // last possible (allocated) end address
  relocInfo*  _locs_start;      // first byte of relocation information
  relocInfo*  _locs_end;        // first byte after relocation information
  relocInfo*  _locs_limit;      // first byte after relocation information buf
  address     _locs_point;      // last relocated position (grows upward)
  bool        _locs_own;        // did I allocate the locs myself?
  bool        _frozen;          // no more expansion of this section
  bool        _scratch_emit;    // Buffer is used for scratch emit, don't relocate.
  char        _index;           // my section number (SECT_INST, etc.)
  CodeBuffer* _outer;           // enclosing CodeBuffer

  // (Note:  _locs_point used to be called _last_reloc_offset.)

  CodeSection() {
    _start         = NULL;
    _mark          = NULL;
    _end           = NULL;
    _limit         = NULL;
    _locs_start    = NULL;
    _locs_end      = NULL;
    _locs_limit    = NULL;
    _locs_point    = NULL;
    _locs_own      = false;
    _frozen        = false;
    _scratch_emit  = false;
    debug_only(_index = (char)-1);
    debug_only(_outer = (CodeBuffer*)badAddress);
  }

  void initialize_outer(CodeBuffer* outer, int index) {
    _outer = outer;
    _index = index;
  }

  void initialize(address start, csize_t size = 0) {
    assert(_start == NULL, "only one init step, please");
    _start         = start;
    _mark          = NULL;
    _end           = start;

    _limit         = start + size;
    _locs_point    = start;
  }

  void initialize_locs(int locs_capacity);
  void expand_locs(int new_capacity);
  void initialize_locs_from(const CodeSection* source_cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeSection* cs) {
    _start      = cs->_start;
    _mark       = cs->_mark;
    _end        = cs->_end;
    _limit      = cs->_limit;
    _locs_point = cs->_locs_point;
  }

 public:
  address     start() const         { return _start; }
  address     mark() const          { return _mark; }
  address     end() const           { return _end; }
  address     limit() const         { return _limit; }
  csize_t     size() const          { return (csize_t)(_end - _start); }
  csize_t     mark_off() const      { assert(_mark != NULL, "not an offset");
                                      return (csize_t)(_mark - _start); }
  csize_t     capacity() const      { return (csize_t)(_limit - _start); }
  csize_t     remaining() const     { return (csize_t)(_limit - _end); }

  relocInfo*  locs_start() const    { return _locs_start; }
  relocInfo*  locs_end() const      { return _locs_end; }
  int         locs_count() const    { return (int)(_locs_end - _locs_start); }
  relocInfo*  locs_limit() const    { return _locs_limit; }
  address     locs_point() const    { return _locs_point; }
  csize_t     locs_point_off() const{ return (csize_t)(_locs_point - _start); }
  csize_t     locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }
  csize_t     locs_remaining()const { return (csize_t)(_locs_limit - _locs_end); }

  int         index() const         { return _index; }
  bool        is_allocated() const  { return _start != NULL; }
  bool        is_empty() const      { return _start == _end; }
  bool        is_frozen() const     { return _frozen; }
  bool        has_locs() const      { return _locs_end != NULL; }

  // Mark scratch buffer.
  void        set_scratch_emit()    { _scratch_emit = true; }
  bool        scratch_emit()        { return _scratch_emit; }

  CodeBuffer* outer() const         { return _outer; }

  // is a given address in this section?  (2nd version is end-inclusive)
  bool contains(address pc) const   { return pc >= _start && pc <  _end; }
  bool contains2(address pc) const  { return pc >= _start && pc <= _end; }
  bool allocates(address pc) const  { return pc >= _start && pc <  _limit; }
  bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }

  void    set_end(address pc)       { assert(allocates2(pc), "not in CodeBuffer memory: " INTPTR_FORMAT " <= " INTPTR_FORMAT " <= " INTPTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit)); _end = pc; }
  void    set_mark(address pc)      { assert(contains2(pc), "not in codeBuffer");
                                      _mark = pc; }
  void    set_mark_off(int offset)  { assert(contains2(offset + _start), "not in codeBuffer");
                                      _mark = offset + _start; }
  void    set_mark()                { _mark = _end; }
  void    clear_mark()              { _mark = NULL; }

  void    set_locs_end(relocInfo* p) {
    assert(p <= locs_limit(), "locs data fits in allocated buffer");
    _locs_end = p;
  }
  void    set_locs_point(address pc) {
    assert(pc >= locs_point(), "relocation addr may not decrease");
    assert(allocates2(pc),     "relocation addr must be in this section");
    _locs_point = pc;
  }

  // Code emission
  void emit_int8(int8_t x1) {
    address curr = end();
    *((int8_t*)  curr++) = x1;
    set_end(curr);
  }

  void emit_int16(int16_t x) { *((int16_t*) end()) = x; set_end(end() + sizeof(int16_t)); }
  void emit_int16(int8_t x1, int8_t x2) {
    address curr = end();
    *((int8_t*)  curr++) = x1;
    *((int8_t*)  curr++) = x2;
    set_end(curr);
  }

  void emit_int24(int8_t x1, int8_t x2, int8_t x3)  {
    address curr = end();
    *((int8_t*)  curr++) = x1;
    *((int8_t*)  curr++) = x2;
    *((int8_t*)  curr++) = x3;
    set_end(curr);
  }

  void emit_int32(int32_t x) { *((int32_t*) end()) = x; set_end(end() + sizeof(int32_t)); }
  void emit_int32(int8_t x1, int8_t x2, int8_t x3, int8_t x4)  {
    address curr = end();
    *((int8_t*)  curr++) = x1;
    *((int8_t*)  curr++) = x2;
    *((int8_t*)  curr++) = x3;
    *((int8_t*)  curr++) = x4;
    set_end(curr);
  }

  void emit_int64( int64_t x)  { *((int64_t*) end()) = x; set_end(end() + sizeof(int64_t)); }

  void emit_float( jfloat  x)  { *((jfloat*)  end()) = x; set_end(end() + sizeof(jfloat)); }
  void emit_double(jdouble x)  { *((jdouble*) end()) = x; set_end(end() + sizeof(jdouble)); }
  void emit_address(address x) { *((address*) end()) = x; set_end(end() + sizeof(address)); }
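
  // Illustrative sketch (not part of the API): raw byte emission into the
  // instruction section of an enclosing CodeBuffer.  In practice emission
  // usually goes through an Assembler/MacroAssembler; the "cb" pointer below
  // is a hypothetical CodeBuffer*.
  //
  //   CodeSection* insts = cb->insts();
  //   insts->emit_int8((int8_t)0x90);        // e.g. a single opcode byte
  //   insts->emit_int32(0x12345678);         // four bytes of inline data
  //   assert(insts->remaining() >= 0, "section must not overflow");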

  // Share a scratch buffer for relocinfo.  (Hacky; saves a resource allocation.)
  void initialize_shared_locs(relocInfo* buf, int length);

  // Manage labels and their addresses.
  address target(Label& L, address branch_pc);

  // Emit a relocation.
  void relocate(address at, RelocationHolder const& rspec, int format = 0);
  void relocate(address at,    relocInfo::relocType rtype, int format = 0, jint method_index = 0);

  // Alignment requirement for the starting offset.
  // Requirements are that the instruction area and the
  // stubs area must start on CodeEntryAlignment, and
  // the constant table (ctable) on sizeof(jdouble).
  int alignment() const             { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop()         { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  csize_t align_at_start(csize_t off) const { return (csize_t) align_up(off, alignment()); }

  // Mark a section frozen.  Assign its remaining space to
  // the following section.  It will never expand after this point.
  inline void freeze();         //  { _outer->freeze_section(this); }

  // Ensure there's enough space left in the current section.
  // Return true if there was an expansion.
  bool maybe_expand_to_ensure_remaining(csize_t amount);

#ifndef PRODUCT
  void decode();
  void print(const char* name);
#endif //PRODUCT
};

class CodeString;
class CodeStrings {
private:
#ifndef PRODUCT
  CodeString* _strings;
  CodeString* _strings_last;
#ifdef ASSERT
  // Becomes true after copy-out, forbids further use.
  bool _defunct; // Zero bit pattern is "valid", see memset call in decode_env::decode_env
#endif
  static const char* _prefix; // defaults to " ;; "
#endif

  CodeString* find(intptr_t offset) const;
  CodeString* find_last(intptr_t offset) const;

  void set_null_and_invalidate() {
#ifndef PRODUCT
    _strings = NULL;
    _strings_last = NULL;
#ifdef ASSERT
    _defunct = true;
#endif
#endif
  }

public:
  CodeStrings() {
#ifndef PRODUCT
    _strings = NULL;
    _strings_last = NULL;
#ifdef ASSERT
    _defunct = false;
#endif
#endif
  }

  bool is_null() {
#ifdef ASSERT
    return _strings == NULL;
#else
    return true;
#endif
  }

  const char* add_string(const char * string) PRODUCT_RETURN_(return NULL;);

  void add_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
  bool has_block_comment(intptr_t offset) const;
  void print_block_comment(outputStream* stream, intptr_t offset) const PRODUCT_RETURN;
  // MOVE strings from other to this; invalidate other.
  void assign(CodeStrings& other)  PRODUCT_RETURN;
  // COPY strings from other to this; leave other valid.
  void copy(CodeStrings& other)  PRODUCT_RETURN;
  // FREE strings; invalidate this.
  void free() PRODUCT_RETURN;

  // Guarantee that _strings are used at most once; assign and free invalidate a buffer.
  inline void check_valid() const {
#ifdef ASSERT
    assert(!_defunct, "Use of invalid CodeStrings");
#endif
  }

  static void set_prefix(const char *prefix) {
#ifndef PRODUCT
    _prefix = prefix;
#endif
  }
};
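
// Illustrative sketch (not part of the API): attaching a block comment to a
// code offset so that it shows up in disassembly output.  The "cb" pointer is
// a hypothetical CodeBuffer*; real call sites usually reach this indirectly
// via CodeBuffer::block_comment() or MacroAssembler::block_comment().
//
//   CodeStrings& strings = cb->strings();
//   strings.add_comment(cb->insts()->size(), "verified entry point");
//   // Later, when disassembling at some offset:
//   //   strings.print_block_comment(tty, offset);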

// A CodeBuffer describes a memory space into which assembly
// code is generated.  This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
//     This is used to direct 'static' code generation (e.g. for interpreter
//     or stub routine generation).  This code comes with NO relocation
//     information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
//     CodeBuffer is allocated.  This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections).  When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.

class CodeBuffer: public StackObj {
  friend class CodeSection;
  friend class StubCodeGenerator;

 private:
  // CodeBuffers must be allocated on the stack except for a single
  // special case during expansion which is handled internally.  This
  // is done to guarantee proper cleanup of resources.
  void* operator new(size_t size) throw() { return ResourceObj::operator new(size); }
  void  operator delete(void* p)          { ShouldNotCallThis(); }

 public:
  typedef int csize_t;  // code size type; would be size_t except for history
  enum {
    // Here is the list of all possible sections.  The order reflects
    // the final layout.
    SECT_FIRST = 0,
    SECT_CONSTS = SECT_FIRST, // Non-instruction data:  Floats, jump tables, etc.
    SECT_INSTS,               // Executable instructions.
    SECT_STUBS,               // Outbound trampolines for supporting call sites.
    SECT_LIMIT, SECT_NONE = -1
  };

 private:
  enum {
    sect_bits = 2,      // assert (SECT_LIMIT <= (1<<sect_bits))
    sect_mask = (1<<sect_bits)-1
  };

  const char*  _name;

  CodeSection  _consts;             // constants, jump tables
  CodeSection  _insts;              // instructions (the main section)
  CodeSection  _stubs;              // stubs (call site support), deopt, exception handling

  CodeBuffer*  _before_expand;  // dead buffer, from before the last expansion

  BufferBlob*  _blob;           // optional buffer in CodeCache for generated code
  address      _total_start;    // first address of combined memory buffer
  csize_t      _total_size;     // size in bytes of combined memory buffer

  OopRecorder* _oop_recorder;
  CodeStrings  _code_strings;
  bool         _collect_comments;      // Indicates whether we need to collect block comments at all.
  OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
  Arena*       _overflow_arena;

  address      _last_insn;      // used to merge consecutive memory barriers, loads or stores.

#if INCLUDE_AOT
  bool         _immutable_PIC;
#endif

  address      _decode_begin;   // start address for decode
  address      decode_begin();

  void initialize_misc(const char * name) {
    // Set up all pointers other than code_start/end and those inside the sections.
    assert(name != NULL, "must have a name");
    _name            = name;
    _before_expand   = NULL;
    _blob            = NULL;
    _oop_recorder    = NULL;
    _decode_begin    = NULL;
    _overflow_arena  = NULL;
    _code_strings    = CodeStrings();
    _last_insn       = NULL;
#if INCLUDE_AOT
    _immutable_PIC   = false;
#endif

    // Collect block comments, but restrict collection to cases where a disassembly is output.
    _collect_comments = ( PrintAssembly
                       || PrintStubCode
                       || PrintMethodHandleStubs
                       || PrintInterpreter
                       || PrintSignatureHandlers
                       || UnlockDiagnosticVMOptions
                        );
  }

  void initialize(address code_start, csize_t code_size) {
    _consts.initialize_outer(this,  SECT_CONSTS);
    _insts.initialize_outer(this,   SECT_INSTS);
    _stubs.initialize_outer(this,   SECT_STUBS);
    _total_start = code_start;
    _total_size  = code_size;
    // Initialize the main section:
    _insts.initialize(code_start, code_size);
    assert(!_stubs.is_allocated(),  "no garbage here");
    assert(!_consts.is_allocated(), "no garbage here");
    _oop_recorder = &_default_oop_recorder;
  }

  void initialize_section_size(CodeSection* cs, csize_t size);

  void freeze_section(CodeSection* cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeBuffer* cs);

  // ensure sections are disjoint, ordered, and contained in the blob
  void verify_section_allocation();

  // copies combined relocations to the blob, returns bytes copied
  // (if target is null, it is a dry run only, just for sizing)
  csize_t copy_relocations_to(CodeBlob* blob) const;

  // copies combined code to the blob (assumes relocs are already in there)
  void copy_code_to(CodeBlob* blob);

  // moves code sections to new buffer (assumes relocs are already in there)
  void relocate_code_to(CodeBuffer* cb) const;

  // set up a model of the final layout of my contents
  void compute_final_layout(CodeBuffer* dest) const;

  // Expand the given section so at least 'amount' is remaining.
  // Creates a new, larger BufferBlob, and rewrites the code & relocs.
  void expand(CodeSection* which_cs, csize_t amount);

  // Helper for expand.
  csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);

 public:
  // (1) code buffer referring to pre-allocated instruction memory
  CodeBuffer(address code_start, csize_t code_size) {
    assert(code_start != NULL, "sanity");
    initialize_misc("static buffer");
    initialize(code_start, code_size);
    verify_section_allocation();
  }

  // (2) CodeBuffer referring to pre-allocated CodeBlob.
  CodeBuffer(CodeBlob* blob);

  // (3) code buffer allocating codeBlob memory for code & relocation
  // info but with lazy initialization.  The name must be something
  // informative.
  CodeBuffer(const char* name) {
    initialize_misc(name);
  }

  // (4) code buffer allocating codeBlob memory for code & relocation
  // info.  The name must be something informative and code_size must
  // include both code and stubs sizes.
  CodeBuffer(const char* name, csize_t code_size, csize_t locs_size) {
    initialize_misc(name);
    initialize(code_size, locs_size);
  }
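
  // Illustrative sketch (not part of this header): the typical stub-generation
  // pattern using constructor (4).  The buffer name, sizes, and the use of
  // MacroAssembler/BufferBlob below are hypothetical placeholders.
  //
  //   ResourceMark rm;
  //   CodeBuffer buffer("my_stub", 1024 /*code_size*/, 512 /*locs_size*/);
  //   MacroAssembler masm(&buffer);
  //   ... emit code through masm ...
  //   BufferBlob* blob = BufferBlob::create("my_stub", &buffer);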

  ~CodeBuffer();

  // Initialize a CodeBuffer constructed using constructor 3.  Using
  // constructor 4 is equivalent to calling constructor 3 and then
  // calling this method.  It's been factored out for convenience of
  // construction.
  void initialize(csize_t code_size, csize_t locs_size);

  CodeSection* consts() { return &_consts; }
  CodeSection* insts() { return &_insts; }
  CodeSection* stubs() { return &_stubs; }

  const CodeSection* insts() const { return &_insts; }

  // present sections in order; return NULL at end; consts is #0, etc.
  CodeSection* code_section(int n) {
    // This makes the slightly questionable but portable assumption
    // that the various members (_consts, _insts, _stubs, etc.) are
    // adjacent in the layout of CodeBuffer.
    CodeSection* cs = &_consts + n;
    assert(cs->index() == n || !cs->is_allocated(), "sanity");
    return cs;
  }
  const CodeSection* code_section(int n) const {  // yucky const stuff
    return ((CodeBuffer*)this)->code_section(n);
  }
  static const char* code_section_name(int n);
  int section_index_of(address addr) const;
  bool contains(address addr) const {
    // handy for debugging
    return section_index_of(addr) > SECT_NONE;
  }

  // A stable mapping between 'locators' (small ints) and addresses.
  static int locator_pos(int locator)   { return locator >> sect_bits; }
  static int locator_sect(int locator)  { return locator &  sect_mask; }
  static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
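
  // Illustrative sketch: a locator packs a section-relative position and a
  // section index into one int, and can be unpacked again.  For example:
  //
  //   int loc  = CodeBuffer::locator(0x40, CodeBuffer::SECT_INSTS);
  //   int pos  = CodeBuffer::locator_pos(loc);   // 0x40
  //   int sect = CodeBuffer::locator_sect(loc);  // SECT_INSTS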
  int        locator(address addr) const;
  address    locator_address(int locator) const;

  // Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
  bool is_backward_branch(Label& L);

  // Properties
  const char* name() const                  { return _name; }
  void set_name(const char* name)           { _name = name; }
  CodeBuffer* before_expand() const         { return _before_expand; }
  BufferBlob* blob() const                  { return _blob; }
  void    set_blob(BufferBlob* blob);
  void   free_blob();                       // Free the blob, if we own one.

  // Properties relative to the insts section:
  address       insts_begin() const      { return _insts.start();      }
  address       insts_end() const        { return _insts.end();        }
  void      set_insts_end(address end)   {        _insts.set_end(end); }
  address       insts_limit() const      { return _insts.limit();      }
  address       insts_mark() const       { return _insts.mark();       }
  void      set_insts_mark()             {        _insts.set_mark();   }
  void    clear_insts_mark()             {        _insts.clear_mark(); }

  // is there anything in the buffer other than the insts section?
  bool    is_pure() const                { return insts_size() == total_content_size(); }

  // size in bytes of output so far in the insts section
  csize_t insts_size() const             { return _insts.size(); }

  // same as insts_size(), except that it asserts there is no non-code here
  csize_t pure_insts_size() const        { assert(is_pure(), "no non-code");
                                           return insts_size(); }
  // capacity in bytes of the insts section
  csize_t insts_capacity() const         { return _insts.capacity(); }

  // number of bytes remaining in the insts section
  csize_t insts_remaining() const        { return _insts.remaining(); }

  // is a given address in the insts section?  (2nd version is end-inclusive)
  bool insts_contains(address pc) const  { return _insts.contains(pc); }
  bool insts_contains2(address pc) const { return _insts.contains2(pc); }

  // Record any extra oops required to keep embedded metadata alive
  void finalize_oop_references(const methodHandle& method);

  // Allocated size in all sections, when aligned and concatenated
  // (this is the eventual state of the content in its final
  // CodeBlob).
  csize_t total_content_size() const;

  // Combined offset (relative to start of first section) of given
  // section, as eventually found in the final CodeBlob.
  csize_t total_offset_of(const CodeSection* cs) const;

  // allocated size of all relocation data, including index, rounded up
  csize_t total_relocation_size() const;

  csize_t copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const;

  // allocated size of any and all recorded oops
  csize_t total_oop_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == NULL)? 0: recorder->oop_size();
  }

  // allocated size of any and all recorded metadata
  csize_t total_metadata_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == NULL)? 0: recorder->metadata_size();
  }

  // Configuration functions, called immediately after the CB is constructed.
  // The section sizes are subtracted from the original insts section.
  // Note:  Call them in reverse section order, because each steals from insts.
  void initialize_consts_size(csize_t size)            { initialize_section_size(&_consts,  size); }
  void initialize_stubs_size(csize_t size)             { initialize_section_size(&_stubs,   size); }
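
  // Illustrative sketch: carving stubs and consts sections out of a freshly
  // constructed buffer.  The calls are made in reverse section order because
  // each one steals space from the insts section.  The sizes are hypothetical.
  //
  //   CodeBuffer buffer("adapter", 2048, 512);
  //   buffer.initialize_stubs_size(256);    // stubs follow insts in the layout
  //   buffer.initialize_consts_size(128);   // consts precede insts in the layout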
  // Override default oop recorder.
  void initialize_oop_recorder(OopRecorder* r);

  OopRecorder* oop_recorder() const   { return _oop_recorder; }
  CodeStrings& strings()              { return _code_strings; }

  address last_insn() const { return _last_insn; }
  void set_last_insn(address a) { _last_insn = a; }
  void clear_last_insn() { set_last_insn(NULL); }

  void free_strings() {
    if (!_code_strings.is_null()) {
      _code_strings.free(); // sets _strings to NULL as a side effect
    }
  }

  // Directly disassemble code buffer.
  // Print the comment associated with offset on stream, if there is one.
  virtual void print_block_comment(outputStream* stream, address block_begin) {
#ifndef PRODUCT
    intptr_t offset = (intptr_t)(block_begin - _total_start);  // Note: _total_start may not be correct for all code sections.
    _code_strings.print_block_comment(stream, offset);
#endif
  }
  bool has_block_comment(address block_begin) {
#ifndef PRODUCT
    intptr_t offset = (intptr_t)(block_begin - _total_start);  // Note: _total_start may not be correct for all code sections.
    return _code_strings.has_block_comment(offset);
#else
    return false;
#endif
  }

  // Code generation
  void relocate(address at, RelocationHolder const& rspec, int format = 0) {
    _insts.relocate(at, rspec, format);
  }
  void relocate(address at,    relocInfo::relocType rtype, int format = 0) {
    _insts.relocate(at, rtype, format);
  }
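
  // Illustrative sketch: marking an emitted instruction as relocatable.  The
  // address and relocation type below are hypothetical; real call sites pass
  // the pc of the instruction that embeds the target.
  //
  //   address call_pc = cb->insts()->end();
  //   ... emit the call instruction ...
  //   cb->relocate(call_pc, relocInfo::runtime_call_type);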

  // Management of overflow storage for binding of Labels.
  GrowableArray<int>* create_patch_overflow();

  // NMethod generation
  void copy_code_and_locs_to(CodeBlob* blob) {
    assert(blob != NULL, "sane");
    copy_relocations_to(blob);
    copy_code_to(blob);
  }
  void copy_values_to(nmethod* nm) {
    if (!oop_recorder()->is_unused()) {
      oop_recorder()->copy_values_to(nm);
    }
  }

  // Transform an address from the code in this code buffer to a specified code buffer
  address transform_address(const CodeBuffer &cb, address addr) const;

  void block_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
  const char* code_string(const char* str) PRODUCT_RETURN_(return NULL;);

  // Log a little info about section usage in the CodeBuffer
  void log_section_sizes(const char* name);

#if INCLUDE_AOT
  // True if this is a code buffer used for immutable PIC, i.e. AOT
  // compilation.
  bool immutable_PIC() { return _immutable_PIC; }
  void set_immutable_PIC(bool pic) { _immutable_PIC = pic; }
#endif

#ifndef PRODUCT
 public:
  // Printing / Decoding
  // decodes from decode_begin() to code_end() and sets decode_begin to end
  void    decode();
  void    print();
#endif
  // Directly disassemble code buffer.
  void    decode(address start, address end);

  // The following header contains architecture-specific implementations.
#include CPU_HEADER(codeBuffer)

};


inline void CodeSection::freeze() {
  _outer->freeze_section(this);
}

inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
  if (remaining() < amount) { _outer->expand(this, amount); return true; }
  return false;
}
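
// Illustrative sketch: guaranteeing space before a large emission.  The byte
// count is a hypothetical upper bound on what will be emitted next, and "cb"
// is a hypothetical CodeBuffer*.
//
//   CodeSection* insts = cb->insts();
//   insts->maybe_expand_to_ensure_remaining(128);  // may reallocate the blob
//   ... emit up to 128 bytes ...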

#endif // SHARE_ASM_CODEBUFFER_HPP