/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/gcLocker.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/icache.hpp"
#include "utilities/copy.hpp"
#include "utilities/xmlstream.hpp"

// The structure of a CodeSection:
//
//    _start ->        +----------------+
//                     | machine code...|
//    _end ->          |----------------|
//                     |                |
//                     |    (empty)     |
//                     |                |
//                     |                |
//                     +----------------+
//    _limit ->        |                |
//
//    _locs_start ->   +----------------+
//                     |reloc records...|
//                     |----------------|
//    _locs_end ->     |                |
//                     |                |
//                     |    (empty)     |
//                     |                |
//                     |                |
//                     +----------------+
//    _locs_limit ->   |                |
// The _end (resp. _limit) pointer refers to the first
// unused (resp. unallocated) byte.

// The structure of the CodeBuffer while code is being accumulated:
//
//    _total_start ->    \
//    _insts._start ->   +----------------+
//                       |                |
//                       |     Code       |
//                       |                |
//    _stubs._start ->   |----------------|
//                       |                |
//                       |    Stubs       | (also handlers for deopt/exception)
//                       |                |
//    _consts._start ->  |----------------|
//                       |                |
//                       |   Constants    |
//                       |                |
//                       +----------------+
//    + _total_size ->   |                |
//
// When the code and relocations are copied to the code cache,
// the empty parts of each section are removed, and everything
// is copied into contiguous locations.

typedef CodeBuffer::csize_t csize_t;  // file-local definition

// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) {
  initialize_misc("static buffer");
  initialize(blob->content_begin(), blob->content_size());
  verify_section_allocation();
}

void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Compute maximal alignment.
  int align = _insts.alignment();
  // Always allow for empty slop around each section.
  int slop = (int) CodeSection::end_slop();

  assert(blob() == NULL, "only once");
  set_blob(BufferBlob::create(_name, code_size + (align+slop) * (SECT_LIMIT+1)));
  if (blob() == NULL) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
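  // Reuse the (address, size) form of initialize() to lay the sections over
  // the storage just obtained from the new blob.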
  initialize(_total_start, _total_size);

  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  pd_initialize();

  if (locs_size != 0) {
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  verify_section_allocation();
}


CodeBuffer::~CodeBuffer() {
  verify_section_allocation();

  // If we allocate our code buffer from the CodeCache
  // via a BufferBlob, and it's not permanent, then
  // free the BufferBlob.
  // The rest of the memory will be freed when the ResourceObj
  // is released.
  for (CodeBuffer* cb = this; cb != NULL; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }

  // free any overflow storage
  delete _overflow_arena;

  // Claim is that stack allocation ensures resources are cleaned up.
  // This is resource clean up, let's hope that all were properly copied out.
  free_strings();

#ifdef ASSERT
  // Save allocation type to execute assert in ~ResourceObj()
  // which is called after this destructor.
  assert(_default_oop_recorder.allocated_on_stack(), "should be embedded object");
  ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type();
  Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
  ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
#endif
}

void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.freeze());  // force unused OR to be frozen
  _oop_recorder = r;
}

void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs())  cs->initialize_locs(1);
}

void CodeBuffer::freeze_section(CodeSection* cs) {
  CodeSection* next_cs = (cs == consts())? NULL: code_section(cs->index()+1);
  csize_t frozen_size = cs->size();
  if (next_cs != NULL) {
    frozen_size = next_cs->align_at_start(frozen_size);
  }
  address old_limit = cs->limit();
  address new_limit = cs->start() + frozen_size;
  relocInfo* old_locs_limit = cs->locs_limit();
  relocInfo* new_locs_limit = cs->locs_end();
  // Patch the limits.
  cs->_limit = new_limit;
  cs->_locs_limit = new_locs_limit;
  cs->_frozen = true;
  if (next_cs != NULL && !next_cs->is_allocated() && !next_cs->is_frozen()) {
    // Give remaining buffer space to the following section.
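    // It inherits both the unused tail of this section's code space and the
    // unused tail of its relocation storage.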
    next_cs->initialize(new_limit, old_limit - new_limit);
    next_cs->initialize_shared_locs(new_locs_limit,
                                    old_locs_limit - new_locs_limit);
  }
}

void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != NULL) {
    address start = blob->content_begin();
    address end   = blob->content_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
    _total_start    = badAddress;
    _consts._start  = _consts._end  = badAddress;
    _insts._start   = _insts._end   = badAddress;
    _stubs._start   = _stubs._end   = badAddress;
#endif //ASSERT
  }
}

void CodeBuffer::free_blob() {
  if (_blob != NULL) {
    BufferBlob::free(_blob);
    set_blob(NULL);
  }
}

const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
  return NULL;
#else //PRODUCT
  switch (n) {
  case SECT_CONSTS:  return "consts";
  case SECT_INSTS:   return "insts";
  case SECT_STUBS:   return "stubs";
  default:           return NULL;
  }
#endif //PRODUCT
}

int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr))  return n;
  }
  return SECT_NONE;
}

int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}

address CodeBuffer::locator_address(int locator) const {
  if (locator < 0)  return NULL;
  address start = code_section(locator_sect(locator))->start();
  return start + locator_pos(locator);
}

bool CodeBuffer::is_backward_branch(Label& L) {
  return L.is_bound() && insts_end() <= locator_address(L.loc());
}

address CodeBuffer::decode_begin() {
  address begin = _insts.start();
  if (_decode_begin != NULL && _decode_begin > begin)
    begin = _decode_begin;
  return begin;
}


GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  if (_overflow_arena == NULL) {
    _overflow_arena = new (mtCode) Arena(mtCode);
  }
  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}


// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // Need to return a pc, doesn't matter what it is since it will be
    // replaced during resolution later.
    // Don't return NULL or badAddress, since branches shouldn't overflow.
    // Don't return base either because that could overflow displacements
    // for shorter branches.  It will get checked when bound.
    return branch_pc;
  }
}

void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none)  return;

  // The assertion below has been adjusted, to also work for
  // relocation for fixup.  Sometimes we want to put relocation
  // information for the next instruction, since it will be patched
  // with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // no space for relocation information provided => code cannot be
    // relocated.  Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type ||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = filler_relocInfo();
    offset -= filler_relocInfo().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}

void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == NULL, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start    = locs_start;
  _locs_end      = locs_start;
  _locs_limit    = locs_start + locs_capacity;
  _locs_own      = true;
}

void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == NULL, "do this before locs are allocated");
  // Internal invariant: locs buf must be fully aligned.
  // See copy_relocations_to() below.
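  // Skip leading entries until buf is HeapWord-aligned; copy_relocations_to()
  // relies on word-wise copies of the relocation data.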
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end   = buf;
    _locs_limit = buf + length;
    _locs_own   = false;
  }
}

void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}

void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == NULL) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start = locs_start;
    _locs_end   = locs_start + old_count;
    _locs_limit = locs_start + new_capacity;
  }
}


/// Support for emitting the code to its final location.
/// The pattern is the same for all functions.
/// We iterate over all the sections, padding each to alignment.

csize_t CodeBuffer::total_content_size() const {
  csize_t size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    size_so_far = cs->align_at_start(size_so_far);
    size_so_far += cs->size();
  }
  return size_so_far;
}

void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");

  {
    // not sure why this is here, but why not...
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = NULL;
  CodeSection*       prev_dest_cs = NULL;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    csize_t csize = cs->size();

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (padding != 0) {
        buf_offset += padding;
        assert(prev_dest_cs != NULL, "sanity");
        prev_dest_cs->_limit += padding;
      }
#ifdef ASSERT
      if (prev_cs != NULL && prev_cs->is_frozen() && n < (SECT_LIMIT - 1)) {
        // Make sure the ends still match up.
        // This is important because a branch in a frozen section
        // might target code in a following section, via a Label,
        // and without a relocation record.  See Label::patch_instructions.
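        // The start-to-start distance between the frozen section and this one
        // must be identical in the source and destination layouts, or those
        // label-patched branches would break.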
        address dest_start = buf+buf_offset;
        csize_t start2start = cs->start() - prev_cs->start();
        csize_t dest_start2start = dest_start - prev_dest_cs->start();
        assert(start2start == dest_start2start, "cannot stretch frozen sect");
      }
#endif //ASSERT
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = NULL);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  dest->verify_section_allocation();
}

// Append an oop reference that keeps the class alive.
static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
  oop cl = k->klass_holder();
  if (cl != NULL && !oops->contains(cl)) {
    oops->append(cl);
  }
}

void CodeBuffer::finalize_oop_references(methodHandle mh) {
  No_Safepoint_Verifier nsv;

  GrowableArray<oop> oops;

  // Make sure that immediate metadata records something in the OopRecorder
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    RelocIterator iter(cs);
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* md = iter.metadata_reloc();
        if (md->metadata_is_immediate()) {
          Metadata* m = md->metadata_value();
          if (oop_recorder()->is_real(m)) {
            if (m->is_methodData()) {
              m = ((MethodData*)m)->method();
            }
            if (m->is_method()) {
              m = ((Method*)m)->method_holder();
            }
            if (m->is_klass()) {
              append_oop_references(&oops, (Klass*)m);
            } else {
              // XXX This will currently occur for MDO which don't
              // have a backpointer.  This has to be fixed later.
              m->print();
              ShouldNotReachHere();
            }
          }
        }
      }
    }
  }

  if (!oop_recorder()->is_unused()) {
    for (int i = 0; i < oop_recorder()->metadata_count(); i++) {
      Metadata* m = oop_recorder()->metadata_at(i);
      if (oop_recorder()->is_real(m)) {
        if (m->is_methodData()) {
          m = ((MethodData*)m)->method();
        }
        if (m->is_method()) {
          m = ((Method*)m)->method_holder();
        }
        if (m->is_klass()) {
          append_oop_references(&oops, (Klass*)m);
        } else {
          m->print();
          ShouldNotReachHere();
        }
      }
    }

  }

  // Add the class loader of Method* for the nmethod itself
  append_oop_references(&oops, mh->method_holder());

  // Add any oops that we've found
  Thread* thread = Thread::current();
  for (int i = 0; i < oops.length(); i++) {
    oop_recorder()->find_index((jobject)thread->handle_area()->allocate_handle(oops.at(i)));
  }
}


csize_t CodeBuffer::total_offset_of(CodeSection* cs) const {
  csize_t size_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      size_so_far = cur_cs->align_at_start(size_so_far);
    }
    if (cur_cs->index() == cs->index()) {
      return size_so_far;
    }
    size_so_far += cur_cs->size();
  }
  ShouldNotReachHere();
  return -1;
}

csize_t CodeBuffer::total_relocation_size() const {
  csize_t lsize = copy_relocations_to(NULL);  // dry run only
  csize_t csize = total_content_size();
  csize_t total = RelocIterator::locs_and_index_size(csize, lsize);
  return (csize_t) align_size_up(total, HeapWordSize);
}

csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const {
  csize_t buf_offset = 0;
  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;

  assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
  assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");

  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    if (only_inst && (n != (int)SECT_INSTS)) {
      // Need only relocation info for code.
      continue;
    }
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
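      // Each filler advances the merged stream by at most
      // filler_relocInfo().addr_offset() bytes, so a large gap may need
      // several of them.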
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = filler_relocInfo();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != NULL) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point;  // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != NULL && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != NULL) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(only_inst || code_end_so_far == total_content_size(), "sanity");

  return buf_offset;
}

csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = NULL;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;

  if (dest != NULL) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
  }
  // if dest == NULL, this is just the sizing pass
  //
  buf_offset = copy_relocations_to(buf, buf_limit, false);

  // Account for index:
  if (buf != NULL) {
    RelocIterator::create_index(dest->relocation_begin(),
                                buf_offset / sizeof(relocInfo),
                                dest->relocation_end());
  }

  return buf_offset;
}

void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print();
  }
#endif //PRODUCT

  CodeBuffer dest(dest_blob);
  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
  this->compute_final_layout(&dest);
  relocate_code_to(&dest);

  // transfer strings and comments from buffer to blob
  dest_blob->set_strings(_code_strings);

  // Done moving code bytes; were they the right size?
  assert(round_to(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
}

// Move all my code into another code buffer.  Consult applicable
// relocs to repair embedded addresses.
// The layout in the destination
// CodeBuffer is different to the source CodeBuffer: the destination
// CodeBuffer gets the final layout (consts, insts, stubs in order of
// ascending address).
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  address dest_end = dest->_total_start + dest->_total_size;
  address dest_filled = NULL;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_size_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == NULL) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }
    // Keep track of the highest filled address
    dest_filled = MAX2(dest_filled, dest_cs->end() + dest_cs->remaining());

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);
  }

  // Do relocation after all sections are copied.
  // This is necessary if the code uses constants in stubs, which are
  // relocated when the corresponding instruction in the code (e.g., a
  // call) is relocated.  Stubs are placed behind the main code
  // section, so that section has to be copied before relocating.
  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }

  if (dest->blob() == NULL && dest_filled != NULL) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
                        Assembler::code_fill_byte());

  }
}

csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous section,
      // even if it's empty (e.g. consts section can be empty).
      // Cf. compute_final_layout.
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(n - 1 >= SECT_FIRST, "sanity");
        new_capacity[n - 1] += padding;
      }
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;  // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}

void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print();
  }

  if (StressCodeBuffers && blob() != NULL) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  {
    if (blob() == NULL)  return;  // caller must check for blob == NULL
    for (int n = 0; n < (int)SECT_LIMIT; n++) {
      guarantee(!code_section(n)->is_frozen(), "resizing not allowed when frozen");
    }
  }

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  if (cb.blob() == NULL) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = NULL);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
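  // Walk the sections from last to first; _insts is the memory provider and
  // keeps whatever space is left after the other sections are carved out of
  // the new buffer by initialize_section_size().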
  for (int n = (int)SECT_LIMIT-1; n >= SECT_FIRST; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n != SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == NULL) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(NULL);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal));

  _decode_begin = NULL;  // sanity

  // Make certain that the new sections are all snugly inside the new blob.
  verify_section_allocation();

#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print();
  }
#endif //PRODUCT
}

void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == NULL, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  _overflow_arena = cb->_overflow_arena;
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}

void CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == badAddress)  return;  // smashed by set_blob(NULL)
  address tend   = tstart + _total_size;
  if (_blob != NULL) {
    guarantee(tstart >= _blob->content_begin(), "sanity");
    guarantee(tend   <= _blob->content_end(),   "sanity");
  }
  // Verify disjointness.
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated() || sect->is_empty())  continue;
    guarantee((intptr_t)sect->start() % sect->alignment() == 0
           || sect->is_empty() || _blob == NULL,
           "start is aligned");
    for (int m = (int) SECT_FIRST; m < (int) SECT_LIMIT; m++) {
      CodeSection* other = code_section(m);
      if (!other->is_allocated() || other == sect)  continue;
      guarantee(!other->contains(sect->start()), "sanity");
      // limit is an exclusive address and can be the start of another
      // section.
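      // Probe the last byte this section actually owns instead.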
      guarantee(!other->contains(sect->limit() - 1), "sanity");
    }
    guarantee(sect->end() <= tend, "sanity");
    guarantee(sect->end() <= sect->limit(), "sanity");
  }
}

void CodeBuffer::log_section_sizes(const char* name) {
  if (xtty != NULL) {
    ttyLocker ttyl;
    // log info about buffer usage
    xtty->print_cr("<blob name='%s' size='%d'>", name, _total_size);
    for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
      CodeSection* sect = code_section(n);
      if (!sect->is_allocated() || sect->is_empty())  continue;
      xtty->print_cr("<sect index='%d' size='" SIZE_FORMAT "' free='" SIZE_FORMAT "'/>",
                     n, sect->limit() - sect->start(), sect->limit() - sect->end());
    }
    xtty->print_cr("</blob>");
  }
}

#ifndef PRODUCT

void CodeSection::dump() {
  address ptr = start();
  for (csize_t step; ptr < end(); ptr += step) {
    step = end() - ptr;
    if (step > jintSize * 4)  step = jintSize * 4;
    tty->print(INTPTR_FORMAT ": ", p2i(ptr));
    while (step > 0) {
      tty->print(" " PTR32_FORMAT, *(jint*)ptr);
      ptr  += jintSize;
      step -= jintSize;  // consume the row so the loop terminates
    }
    tty->cr();
  }
}


void CodeSection::decode() {
  Disassembler::decode(start(), end());
}


void CodeBuffer::block_comment(intptr_t offset, const char * comment) {
  _code_strings.add_comment(offset, comment);
}

const char* CodeBuffer::code_string(const char* str) {
  return _code_strings.add_string(str);
}

class CodeString: public CHeapObj<mtCode> {
 private:
  friend class CodeStrings;
  const char * _string;
  CodeString*  _next;
  intptr_t     _offset;

  ~CodeString() {
    assert(_next == NULL, "wrong interface for freeing list");
    os::free((void*)_string);
  }

  bool is_comment() const { return _offset >= 0; }

 public:
  CodeString(const char * string, intptr_t offset = -1)
    : _next(NULL), _offset(offset) {
    _string = os::strdup(string, mtCode);
  }

  const char * string() const { return _string; }
  intptr_t     offset() const { assert(_offset >= 0, "offset for non comment?"); return _offset; }
  CodeString*  next()   const { return _next; }

  void set_next(CodeString* next) { _next = next; }

  CodeString* first_comment() {
    if (is_comment()) {
      return this;
    } else {
      return next_comment();
    }
  }
  CodeString* next_comment() const {
    CodeString* s = _next;
    while (s != NULL && !s->is_comment()) {
      s = s->_next;
    }
    return s;
  }
};

CodeString* CodeStrings::find(intptr_t offset) const {
  CodeString* a = _strings->first_comment();
  while (a != NULL && a->offset() != offset) {
    a = a->next_comment();
  }
  return a;
}

// Convenience for add_comment.
CodeString* CodeStrings::find_last(intptr_t offset) const {
  CodeString* a = find(offset);
  if (a != NULL) {
    CodeString* c = NULL;
    while (((c = a->next_comment()) != NULL) && (c->offset() == offset)) {
      a = c;
    }
  }
  return a;
}

void CodeStrings::add_comment(intptr_t offset, const char * comment) {
  check_valid();
  CodeString* c      = new CodeString(comment, offset);
  CodeString* inspos = (_strings == NULL) ? NULL : find_last(offset);

  if (inspos) {
    // insert after already existing comments with same offset
    c->set_next(inspos->next());
    inspos->set_next(c);
  } else {
    // no comments with such offset, yet. Insert before anything else.
    c->set_next(_strings);
    _strings = c;
  }
}

void CodeStrings::assign(CodeStrings& other) {
  other.check_valid();
  assert(is_null(), "Cannot assign onto non-empty CodeStrings");
  _strings = other._strings;
#ifdef ASSERT
  _defunct = false;
#endif
  other.set_null_and_invalidate();
}

// Deep copy of CodeStrings for consistent memory management.
// Only used for actual disassembly so this is cheaper than reference counting
// for the "normal" fastdebug case.
void CodeStrings::copy(CodeStrings& other) {
  other.check_valid();
  check_valid();
  assert(is_null(), "Cannot copy onto non-empty CodeStrings");
  CodeString* n = other._strings;
  CodeString** ps = &_strings;
  while (n != NULL) {
    *ps = new CodeString(n->string(), n->offset());
    ps = &((*ps)->_next);
    n = n->next();
  }
}

const char* CodeStrings::_prefix = " ;; ";  // default: can be changed via set_prefix

void CodeStrings::print_block_comment(outputStream* stream, intptr_t offset) const {
  check_valid();
  if (_strings != NULL) {
    CodeString* c = find(offset);
    while (c && c->offset() == offset) {
      stream->bol();
      stream->print("%s", _prefix);
      // Don't interpret as format strings since it could contain %
      stream->print_raw_cr(c->string());
      c = c->next_comment();
    }
  }
}

// Also sets isNull()
void CodeStrings::free() {
  CodeString* n = _strings;
  while (n) {
    // unlink the node from the list saving a pointer to the next
    CodeString* p = n->next();
    n->set_next(NULL);
    delete n;
    n = p;
  }
  set_null_and_invalidate();
}

const char* CodeStrings::add_string(const char * string) {
  check_valid();
  CodeString* s = new CodeString(string);
  s->set_next(_strings);
  _strings = s;
  assert(s->string() != NULL, "should have a string");
  return s->string();
}

void CodeBuffer::decode() {
  ttyLocker ttyl;
  Disassembler::decode(decode_begin(), insts_end());
  _decode_begin = insts_end();
}


void CodeBuffer::skip_decode() {
  _decode_begin = insts_end();
}


void CodeBuffer::decode_all() {
  ttyLocker ttyl;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // dump contents of each section
    CodeSection* cs = code_section(n);
    tty->print_cr("! %s:", code_section_name(n));
    if (cs != consts())
      cs->decode();
    else
      cs->dump();
  }
}


void CodeSection::print(const char* name) {
  csize_t locs_size = locs_end() - locs_start();
  tty->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)%s",
                name, p2i(start()), p2i(end()), p2i(limit()), size(), capacity(),
                is_frozen()? " [frozen]": "");
  tty->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d",
                name, p2i(locs_start()), p2i(locs_end()), p2i(locs_limit()), locs_size, locs_capacity(), locs_point_off());
  if (PrintRelocations) {
    RelocIterator iter(this);
    iter.print();
  }
}

void CodeBuffer::print() {
  if (this == NULL) {
    tty->print_cr("NULL CodeBuffer pointer");
    return;
  }

  tty->print_cr("CodeBuffer:");
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // print each section
    CodeSection* cs = code_section(n);
    cs->print(code_section_name(n));
  }
}

#endif // PRODUCT