/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/gcLocker.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/icache.hpp"
#include "utilities/copy.hpp"
#include "utilities/xmlstream.hpp"

// The structure of a CodeSection:
//
//    _start ->           +----------------+
//                        | machine code...|
//    _end ->             |----------------|
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _limit ->           |                |
//
//    _locs_start ->      +----------------+
//                        |reloc records...|
//                        |----------------|
//    _locs_end ->        |                |
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _locs_limit ->      |                |
// The _end (resp. _limit) pointer refers to the first
// unused (resp. unallocated) byte.

// The structure of the CodeBuffer while code is being accumulated:
//
//    _total_start ->    \
//    _insts._start ->             +----------------+
//                                 |                |
//                                 |     Code       |
//                                 |                |
//    _stubs._start ->             |----------------|
//                                 |                |
//                                 |    Stubs       | (also handlers for deopt/exception)
//                                 |                |
//    _consts._start ->            |----------------|
//                                 |                |
//                                 |   Constants    |
//                                 |                |
//                                 +----------------+
//    + _total_size ->             |                |
//
// When the code and relocations are copied to the code cache,
// the empty parts of each section are removed, and everything
// is copied into contiguous locations.

typedef CodeBuffer::csize_t csize_t;  // file-local definition

// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) {
  initialize_misc("static buffer");
  initialize(blob->content_begin(), blob->content_size());
  verify_section_allocation();
}

void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Compute maximal alignment.
  int align = _insts.alignment();
  // Always allow for empty slop around each section.
  int slop = (int) CodeSection::end_slop();

  assert(blob() == NULL, "only once");
  set_blob(BufferBlob::create(_name, code_size + (align+slop) * (SECT_LIMIT+1)));
  if (blob() == NULL) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
  initialize(_total_start, _total_size);

  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  pd_initialize();

  if (locs_size != 0) {
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  verify_section_allocation();
}


CodeBuffer::~CodeBuffer() {
  verify_section_allocation();

  // If we allocate our code buffer from the CodeCache
  // via a BufferBlob, and it's not permanent, then
  // free the BufferBlob.
  // The rest of the memory will be freed when the ResourceObj
  // is released.
  for (CodeBuffer* cb = this; cb != NULL; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }

  // free any overflow storage
  delete _overflow_arena;

  // Claim is that stack allocation ensures resources are cleaned up.
  // This is resource clean up, let's hope that all were properly copied out.
  free_strings();

#ifdef ASSERT
  // Save allocation type to execute assert in ~ResourceObj()
  // which is called after this destructor.
  assert(_default_oop_recorder.allocated_on_stack(), "should be embedded object");
  ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type();
  Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
  ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
#endif
}

void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.freeze());  // force unused OR to be frozen
  _oop_recorder = r;
}

void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs())  cs->initialize_locs(1);
}

void CodeBuffer::freeze_section(CodeSection* cs) {
  CodeSection* next_cs = (cs == consts())? NULL: code_section(cs->index()+1);
  csize_t frozen_size = cs->size();
  if (next_cs != NULL) {
    frozen_size = next_cs->align_at_start(frozen_size);
  }
  address old_limit = cs->limit();
  address new_limit = cs->start() + frozen_size;
  relocInfo* old_locs_limit = cs->locs_limit();
  relocInfo* new_locs_limit = cs->locs_end();
  // Patch the limits.
  cs->_limit = new_limit;
  cs->_locs_limit = new_locs_limit;
  cs->_frozen = true;
  if (!next_cs->is_allocated() && !next_cs->is_frozen()) {
    // Give remaining buffer space to the following section.
    next_cs->initialize(new_limit, old_limit - new_limit);
    next_cs->initialize_shared_locs(new_locs_limit,
                                    old_locs_limit - new_locs_limit);
  }
}

void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != NULL) {
    address start = blob->content_begin();
    address end   = blob->content_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
    _total_start    = badAddress;
    _consts._start  = _consts._end  = badAddress;
    _insts._start   = _insts._end   = badAddress;
    _stubs._start   = _stubs._end   = badAddress;
#endif //ASSERT
  }
}

void CodeBuffer::free_blob() {
  if (_blob != NULL) {
    BufferBlob::free(_blob);
    set_blob(NULL);
  }
}

const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
  return NULL;
#else //PRODUCT
  switch (n) {
  case SECT_CONSTS:            return "consts";
  case SECT_INSTS:             return "insts";
  case SECT_STUBS:             return "stubs";
  default:                     return NULL;
  }
#endif //PRODUCT
}

int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr))  return n;
  }
  return SECT_NONE;
}

int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}

address CodeBuffer::locator_address(int locator) const {
  if (locator < 0)  return NULL;
  address start = code_section(locator_sect(locator))->start();
  return start + locator_pos(locator);
}

bool CodeBuffer::is_backward_branch(Label& L) {
  return L.is_bound() && insts_end() <= locator_address(L.loc());
}

address CodeBuffer::decode_begin() {
  address begin = _insts.start();
  if (_decode_begin != NULL && _decode_begin > begin)
    begin = _decode_begin;
  return begin;
}


GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  if (_overflow_arena == NULL) {
    _overflow_arena = new (mtCode) Arena(mtCode);
  }
  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}


// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // Need to return a pc, doesn't matter what it is since it will be
    // replaced during resolution later.
    // Don't return NULL or badAddress, since branches shouldn't overflow.
    // Don't return base either because that could overflow displacements
    // for shorter branches.  It will get checked when bound.
    return branch_pc;
  }
}

void CodeSection::relocate(address at, relocInfo::relocType rtype, int format, jint method_index) {
  RelocationHolder rh;
  switch (rtype) {
    case relocInfo::none: return;
    case relocInfo::opt_virtual_call_type: {
      rh = opt_virtual_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::static_call_type: {
      rh = static_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::virtual_call_type: {
      assert(method_index == 0, "resolved method overriding is not supported");
      rh = Relocation::spec_simple(rtype);
      break;
    }
    default: {
      rh = Relocation::spec_simple(rtype);
      break;
    }
  }
  relocate(at, rh, format);
}

void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  // Do not relocate in scratch buffers.
  if (scratch_emit()) { return; }
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none)  return;

  // The assertion below has been adjusted, to also work for
  // relocation for fixup.  Sometimes we want to put relocation
  // information for the next instruction, since it will be patched
  // with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // no space for relocation information provided => code cannot be
    // relocated.  Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none              ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = filler_relocInfo();
    offset -= filler_relocInfo().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}

void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == NULL, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start    = locs_start;
  _locs_end      = locs_start;
  _locs_limit    = locs_start + locs_capacity;
  _locs_own      = true;
}

void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == NULL, "do this before locs are allocated");
  // Internal invariant:  locs buf must be fully aligned.
  // See copy_relocations_to() below.
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end   = buf;
    _locs_limit = buf + length;
    _locs_own   = false;
  }
}

void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}

void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == NULL) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start    = locs_start;
    _locs_end      = locs_start + old_count;
    _locs_limit    = locs_start + new_capacity;
  }
}


/// Support for emitting the code to its final location.
/// The pattern is the same for all functions.
/// We iterate over all the sections, padding each to alignment.

csize_t CodeBuffer::total_content_size() const {
  csize_t size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    size_so_far = cs->align_at_start(size_so_far);
    size_so_far += cs->size();
  }
  return size_so_far;
}

void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");

  {
    // not sure why this is here, but why not...
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = NULL;
  CodeSection*       prev_dest_cs = NULL;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    csize_t csize = cs->size();

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (padding != 0) {
        buf_offset += padding;
        assert(prev_dest_cs != NULL, "sanity");
        prev_dest_cs->_limit += padding;
      }
#ifdef ASSERT
      if (prev_cs != NULL && prev_cs->is_frozen() && n < (SECT_LIMIT - 1)) {
        // Make sure the ends still match up.
        // This is important because a branch in a frozen section
        // might target code in a following section, via a Label,
        // and without a relocation record.  See Label::patch_instructions.
        address dest_start = buf+buf_offset;
        csize_t start2start = cs->start() - prev_cs->start();
        csize_t dest_start2start = dest_start - prev_dest_cs->start();
        assert(start2start == dest_start2start, "cannot stretch frozen sect");
      }
#endif //ASSERT
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = NULL);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  dest->verify_section_allocation();
}

// Append an oop reference that keeps the class alive.
static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
  oop cl = k->klass_holder();
  if (cl != NULL && !oops->contains(cl)) {
    oops->append(cl);
  }
}

void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
  NoSafepointVerifier nsv;

  GrowableArray<oop> oops;

  // Make sure that immediate metadata records something in the OopRecorder
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    RelocIterator iter(cs);
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* md = iter.metadata_reloc();
        if (md->metadata_is_immediate()) {
          Metadata* m = md->metadata_value();
          if (oop_recorder()->is_real(m)) {
            if (m->is_methodData()) {
              m = ((MethodData*)m)->method();
            }
            if (m->is_method()) {
              m = ((Method*)m)->method_holder();
            }
            if (m->is_klass()) {
              append_oop_references(&oops, (Klass*)m);
            } else {
              // XXX This will currently occur for MDO which don't
              // have a backpointer.  This has to be fixed later.
              m->print();
              ShouldNotReachHere();
            }
          }
        }
      }
    }
  }

  if (!oop_recorder()->is_unused()) {
    for (int i = 0; i < oop_recorder()->metadata_count(); i++) {
      Metadata* m = oop_recorder()->metadata_at(i);
      if (oop_recorder()->is_real(m)) {
        if (m->is_methodData()) {
          m = ((MethodData*)m)->method();
        }
        if (m->is_method()) {
          m = ((Method*)m)->method_holder();
        }
        if (m->is_klass()) {
          append_oop_references(&oops, (Klass*)m);
        } else {
          m->print();
          ShouldNotReachHere();
        }
      }
    }

  }

  // Add the class loader of Method* for the nmethod itself
  append_oop_references(&oops, mh->method_holder());

  // Add any oops that we've found
  Thread* thread = Thread::current();
  for (int i = 0; i < oops.length(); i++) {
    oop_recorder()->find_index((jobject)thread->handle_area()->allocate_handle(oops.at(i)));
  }
}



csize_t CodeBuffer::total_offset_of(const CodeSection* cs) const {
  csize_t size_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      size_so_far = cur_cs->align_at_start(size_so_far);
    }
    if (cur_cs->index() == cs->index()) {
      return size_so_far;
    }
    size_so_far += cur_cs->size();
  }
  ShouldNotReachHere();
  return -1;
}

csize_t CodeBuffer::total_relocation_size() const {
  csize_t lsize = copy_relocations_to(NULL);  // dry run only
  csize_t csize = total_content_size();
  csize_t total = RelocIterator::locs_and_index_size(csize, lsize);
  return (csize_t) align_size_up(total, HeapWordSize);
}

csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const {
  csize_t buf_offset = 0;
  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;

  assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
  assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");

  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    if (only_inst && (n != (int)SECT_INSTS)) {
      // Need only relocation info for code.
      continue;
    }
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = filler_relocInfo();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != NULL) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point; // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != NULL && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != NULL) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(only_inst || code_end_so_far == total_content_size(), "sanity");

  return buf_offset;
}

csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = NULL;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;

  if (dest != NULL) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
  }
  // if dest == NULL, this is just the sizing pass
  //
  buf_offset = copy_relocations_to(buf, buf_limit, false);

  // Account for index:
  if (buf != NULL) {
    RelocIterator::create_index(dest->relocation_begin(),
                                buf_offset / sizeof(relocInfo),
                                dest->relocation_end());
  }

  return buf_offset;
}

void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print();
  }
#endif //PRODUCT

  CodeBuffer dest(dest_blob);
  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
  this->compute_final_layout(&dest);
  relocate_code_to(&dest);

  // transfer strings and comments from buffer to blob
  dest_blob->set_strings(_code_strings);

  // Done moving code bytes; were they the right size?
  assert(round_to(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
}

// Move all my code into another code buffer.  Consult applicable
// relocs to repair embedded addresses.
// The layout in the destination CodeBuffer is different to the source
// CodeBuffer: the destination CodeBuffer gets the final layout (consts,
// insts, stubs in order of ascending address).
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  address dest_end = dest->_total_start + dest->_total_size;
  address dest_filled = NULL;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_size_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == NULL) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }
    // Keep track of the highest filled address
    dest_filled = MAX2(dest_filled, dest_cs->end() + dest_cs->remaining());

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);
  }

  // Do relocation after all sections are copied.
  // This is necessary if the code uses constants in stubs, which are
  // relocated when the corresponding instruction in the code (e.g., a
  // call) is relocated. Stubs are placed behind the main code
  // section, so that section has to be copied before relocating.
  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }

  if (dest->blob() == NULL && dest_filled != NULL) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
                        Assembler::code_fill_byte());

  }
}

csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous section,
      // even if it's empty (e.g. consts section can be empty).
      // Cf. compute_final_layout
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(n - 1 >= SECT_FIRST, "sanity");
        new_capacity[n - 1] += padding;
      }
    }

    csize_t exp = sect->size();       // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;  // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}

void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print();
  }

  if (StressCodeBuffers && blob() != NULL) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  {
    if (blob() == NULL)  return;  // caller must check for blob == NULL
    for (int n = 0; n < (int)SECT_LIMIT; n++) {
      guarantee(!code_section(n)->is_frozen(), "resizing not allowed when frozen");
    }
  }

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  memset(new_capacity, 0, sizeof(csize_t) * SECT_LIMIT);
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  if (cb.blob() == NULL) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = NULL);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
  for (int n = (int)SECT_LIMIT-1; n >= SECT_FIRST; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n != SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == NULL) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(NULL);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal));

  _decode_begin = NULL;  // sanity

  // Make certain that the new sections are all snugly inside the new blob.
  verify_section_allocation();

#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print();
  }
#endif //PRODUCT
}

void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == NULL, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  _overflow_arena = cb->_overflow_arena;
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}

void CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == badAddress)  return;  // smashed by set_blob(NULL)
  address tend   = tstart + _total_size;
  if (_blob != NULL) {

    guarantee(tstart >= _blob->content_begin(), "sanity");
    guarantee(tend   <= _blob->content_end(),   "sanity");
  }
  // Verify disjointness.
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated() || sect->is_empty())  continue;
    guarantee((intptr_t)sect->start() % sect->alignment() == 0
              || sect->is_empty() || _blob == NULL,
              "start is aligned");
    for (int m = (int) SECT_FIRST; m < (int) SECT_LIMIT; m++) {
      CodeSection* other = code_section(m);
      if (!other->is_allocated() || other == sect)  continue;
      guarantee(!other->contains(sect->start()    ), "sanity");
      // limit is an exclusive address and can be the start of another
      // section.
      guarantee(!other->contains(sect->limit() - 1), "sanity");
    }
    guarantee(sect->end() <= tend, "sanity");
    guarantee(sect->end() <= sect->limit(), "sanity");
  }
}

void CodeBuffer::log_section_sizes(const char* name) {
  if (xtty != NULL) {
    ttyLocker ttyl;
    // log info about buffer usage
    xtty->print_cr("<blob name='%s' size='%d'>", name, _total_size);
    for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
      CodeSection* sect = code_section(n);
      if (!sect->is_allocated() || sect->is_empty())  continue;
      xtty->print_cr("<sect index='%d' size='" SIZE_FORMAT "' free='" SIZE_FORMAT "'/>",
                     n, sect->limit() - sect->start(), sect->limit() - sect->end());
    }
    xtty->print_cr("</blob>");
  }
}

#ifndef PRODUCT

void CodeSection::dump() {
  address ptr = start();
  for (csize_t step; ptr < end(); ptr += step) {
    step = end() - ptr;
    if (step > jintSize * 4)  step = jintSize * 4;
    tty->print(INTPTR_FORMAT ": ", p2i(ptr));
    while (step > 0) {
      tty->print(" " PTR32_FORMAT, *(jint*)ptr);
      ptr  += jintSize;
      step -= jintSize;   // consume the chunk so the line loop terminates
    }
    tty->cr();
  }
}


void CodeSection::decode() {
  Disassembler::decode(start(), end());
}


void CodeBuffer::block_comment(intptr_t offset, const char * comment) {
  _code_strings.add_comment(offset, comment);
}

const char* CodeBuffer::code_string(const char* str) {
  return _code_strings.add_string(str);
}

class CodeString: public CHeapObj<mtCode> {
 private:
  friend class CodeStrings;
  const char * _string;
  CodeString*  _next;
  intptr_t     _offset;

  ~CodeString() {
    assert(_next == NULL, "wrong interface for freeing list");
    os::free((void*)_string);
  }

  bool is_comment() const { return _offset >= 0; }

 public:
  CodeString(const char * string, intptr_t offset = -1)
    : _next(NULL), _offset(offset) {
    _string = os::strdup(string, mtCode);
  }

  const char * string() const { return _string; }
  intptr_t     offset() const { assert(_offset >= 0, "offset for non comment?"); return _offset; }
  CodeString*  next()   const { return _next; }

  void set_next(CodeString* next) { _next = next; }

  CodeString* first_comment() {
    if (is_comment()) {
      return this;
    } else {
      return next_comment();
    }
  }
  CodeString* next_comment() const {
    CodeString* s = _next;
    while (s != NULL && !s->is_comment()) {
      s = s->_next;
    }
    return s;
  }
};

CodeString* CodeStrings::find(intptr_t offset) const {
  CodeString* a = _strings->first_comment();
  while (a != NULL && a->offset() != offset) {
    a = a->next_comment();
  }
  return a;
}

// Convenience for add_comment.
CodeString* CodeStrings::find_last(intptr_t offset) const {
  CodeString* a = find(offset);
  if (a != NULL) {
    CodeString* c = NULL;
    while (((c = a->next_comment()) != NULL) && (c->offset() == offset)) {
      a = c;
    }
  }
  return a;
}

void CodeStrings::add_comment(intptr_t offset, const char * comment) {
  check_valid();
  CodeString* c      = new CodeString(comment, offset);
  CodeString* inspos = (_strings == NULL) ? NULL : find_last(offset);

  if (inspos) {
    // insert after already existing comments with same offset
    c->set_next(inspos->next());
    inspos->set_next(c);
  } else {
    // no comments with such offset, yet. Insert before anything else.
    c->set_next(_strings);
    _strings = c;
  }
}

void CodeStrings::assign(CodeStrings& other) {
  other.check_valid();
  assert(is_null(), "Cannot assign onto non-empty CodeStrings");
  _strings = other._strings;
#ifdef ASSERT
  _defunct = false;
#endif
  other.set_null_and_invalidate();
}

// Deep copy of CodeStrings for consistent memory management.
// Only used for actual disassembly so this is cheaper than reference counting
// for the "normal" fastdebug case.
void CodeStrings::copy(CodeStrings& other) {
  other.check_valid();
  check_valid();
  assert(is_null(), "Cannot copy onto non-empty CodeStrings");
  CodeString* n = other._strings;
  CodeString** ps = &_strings;
  while (n != NULL) {
    *ps = new CodeString(n->string(), n->offset());
    ps = &((*ps)->_next);
    n = n->next();
  }
}

const char* CodeStrings::_prefix = " ;; ";  // default: can be changed via set_prefix

void CodeStrings::print_block_comment(outputStream* stream, intptr_t offset) const {
  check_valid();
  if (_strings != NULL) {
    CodeString* c = find(offset);
    while (c && c->offset() == offset) {
      stream->bol();
      stream->print("%s", _prefix);
      // Don't interpret as format strings since it could contain %
      stream->print_raw_cr(c->string());
      c = c->next_comment();
    }
  }
}

// Also sets isNull()
void CodeStrings::free() {
  CodeString* n = _strings;
  while (n) {
    // unlink the node from the list saving a pointer to the next
    CodeString* p = n->next();
    n->set_next(NULL);
    delete n;
    n = p;
  }
  set_null_and_invalidate();
}

const char* CodeStrings::add_string(const char * string) {
  check_valid();
  CodeString* s = new CodeString(string);
  s->set_next(_strings);
  _strings = s;
  assert(s->string() != NULL, "should have a string");
  return s->string();
}

void CodeBuffer::decode() {
  ttyLocker ttyl;
  Disassembler::decode(decode_begin(), insts_end());
  _decode_begin = insts_end();
}


void CodeBuffer::skip_decode() {
  _decode_begin = insts_end();
}


void CodeBuffer::decode_all() {
  ttyLocker ttyl;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // dump contents of each section
    CodeSection* cs = code_section(n);
    tty->print_cr("! %s:", code_section_name(n));
    if (cs != consts())
      cs->decode();
    else
      cs->dump();
  }
}


void CodeSection::print(const char* name) {
  csize_t locs_size = locs_end() - locs_start();
  tty->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)%s",
                name, p2i(start()), p2i(end()), p2i(limit()), size(), capacity(),
                is_frozen()? " [frozen]": "");
" [frozen]": ""); 1231 tty->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d", 1232 name, p2i(locs_start()), p2i(locs_end()), p2i(locs_limit()), locs_size, locs_capacity(), locs_point_off()); 1233 if (PrintRelocations) { 1234 RelocIterator iter(this); 1235 iter.print(); 1236 } 1237 } 1238 1239 void CodeBuffer::print() { 1240 if (this == NULL) { 1241 tty->print_cr("NULL CodeBuffer pointer"); 1242 return; 1243 } 1244 1245 tty->print_cr("CodeBuffer:"); 1246 for (int n = 0; n < (int)SECT_LIMIT; n++) { 1247 // print each section 1248 CodeSection* cs = code_section(n); 1249 cs->print(code_section_name(n)); 1250 } 1251 } 1252 1253 #endif // PRODUCT