/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "compiler/disassembler.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/xmlstream.hpp"

// The structure of a CodeSection:
//
//    _start ->           +----------------+
//                        | machine code...|
//    _end ->             |----------------|
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _limit ->           |                |
//
//    _locs_start ->      +----------------+
//                        |reloc records...|
//                        |----------------|
//    _locs_end ->        |                |
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _locs_limit ->      |                |
// The _end (resp. _limit) pointer refers to the first
// unused (resp. unallocated) byte.

// The structure of the CodeBuffer while code is being accumulated:
//
//    _total_start ->    \
//    _insts._start ->    +----------------+
//                        |                |
//                        |     Code       |
//                        |                |
//    _stubs._start ->    |----------------|
//                        |                |
//                        |    Stubs       | (also handlers for deopt/exception)
//                        |                |
//    _consts._start ->   |----------------|
//                        |                |
//                        |   Constants    |
//                        |                |
//                        +----------------+
//    + _total_size ->    |                |
//
// When the code and relocations are copied to the code cache,
// the empty parts of each section are removed, and everything
// is copied into contiguous locations.

typedef CodeBuffer::csize_t csize_t;  // file-local definition

// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) {
  initialize_misc("static buffer");
  initialize(blob->content_begin(), blob->content_size());
  verify_section_allocation();
}

void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Compute maximal alignment.
  int align = _insts.alignment();
  // Always allow for empty slop around each section.
  int slop = (int) CodeSection::end_slop();

  assert(blob() == NULL, "only once");
  set_blob(BufferBlob::create(_name, code_size + (align+slop) * (SECT_LIMIT+1)));
  if (blob() == NULL) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
  initialize(_total_start, _total_size);

  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  pd_initialize();

  if (locs_size != 0) {
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  verify_section_allocation();
}
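
// For orientation, a typical client drives this class roughly as follows
// (schematic only; the real emission API lives in Assembler/MacroAssembler):
//
//   CodeBuffer cb("my_stub", code_size, locs_size);  // grabs a BufferBlob
//   MacroAssembler masm(&cb);                        // emits into cb.insts()
//   ... emit instructions, stubs, constants ...
//   // then the finished code is copied into its final CodeBlob via
//   // copy_relocations_to() / copy_code_to() below.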

CodeBuffer::~CodeBuffer() {
  verify_section_allocation();

  // If we allocate our code buffer from the CodeCache
  // via a BufferBlob, and it's not permanent, then
  // free the BufferBlob.
  // The rest of the memory will be freed when the ResourceObj
  // is released.
  for (CodeBuffer* cb = this; cb != NULL; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }

  // free any overflow storage
  delete _overflow_arena;

  // Claim is that stack allocation ensures resources are cleaned up.
  // This is resource clean up, let's hope that all were properly copied out.
  free_strings();

#ifdef ASSERT
  // Save allocation type to execute assert in ~ResourceObj()
  // which is called after this destructor.
  assert(_default_oop_recorder.allocated_on_stack(), "should be embedded object");
  ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type();
  Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
  ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
#endif
}

void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.freeze());  // force unused OR to be frozen
  _oop_recorder = r;
}

void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs())  cs->initialize_locs(1);
}

void CodeBuffer::freeze_section(CodeSection* cs) {
  CodeSection* next_cs = (cs == consts())? NULL: code_section(cs->index()+1);
  csize_t frozen_size = cs->size();
  if (next_cs != NULL) {
    frozen_size = next_cs->align_at_start(frozen_size);
  }
  address old_limit = cs->limit();
  address new_limit = cs->start() + frozen_size;
  relocInfo* old_locs_limit = cs->locs_limit();
  relocInfo* new_locs_limit = cs->locs_end();
  // Patch the limits.
  cs->_limit = new_limit;
  cs->_locs_limit = new_locs_limit;
  cs->_frozen = true;
  if (next_cs != NULL && !next_cs->is_allocated() && !next_cs->is_frozen()) {
    // Give remaining buffer space to the following section.
    next_cs->initialize(new_limit, old_limit - new_limit);
    next_cs->initialize_shared_locs(new_locs_limit,
                                    old_locs_limit - new_locs_limit);
  }
}
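
// Schematic example of the split performed by initialize_section_size():
// with _insts spanning [0x1000, 0x2000), a request for a 0x100-byte section
// with 0x20-byte start alignment, and end_slop() == 4 for illustration:
//
//   middle       = align_down(0x2000 - 0x100, 0x20) = 0x1f00
//   new section  = [0x1f00, 0x2000)
//   _insts.limit = 0x1f00 - 4 = 0x1efc
//
// (all addresses and the slop value are illustrative only).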

void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != NULL) {
    address start = blob->content_begin();
    address end   = blob->content_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
    _total_start    = badAddress;
    _consts._start  = _consts._end  = badAddress;
    _insts._start   = _insts._end   = badAddress;
    _stubs._start   = _stubs._end   = badAddress;
#endif //ASSERT
  }
}

void CodeBuffer::free_blob() {
  if (_blob != NULL) {
    BufferBlob::free(_blob);
    set_blob(NULL);
  }
}

const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
  return NULL;
#else //PRODUCT
  switch (n) {
  case SECT_CONSTS: return "consts";
  case SECT_INSTS:  return "insts";
  case SECT_STUBS:  return "stubs";
  default:          return NULL;
  }
#endif //PRODUCT
}

int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr))  return n;
  }
  return SECT_NONE;
}

int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}

address CodeBuffer::locator_address(int locator) const {
  if (locator < 0)  return NULL;
  address start = code_section(locator_sect(locator))->start();
  return start + locator_pos(locator);
}

bool CodeBuffer::is_backward_branch(Label& L) {
  return L.is_bound() && insts_end() <= locator_address(L.loc());
}

address CodeBuffer::decode_begin() {
  address begin = _insts.start();
  if (_decode_begin != NULL && _decode_begin > begin)
    begin = _decode_begin;
  return begin;
}


GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  if (_overflow_arena == NULL) {
    _overflow_arena = new (mtCode) Arena(mtCode);
  }
  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}


// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // Need to return a pc, doesn't matter what it is since it will be
    // replaced during resolution later.
    // Don't return NULL or badAddress, since branches shouldn't overflow.
    // Don't return base either because that could overflow displacements
    // for shorter branches.  It will get checked when bound.
    return branch_pc;
  }
}
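
// A locator packs a section-relative offset together with a section index
// (see the locator_pos/locator_sect accessors in codeBuffer.hpp).  For
// example, a Label bound 0x40 bytes into the insts section round-trips as
//
//   int loc = locator(0x40, SECT_INSTS);
//   assert(locator_pos(loc)  == 0x40,       "");
//   assert(locator_sect(loc) == SECT_INSTS, "");
//
// so that locator_address(loc) == insts.start() + 0x40.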

void CodeSection::relocate(address at, relocInfo::relocType rtype, int format, jint method_index) {
  RelocationHolder rh;
  switch (rtype) {
    case relocInfo::none: return;
    case relocInfo::opt_virtual_call_type: {
      rh = opt_virtual_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::static_call_type: {
      rh = static_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::virtual_call_type: {
      assert(method_index == 0, "resolved method overriding is not supported");
      rh = Relocation::spec_simple(rtype);
      break;
    }
    default: {
      rh = Relocation::spec_simple(rtype);
      break;
    }
  }
  relocate(at, rh, format);
}

void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  // Do not relocate in scratch buffers.
  if (scratch_emit()) { return; }
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none)  return;

  // The assertion below has been adjusted, to also work for
  // relocation for fixup.  Sometimes we want to put relocation
  // information for the next instruction, since it will be patched
  // with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // no space for relocation information provided => code cannot be
    // relocated.  Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none              ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = filler_relocInfo();
    offset -= filler_relocInfo().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}
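
// Typical (schematic) call sites for the two relocate() overloads above:
// an assembler back end records a relocation either by bare type,
//
//   code_section()->relocate(inst_mark(), relocInfo::runtime_call_type);
//
// or with a fully populated RelocationHolder,
//
//   code_section()->relocate(inst_mark(), oop_Relocation::spec(oop_index));
//
// The first form is routed through the switch/spec_simple() path above.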

void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == NULL, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start = locs_start;
  _locs_end   = locs_start;
  _locs_limit = locs_start + locs_capacity;
  _locs_own   = true;
}

void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == NULL, "do this before locs are allocated");
  // Internal invariant:  locs buf must be fully aligned.
  // See copy_relocations_to() below.
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end   = buf;
    _locs_limit = buf + length;
    _locs_own   = false;
  }
}

void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}

void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == NULL) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start = locs_start;
    _locs_end   = locs_start + old_count;
    _locs_limit = locs_start + new_capacity;
  }
}


/// Support for emitting the code to its final location.
/// The pattern is the same for all functions.
/// We iterate over all the sections, padding each to alignment.

csize_t CodeBuffer::total_content_size() const {
  csize_t size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    size_so_far = cs->align_at_start(size_so_far);
    size_so_far += cs->size();
  }
  return size_so_far;
}
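
// Example of the padding arithmetic above (sizes illustrative): with a
// 0x1234-byte section followed by one whose start alignment is 0x10, the
// second section's contribution begins at align_up(0x1234, 0x10) == 0x1240,
// so 0xc bytes of inter-section padding are included in the total.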

void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");

  {
    // not sure why this is here, but why not...
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = NULL;
  CodeSection*       prev_dest_cs = NULL;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    csize_t csize = cs->size();

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (padding != 0) {
        buf_offset += padding;
        assert(prev_dest_cs != NULL, "sanity");
        prev_dest_cs->_limit += padding;
      }
#ifdef ASSERT
      if (prev_cs != NULL && prev_cs->is_frozen() && n < (SECT_LIMIT - 1)) {
        // Make sure the ends still match up.
        // This is important because a branch in a frozen section
        // might target code in a following section, via a Label,
        // and without a relocation record.  See Label::patch_instructions.
        address dest_start = buf+buf_offset;
        csize_t start2start = cs->start() - prev_cs->start();
        csize_t dest_start2start = dest_start - prev_dest_cs->start();
        assert(start2start == dest_start2start, "cannot stretch frozen sect");
      }
#endif //ASSERT
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = NULL);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  dest->verify_section_allocation();
}

// Append an oop reference that keeps the class alive.
static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
  oop cl = k->klass_holder();
  if (cl != NULL && !oops->contains(cl)) {
    oops->append(cl);
  }
}
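
// The walk below reduces each recorded Metadata* to the Klass whose class
// loader must be kept alive, schematically:
//
//   MethodData* -> Method* -> method_holder() Klass -> klass_holder() oop
//
// and hands the resulting oops to the OopRecorder, so that the nmethod
// keeps the class loaders of all referenced classes reachable.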

void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
  NoSafepointVerifier nsv;

  GrowableArray<oop> oops;

  // Make sure that immediate metadata records something in the OopRecorder
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    RelocIterator iter(cs);
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* md = iter.metadata_reloc();
        if (md->metadata_is_immediate()) {
          Metadata* m = md->metadata_value();
          if (oop_recorder()->is_real(m)) {
            if (m->is_methodData()) {
              m = ((MethodData*)m)->method();
            }
            if (m->is_method()) {
              m = ((Method*)m)->method_holder();
            }
            if (m->is_klass()) {
              append_oop_references(&oops, (Klass*)m);
            } else {
              // XXX This will currently occur for MDO which don't
              // have a backpointer.  This has to be fixed later.
              m->print();
              ShouldNotReachHere();
            }
          }
        }
      }
    }
  }

  if (!oop_recorder()->is_unused()) {
    for (int i = 0; i < oop_recorder()->metadata_count(); i++) {
      Metadata* m = oop_recorder()->metadata_at(i);
      if (oop_recorder()->is_real(m)) {
        if (m->is_methodData()) {
          m = ((MethodData*)m)->method();
        }
        if (m->is_method()) {
          m = ((Method*)m)->method_holder();
        }
        if (m->is_klass()) {
          append_oop_references(&oops, (Klass*)m);
        } else {
          m->print();
          ShouldNotReachHere();
        }
      }
    }
  }

  // Add the class loader of Method* for the nmethod itself
  append_oop_references(&oops, mh->method_holder());

  // Add any oops that we've found
  Thread* thread = Thread::current();
  for (int i = 0; i < oops.length(); i++) {
    oop_recorder()->find_index((jobject)thread->handle_area()->allocate_handle(oops.at(i)));
  }
}


csize_t CodeBuffer::total_offset_of(const CodeSection* cs) const {
  csize_t size_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      size_so_far = cur_cs->align_at_start(size_so_far);
    }
    if (cur_cs->index() == cs->index()) {
      return size_so_far;
    }
    size_so_far += cur_cs->size();
  }
  ShouldNotReachHere();
  return -1;
}

csize_t CodeBuffer::total_relocation_size() const {
  csize_t total = copy_relocations_to(NULL);  // dry run only
  return (csize_t) align_up(total, HeapWordSize);
}
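
// The per-section relocation streams are concatenated into a single stream
// in the destination.  Schematically, with all three sections non-empty:
//
//   [consts relocs][filler][insts relocs][filler][stubs relocs][padding]
//
// where type-'none' filler records advance the running address point across
// section boundaries, and the tail padding rounds the stream up to a
// HeapWordSize multiple.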

csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const {
  csize_t buf_offset = 0;
  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;

  assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
  assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");

  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    if (only_inst && (n != (int)SECT_INSTS)) {
      // Need only relocation info for code.
      continue;
    }
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = filler_relocInfo();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != NULL) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point;  // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != NULL && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != NULL) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(only_inst || code_end_so_far == total_content_size(), "sanity");

  return buf_offset;
}

csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = NULL;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;

  if (dest != NULL) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
  }
  // if dest == NULL, this is just the sizing pass
  //
  buf_offset = copy_relocations_to(buf, buf_limit, false);

  return buf_offset;
}

void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print();
  }
#endif //PRODUCT

  CodeBuffer dest(dest_blob);
  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
  this->compute_final_layout(&dest);

  // Set beginning of constant table before relocating.
  dest_blob->set_ctable_begin(dest.consts()->start());

  relocate_code_to(&dest);

  // transfer strings and comments from buffer to blob
  dest_blob->set_strings(_code_strings);

  // Done moving code bytes; were they the right size?
  assert((int)align_up(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
}
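
// Schematic final-assembly sequence, as seen from a CodeBlob constructor
// (names here are illustrative; see the CodeBlob/nmethod constructors for
// the real call sites):
//
//   int content_size = cb->total_content_size();
//   int locs_size    = cb->total_relocation_size();
//   ... allocate a blob with room for both ...
//   cb->copy_relocations_to(blob);
//   cb->copy_code_to(blob);   // fixes up embedded addresses on the way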

// Move all my code into another code buffer.  Consult applicable
// relocs to repair embedded addresses.  The layout in the destination
// CodeBuffer is different from that of the source CodeBuffer: the
// destination CodeBuffer gets the final layout (consts, insts, stubs
// in order of ascending address).
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  address dest_end = dest->_total_start + dest->_total_size;
  address dest_filled = NULL;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == NULL) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }
    // Keep track of the highest filled address
    dest_filled = MAX2(dest_filled, dest_cs->end() + dest_cs->remaining());

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);
  }

  // Do relocation after all sections are copied.
  // This is necessary if the code uses constants in stubs, which are
  // relocated when the corresponding instruction in the code (e.g., a
  // call) is relocated.  Stubs are placed behind the main code
  // section, so that section has to be copied before relocating.
  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }

  if (dest->blob() == NULL && dest_filled != NULL) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
                        Assembler::code_fill_byte());
  }
}
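
// Example of a repair performed by fix_relocation_after_move(): on x86 a
// pc-relative call emitted at src_pc with 32-bit displacement disp targets
// src_pc + 5 + disp.  After the move the same bytes sit at dest_pc, so the
// displacement is rewritten to keep the (possibly also moved) target.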

csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous section,
      // even if it's empty (e.g. consts section can be empty).
      // Cf. compute_final_layout.
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(n - 1 >= SECT_FIRST, "sanity");
        new_capacity[n - 1] += padding;
      }
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;  // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}
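
// Worked example of the sizing above (all sizes illustrative): an insts
// section currently holding 100*K that is not the section being expanded
// grows by exp = 4*K + ((100*K - 4*K) >> 2) = 28*K plus end_slop(), i.e.
// the default 100% growth is damped to roughly 25% for large insts.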

void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print();
  }

  if (StressCodeBuffers && blob() != NULL) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  {
    if (blob() == NULL)  return;  // caller must check for blob == NULL
    for (int n = 0; n < (int)SECT_LIMIT; n++) {
      guarantee(!code_section(n)->is_frozen(), "resizing not allowed when frozen");
    }
  }

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  memset(new_capacity, 0, sizeof(csize_t) * SECT_LIMIT);
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  if (cb.blob() == NULL) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = NULL);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
  for (int n = (int)SECT_LIMIT-1; n >= SECT_FIRST; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n != SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == NULL) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Needs to be initialized when calling fix_relocation_after_move.
  cb.blob()->set_ctable_begin(cb.consts()->start());

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(NULL);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal));

  _decode_begin = NULL;  // sanity

  // Make certain that the new sections are all snugly inside the new blob.
  verify_section_allocation();

#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print();
  }
#endif //PRODUCT
}

void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == NULL, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  _overflow_arena = cb->_overflow_arena;
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}
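
// Note on the _before_expand chain built by expand(): each expansion parks
// the old, undersized buffer on a list so that internal addresses created
// before the expansion can still be interpreted, and so the old blob is
// freed in ~CodeBuffer().  Schematically, after two expansions:
//
//   this->_before_expand -> bxp2 -> bxp1 -> NULL   // newest data in 'this'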

void CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == badAddress)  return;  // smashed by set_blob(NULL)
  address tend   = tstart + _total_size;
  if (_blob != NULL) {
    guarantee(tstart >= _blob->content_begin(), "sanity");
    guarantee(tend   <= _blob->content_end(),   "sanity");
  }
  // Verify disjointness.
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated() || sect->is_empty())  continue;
    guarantee((intptr_t)sect->start() % sect->alignment() == 0
           || sect->is_empty() || _blob == NULL,
           "start is aligned");
    for (int m = (int) SECT_FIRST; m < (int) SECT_LIMIT; m++) {
      CodeSection* other = code_section(m);
      if (!other->is_allocated() || other == sect)  continue;
      guarantee(!other->contains(sect->start()    ), "sanity");
      // limit is an exclusive address and can be the start of another
      // section.
      guarantee(!other->contains(sect->limit() - 1), "sanity");
    }
    guarantee(sect->end() <= tend, "sanity");
    guarantee(sect->end() <= sect->limit(), "sanity");
  }
}

void CodeBuffer::log_section_sizes(const char* name) {
  if (xtty != NULL) {
    ttyLocker ttyl;
    // log info about buffer usage
    xtty->print_cr("<blob name='%s' size='%d'>", name, _total_size);
    for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
      CodeSection* sect = code_section(n);
      if (!sect->is_allocated() || sect->is_empty())  continue;
      xtty->print_cr("<sect index='%d' size='" SIZE_FORMAT "' free='" SIZE_FORMAT "'/>",
                     n, sect->limit() - sect->start(), sect->limit() - sect->end());
    }
    xtty->print_cr("</blob>");
  }
}

#ifndef PRODUCT

void CodeSection::dump() {
  address ptr = start();
  for (csize_t step; ptr < end(); ptr += step) {
    step = end() - ptr;
    if (step > jintSize * 4)  step = jintSize * 4;
    tty->print(INTPTR_FORMAT ": ", p2i(ptr));
    while (step > 0) {
      tty->print(" " PTR32_FORMAT, *(jint*)ptr);
      ptr += jintSize;
    }
    tty->cr();
  }
}


void CodeSection::decode() {
  Disassembler::decode(start(), end());
}


void CodeBuffer::block_comment(intptr_t offset, const char * comment) {
  _code_strings.add_comment(offset, comment);
}

const char* CodeBuffer::code_string(const char* str) {
  return _code_strings.add_string(str);
}

class CodeString: public CHeapObj<mtCode> {
 private:
  friend class CodeStrings;
  const char * _string;
  CodeString*  _next;
  intptr_t     _offset;

  ~CodeString() {
    assert(_next == NULL, "wrong interface for freeing list");
    os::free((void*)_string);
  }

  bool is_comment() const { return _offset >= 0; }

 public:
  CodeString(const char * string, intptr_t offset = -1)
    : _next(NULL), _offset(offset) {
    _string = os::strdup(string, mtCode);
  }

  const char * string() const { return _string; }
  intptr_t     offset() const {
    assert(_offset >= 0, "offset for non comment?");
    return offset_raw();
  }
  intptr_t     offset_raw() const { return _offset; }
  CodeString*  next()       const { return _next; }

  void set_next(CodeString* next) { _next = next; }

  CodeString* first_comment() {
    if (is_comment()) {
      return this;
    } else {
      return next_comment();
    }
  }
  CodeString* next_comment() const {
    CodeString* s = _next;
    while (s != NULL && !s->is_comment()) {
      s = s->_next;
    }
    return s;
  }
};

CodeString* CodeStrings::find(intptr_t offset) const {
  CodeString* a = _strings->first_comment();
  while (a != NULL && a->offset() != offset) {
    a = a->next_comment();
  }
  return a;
}

// Convenience for add_comment.
CodeString* CodeStrings::find_last(intptr_t offset) const {
  CodeString* a = find(offset);
  if (a != NULL) {
    CodeString* c = NULL;
    while (((c = a->next_comment()) != NULL) && (c->offset() == offset)) {
      a = c;
    }
  }
  return a;
}
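
// Example for add_comment() below: add_comment(8, "a"); add_comment(8, "b");
// add_comment(0, "c"); builds the list  c -> a -> b.  A new offset is pushed
// at the head, while a repeated offset is linked in after the last existing
// comment at that offset, so comments at one offset print in insertion order.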

void CodeStrings::add_comment(intptr_t offset, const char * comment) {
  check_valid();
  CodeString* c      = new CodeString(comment, offset);
  CodeString* inspos = (_strings == NULL) ? NULL : find_last(offset);

  if (inspos) {
    // insert after already existing comments with same offset
    c->set_next(inspos->next());
    inspos->set_next(c);
  } else {
    // no comments with such offset, yet. Insert before anything else.
    c->set_next(_strings);
    _strings = c;
  }
}

void CodeStrings::assign(CodeStrings& other) {
  other.check_valid();
  assert(is_null(), "Cannot assign onto non-empty CodeStrings");
  _strings = other._strings;
#ifdef ASSERT
  _defunct = false;
#endif
  other.set_null_and_invalidate();
}

// Deep copy of CodeStrings for consistent memory management.
// Only used for actual disassembly so this is cheaper than reference counting
// for the "normal" fastdebug case.
void CodeStrings::copy(CodeStrings& other) {
  other.check_valid();
  check_valid();
  assert(is_null(), "Cannot copy onto non-empty CodeStrings");
  CodeString* n = other._strings;
  CodeString** ps = &_strings;
  while (n != NULL) {
    *ps = new CodeString(n->string(), n->offset_raw());
    ps = &((*ps)->_next);
    n = n->next();
  }
}

const char* CodeStrings::_prefix = " ;; ";  // default: can be changed via set_prefix

void CodeStrings::print_block_comment(outputStream* stream, intptr_t offset) const {
  check_valid();
  if (_strings != NULL) {
    CodeString* c = find(offset);
    while (c && c->offset() == offset) {
      stream->bol();
      stream->print("%s", _prefix);
      // Don't interpret as format strings since it could contain '%'.
      stream->print_raw_cr(c->string());
      c = c->next_comment();
    }
  }
}

// Also sets isNull()
void CodeStrings::free() {
  CodeString* n = _strings;
  while (n) {
    // unlink the node from the list saving a pointer to the next
    CodeString* p = n->next();
    n->set_next(NULL);
    delete n;
    n = p;
  }
  set_null_and_invalidate();
}

const char* CodeStrings::add_string(const char * string) {
  check_valid();
  CodeString* s = new CodeString(string);
  s->set_next(_strings);
  _strings = s;
  assert(s->string() != NULL, "should have a string");
  return s->string();
}

void CodeBuffer::decode() {
  ttyLocker ttyl;
  Disassembler::decode(decode_begin(), insts_end());
  _decode_begin = insts_end();
}


void CodeBuffer::skip_decode() {
  _decode_begin = insts_end();
}


void CodeBuffer::decode_all() {
  ttyLocker ttyl;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // dump contents of each section
    CodeSection* cs = code_section(n);
    tty->print_cr("! %s:", code_section_name(n));
    if (cs != consts())
      cs->decode();
    else
      cs->dump();
  }
}
" [frozen]": ""); 1234 tty->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d", 1235 name, p2i(locs_start()), p2i(locs_end()), p2i(locs_limit()), locs_size, locs_capacity(), locs_point_off()); 1236 if (PrintRelocations) { 1237 RelocIterator iter(this); 1238 iter.print(); 1239 } 1240 } 1241 1242 void CodeBuffer::print() { 1243 if (this == NULL) { 1244 tty->print_cr("NULL CodeBuffer pointer"); 1245 return; 1246 } 1247 1248 tty->print_cr("CodeBuffer:"); 1249 for (int n = 0; n < (int)SECT_LIMIT; n++) { 1250 // print each section 1251 CodeSection* cs = code_section(n); 1252 cs->print(code_section_name(n)); 1253 } 1254 } 1255 1256 #endif // PRODUCT