1 /* 2 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/signature.hpp"
#ifdef COMPILER1
#include "c1/c1_Defs.hpp"
#endif
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif
#ifdef SPARC
#include "vmreg_sparc.inline.hpp"
#endif

// OopMapStream
//
// Iterates over the compressed OopMapValue entries of an OopMap (mutable or
// immutable form), restricted to the entry types selected by oop_types_mask.

// Stream over a still-mutable OopMap: decode entries back out of the map's
// compressed write-stream buffer.
OopMapStream::OopMapStream(OopMap* oop_map, int oop_types_mask) {
  _stream = new CompressedReadStream(oop_map->write_stream()->buffer());
  _mask = oop_types_mask;
  _size = oop_map->omv_count();
  _position = 0;
  // No entry decoded yet; the first is_done()/next() triggers find_next().
  _valid_omv = false;
}

// Stream over an ImmutableOopMap: decode entries from its immutable data area.
OopMapStream::OopMapStream(const ImmutableOopMap* oop_map, int oop_types_mask) {
  _stream = new CompressedReadStream(oop_map->data_addr());
  _mask = oop_types_mask;
  _size = oop_map->count();
  _position = 0;
  _valid_omv = false;
}


// Advance to the next entry whose type bit is set in _mask.
// Leaves _valid_omv false once the stream is exhausted.
void OopMapStream::find_next() {
  while(_position++ < _size) {
    _omv.read_from(_stream);
    if(((int)_omv.type() & _mask) > 0) {
      _valid_omv = true;
      return;
    }
  }
  _valid_omv = false;
}


// OopMap

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(32));
  set_omv_count(0);

#ifdef ASSERT
  // Debug-only shadow array recording which locations have been named,
  // so set_xxx() can assert against duplicate insertions.
  _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
  _locs_used   = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif
}


OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy
  // of the source OopMap.
  set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
  set_omv_count(0);
  set_offset(source->offset());

#ifdef ASSERT
  _locs_length = source->_locs_length;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif

  // We need to copy the entries too.
  for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    omv.write_on(write_stream());
    increment_count();
  }
}


OopMap* OopMap::deep_copy() {
  return new OopMap(_deep_copy_token, this);
}

// Copy the raw compressed entry bytes to addr (used when building the
// immutable form; caller must have reserved data_size() bytes).
void OopMap::copy_data_to(address addr) const {
  memcpy(addr, write_stream()->buffer(), write_stream()->position());
}

// Heap bytes needed to store this OopMap plus its compressed entry data,
// rounded up to pointer alignment.
int OopMap::heap_size() const {
  int size = sizeof(OopMap);
  int align = sizeof(void *) - 1;
  size += write_stream()->position();
  // Align to a reasonable ending point
  size = ((size+align) & ~align);
  return size;
}

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
135 void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) { 136 137 assert(reg->value() < _locs_length, "too big reg value for stack size"); 138 assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" ); 139 debug_only( _locs_used[reg->value()] = x; ) 140 141 OopMapValue o(reg, x); 142 143 if(x == OopMapValue::callee_saved_value) { 144 // This can never be a stack location, so we don't need to transform it. 145 assert(optional->is_reg(), "Trying to callee save a stack location"); 146 o.set_content_reg(optional); 147 } else if(x == OopMapValue::derived_oop_value) { 148 o.set_content_reg(optional); 149 } 150 151 o.write_on(write_stream()); 152 increment_count(); 153 } 154 155 156 void OopMap::set_oop(VMReg reg) { 157 set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad()); 158 } 159 160 161 void OopMap::set_value(VMReg reg) { 162 // At this time, we only need value entries in our OopMap when ZapDeadCompiledLocals is active. 163 if (ZapDeadCompiledLocals) 164 set_xxx(reg, OopMapValue::value_value, VMRegImpl::Bad()); 165 } 166 167 168 void OopMap::set_narrowoop(VMReg reg) { 169 set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad()); 170 } 171 172 173 void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) { 174 set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register); 175 } 176 177 178 void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) { 179 if( reg == derived_from_local_register ) { 180 // Actually an oop, derived shares storage with base, 181 set_oop(reg); 182 } else { 183 set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register); 184 } 185 } 186 187 // OopMapSet 188 189 OopMapSet::OopMapSet() { 190 set_om_size(MinOopMapAllocation); 191 set_om_count(0); 192 OopMap** temp = NEW_RESOURCE_ARRAY(OopMap*, om_size()); 193 set_om_data(temp); 194 } 195 196 197 void OopMapSet::grow_om_data() { 198 int new_size = om_size() * 2; 199 OopMap** new_data = 
NEW_RESOURCE_ARRAY(OopMap*, new_size); 200 memcpy(new_data,om_data(),om_size() * sizeof(OopMap*)); 201 set_om_size(new_size); 202 set_om_data(new_data); 203 } 204 205 206 void OopMapSet::add_gc_map(int pc_offset, OopMap *map ) { 207 assert(om_size() != -1,"Cannot grow a fixed OopMapSet"); 208 209 if(om_count() >= om_size()) { 210 grow_om_data(); 211 } 212 map->set_offset(pc_offset); 213 214 #ifdef ASSERT 215 if(om_count() > 0) { 216 OopMap* last = at(om_count()-1); 217 if (last->offset() == map->offset() ) { 218 fatal("OopMap inserted twice"); 219 } 220 if(last->offset() > map->offset()) { 221 tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d", 222 om_count(),last->offset(),om_count()+1,map->offset()); 223 } 224 } 225 #endif // ASSERT 226 227 set(om_count(),map); 228 increment_count(); 229 } 230 231 232 int OopMapSet::heap_size() const { 233 // The space we use 234 int size = sizeof(OopMap); 235 int align = sizeof(void *) - 1; 236 size = ((size+align) & ~align); 237 size += om_count() * sizeof(OopMap*); 238 239 // Now add in the space needed for the indivdiual OopMaps 240 for(int i=0; i < om_count(); i++) { 241 size += at(i)->heap_size(); 242 } 243 // We don't need to align this, it will be naturally pointer aligned 244 return size; 245 } 246 247 248 OopMap* OopMapSet::singular_oop_map() { 249 guarantee(om_count() == 1, "Make sure we only have a single gc point"); 250 return at(0); 251 } 252 253 254 OopMap* OopMapSet::find_map_at_offset(int pc_offset) const { 255 int i, len = om_count(); 256 assert( len > 0, "must have pointer maps" ); 257 258 // Scan through oopmaps. Stop when current offset is either equal or greater 259 // than the one we are looking for. 
260 for( i = 0; i < len; i++) { 261 if( at(i)->offset() >= pc_offset ) 262 break; 263 } 264 265 assert( i < len, "oopmap not found" ); 266 267 OopMap* m = at(i); 268 assert( m->offset() == pc_offset, "oopmap not found" ); 269 return m; 270 } 271 272 class DoNothingClosure: public OopClosure { 273 public: 274 void do_oop(oop* p) {} 275 void do_oop(narrowOop* p) {} 276 }; 277 static DoNothingClosure do_nothing; 278 279 static void add_derived_oop(oop* base, oop* derived) { 280 #ifndef TIERED 281 COMPILER1_PRESENT(ShouldNotReachHere();) 282 #if INCLUDE_JVMCI 283 if (UseJVMCICompiler) { 284 ShouldNotReachHere(); 285 } 286 #endif 287 #endif // TIERED 288 #if defined(COMPILER2) || INCLUDE_JVMCI 289 DerivedPointerTable::add(derived, base); 290 #endif // COMPILER2 || JVMCI 291 } 292 293 294 #ifndef PRODUCT 295 static void trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) { 296 // Print oopmap and regmap 297 tty->print_cr("------ "); 298 CodeBlob* cb = fr->cb(); 299 const ImmutableOopMapSet* maps = cb->oop_maps(); 300 const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc()); 301 map->print(); 302 if( cb->is_nmethod() ) { 303 nmethod* nm = (nmethod*)cb; 304 // native wrappers have no scope data, it is implied 305 if (nm->is_native_method()) { 306 tty->print("bci: 0 (native)"); 307 } else { 308 ScopeDesc* scope = nm->scope_desc_at(fr->pc()); 309 tty->print("bci: %d ",scope->bci()); 310 } 311 } 312 tty->cr(); 313 fr->print_on(tty); 314 tty->print(" "); 315 cb->print_value_on(tty); tty->cr(); 316 reg_map->print(); 317 tty->print_cr("------ "); 318 319 } 320 #endif // PRODUCT 321 322 void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f) { 323 // add derived oops to a table 324 all_do(fr, reg_map, f, add_derived_oop, &do_nothing); 325 } 326 327 328 void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map, 329 OopClosure* oop_fn, void derived_oop_fn(oop*, oop*), 330 OopClosure* value_fn) { 331 CodeBlob* cb = 
fr->cb(); 332 assert(cb != NULL, "no codeblob"); 333 334 NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);) 335 336 const ImmutableOopMapSet* maps = cb->oop_maps(); 337 const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc()); 338 assert(map != NULL, "no ptr map found"); 339 340 // handle derived pointers first (otherwise base pointer may be 341 // changed before derived pointer offset has been collected) 342 OopMapValue omv; 343 { 344 OopMapStream oms(map,OopMapValue::derived_oop_value); 345 if (!oms.is_done()) { 346 #ifndef TIERED 347 COMPILER1_PRESENT(ShouldNotReachHere();) 348 #if INCLUDE_JVMCI 349 if (UseJVMCICompiler) { 350 ShouldNotReachHere(); 351 } 352 #endif 353 #endif // !TIERED 354 // Protect the operation on the derived pointers. This 355 // protects the addition of derived pointers to the shared 356 // derived pointer table in DerivedPointerTable::add(). 357 MutexLockerEx x(DerivedPointerTableGC_lock, Mutex::_no_safepoint_check_flag); 358 do { 359 omv = oms.current(); 360 oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map); 361 if ( loc != NULL ) { 362 oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map); 363 oop *derived_loc = loc; 364 oop val = *base_loc; 365 if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) { 366 // Ignore NULL oops and decoded NULL narrow oops which 367 // equal to Universe::narrow_oop_base when a narrow oop 368 // implicit null check is used in compiled code. 369 // The narrow_oop_base could be NULL or be the address 370 // of the page below heap depending on compressed oops mode. 
371 } else 372 derived_oop_fn(base_loc, derived_loc); 373 } 374 oms.next(); 375 } while (!oms.is_done()); 376 } 377 } 378 379 // We want coop, value and oop oop_types 380 int mask = OopMapValue::oop_value | OopMapValue::value_value | OopMapValue::narrowoop_value; 381 { 382 for (OopMapStream oms(map,mask); !oms.is_done(); oms.next()) { 383 omv = oms.current(); 384 oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map); 385 if ( loc != NULL ) { 386 if ( omv.type() == OopMapValue::oop_value ) { 387 oop val = *loc; 388 if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) { 389 // Ignore NULL oops and decoded NULL narrow oops which 390 // equal to Universe::narrow_oop_base when a narrow oop 391 // implicit null check is used in compiled code. 392 // The narrow_oop_base could be NULL or be the address 393 // of the page below heap depending on compressed oops mode. 394 continue; 395 } 396 #ifdef ASSERT 397 if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) || 398 !Universe::heap()->is_in_or_null(*loc)) { 399 tty->print_cr("# Found non oop pointer. Dumping state at failure"); 400 // try to dump out some helpful debugging information 401 trace_codeblob_maps(fr, reg_map); 402 omv.print(); 403 tty->print_cr("register r"); 404 omv.reg()->print(); 405 tty->print_cr("loc = %p *loc = %p\n", loc, (address)*loc); 406 // do the real assert. 
407 assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer"); 408 } 409 #endif // ASSERT 410 oop_fn->do_oop(loc); 411 } else if ( omv.type() == OopMapValue::value_value ) { 412 assert((*loc) == (oop)NULL || !Universe::is_narrow_oop_base(*loc), 413 "found invalid value pointer"); 414 value_fn->do_oop(loc); 415 } else if ( omv.type() == OopMapValue::narrowoop_value ) { 416 narrowOop *nl = (narrowOop*)loc; 417 #ifndef VM_LITTLE_ENDIAN 418 VMReg vmReg = omv.reg(); 419 // Don't do this on SPARC float registers as they can be individually addressed 420 if (!vmReg->is_stack() SPARC_ONLY(&& !vmReg->is_FloatRegister())) { 421 // compressed oops in registers only take up 4 bytes of an 422 // 8 byte register but they are in the wrong part of the 423 // word so adjust loc to point at the right place. 424 nl = (narrowOop*)((address)nl + 4); 425 } 426 #endif 427 oop_fn->do_oop(nl); 428 } 429 } 430 } 431 } 432 } 433 434 435 // Update callee-saved register info for the following frame 436 void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) { 437 ResourceMark rm; 438 CodeBlob* cb = fr->cb(); 439 assert(cb != NULL, "no codeblob"); 440 441 // Any reg might be saved by a safepoint handler (see generate_handler_blob). 442 assert( reg_map->_update_for_id == NULL || fr->is_older(reg_map->_update_for_id), 443 "already updated this map; do not 'update' it twice!" 
); 444 debug_only(reg_map->_update_for_id = fr->id()); 445 446 // Check if caller must update oop argument 447 assert((reg_map->include_argument_oops() || 448 !cb->caller_must_gc_arguments(reg_map->thread())), 449 "include_argument_oops should already be set"); 450 451 // Scan through oopmap and find location of all callee-saved registers 452 // (we do not do update in place, since info could be overwritten) 453 454 address pc = fr->pc(); 455 const ImmutableOopMap* map = cb->oop_map_for_return_address(pc); 456 assert(map != NULL, "no ptr map found"); 457 DEBUG_ONLY(int nof_callee = 0;) 458 459 for (OopMapStream oms(map, OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) { 460 OopMapValue omv = oms.current(); 461 VMReg reg = omv.content_reg(); 462 oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map); 463 reg_map->set_location(reg, (address) loc); 464 DEBUG_ONLY(nof_callee++;) 465 } 466 467 // Check that runtime stubs save all callee-saved registers 468 #ifdef COMPILER2 469 assert(cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() || 470 (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT), 471 "must save all"); 472 #endif // COMPILER2 473 } 474 475 //============================================================================= 476 // Non-Product code 477 478 #ifndef PRODUCT 479 480 bool ImmutableOopMap::has_derived_pointer() const { 481 #ifndef TIERED 482 COMPILER1_PRESENT(return false); 483 #if INCLUDE_JVMCI 484 if (UseJVMCICompiler) { 485 return false; 486 } 487 #endif 488 #endif // !TIERED 489 #if defined(COMPILER2) || INCLUDE_JVMCI 490 OopMapStream oms(this,OopMapValue::derived_oop_value); 491 return oms.is_done(); 492 #else 493 return false; 494 #endif // COMPILER2 || JVMCI 495 } 496 497 #endif //PRODUCT 498 499 // Printing code is present in product build for -XX:+PrintAssembly. 
// Print a human-readable tag for an OopMapValue entry; 'optional' is the
// second register of callee-saved / derived entries.
static
void print_register_type(OopMapValue::oop_types x, VMReg optional,
                         outputStream* st) {
  switch( x ) {
  case OopMapValue::oop_value:
    st->print("Oop");
    break;
  case OopMapValue::value_value:
    st->print("Value");
    break;
  case OopMapValue::narrowoop_value:
    st->print("NarrowOop");
    break;
  case OopMapValue::callee_saved_value:
    st->print("Callers_");
    optional->print_on(st);
    break;
  case OopMapValue::derived_oop_value:
    st->print("Derived_oop_");
    optional->print_on(st);
    break;
  default:
    ShouldNotReachHere();
  }
}

void OopMapValue::print_on(outputStream* st) const {
  reg()->print_on(st);
  st->print("=");
  print_register_type(type(),content_reg(),st);
  st->print(" ");
}

void ImmutableOopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("ImmutableOopMap{");
  for(OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  st->print("}");
}

void OopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("OopMap{");
  // cast away const: OopMapStream's mutable-map constructor takes OopMap*
  for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  st->print("off=%d}", (int) offset());
}

void ImmutableOopMapSet::print_on(outputStream* st) const {
  const ImmutableOopMap* last = NULL;
  for (int i = 0; i < _count; ++i) {
    const ImmutableOopMapPair* pair = pair_at(i);
    const ImmutableOopMap* map = pair->get_from(this);
    // Several pc offsets may share one map; print the map once, then
    // list all pc offsets that reference it.
    if (map != last) {
      st->cr();
      map->print_on(st);
      st->print("pc offsets: ");
    }
    last = map;
    st->print("%d ", pair->pc_offset());
  }
}

void OopMapSet::print_on(outputStream* st) const {
  int i, len = om_count();

  st->print_cr("OopMapSet contains %d OopMaps\n",len);

  for( i = 0; i < len; i++) {
    OopMap* m = at(i);
    st->print_cr("#%d ",i);
    m->print_on(st);
    st->cr();
  }
}

// Structural equality: same entry count and identical compressed entry bytes.
// Note: the pc offset is NOT compared.
bool OopMap::equals(const OopMap* other) const {
  if (other->_omv_count != _omv_count) {
    return false;
  }
  if (other->write_stream()->position() != write_stream()->position()) {
    return false;
  }
  if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
    return false;
  }
  return true;
}

// Find the map for exactly pc_offset (pairs are sorted by pc offset).
// In debug builds an inexact match asserts; NOTE(review): if no pair has
// pc_offset >= the argument, 'last' stays NULL and the product build would
// dereference it — callers are expected to pass a valid GC-point offset.
const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
  ImmutableOopMapPair* pairs = get_pairs();
  ImmutableOopMapPair* last = NULL;

  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      last = &pairs[i];
      break;
    }
  }

  assert(last->pc_offset() == pc_offset, "oopmap not found");
  return last->get_from(this);
}

const ImmutableOopMap* ImmutableOopMapPair::get_from(const ImmutableOopMapSet* set) const {
  return set->oopmap_at_offset(_oopmap_offset);
}

// Placement-constructed over pre-reserved memory: the compressed entry
// bytes are copied immediately after the ImmutableOopMap header.
ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap) : _count(oopmap->count()) {
  address addr = data_addr();
  oopmap->copy_data_to(addr);
}

#ifdef ASSERT
// Size of this map including its entry data, computed by walking the
// stream to its end (debug-only; used for bounds checks in verify()).
int ImmutableOopMap::nr_of_bytes() const {
  OopMapStream oms(this);

  while (!oms.is_done()) {
    oms.next();
  }
  return sizeof(ImmutableOopMap) + oms.stream_position();
}
#endif

ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _new_set(NULL), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}

// Bytes needed for one immutable map (header + entry data), 8-byte aligned.
int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_size_up(sizeof(ImmutableOopMap) + map->data_size(), 8);
}

// First pass: compute the total allocation size and, as a side effect,
// fill _mapping[] with the placement decision (new / duplicate / empty)
// and target offset for every map in the set.
int ImmutableOopMapBuilder::heap_size() {
  int base = sizeof(ImmutableOopMapSet);
  base = align_size_up(base, 8);

  // all of ours pc / offset pairs
  int pairs = _set->size() * sizeof(ImmutableOopMapPair);
  pairs = align_size_up(pairs, 8);

  for (int i = 0; i < _set->size(); ++i) {
    int size = 0;
    OopMap* map = _set->at(i);

    if (is_empty(map)) {
      /* only keep a single empty map in the set */
      if (has_empty()) {
        _mapping[i].set(Mapping::OOPMAP_EMPTY, _empty_offset, 0, map, _empty);
      } else {
        _empty_offset = _offset;
        _empty = map;
        size = size_for(map);
        _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      }
    } else if (is_last_duplicate(map)) {
      /* if this entry is identical to the previous one, just point it there */
      _mapping[i].set(Mapping::OOPMAP_DUPLICATE, _last_offset, 0, map, _last);
    } else {
      /* not empty, not an identical copy of the previous entry */
      size = size_for(map);
      _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      _last_offset = _offset;
      _last = map;
    }

    assert(_mapping[i]._map == map, "check");
    _offset += size;
  }

  int total = base + pairs + _offset;
  DEBUG_ONLY(total += 8);   // 8 guard bytes, checked by verify()
  _required = total;
  return total;
}

// Write one (pc_offset, oopmap_offset) pair into the pre-reserved pair slot.
void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  assert(offset < set->nr_of_bytes(), "check");
  new ((address) pair) ImmutableOopMapPair(map->offset(), offset);
}

// Write the pair AND placement-construct the ImmutableOopMap it points to.
// Returns the aligned number of bytes the map occupies.
int ImmutableOopMapBuilder::fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  fill_pair(pair, map, offset, set);
  address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap

  new (addr) ImmutableOopMap(map);
  return align_size_up(sizeof(ImmutableOopMap) + map->data_size(), 8);
}

// Second pass: materialize the decisions recorded in _mapping[] into 'set'.
// Duplicate/empty entries only get a pair pointing at the shared map.
void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
  ImmutableOopMapPair* pairs = set->get_pairs();

  for (int i = 0; i < set->count(); ++i) {
    const OopMap* map = _mapping[i]._map;
    ImmutableOopMapPair* pair = NULL;
    int size = 0;

    if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
      size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
    } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
      fill_pair(&pairs[i], map, _mapping[i]._offset, set);
    }

    // Cross-check: the copied data must be identical to the source map's.
    const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
    assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
  }
}

#ifdef ASSERT
// Check the 8 guard bytes at the end of the buffer are untouched and that
// every map lies within the set's bounds.
void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
  for (int i = 0; i < 8; ++i) {
    assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
  }

  for (int i = 0; i < set->count(); ++i) {
    const ImmutableOopMapPair* pair = set->pair_at(i);
    assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
    const ImmutableOopMap* map = pair->get_from(set);
    int nr_of_bytes = map->nr_of_bytes();
    assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
  }
}
#endif

ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
  // Stamp the debug guard bytes before filling, so verify() can detect overrun.
  DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));

  _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
  fill(_new_set, _required);

  DEBUG_ONLY(verify(buffer, _required, _new_set));

  return _new_set;
}

ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
  _required = heap_size();

  // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
  address buffer = (address) NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
  return generate_into(buffer);
}

// Convert a mutable OopMapSet into its compact immutable form, sharing
// storage for empty and duplicate maps.
ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
  ResourceMark mark;
  ImmutableOopMapBuilder builder(oopmap_set);
  return builder.build();
}


//------------------------------DerivedPointerTable---------------------------

#if defined(COMPILER2) || INCLUDE_JVMCI

// One recorded derived pointer: where it lives and its offset from its base.
// While the table is active, *_location temporarily holds a pointer to the
// base's location (not a real oop) — see add() and update_pointers().
class DerivedPointerEntry : public CHeapObj<mtCompiler> {
 private:
  oop*     _location; // Location of derived pointer (also pointing to the base)
  intptr_t _offset;   // Offset from base pointer
 public:
  DerivedPointerEntry(oop* location, intptr_t offset) { _location = location; _offset = offset; }
  oop* location()    { return _location; }
  intptr_t  offset() { return _offset; }
};


GrowableArray<DerivedPointerEntry*>* DerivedPointerTable::_list = NULL;
bool DerivedPointerTable::_active = false;


void DerivedPointerTable::clear() {
  // The first time, we create the list.  Otherwise it should be
  // empty.  If not, then we have probably forgotton to call
  // update_pointers after last GC/Scavenge.
  assert (!_active, "should not be active");
  assert(_list == NULL || _list->length() == 0, "table not empty");
  if (_list == NULL) {
    _list = new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<DerivedPointerEntry*>(10, true); // Allocated on C heap
  }
  _active = true;
}


// Returns value of location as an int
intptr_t value_of_loc(oop *pointer) { return cast_from_oop<intptr_t>((*pointer)); }


// Record a derived pointer before GC moves its base: remember the offset,
// then overwrite the derived slot with the ADDRESS of the base slot so the
// derived oop can be reconstructed after the base has been relocated.
// Caller must hold DerivedPointerTableGC_lock (see all_do()).
void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {
  assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop");
  assert(derived_loc != base_loc, "Base and derived in same location");
  if (_active) {
    assert(*derived_loc != (oop)base_loc, "location already added");
    assert(_list != NULL, "list must exist");
    intptr_t offset = value_of_loc(derived_loc) - value_of_loc(base_loc);
    // This assert is invalid because derived pointers can be
    // arbitrarily far away from their base.
    // assert(offset >= -1000000, "wrong derived pointer info");

    if (TraceDerivedPointers) {
      tty->print_cr(
        "Add derived pointer@" INTPTR_FORMAT
        " - Derived: " INTPTR_FORMAT
        " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
        p2i(derived_loc), p2i((address)*derived_loc), p2i((address)*base_loc), p2i(base_loc), offset
      );
    }
    // Set derived oop location to point to base.
    *derived_loc = (oop)base_loc;
    assert_lock_strong(DerivedPointerTableGC_lock);
    DerivedPointerEntry *entry = new DerivedPointerEntry(derived_loc, offset);
    _list->append(entry);
  }
}


// After GC: for every recorded entry, read the (possibly relocated) base
// through the indirection stored by add(), and rebuild the derived pointer
// as base + offset. Empties the table and deactivates it.
void DerivedPointerTable::update_pointers() {
  assert(_list != NULL, "list must exist");
  for(int i = 0; i < _list->length(); i++) {
    DerivedPointerEntry* entry = _list->at(i);
    oop* derived_loc = entry->location();
    intptr_t offset  = entry->offset();
    // The derived oop was setup to point to location of base
    oop  base     = **(oop**)derived_loc;
    assert(Universe::heap()->is_in_or_null(base), "must be an oop");

    *derived_loc = (oop)(((address)base) + offset);
    assert(value_of_loc(derived_loc) - value_of_loc(&base) == offset, "sanity check");

    if (TraceDerivedPointers) {
      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                    " - Derived: " INTPTR_FORMAT "  Base: " INTPTR_FORMAT " (Offset: " INTX_FORMAT ")",
          p2i(derived_loc), p2i((address)*derived_loc), p2i((address)base), offset);
    }

    // Delete entry
    delete entry;
    _list->at_put(i, NULL);
  }
  // Clear list, so it is ready for next traversal (this is an invariant)
  if (TraceDerivedPointers && !_list->is_empty()) {
    tty->print_cr("--------------------------");
  }
  _list->clear();
  _active = false;
}

#endif // COMPILER2 || JVMCI