src/share/vm/code/nmethod.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File JDK-8015774 Sdiff src/share/vm/code

src/share/vm/code/nmethod.cpp

Print this page




 488 }
 489 
 490 nmethod* nmethod::new_native_nmethod(methodHandle method,
 491   int compile_id,
 492   CodeBuffer *code_buffer,
 493   int vep_offset,
 494   int frame_complete,
 495   int frame_size,
 496   ByteSize basic_lock_owner_sp_offset,
 497   ByteSize basic_lock_sp_offset,
 498   OopMapSet* oop_maps) {
 499   code_buffer->finalize_oop_references(method);
 500   // create nmethod
 501   nmethod* nm = NULL;
 502   {
 503     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 504     int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
 505     CodeOffsets offsets;
 506     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 507     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 508     nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
 509                                             compile_id, &offsets,
 510                                             code_buffer, frame_size,
 511                                             basic_lock_owner_sp_offset,
 512                                             basic_lock_sp_offset, oop_maps);
 513     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
 514     if (PrintAssembly && nm != NULL) {
 515       Disassembler::decode(nm);
 516     }
 517   }
 518   // verify nmethod
 519   debug_only(if (nm) nm->verify();) // might block
 520 
 521   if (nm != NULL) {
 522     nm->log_new_nmethod();
 523   }
 524 
 525   return nm;
 526 }
 527 
 528 #ifdef HAVE_DTRACE_H
 529 nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
 530                                      CodeBuffer *code_buffer,
 531                                      int vep_offset,
 532                                      int trap_offset,
 533                                      int frame_complete,
 534                                      int frame_size) {
 535   code_buffer->finalize_oop_references(method);
 536   // create nmethod
 537   nmethod* nm = NULL;
 538   {
 539     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 540     int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
 541     CodeOffsets offsets;
 542     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 543     offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
 544     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 545 
 546     nm = new (nmethod_size) nmethod(method(), nmethod_size,
 547                                     &offsets, code_buffer, frame_size);
 548 
 549     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
 550     if (PrintAssembly && nm != NULL) {
 551       Disassembler::decode(nm);
 552     }
 553   }
 554   // verify nmethod
 555   debug_only(if (nm) nm->verify();) // might block
 556 
 557   if (nm != NULL) {
 558     nm->log_new_nmethod();
 559   }
 560 
 561   return nm;
 562 }
 563 
 564 #endif // def HAVE_DTRACE_H
 565 
 566 nmethod* nmethod::new_nmethod(methodHandle method,


 574   OopMapSet* oop_maps,
 575   ExceptionHandlerTable* handler_table,
 576   ImplicitExceptionTable* nul_chk_table,
 577   AbstractCompiler* compiler,
 578   int comp_level
 579 )
 580 {
 581   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 582   code_buffer->finalize_oop_references(method);
 583   // create nmethod
 584   nmethod* nm = NULL;
 585   { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 586     int nmethod_size =
 587       allocation_size(code_buffer, sizeof(nmethod))
 588       + adjust_pcs_size(debug_info->pcs_size())
 589       + round_to(dependencies->size_in_bytes() , oopSize)
 590       + round_to(handler_table->size_in_bytes(), oopSize)
 591       + round_to(nul_chk_table->size_in_bytes(), oopSize)
 592       + round_to(debug_info->data_size()       , oopSize);
 593 
 594     nm = new (nmethod_size)
 595     nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
 596             orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
 597             oop_maps,
 598             handler_table,
 599             nul_chk_table,
 600             compiler,
 601             comp_level);
 602 
 603     if (nm != NULL) {
 604       // To make dependency checking during class loading fast, record
 605       // the nmethod dependencies in the classes it is dependent on.
 606       // This allows the dependency checking code to simply walk the
 607       // class hierarchy above the loaded class, checking only nmethods
 608       // which are dependent on those classes.  The slow way is to
 609       // check every nmethod for dependencies which makes it linear in
 610       // the number of methods compiled.  For applications with a lot
 611       // classes the slow way is too slow.
 612       for (Dependencies::DepStream deps(nm); deps.next(); ) {
 613         Klass* klass = deps.context_type();
 614         if (klass == NULL) {


 786       xtty->method(_method);
 787       xtty->stamp();
 788       xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
 789     }
 790     // print the header part first
 791     print();
 792     // then print the requested information
 793     if (PrintNMethods) {
 794       print_code();
 795     }
 796     if (PrintRelocations) {
 797       print_relocations();
 798     }
 799     if (xtty != NULL) {
 800       xtty->tail("print_dtrace_nmethod");
 801     }
 802   }
 803 }
 804 #endif // def HAVE_DTRACE_H
 805 
// Placement allocator: carve nmethod_size bytes out of the code cache.
// Not critical, may return NULL if there is too little contiguous memory
// left in the code cache — callers are expected to check for NULL.
void* nmethod::operator new(size_t size, int nmethod_size) throw() {
  return CodeCache::allocate(nmethod_size);
}
 810 
 811 nmethod::nmethod(
 812   Method* method,
 813   int nmethod_size,
 814   int compile_id,
 815   int entry_bci,
 816   CodeOffsets* offsets,
 817   int orig_pc_offset,
 818   DebugInformationRecorder* debug_info,
 819   Dependencies* dependencies,
 820   CodeBuffer *code_buffer,
 821   int frame_size,
 822   OopMapSet* oop_maps,
 823   ExceptionHandlerTable* handler_table,
 824   ImplicitExceptionTable* nul_chk_table,
 825   AbstractCompiler* compiler,
 826   int comp_level
 827   )
 828   : CodeBlob("nmethod", code_buffer, sizeof(nmethod),


1421   }
1422 
1423   // Make sweeper aware that there is a zombie method that needs to be removed
1424   NMethodSweeper::notify();
1425 
1426   return true;
1427 }
1428 
// Return this nmethod's storage to the code cache.  Only legal once the
// method is safely dead: a zombie marked for reclamation, or an unloaded
// OSR method.  Caller must hold the CodeCache lock or be at a safepoint.
void nmethod::flush() {
  // Note that there are no valid oops in the nmethod anymore.
  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");

  assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
  assert_locked_or_safepoint(CodeCache_lock);

  // completely deallocate this method
  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
  if (PrintMethodFlushing) {
    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
  }

  // We need to deallocate any ExceptionCache data.
  // Note that we do not need to grab the nmethod lock for this, it
  // better be thread safe if we're disposing of it!
  ExceptionCache* ec = exception_cache();
  set_exception_cache(NULL);  // detach the list head before deleting the entries
  while(ec != NULL) {
    ExceptionCache* next = ec->next();
    delete ec;
    ec = next;
  }

  // Unlink from the scavenge-root list if this nmethod was registered there.
  if (on_scavenge_root_list()) {
    CodeCache::drop_scavenge_root_nmethod(this);
  }

#ifdef SHARK
  // Give the Shark compiler a chance to free its own data for this method.
  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
#endif // SHARK

  // Run the base-class cleanup (the cast selects CodeBlob::flush).
  ((CodeBlob*)(this))->flush();

  CodeCache::free(this);
}
1467 
1468 
1469 //
1470 // Notify all classes this nmethod is dependent on that it is no
1471 // longer dependent. This should only be called in two situations.
1472 // First, when a nmethod transitions to a zombie all dependents need
1473 // to be clear.  Since zombification happens at a safepoint there's no
1474 // synchronization issues.  The second place is a little more tricky.
1475 // During phase 1 of mark sweep class unloading may happen and as a
1476 // result some nmethods may get unloaded.  In this case the flushing
1477 // of dependencies must happen during phase 1 since after GC any
1478 // dependencies in the unloaded nmethod won't be updated, so
1479 // traversing the dependency information in unsafe.  In that case this
1480 // function is called with a non-NULL argument and this function only
1481 // notifies instanceKlasses that are reachable
1482 
1483 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1484   assert_locked_or_safepoint(CodeCache_lock);
1485   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
1486   "is_alive is non-NULL if and only if we are called during GC");
1487   if (!has_flushed_dependencies()) {
1488     set_has_flushed_dependencies();


2349                   (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
2350   }
2351   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2352 };
2353 
2354 void nmethod::verify() {
2355 
2356   // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
2357   // seems odd.
2358 
2359   if( is_zombie() || is_not_entrant() )
2360     return;
2361 
2362   // Make sure all the entry points are correctly aligned for patching.
2363   NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2364 
2365   // assert(method()->is_oop(), "must be valid");
2366 
2367   ResourceMark rm;
2368 
2369   if (!CodeCache::contains(this)) {
2370     fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
2371   }
2372 
2373   if(is_native_method() )
2374     return;
2375 
2376   nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2377   if (nm != this) {
2378     fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
2379                   this));
2380   }
2381 
2382   for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2383     if (! p->verify(this)) {
2384       tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
2385     }
2386   }
2387 
2388   VerifyOopsClosure voc(this);
2389   oops_do(&voc);




 488 }
 489 
 490 nmethod* nmethod::new_native_nmethod(methodHandle method,
 491   int compile_id,
 492   CodeBuffer *code_buffer,
 493   int vep_offset,
 494   int frame_complete,
 495   int frame_size,
 496   ByteSize basic_lock_owner_sp_offset,
 497   ByteSize basic_lock_sp_offset,
 498   OopMapSet* oop_maps) {
 499   code_buffer->finalize_oop_references(method);
 500   // create nmethod
 501   nmethod* nm = NULL;
 502   {
 503     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 504     int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
 505     CodeOffsets offsets;
 506     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 507     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 508     nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
 509                                             compile_id, &offsets,
 510                                             code_buffer, frame_size,
 511                                             basic_lock_owner_sp_offset,
 512                                             basic_lock_sp_offset, oop_maps);
 513     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
 514     if (PrintAssembly && nm != NULL) {
 515       Disassembler::decode(nm);
 516     }
 517   }
 518   // verify nmethod
 519   debug_only(if (nm) nm->verify();) // might block
 520 
 521   if (nm != NULL) {
 522     nm->log_new_nmethod();
 523   }
 524 
 525   return nm;
 526 }
 527 
 528 #ifdef HAVE_DTRACE_H
 529 nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
 530                                      CodeBuffer *code_buffer,
 531                                      int vep_offset,
 532                                      int trap_offset,
 533                                      int frame_complete,
 534                                      int frame_size) {
 535   code_buffer->finalize_oop_references(method);
 536   // create nmethod
 537   nmethod* nm = NULL;
 538   {
 539     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 540     int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
 541     CodeOffsets offsets;
 542     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 543     offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
 544     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 545 
 546     nm = new (nmethod_size, CompLevel_none) nmethod(method(), nmethod_size,
 547                                     &offsets, code_buffer, frame_size);
 548 
 549     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
 550     if (PrintAssembly && nm != NULL) {
 551       Disassembler::decode(nm);
 552     }
 553   }
 554   // verify nmethod
 555   debug_only(if (nm) nm->verify();) // might block
 556 
 557   if (nm != NULL) {
 558     nm->log_new_nmethod();
 559   }
 560 
 561   return nm;
 562 }
 563 
 564 #endif // def HAVE_DTRACE_H
 565 
 566 nmethod* nmethod::new_nmethod(methodHandle method,


 574   OopMapSet* oop_maps,
 575   ExceptionHandlerTable* handler_table,
 576   ImplicitExceptionTable* nul_chk_table,
 577   AbstractCompiler* compiler,
 578   int comp_level
 579 )
 580 {
 581   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 582   code_buffer->finalize_oop_references(method);
 583   // create nmethod
 584   nmethod* nm = NULL;
 585   { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 586     int nmethod_size =
 587       allocation_size(code_buffer, sizeof(nmethod))
 588       + adjust_pcs_size(debug_info->pcs_size())
 589       + round_to(dependencies->size_in_bytes() , oopSize)
 590       + round_to(handler_table->size_in_bytes(), oopSize)
 591       + round_to(nul_chk_table->size_in_bytes(), oopSize)
 592       + round_to(debug_info->data_size()       , oopSize);
 593 
 594     nm = new (nmethod_size, comp_level)
 595     nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
 596             orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
 597             oop_maps,
 598             handler_table,
 599             nul_chk_table,
 600             compiler,
 601             comp_level);
 602 
 603     if (nm != NULL) {
 604       // To make dependency checking during class loading fast, record
 605       // the nmethod dependencies in the classes it is dependent on.
 606       // This allows the dependency checking code to simply walk the
 607       // class hierarchy above the loaded class, checking only nmethods
 608       // which are dependent on those classes.  The slow way is to
 609       // check every nmethod for dependencies which makes it linear in
 610       // the number of methods compiled.  For applications with a lot
 611       // classes the slow way is too slow.
 612       for (Dependencies::DepStream deps(nm); deps.next(); ) {
 613         Klass* klass = deps.context_type();
 614         if (klass == NULL) {


 786       xtty->method(_method);
 787       xtty->stamp();
 788       xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
 789     }
 790     // print the header part first
 791     print();
 792     // then print the requested information
 793     if (PrintNMethods) {
 794       print_code();
 795     }
 796     if (PrintRelocations) {
 797       print_relocations();
 798     }
 799     if (xtty != NULL) {
 800       xtty->tail("print_dtrace_nmethod");
 801     }
 802   }
 803 }
 804 #endif // def HAVE_DTRACE_H
 805 
// Placement allocator: carve nmethod_size bytes out of the code heap that
// corresponds to comp_level.  May return NULL if that heap is exhausted —
// callers are expected to check for NULL.
void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
  // Nmethods are allocated on separate heaps and therefore do not share memory with critical CodeBlobs.
  // We nevertheless define the allocation as critical to make sure all heap memory is used.
  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), true);  // true == critical allocation
}
 811 
 812 nmethod::nmethod(
 813   Method* method,
 814   int nmethod_size,
 815   int compile_id,
 816   int entry_bci,
 817   CodeOffsets* offsets,
 818   int orig_pc_offset,
 819   DebugInformationRecorder* debug_info,
 820   Dependencies* dependencies,
 821   CodeBuffer *code_buffer,
 822   int frame_size,
 823   OopMapSet* oop_maps,
 824   ExceptionHandlerTable* handler_table,
 825   ImplicitExceptionTable* nul_chk_table,
 826   AbstractCompiler* compiler,
 827   int comp_level
 828   )
 829   : CodeBlob("nmethod", code_buffer, sizeof(nmethod),


1422   }
1423 
1424   // Make sweeper aware that there is a zombie method that needs to be removed
1425   NMethodSweeper::notify();
1426 
1427   return true;
1428 }
1429 
// Return this nmethod's storage to the code heap it was allocated from
// (selected by _comp_level).  Only legal once the method is safely dead:
// a zombie marked for reclamation, or an unloaded OSR method.  Caller must
// hold the CodeCache lock or be at a safepoint.
void nmethod::flush() {
  // Note that there are no valid oops in the nmethod anymore.
  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");

  assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
  assert_locked_or_safepoint(CodeCache_lock);

  // completely deallocate this method
  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
  if (PrintMethodFlushing) {
    // Report free space for this nmethod's own code heap, not the whole cache.
    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
  }

  // We need to deallocate any ExceptionCache data.
  // Note that we do not need to grab the nmethod lock for this, it
  // better be thread safe if we're disposing of it!
  ExceptionCache* ec = exception_cache();
  set_exception_cache(NULL);  // detach the list head before deleting the entries
  while(ec != NULL) {
    ExceptionCache* next = ec->next();
    delete ec;
    ec = next;
  }

  // Unlink from the scavenge-root list if this nmethod was registered there.
  if (on_scavenge_root_list()) {
    CodeCache::drop_scavenge_root_nmethod(this);
  }

#ifdef SHARK
  // Give the Shark compiler a chance to free its own data for this method.
  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
#endif // SHARK

  // Run the base-class cleanup (the cast selects CodeBlob::flush).
  ((CodeBlob*)(this))->flush();

  // Free back into the code heap matching this nmethod's compilation level.
  CodeCache::free(this, CodeCache::get_code_blob_type(_comp_level));
}
1468 

1469 //
1470 // Notify all classes this nmethod is dependent on that it is no
1471 // longer dependent. This should only be called in two situations.
1472 // First, when a nmethod transitions to a zombie all dependents need
1473 // to be clear.  Since zombification happens at a safepoint there's no
1474 // synchronization issues.  The second place is a little more tricky.
1475 // During phase 1 of mark sweep class unloading may happen and as a
1476 // result some nmethods may get unloaded.  In this case the flushing
1477 // of dependencies must happen during phase 1 since after GC any
1478 // dependencies in the unloaded nmethod won't be updated, so
1479 // traversing the dependency information in unsafe.  In that case this
1480 // function is called with a non-NULL argument and this function only
1481 // notifies instanceKlasses that are reachable
1482 
1483 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1484   assert_locked_or_safepoint(CodeCache_lock);
1485   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
1486   "is_alive is non-NULL if and only if we are called during GC");
1487   if (!has_flushed_dependencies()) {
1488     set_has_flushed_dependencies();


2349                   (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
2350   }
2351   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2352 };
2353 
2354 void nmethod::verify() {
2355 
2356   // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
2357   // seems odd.
2358 
2359   if( is_zombie() || is_not_entrant() )
2360     return;
2361 
2362   // Make sure all the entry points are correctly aligned for patching.
2363   NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2364 
2365   // assert(method()->is_oop(), "must be valid");
2366 
2367   ResourceMark rm;
2368 
2369   if (!CodeCache::contains_nmethod(this)) {
2370     fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
2371   }
2372 
2373   if(is_native_method() )
2374     return;
2375 
2376   nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2377   if (nm != this) {
2378     fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
2379                   this));
2380   }
2381 
2382   for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2383     if (! p->verify(this)) {
2384       tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
2385     }
2386   }
2387 
2388   VerifyOopsClosure voc(this);
2389   oops_do(&voc);


src/share/vm/code/nmethod.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File