src/share/vm/code/nmethod.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File 8015774 Sdiff src/share/vm/code

src/share/vm/code/nmethod.cpp

Print this page




 483 }
 484 
 // Factory for an nmethod wrapping a native (JNI) method.  Native nmethods
 // carry no deoptimization/debug information; only the verified entry point,
 // the frame-complete offset, and the stack offsets of the BasicLock slots
 // (used by synchronized native wrappers) are recorded.  Returns NULL when
 // the code cache cannot supply enough contiguous memory.
 485 nmethod* nmethod::new_native_nmethod(methodHandle method,
 486   int compile_id,
 487   CodeBuffer *code_buffer,
 488   int vep_offset,
 489   int frame_complete,
 490   int frame_size,
 491   ByteSize basic_lock_owner_sp_offset,
 492   ByteSize basic_lock_sp_offset,
 493   OopMapSet* oop_maps) {
 494   code_buffer->finalize_oop_references(method);
 495   // create nmethod
 496   nmethod* nm = NULL;
 497   {
       // Size computation, allocation and construction all happen under the
       // CodeCache lock, taken without a safepoint check
       // (_no_safepoint_check_flag).
 498     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 499     int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
 500     CodeOffsets offsets;
 501     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 502     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 503     nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
 504                                             compile_id, &offsets,
 505                                             code_buffer, frame_size,
 506                                             basic_lock_owner_sp_offset,
 507                                             basic_lock_sp_offset, oop_maps);
       // Placement new may yield NULL on code-cache exhaustion, so every use
       // below is guarded.
 508     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
 509     if (PrintAssembly && nm != NULL) {
 510       Disassembler::decode(nm);
 511     }
 512   }
 513   // verify nmethod
 514   debug_only(if (nm) nm->verify();) // might block
 515 
 516   if (nm != NULL) {
 517     nm->log_new_nmethod();
 518   }
 519 
 520   return nm;
 521 }
 522 
 523 #ifdef HAVE_DTRACE_H
 // Factory for an nmethod used as a DTrace probe wrapper (only built when
 // DTrace headers are available).  Like native nmethods it has no
 // deopt/debug info; it additionally records the Dtrace_trap offset.
 // Returns NULL on code-cache exhaustion.
 524 nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
 525                                      CodeBuffer *code_buffer,
 526                                      int vep_offset,
 527                                      int trap_offset,
 528                                      int frame_complete,
 529                                      int frame_size) {
 530   code_buffer->finalize_oop_references(method);
 531   // create nmethod
 532   nmethod* nm = NULL;
 533   {
       // Allocation and construction under the CodeCache lock, without a
       // safepoint check.
 534     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 535     int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
 536     CodeOffsets offsets;
 537     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 538     offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
 539     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 540 
 541     nm = new (nmethod_size) nmethod(method(), nmethod_size,
 542                                     &offsets, code_buffer, frame_size);
 543 
       // nm may be NULL if the code cache is full; guard all uses.
 544     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
 545     if (PrintAssembly && nm != NULL) {
 546       Disassembler::decode(nm);
 547     }
 548   }
 549   // verify nmethod
 550   debug_only(if (nm) nm->verify();) // might block
 551 
 552   if (nm != NULL) {
 553     nm->log_new_nmethod();
 554   }
 555 
 556   return nm;
 557 }
 558 
 559 #endif // def HAVE_DTRACE_H
 560 
 561 nmethod* nmethod::new_nmethod(methodHandle method,


 569   OopMapSet* oop_maps,
 570   ExceptionHandlerTable* handler_table,
 571   ImplicitExceptionTable* nul_chk_table,
 572   AbstractCompiler* compiler,
 573   int comp_level
 574 )
 575 {
 576   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 577   code_buffer->finalize_oop_references(method);
 578   // create nmethod
 579   nmethod* nm = NULL;
 580   { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 581     int nmethod_size =
 582       allocation_size(code_buffer, sizeof(nmethod))
 583       + adjust_pcs_size(debug_info->pcs_size())
 584       + round_to(dependencies->size_in_bytes() , oopSize)
 585       + round_to(handler_table->size_in_bytes(), oopSize)
 586       + round_to(nul_chk_table->size_in_bytes(), oopSize)
 587       + round_to(debug_info->data_size()       , oopSize);
 588 
 589     nm = new (nmethod_size)
 590     nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
 591             orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
 592             oop_maps,
 593             handler_table,
 594             nul_chk_table,
 595             compiler,
 596             comp_level);
 597 
 598     if (nm != NULL) {
 599       // To make dependency checking during class loading fast, record
 600       // the nmethod dependencies in the classes it is dependent on.
 601       // This allows the dependency checking code to simply walk the
 602       // class hierarchy above the loaded class, checking only nmethods
 603       // which are dependent on those classes.  The slow way is to
 604       // check every nmethod for dependencies which makes it linear in
 605       // the number of methods compiled.  For applications with a lot
 606       // of classes the slow way is too slow.
 607       for (Dependencies::DepStream deps(nm); deps.next(); ) {
 608         Klass* klass = deps.context_type();
 609         if (klass == NULL) {


 786       xtty->method(_method);
 787       xtty->stamp();
 788       xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
 789     }
 790     // print the header part first
 791     print();
 792     // then print the requested information
 793     if (PrintNMethods) {
 794       print_code();
 795     }
 796     if (PrintRelocations) {
 797       print_relocations();
 798     }
 799     if (xtty != NULL) {
 800       xtty->tail("print_dtrace_nmethod");
 801     }
 802   }
 803 }
 804 #endif // def HAVE_DTRACE_H
 805 
 // Placement operator new: carves nmethod_size bytes out of the code cache.
 // The requested C++ object size ('size') is ignored -- the caller passes the
 // full blob size (header + code + metadata) as nmethod_size.
 806 void* nmethod::operator new(size_t size, int nmethod_size) throw() {
 807   // Not critical, may return null if there is too little continuous memory
 808   return CodeCache::allocate(nmethod_size);


 809 }
 810 
 811 nmethod::nmethod(
 812   Method* method,
 813   int nmethod_size,
 814   int compile_id,
 815   int entry_bci,
 816   CodeOffsets* offsets,
 817   int orig_pc_offset,
 818   DebugInformationRecorder* debug_info,
 819   Dependencies* dependencies,
 820   CodeBuffer *code_buffer,
 821   int frame_size,
 822   OopMapSet* oop_maps,
 823   ExceptionHandlerTable* handler_table,
 824   ImplicitExceptionTable* nul_chk_table,
 825   AbstractCompiler* compiler,
 826   int comp_level
 827   )
 828   : CodeBlob("nmethod", code_buffer, sizeof(nmethod),


1515   if (TraceCreateZombies) {
1516     tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
1517   }
1518 
1519   NMethodSweeper::report_state_change(this);
1520   return true;
1521 }
1522 
// Final deallocation of this nmethod: releases the exception cache, removes
// the nmethod from the scavenge-root list if present, and returns its code
// cache storage.  Only legal on a zombie (or an unloaded OSR method), and
// only while holding the CodeCache lock or at a safepoint.
1523 void nmethod::flush() {
1524   // Note that there are no valid oops in the nmethod anymore.
1525   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
1526   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
1527 
1528   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
1529   assert_locked_or_safepoint(CodeCache_lock);
1530 
1531   // completely deallocate this method
1532   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
1533   if (PrintMethodFlushing) {
1534     tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
1535         _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
1536   }
1537 
1538   // We need to deallocate any ExceptionCache data.
1539   // Note that we do not need to grab the nmethod lock for this, it
1540   // better be thread safe if we're disposing of it!
       // Detach the whole list first, then walk and delete each entry.
1541   ExceptionCache* ec = exception_cache();
1542   set_exception_cache(NULL);
1543   while(ec != NULL) {
1544     ExceptionCache* next = ec->next();
1545     delete ec;
1546     ec = next;
1547   }
1548 
       // Unlink from the GC scavenge-root list before the memory goes away.
1549   if (on_scavenge_root_list()) {
1550     CodeCache::drop_scavenge_root_nmethod(this);
1551   }
1552 
1553 #ifdef SHARK
1554   ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
1555 #endif // SHARK
1556 
       // Explicitly run the (non-virtual) CodeBlob::flush to release
       // blob-level resources, then hand the storage back to the code cache.
1557   ((CodeBlob*)(this))->flush();
1558 
1559   CodeCache::free(this);
1560 }
1561 
1562 
1563 //
1564 // Notify all classes this nmethod is dependent on that it is no
1565 // longer dependent. This should only be called in two situations.
1566 // First, when a nmethod transitions to a zombie all dependents need
1567 // to be clear.  Since zombification happens at a safepoint there's no
1568 // synchronization issues.  The second place is a little more tricky.
1569 // During phase 1 of mark sweep class unloading may happen and as a
1570 // result some nmethods may get unloaded.  In this case the flushing
1571 // of dependencies must happen during phase 1 since after GC any
1572 // dependencies in the unloaded nmethod won't be updated, so
1573 // traversing the dependency information is unsafe.  In that case this
1574 // function is called with a non-NULL argument and this function only
1575 // notifies instanceKlasses that are reachable
1576 
1577 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1578   assert_locked_or_safepoint(CodeCache_lock);
1579   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
1580   "is_alive is non-NULL if and only if we are called during GC");
1581   if (!has_flushed_dependencies()) {
1582     set_has_flushed_dependencies();


2410     assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
2411     return NULL;
2412   }
2413 }
2414 
2415 
2416 void nmethod::check_all_dependencies(DepChange& changes) {
2417   // Checked dependencies are allocated into this ResourceMark
2418   ResourceMark rm;
2419 
2420   // Turn off dependency tracing while actually testing dependencies.
2421   NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
2422 
2423  typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
2424                            &DependencySignature::equals, 11027> DepTable;
2425 
2426  DepTable* table = new DepTable();
2427 
2428   // Iterate over live nmethods and check dependencies of all nmethods that are not
2429   // marked for deoptimization. A particular dependency is only checked once.
2430   for(nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); nm != NULL; nm = CodeCache::alive_nmethod(CodeCache::next(nm))) {
2431     if (!nm->is_marked_for_deoptimization()) {



2432       for (Dependencies::DepStream deps(nm); deps.next(); ) {
2433         // Construct abstraction of a dependency.
2434         DependencySignature* current_sig = new DependencySignature(deps);
2435 
2436         // Determine if dependency is already checked. table->put(...) returns
2437         // 'true' if the dependency is added (i.e., was not in the hashtable).
2438         if (table->put(*current_sig, 1)) {
2439           if (deps.check_dependency() != NULL) {
2440             // Dependency checking failed. Print out information about the failed
2441             // dependency and finally fail with an assert. We can fail here, since
2442             // dependency checking is never done in a product build.
2443             changes.print();
2444             nm->print();
2445             nm->print_dependencies();
2446             assert(false, "Should have been marked for deoptimization");
2447           }
2448         }
2449       }
2450     }
2451   }




 483 }
 484 
 // Factory for an nmethod wrapping a native (JNI) method.  Native nmethods
 // carry no deoptimization/debug information; only the verified entry point,
 // the frame-complete offset, and the stack offsets of the BasicLock slots
 // (used by synchronized native wrappers) are recorded.  Returns NULL when
 // the code cache cannot supply the required memory.
 485 nmethod* nmethod::new_native_nmethod(methodHandle method,
 486   int compile_id,
 487   CodeBuffer *code_buffer,
 488   int vep_offset,
 489   int frame_complete,
 490   int frame_size,
 491   ByteSize basic_lock_owner_sp_offset,
 492   ByteSize basic_lock_sp_offset,
 493   OopMapSet* oop_maps) {
 494   code_buffer->finalize_oop_references(method);
 495   // create nmethod
 496   nmethod* nm = NULL;
 497   {
       // Size computation, allocation and construction all happen under the
       // CodeCache lock, taken without a safepoint check.
 498     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 499     int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
 500     CodeOffsets offsets;
 501     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 502     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
       // CompLevel_none is forwarded to operator new, which maps it to the
       // appropriate code-heap segment via CodeCache::get_code_blob_type.
 503     nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
 504                                             compile_id, &offsets,
 505                                             code_buffer, frame_size,
 506                                             basic_lock_owner_sp_offset,
 507                                             basic_lock_sp_offset, oop_maps);
       // Placement new may yield NULL on code-cache exhaustion; guard all uses.
 508     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
 509     if (PrintAssembly && nm != NULL) {
 510       Disassembler::decode(nm);
 511     }
 512   }
 513   // verify nmethod
 514   debug_only(if (nm) nm->verify();) // might block
 515 
 516   if (nm != NULL) {
 517     nm->log_new_nmethod();
 518   }
 519 
 520   return nm;
 521 }
 522 
 523 #ifdef HAVE_DTRACE_H
 // Factory for an nmethod used as a DTrace probe wrapper (only built when
 // DTrace headers are available).  Like native nmethods it has no
 // deopt/debug info; it additionally records the Dtrace_trap offset.
 // Returns NULL on code-cache exhaustion.
 524 nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
 525                                      CodeBuffer *code_buffer,
 526                                      int vep_offset,
 527                                      int trap_offset,
 528                                      int frame_complete,
 529                                      int frame_size) {
 530   code_buffer->finalize_oop_references(method);
 531   // create nmethod
 532   nmethod* nm = NULL;
 533   {
       // Allocation and construction under the CodeCache lock, without a
       // safepoint check.
 534     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 535     int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
 536     CodeOffsets offsets;
 537     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 538     offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
 539     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 540 
       // CompLevel_none selects the code-heap segment via operator new.
 541     nm = new (nmethod_size, CompLevel_none) nmethod(method(), nmethod_size,
 542                                     &offsets, code_buffer, frame_size);
 543 
       // nm may be NULL if the code cache is full; guard all uses.
 544     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
 545     if (PrintAssembly && nm != NULL) {
 546       Disassembler::decode(nm);
 547     }
 548   }
 549   // verify nmethod
 550   debug_only(if (nm) nm->verify();) // might block
 551 
 552   if (nm != NULL) {
 553     nm->log_new_nmethod();
 554   }
 555 
 556   return nm;
 557 }
 558 
 559 #endif // def HAVE_DTRACE_H
 560 
 561 nmethod* nmethod::new_nmethod(methodHandle method,


 569   OopMapSet* oop_maps,
 570   ExceptionHandlerTable* handler_table,
 571   ImplicitExceptionTable* nul_chk_table,
 572   AbstractCompiler* compiler,
 573   int comp_level
 574 )
 575 {
 576   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 577   code_buffer->finalize_oop_references(method);
 578   // create nmethod
 579   nmethod* nm = NULL;
 580   { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 581     int nmethod_size =
 582       allocation_size(code_buffer, sizeof(nmethod))
 583       + adjust_pcs_size(debug_info->pcs_size())
 584       + round_to(dependencies->size_in_bytes() , oopSize)
 585       + round_to(handler_table->size_in_bytes(), oopSize)
 586       + round_to(nul_chk_table->size_in_bytes(), oopSize)
 587       + round_to(debug_info->data_size()       , oopSize);
 588 
 589     nm = new (nmethod_size, comp_level)
 590     nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
 591             orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
 592             oop_maps,
 593             handler_table,
 594             nul_chk_table,
 595             compiler,
 596             comp_level);
 597 
 598     if (nm != NULL) {
 599       // To make dependency checking during class loading fast, record
 600       // the nmethod dependencies in the classes it is dependent on.
 601       // This allows the dependency checking code to simply walk the
 602       // class hierarchy above the loaded class, checking only nmethods
 603       // which are dependent on those classes.  The slow way is to
 604       // check every nmethod for dependencies which makes it linear in
 605       // the number of methods compiled.  For applications with a lot
 606       // of classes the slow way is too slow.
 607       for (Dependencies::DepStream deps(nm); deps.next(); ) {
 608         Klass* klass = deps.context_type();
 609         if (klass == NULL) {


 786       xtty->method(_method);
 787       xtty->stamp();
 788       xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
 789     }
 790     // print the header part first
 791     print();
 792     // then print the requested information
 793     if (PrintNMethods) {
 794       print_code();
 795     }
 796     if (PrintRelocations) {
 797       print_relocations();
 798     }
 799     if (xtty != NULL) {
 800       xtty->tail("print_dtrace_nmethod");
 801     }
 802   }
 803 }
 804 #endif // def HAVE_DTRACE_H
 805 
 // Placement operator new: carves nmethod_size bytes out of the code cache,
 // in the heap segment chosen for the given compilation level.  The C++
 // object size ('size') is ignored -- callers pass the full blob size
 // (header + code + metadata) as nmethod_size.  May return NULL on
 // code-cache exhaustion.
 806 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
 807   // With a SegmentedCodeCache, nmethods are allocated on separate heaps and therefore do not share memory
 808   // with critical CodeBlobs. We define the allocation as critical to make sure all code heap memory is used.
 809   bool is_critical = SegmentedCodeCache;
 810   return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), is_critical);
 811 }
 812 
 813 nmethod::nmethod(
 814   Method* method,
 815   int nmethod_size,
 816   int compile_id,
 817   int entry_bci,
 818   CodeOffsets* offsets,
 819   int orig_pc_offset,
 820   DebugInformationRecorder* debug_info,
 821   Dependencies* dependencies,
 822   CodeBuffer *code_buffer,
 823   int frame_size,
 824   OopMapSet* oop_maps,
 825   ExceptionHandlerTable* handler_table,
 826   ImplicitExceptionTable* nul_chk_table,
 827   AbstractCompiler* compiler,
 828   int comp_level
 829   )
 830   : CodeBlob("nmethod", code_buffer, sizeof(nmethod),


1517   if (TraceCreateZombies) {
1518     tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
1519   }
1520 
1521   NMethodSweeper::report_state_change(this);
1522   return true;
1523 }
1524 
// Final deallocation of this nmethod: releases the exception cache, removes
// the nmethod from the scavenge-root list if present, and returns its code
// cache storage.  Only legal on a zombie (or an unloaded OSR method), and
// only while holding the CodeCache lock or at a safepoint.
1525 void nmethod::flush() {
1526   // Note that there are no valid oops in the nmethod anymore.
1527   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
1528   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
1529 
1530   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
1531   assert_locked_or_safepoint(CodeCache_lock);
1532 
1533   // completely deallocate this method
1534   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
1535   if (PrintMethodFlushing) {
       // Report free space of the code-heap segment this nmethod lives in,
       // selected by its compilation level (segmented code cache).
1536     tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
1537         _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
1538   }
1539 
1540   // We need to deallocate any ExceptionCache data.
1541   // Note that we do not need to grab the nmethod lock for this, it
1542   // better be thread safe if we're disposing of it!
       // Detach the whole list first, then walk and delete each entry.
1543   ExceptionCache* ec = exception_cache();
1544   set_exception_cache(NULL);
1545   while(ec != NULL) {
1546     ExceptionCache* next = ec->next();
1547     delete ec;
1548     ec = next;
1549   }
1550 
       // Unlink from the GC scavenge-root list before the memory goes away.
1551   if (on_scavenge_root_list()) {
1552     CodeCache::drop_scavenge_root_nmethod(this);
1553   }
1554 
1555 #ifdef SHARK
1556   ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
1557 #endif // SHARK
1558 
       // Explicitly run the (non-virtual) CodeBlob::flush to release
       // blob-level resources, then hand the storage back to the code cache.
1559   ((CodeBlob*)(this))->flush();
1560 
1561   CodeCache::free(this);
1562 }
1563 

1564 //
1565 // Notify all classes this nmethod is dependent on that it is no
1566 // longer dependent. This should only be called in two situations.
1567 // First, when a nmethod transitions to a zombie all dependents need
1568 // to be clear.  Since zombification happens at a safepoint there's no
1569 // synchronization issues.  The second place is a little more tricky.
1570 // During phase 1 of mark sweep class unloading may happen and as a
1571 // result some nmethods may get unloaded.  In this case the flushing
1572 // of dependencies must happen during phase 1 since after GC any
1573 // dependencies in the unloaded nmethod won't be updated, so
1574 // traversing the dependency information is unsafe.  In that case this
1575 // function is called with a non-NULL argument and this function only
1576 // notifies instanceKlasses that are reachable
1577 
1578 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1579   assert_locked_or_safepoint(CodeCache_lock);
1580   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
1581   "is_alive is non-NULL if and only if we are called during GC");
1582   if (!has_flushed_dependencies()) {
1583     set_has_flushed_dependencies();


2411     assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
2412     return NULL;
2413   }
2414 }
2415 
2416 
2417 void nmethod::check_all_dependencies(DepChange& changes) {
2418   // Checked dependencies are allocated into this ResourceMark
2419   ResourceMark rm;
2420 
2421   // Turn off dependency tracing while actually testing dependencies.
2422   NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
2423 
2424   typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
2425                             &DependencySignature::equals, 11027> DepTable;
2426 
2427   DepTable* table = new DepTable();
2428 
2429   // Iterate over live nmethods and check dependencies of all nmethods that are not
2430   // marked for deoptimization. A particular dependency is only checked once.
2431   NMethodIterator iter;
2432   while(iter.next()) {
2433     nmethod* nm = iter.method();
2434     // Only notify for live nmethods
2435     if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
2436       for (Dependencies::DepStream deps(nm); deps.next(); ) {
2437         // Construct abstraction of a dependency.
2438         DependencySignature* current_sig = new DependencySignature(deps);
2439 
2440         // Determine if dependency is already checked. table->put(...) returns
2441         // 'true' if the dependency is added (i.e., was not in the hashtable).
2442         if (table->put(*current_sig, 1)) {
2443           if (deps.check_dependency() != NULL) {
2444             // Dependency checking failed. Print out information about the failed
2445             // dependency and finally fail with an assert. We can fail here, since
2446             // dependency checking is never done in a product build.
2447             changes.print();
2448             nm->print();
2449             nm->print_dependencies();
2450             assert(false, "Should have been marked for deoptimization");
2451           }
2452         }
2453       }
2454     }
2455   }


src/share/vm/code/nmethod.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File