
src/share/vm/code/codeCache.cpp

rev 13198 : 8183573: Refactor CodeHeap and AOTCodeHeap to devirtualize hot methods
Reviewed-by: rbackman

Old version:

 398   // Check if heap is needed
 399   if (!heap_available(code_blob_type)) {
 400     return;
 401   }
 402 
 403   // Create CodeHeap
 404   CodeHeap* heap = new CodeHeap(name, code_blob_type);
 405   add_heap(heap);
 406 
 407   // Reserve Space
 408   size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
 409   size_initial = align_up(size_initial, os::vm_page_size());
 410   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
 411     vm_exit_during_initialization("Could not reserve enough space for code cache");
 412   }
 413 
 414   // Register the CodeHeap
 415   MemoryService::add_code_heap_memory_pool(heap, name);
 416 }
 417 
 418 CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
 419   assert(cb != NULL, "CodeBlob is null");
 420   FOR_ALL_HEAPS(heap) {
 421     if ((*heap)->contains_blob(cb)) {
 422       return *heap;
 423     }
 424   }
 425   ShouldNotReachHere();
 426   return NULL;
 427 }
 428 
 429 CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
 430   FOR_ALL_HEAPS(heap) {
 431     if ((*heap)->accepts(code_blob_type)) {
 432       return *heap;
 433     }
 434   }
 435   return NULL;
 436 }
 437 


 585 }
 586 
 587 bool CodeCache::contains(nmethod *nm) {
 588   return contains((void *)nm);
 589 }
 590 
 591 // This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
 592 // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap containing
 593 // valid indices, which it always does as long as the CodeBlob is not in the process of being recycled.
 594 CodeBlob* CodeCache::find_blob(void* start) {
 595   CodeBlob* result = find_blob_unsafe(start);
 596   // We could potentially look up non_entrant methods
 597   guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
 598   return result;
 599 }
 600 
 601 // Lookup that does not fail if you look up a zombie method (if you call this, be sure you know
 602 // what you are doing)
 603 CodeBlob* CodeCache::find_blob_unsafe(void* start) {
 604   // NMT can walk the stack before the code cache is created
 605   if (_heaps != NULL && !_heaps->is_empty()) {
 606     FOR_ALL_HEAPS(heap) {
 607       CodeBlob* result = (*heap)->find_blob_unsafe(start);
 608       if (result != NULL) {
 609         return result;
 610       }
 611     }
 612   }
 613   return NULL;
 614 }
 615 
 616 nmethod* CodeCache::find_nmethod(void* start) {
 617   CodeBlob* cb = find_blob(start);
 618   assert(cb->is_nmethod(), "did not find an nmethod");
 619   return (nmethod*)cb;
 620 }
 621 
 622 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 623   assert_locked_or_safepoint(CodeCache_lock);
 624   FOR_ALL_HEAPS(heap) {
 625     FOR_ALL_BLOBS(cb, *heap) {
 626       f(cb);
 627     }
 628   }
 629 }
 630 

New version:

 398   // Check if heap is needed
 399   if (!heap_available(code_blob_type)) {
 400     return;
 401   }
 402 
 403   // Create CodeHeap
 404   CodeHeap* heap = new CodeHeap(name, code_blob_type);
 405   add_heap(heap);
 406 
 407   // Reserve Space
 408   size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
 409   size_initial = align_up(size_initial, os::vm_page_size());
 410   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
 411     vm_exit_during_initialization("Could not reserve enough space for code cache");
 412   }
 413 
 414   // Register the CodeHeap
 415   MemoryService::add_code_heap_memory_pool(heap, name);
 416 }
 417 
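For context: size_initial is clamped to the reserved space and rounded up to the OS page size before the heap reserves its initial chunk. A minimal sketch of that power-of-two round-up (align_up itself lives in HotSpot's utility headers; this is illustrative only):

    // Illustrative sketch of power-of-two round-up, as used when sizing
    // the initial reservation. Assumes 'alignment' is a power of two.
    static size_t round_up_pow2(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    // e.g. with 4 KiB pages: round_up_pow2(10000, 4096) == 12288 (3 pages)
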
 418 CodeHeap* CodeCache::get_code_heap_containing(void* start) {
 419   assert(start != NULL, "start is null");
 420   FOR_ALL_HEAPS(heap) {
 421     if ((*heap)->contains(start)) {
 422       return *heap;
 423     }
 424   }
 425   return NULL;
 426 }
 427 
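get_code_heap_containing is the new non-virtual lookup introduced by this change: instead of asking every heap to search for a blob, callers resolve the owning heap once with a cheap bounds check and then query only that heap. A sketch of the range check contains() reduces to (member names modeled on CodeHeap; illustrative, not verbatim):

    // Illustrative: containment is two pointer comparisons against the
    // heap's reserved range, so the linear scan over heaps stays cheap.
    bool CodeHeap::contains(const void* p) const {
      return low_boundary() <= p && p < high();
    }
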
 428 CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
 429   assert(cb != NULL, "CodeBlob is null");
 430   FOR_ALL_HEAPS(heap) {
 431     if ((*heap)->contains_blob(cb)) {
 432       return *heap;
 433     }
 434   }
 435   ShouldNotReachHere();
 436   return NULL;
 437 }
 438 
 439 CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
 440   FOR_ALL_HEAPS(heap) {
 441     if ((*heap)->accepts(code_blob_type)) {
 442       return *heap;
 443     }
 444   }
 445   return NULL;
 446 }
 447 
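Both get_code_heap variants walk the _heaps array via FOR_ALL_HEAPS, which is roughly a GrowableArray iteration; hence the *heap dereference in the loop bodies. Approximately (a simplified sketch, not the verbatim macro from the top of codeCache.cpp):

    // Simplified sketch of the iteration macro: 'heap' is an iterator,
    // so the loop body dereferences it to get a CodeHeap*.
    #define FOR_ALL_HEAPS(heap)                                       \
      for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin();   \
           heap != _heaps->end(); ++heap)
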


 595 }
 596 
 597 bool CodeCache::contains(nmethod *nm) {
 598   return contains((void *)nm);
 599 }
 600 
 601 // This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
 602 // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap containing
 603 // valid indices, which it always does as long as the CodeBlob is not in the process of being recycled.
 604 CodeBlob* CodeCache::find_blob(void* start) {
 605   CodeBlob* result = find_blob_unsafe(start);
 606   // We could potentially look up non_entrant methods
 607   guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
 608   return result;
 609 }
 610 
 611 // Lookup that does not fail if you look up a zombie method (if you call this, be sure you know
 612 // what you are doing)
 613 CodeBlob* CodeCache::find_blob_unsafe(void* start) {
 614   // NMT can walk the stack before the code cache is created
 615   if (_heaps != NULL) {
 616     CodeHeap* heap = get_code_heap_containing(start);
 617     if (heap != NULL) {
 618       return heap->find_blob_unsafe(start);
 619     }
 620   }
 621   return NULL;
 622 }
 623 
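Where the old code looped over every heap calling find_blob_unsafe on each, the new code resolves the owning heap once and performs a single lookup inside it. The _segmap referred to in the comment above find_blob records, per fixed-size segment, a chained backward distance to the segment where its block starts; a simplified sketch of that walk (member names modeled on CodeHeap, illustrative rather than verbatim):

    // Illustrative segment-map walk: hop backwards through the segmap
    // until reaching the first segment of the enclosing block.
    void* CodeHeap::find_start(void* p) const {
      if (!contains(p)) return NULL;     // cheap range check first
      size_t i = segment_for(p);         // segment index covering p
      address map = (address)_segmap.low();
      while (map[i] > 0) {
        i -= map[i];                     // follow the backward chain
      }
      HeapBlock* b = block_at(i);
      return b->free() ? NULL : b->allocated_space();
    }
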
 624 nmethod* CodeCache::find_nmethod(void* start) {
 625   CodeBlob* cb = find_blob(start);
 626   assert(cb->is_nmethod(), "did not find an nmethod");
 627   return (nmethod*)cb;
 628 }
 629 
 630 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 631   assert_locked_or_safepoint(CodeCache_lock);
 632   FOR_ALL_HEAPS(heap) {
 633     FOR_ALL_BLOBS(cb, *heap) {
 634       f(cb);
 635     }
 636   }
 637 }
 638 
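blobs_do applies the callback to every blob in every heap and, per the assert, must run while holding CodeCache_lock or at a safepoint. A hypothetical caller (count_blob is made up for illustration):

    // Hypothetical callback: count every blob currently in the cache.
    static int _blob_count = 0;
    static void count_blob(CodeBlob* cb) { _blob_count++; }

    // Under CodeCache_lock (or at a safepoint):
    //   CodeCache::blobs_do(count_blob);
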